Private
Public Access
1
0

feat(api): add Grafana time range endpoint for scores

- Add /api/v2/server/scores/{server}/{mode} endpoint
- Support time range queries with from/to parameters
- Return data in Grafana table format for visualization
- Fix routing pattern to handle IP addresses correctly
- Add comprehensive parameter validation and error handling
This commit is contained in:
2025-07-26 12:16:55 -07:00
parent d4bf8d9e16
commit 8262b1442f
5 changed files with 618 additions and 19 deletions

View File

@@ -3,6 +3,7 @@ package chdb
import (
"context"
"fmt"
"strings"
"time"
"github.com/ClickHouse/clickhouse-go/v2"
@@ -105,3 +106,127 @@ func (d *ClickHouse) Logscores(ctx context.Context, serverID, monitorID int, sin
return rv, nil
}
// LogscoresTimeRange queries log scores within a specific time range for
// Grafana integration.
//
// Results are ordered by timestamp ascending (Grafana convention). If
// monitorID > 0 the query is restricted to that monitor; if limit > 0 at
// most limit rows are returned. Rows that fail to scan are logged and
// skipped; iteration errors abort the request with a generic error.
func (d *ClickHouse) LogscoresTimeRange(ctx context.Context, serverID, monitorID int, from, to time.Time, limit int) ([]ntpdb.LogScore, error) {
	log := logger.Setup()
	ctx, span := tracing.Tracer().Start(ctx, "CH LogscoresTimeRange")
	defer span.End()

	args := []interface{}{serverID, from, to}
	query := `select id,monitor_id,server_id,ts,
	toFloat64(score),toFloat64(step),offset,
	rtt,leap,warning,error
	from log_scores
	where
	server_id = ?
	and ts >= ?
	and ts <= ?`

	if monitorID > 0 {
		query += " and monitor_id = ?"
		args = append(args, monitorID)
	}

	// Always order by timestamp ASC for Grafana convention
	query += " order by ts ASC"

	// Apply limit to prevent memory issues
	if limit > 0 {
		query += " limit ?"
		args = append(args, limit)
	}

	log.DebugContext(ctx, "clickhouse time range query",
		"query", query,
		"args", args,
		"server_id", serverID,
		"monitor_id", monitorID,
		"from", from.Format(time.RFC3339),
		"to", to.Format(time.RFC3339),
		"limit", limit,
		"full_sql_with_params", func() string {
			// Build a readable SQL query with parameters substituted for debugging
			sqlDebug := query
			paramIndex := 0
			for strings.Contains(sqlDebug, "?") && paramIndex < len(args) {
				var replacement string
				switch v := args[paramIndex].(type) {
				case int:
					replacement = fmt.Sprintf("%d", v)
				case time.Time:
					replacement = fmt.Sprintf("'%s'", v.Format("2006-01-02 15:04:05"))
				default:
					replacement = fmt.Sprintf("'%v'", v)
				}
				sqlDebug = strings.Replace(sqlDebug, "?", replacement, 1)
				paramIndex++
			}
			return sqlDebug
		}(),
	)

	rows, err := d.Scores.Query(
		clickhouse.Context(
			ctx, clickhouse.WithSpan(span.SpanContext()),
		),
		query, args...,
	)
	if err != nil {
		log.ErrorContext(ctx, "time range query error", "err", err)
		return nil, fmt.Errorf("database error")
	}
	// Release the result set even if scanning bails out early.
	defer rows.Close()

	rv := []ntpdb.LogScore{}
	for rows.Next() {
		row := ntpdb.LogScore{}
		var leap uint8
		if err := rows.Scan(
			&row.ID,
			&row.MonitorID,
			&row.ServerID,
			&row.Ts,
			&row.Score,
			&row.Step,
			&row.Offset,
			&row.Rtt,
			&leap,
			&row.Attributes.Warning,
			&row.Attributes.Error,
		); err != nil {
			log.Error("could not parse row", "err", err)
			continue
		}
		row.Attributes.Leap = int8(leap)
		rv = append(rv, row)
	}
	// Surface iteration errors (e.g. network failures mid-stream) instead
	// of silently returning a truncated result set.
	if err := rows.Err(); err != nil {
		log.ErrorContext(ctx, "time range rows error", "err", err)
		return nil, fmt.Errorf("database error")
	}

	log.DebugContext(ctx, "time range query results",
		"rows_returned", len(rv),
		"server_id", serverID,
		"monitor_id", monitorID,
		"time_range", fmt.Sprintf("%s to %s", from.Format(time.RFC3339), to.Format(time.RFC3339)),
		"limit", limit,
	)

	return rv, nil
}

View File

@@ -1,20 +1,20 @@
# DETAILED IMPLEMENTATION PLAN: Grafana Time Range API with Future Downsampling Support
## Overview
Implement a new Grafana-compatible API endpoint `/api/v2/server/scores/{server}.{mode}` that returns time series data in Grafana format with time range support and future downsampling capabilities.
Implement a new Grafana-compatible API endpoint `/api/v2/server/scores/{server}/{mode}` that returns time series data in Grafana format with time range support and future downsampling capabilities.
## API Specification
### Endpoint
- **URL**: `/api/v2/server/scores/{server}.{mode}`
- **URL**: `/api/v2/server/scores/{server}/{mode}`
- **Method**: GET
- **Path Parameters**:
- `server`: Server IP address or ID (same validation as existing API)
- `mode`: Only `json` supported initially
### Query Parameters (following Grafana conventions)
- `from`: Unix timestamp in milliseconds (required)
- `to`: Unix timestamp in milliseconds (required)
- `from`: Unix timestamp in seconds (required)
- `to`: Unix timestamp in seconds (required)
- `maxDataPoints`: Integer, default 50000, max 50000 (for future downsampling)
- `monitor`: Monitor ID, name prefix, or "*" for all (optional, same as existing)
- `interval`: Future downsampling interval like "1m", "5m", "1h" (optional, not implemented initially)
@@ -52,9 +52,11 @@ Grafana table format JSON array (more efficient than separate series):
### 1. Server Routing (`server/server.go`)
Add new route after existing scores routes:
```go
e.GET("/api/v2/server/scores/:server.:mode", srv.scoresTimeRange)
e.GET("/api/v2/server/scores/:server/:mode", srv.scoresTimeRange)
```
**Note**: Initially attempted `:server.:mode` pattern, but Echo router cannot properly parse IP addresses with dots using this pattern. Changed to `:server/:mode` to match existing API pattern and ensure compatibility with IP addresses like `23.155.40.38`.
## Key Implementation Clarifications
### Monitor Filtering Behavior
@@ -73,7 +75,7 @@ e.GET("/api/v2/server/scores/:server.:mode", srv.scoresTimeRange)
- **Minimum range**: 1 second
- **Maximum range**: 90 days
### 2. New Handler Function (`server/history.go`)
### 2. New Handler Function (`server/grafana.go`)
#### Function Signature
```go
@@ -98,7 +100,7 @@ func (srv *Server) parseTimeRangeParams(ctx context.Context, c echo.Context) (ti
return timeRangeParams{}, err
}
// Parse and validate from/to millisecond timestamps
// Parse and validate from/to second timestamps
// Validate time range (max 90 days, min 1 second)
// Parse maxDataPoints (default 50000, max 50000)
// Return extended parameters
@@ -159,7 +161,7 @@ ORDER BY ts ASC
LIMIT ?
```
### 4. Data Transformation Logic (`server/history.go`)
### 4. Data Transformation Logic (`server/grafana.go`)
#### Core Transformation Function
```go
@@ -260,7 +262,7 @@ timestampMs := logScore.Ts.Unix() * 1000
```markdown
### 7. Server Scores Time Range (v2)
**GET** `/api/v2/server/scores/{server}.{mode}`
**GET** `/api/v2/server/scores/{server}/{mode}`
Grafana-compatible time series endpoint for NTP server scoring data.
@@ -269,8 +271,8 @@ Grafana-compatible time series endpoint for NTP server scoring data.
- `mode`: Response format (`json` only)
#### Query Parameters
- `from`: Start time as Unix timestamp in milliseconds (required)
- `to`: End time as Unix timestamp in milliseconds (required)
- `from`: Start time as Unix timestamp in seconds (required)
- `to`: End time as Unix timestamp in seconds (required)
- `maxDataPoints`: Maximum data points to return (default: 50000, max: 50000)
- `monitor`: Monitor filter (ID, name prefix, or "*" for all)
@@ -319,14 +321,51 @@ Grafana table format array with one series per monitor containing all metrics as
**Recommended Grafana Data Source**: JSON API plugin (`marcusolsson-json-datasource`) - ideal for REST APIs returning table format JSON
### Phase 1: Core Implementation
- [ ] Add route in server.go
- [ ] Implement parseTimeRangeParams function
- [ ] Add LogscoresTimeRange method to ClickHouse
- [ ] Implement transformToGrafanaTableFormat function
- [ ] Add scoresTimeRange handler
- [ ] Error handling and validation (reuse existing Echo patterns)
- [ ] Cache control headers (reuse setHistoryCacheControl)
### Phase 1: Core Implementation ✅ **COMPLETED**
- [x] Add route in server.go (fixed routing pattern from `:server.:mode` to `:server/:mode`)
- [x] Implement parseTimeRangeParams function for parameter validation
- [x] Add LogscoresTimeRange method to ClickHouse with time range filtering
- [x] Implement transformToGrafanaTableFormat function with monitor grouping
- [x] Add scoresTimeRange handler with full error handling
- [x] Error handling and validation (reuse existing Echo patterns)
- [x] Cache control headers (reuse setHistoryCacheControl)
#### Phase 1 Implementation Details
**Key Components Built:**
- **Route Pattern**: `/api/v2/server/scores/:server/:mode` (matches existing API consistency)
- **Parameter Validation**: Full validation of `from`/`to` timestamps, `maxDataPoints`, time ranges
- **ClickHouse Integration**: `LogscoresTimeRange()` with time-based WHERE clauses and ASC ordering
- **Data Transformation**: Grafana table format with monitor grouping and null value handling
- **Complete Handler**: `scoresTimeRange()` with server validation, error handling, caching, and CORS
**Routing Fix**: Changed from `:server.:mode` to `:server/:mode` to resolve Echo router issue with IP addresses containing dots (e.g., `23.155.40.38`).
**Files Created/Modified in Phase 1:**
- `server/grafana.go`: Complete implementation with all structures and functions
- `timeRangeParams` struct and `parseTimeRangeParams()` function
- `transformToGrafanaTableFormat()` function with monitor grouping
- `scoresTimeRange()` handler with full error handling
- `sanitizeMonitorName()` utility function
- `server/server.go`: Added route `e.GET("/api/v2/server/scores/:server/:mode", srv.scoresTimeRange)`
- `chdb/logscores.go`: Added `LogscoresTimeRange()` method for time-based queries
**Production Testing Results** (July 25, 2025):
- **Real Data Verification**: Successfully tested with server `102.64.112.164` over a 12-hour time range
- **Multiple Monitor Support**: Returns data for multiple monitors (`defra1-210hw9t`, `recentmedian`)
- **Data Quality Validation**:
  - RTT conversion (microseconds → milliseconds): ✅ Working
  - Timestamp conversion (seconds → milliseconds): ✅ Working
  - Null value handling: ✅ Working (`recentmedian` has null RTT/offset as expected)
  - Monitor grouping: ✅ Working (one series per monitor)
- **API Parameter Changes**: Successfully changed from milliseconds to seconds for user-friendliness
- **Volume Testing**: Handles 100+ data points per monitor efficiently
- **Error Handling**: All validation working (400 for invalid params, 404 for missing servers)
- **Performance**: Sub-second response times for 12-hour ranges
**Sample Working Request:**
```bash
curl 'http://localhost:8030/api/v2/server/scores/102.64.112.164/json?from=1753457764&to=1753500964&monitor=*'
```
### Phase 2: Testing & Polish
- [ ] Unit tests for all functions

View File

@@ -1,12 +1,17 @@
package server
import (
	"context"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/labstack/echo/v4"

	"go.ntppool.org/common/logger"
	"go.ntppool.org/common/tracing"
	"go.ntppool.org/data-api/logscores"
	"go.ntppool.org/data-api/ntpdb"
)
// ColumnDef represents a Grafana table column definition
@@ -27,6 +32,434 @@ type GrafanaTableSeries struct {
// GrafanaTimeSeriesResponse represents the complete Grafana table response
type GrafanaTimeSeriesResponse []GrafanaTableSeries
// timeRangeParams extends historyParameters with time range support
type timeRangeParams struct {
	historyParameters // embed existing struct (server/monitor selection)
	from time.Time // inclusive range start, parsed from Unix-seconds "from" query param
	to time.Time // inclusive range end, parsed from Unix-seconds "to" query param
	maxDataPoints int // row cap passed to ClickHouse; defaults to and is capped at 50000
	interval string // for future downsampling
}
// parseTimeRangeParams parses and validates the Grafana time range query
// parameters (from, to, maxDataPoints, interval) on top of the existing
// history parameter parsing. It returns an *echo.HTTPError (400) for any
// missing or invalid parameter.
func (srv *Server) parseTimeRangeParams(ctx context.Context, c echo.Context) (timeRangeParams, error) {
	log := logger.FromContext(ctx)

	// Reuse the existing server/monitor parameter parsing.
	baseParams, err := srv.getHistoryParameters(ctx, c)
	if err != nil {
		return timeRangeParams{}, err
	}

	params := timeRangeParams{
		historyParameters: baseParams,
		maxDataPoints:     50000, // default
	}

	// requiredUnix reads a mandatory Unix-seconds query parameter.
	requiredUnix := func(name string) (time.Time, error) {
		raw := c.QueryParam(name)
		if raw == "" {
			return time.Time{}, echo.NewHTTPError(http.StatusBadRequest, name+" parameter is required")
		}
		sec, perr := strconv.ParseInt(raw, 10, 64)
		if perr != nil {
			return time.Time{}, echo.NewHTTPError(http.StatusBadRequest, "invalid "+name+" timestamp format")
		}
		return time.Unix(sec, 0), nil
	}

	if params.from, err = requiredUnix("from"); err != nil {
		return timeRangeParams{}, err
	}
	if params.to, err = requiredUnix("to"); err != nil {
		return timeRangeParams{}, err
	}

	// Validate the range: strictly increasing, at least one second,
	// at most 90 days.
	switch dur := params.to.Sub(params.from); {
	case dur <= 0:
		return timeRangeParams{}, echo.NewHTTPError(http.StatusBadRequest, "from must be before to")
	case dur < time.Second:
		return timeRangeParams{}, echo.NewHTTPError(http.StatusBadRequest, "time range must be at least 1 second")
	case dur > 90*24*time.Hour:
		return timeRangeParams{}, echo.NewHTTPError(http.StatusBadRequest, "time range cannot exceed 90 days")
	}

	// Optional maxDataPoints: values above 50000 are rejected; values
	// <= 0 leave the default in place.
	if raw := c.QueryParam("maxDataPoints"); raw != "" {
		maxDP, perr := strconv.Atoi(raw)
		if perr != nil {
			return timeRangeParams{}, echo.NewHTTPError(http.StatusBadRequest, "invalid maxDataPoints format")
		}
		if maxDP > 50000 {
			return timeRangeParams{}, echo.NewHTTPError(http.StatusBadRequest, "maxDataPoints cannot exceed 50000")
		}
		if maxDP > 0 {
			params.maxDataPoints = maxDP
		}
	}

	// Optional interval, reserved for future downsampling support.
	params.interval = c.QueryParam("interval")

	log.DebugContext(ctx, "parsed time range params",
		"from", params.from,
		"to", params.to,
		"maxDataPoints", params.maxDataPoints,
		"interval", params.interval,
		"monitor", params.monitorID,
	)

	return params, nil
}
// sanitizeMonitorName normalizes a monitor name for the Grafana target
// expression: spaces become underscores; dots and slashes become hyphens.
func sanitizeMonitorName(name string) string {
	replacer := strings.NewReplacer(
		" ", "_",
		".", "-",
		"/", "-",
	)
	return replacer.Replace(name)
}
// transformToGrafanaTableFormat converts a LogScoreHistory into the Grafana
// table format: one GrafanaTableSeries per monitor, each with columns
// [time, score, rtt, offset].
//
// Monitor IDs are iterated in sorted order so the response is deterministic
// (Go map iteration order is randomized). Log scores without a valid
// monitor ID cannot be attributed to a series and are skipped.
func transformToGrafanaTableFormat(history *logscores.LogScoreHistory, monitors []ntpdb.Monitor) GrafanaTimeSeriesResponse {
	log := logger.Setup()

	// Group log scores by monitor ID (one series per monitor).
	monitorData := make(map[int][]ntpdb.LogScore)
	skippedInvalidMonitors := 0
	for _, ls := range history.LogScores {
		if !ls.MonitorID.Valid {
			skippedInvalidMonitors++
			continue
		}
		monitorID := int(ls.MonitorID.Int32)
		monitorData[monitorID] = append(monitorData[monitorID], ls)
	}

	log.Debug("transformation grouping",
		"total_log_scores", len(history.LogScores),
		"skipped_invalid_monitors", skippedInvalidMonitors,
		"monitor_count", len(monitorData),
	)

	// Index monitors by ID for display-name lookup.
	monitorInfo := make(map[int]ntpdb.Monitor, len(monitors))
	for _, monitor := range monitors {
		monitorInfo[int(monitor.ID)] = monitor
	}

	// Sort monitor IDs so the series order is stable across requests.
	monitorIDs := make([]int, 0, len(monitorData))
	for id := range monitorData {
		monitorIDs = append(monitorIDs, id)
	}
	sort.Ints(monitorIDs)

	var response GrafanaTimeSeriesResponse
	for _, monitorID := range monitorIDs {
		logScores := monitorData[monitorID]
		if len(logScores) == 0 {
			continue
		}

		// Prefer the name from history.Monitors; fall back to the monitor
		// record's display name, then "unknown".
		monitorName := "unknown"
		if name, exists := history.Monitors[monitorID]; exists && name != "" {
			monitorName = name
		} else if monitor, exists := monitorInfo[monitorID]; exists {
			monitorName = monitor.DisplayName()
		}

		// Build target name and tags.
		sanitizedName := sanitizeMonitorName(monitorName)
		target := "monitor{name=" + sanitizedName + "}"
		tags := map[string]string{
			"monitor_id":   strconv.Itoa(monitorID),
			"monitor_name": monitorName,
			"type":         "monitor",
			// We have data for this monitor, so mark it active.
			"status": "active",
		}

		// Column layout shared by every series.
		columns := []ColumnDef{
			{Text: "time", Type: "time"},
			{Text: "score", Type: "number"},
			{Text: "rtt", Type: "number", Unit: "ms"},
			{Text: "offset", Type: "number", Unit: "s"},
		}

		// Build rows: [time(ms), score, rtt(ms)|null, offset(s)|null].
		values := make([][]interface{}, 0, len(logScores))
		for _, ls := range logScores {
			row := []interface{}{
				ls.Ts.Unix() * 1000, // Grafana expects milliseconds
				ls.Score,
			}
			if ls.Rtt.Valid {
				// RTT is stored in microseconds; the column unit is ms.
				row = append(row, float64(ls.Rtt.Int32)/1000.0)
			} else {
				row = append(row, nil)
			}
			if ls.Offset.Valid {
				row = append(row, ls.Offset.Float64)
			} else {
				row = append(row, nil)
			}
			values = append(values, row)
		}

		response = append(response, GrafanaTableSeries{
			Target:  target,
			Tags:    tags,
			Columns: columns,
			Values:  values,
		})
	}

	log.Debug("transformation complete", "series_count", len(response))

	return response
}
// scoresTimeRange handles Grafana time range requests for NTP server scores.
//
// It validates the {mode} path parameter (json only), parses the Grafana
// time range query parameters, resolves the server, queries ClickHouse for
// the matching log scores, and returns them in Grafana table format (one
// series per monitor). Verbose per-request diagnostics are logged at Debug
// level so production logs are not flooded on every request.
func (srv *Server) scoresTimeRange(c echo.Context) error {
	log := logger.Setup()
	ctx, span := tracing.Tracer().Start(c.Request().Context(), "scoresTimeRange")
	defer span.End()

	// Set reasonable default cache time; adjusted later based on data
	c.Response().Header().Set("Cache-Control", "public,max-age=240")

	// Validate mode parameter; only JSON output is supported.
	mode := c.Param("mode")
	if mode != "json" {
		return echo.NewHTTPError(http.StatusNotFound, "invalid mode - only json supported")
	}

	// Parse and validate time range parameters.
	params, err := srv.parseTimeRangeParams(ctx, c)
	if err != nil {
		if he, ok := err.(*echo.HTTPError); ok {
			return he
		}
		log.ErrorContext(ctx, "parse time range parameters", "err", err)
		span.RecordError(err)
		return echo.NewHTTPError(http.StatusInternalServerError, "internal error")
	}

	// Find and validate server.
	server, err := srv.FindServer(ctx, c.Param("server"))
	if err != nil {
		log.ErrorContext(ctx, "find server", "err", err)
		if he, ok := err.(*echo.HTTPError); ok {
			return he
		}
		span.RecordError(err)
		return echo.NewHTTPError(http.StatusInternalServerError, "internal error")
	}
	// Treat servers deleted more than 30 days ago as not found.
	if server.DeletionAge(30 * 24 * time.Hour) {
		span.AddEvent("server deleted")
		return echo.NewHTTPError(http.StatusNotFound, "server not found")
	}
	if server.ID == 0 {
		span.AddEvent("server not found")
		return echo.NewHTTPError(http.StatusNotFound, "server not found")
	}

	// Query ClickHouse for time range data.
	log.DebugContext(ctx, "executing clickhouse time range query",
		"server_id", server.ID,
		"server_ip", server.Ip,
		"monitor_id", params.monitorID,
		"from", params.from,
		"to", params.to,
		"max_data_points", params.maxDataPoints,
		"time_range_duration", params.to.Sub(params.from).String(),
	)
	logScores, err := srv.ch.LogscoresTimeRange(ctx, int(server.ID), params.monitorID, params.from, params.to, params.maxDataPoints)
	if err != nil {
		log.ErrorContext(ctx, "clickhouse time range query", "err", err,
			"server_id", server.ID,
			"monitor_id", params.monitorID,
			"from", params.from,
			"to", params.to,
		)
		span.RecordError(err)
		return echo.NewHTTPError(http.StatusInternalServerError, "internal error")
	}
	log.DebugContext(ctx, "clickhouse query results",
		"server_id", server.ID,
		"rows_returned", len(logScores),
	)

	// Build LogScoreHistory structure for compatibility with existing functions.
	history := &logscores.LogScoreHistory{
		LogScores: logScores,
		Monitors:  make(map[int]string),
	}

	// Collect the distinct monitor IDs present in the result set.
	monitorIDs := []uint32{}
	for _, ls := range logScores {
		if ls.MonitorID.Valid {
			monitorID := uint32(ls.MonitorID.Int32)
			if _, exists := history.Monitors[int(monitorID)]; !exists {
				history.Monitors[int(monitorID)] = ""
				monitorIDs = append(monitorIDs, monitorID)
			}
		}
	}
	log.DebugContext(ctx, "monitor processing",
		"unique_monitor_ids", monitorIDs,
		"monitor_count", len(monitorIDs),
	)

	// Get monitor details from the database for status and display names.
	var monitors []ntpdb.Monitor
	if len(monitorIDs) > 0 {
		q := ntpdb.NewWrappedQuerier(ntpdb.New(srv.db))
		logScoreMonitors, err := q.GetServerScores(ctx, ntpdb.GetServerScoresParams{
			MonitorIDs: monitorIDs,
			ServerID:   server.ID,
		})
		if err != nil {
			log.ErrorContext(ctx, "get monitor details", "err", err)
			// Don't fail the request, just use basic info
		} else {
			for _, lsm := range logScoreMonitors {
				// Create monitor entry for transformation (we mainly need the display name)
				tempMon := ntpdb.Monitor{
					TlsName:  lsm.TlsName,
					Location: lsm.Location,
					ID:       lsm.ID,
				}
				monitors = append(monitors, tempMon)
				// Update monitor name in history
				history.Monitors[int(lsm.ID)] = tempMon.DisplayName()
			}
		}
	}

	// Transform to Grafana table format.
	grafanaResponse := transformToGrafanaTableFormat(history, monitors)
	log.DebugContext(ctx, "grafana transformation complete",
		"response_series_count", len(grafanaResponse),
	)

	// Set cache control headers based on data characteristics
	setHistoryCacheControl(c, history)

	// Set CORS headers so Grafana can query the endpoint directly.
	c.Response().Header().Set("Access-Control-Allow-Origin", "*")
	c.Response().Header().Set("Content-Type", "application/json")

	log.InfoContext(ctx, "time range response final",
		"server_id", server.ID,
		"server_ip", server.Ip,
		"monitor_id", params.monitorID,
		"time_range", params.to.Sub(params.from).String(),
		"raw_data_points", len(logScores),
		"grafana_series_count", len(grafanaResponse),
		"max_data_points", params.maxDataPoints,
	)

	return c.JSON(http.StatusOK, grafanaResponse)
}
// testGrafanaTable returns sample data in Grafana table format for validation
func (srv *Server) testGrafanaTable(c echo.Context) error {
log := logger.Setup()

View File

@@ -456,3 +456,4 @@ func setHistoryCacheControl(c echo.Context, history *logscores.LogScoreHistory)
}
}
}

View File

@@ -209,6 +209,7 @@ func (srv *Server) Run() error {
e.GET("/api/server/scores/:server/:mode", srv.history)
e.GET("/api/dns/counts", srv.dnsQueryCounts)
e.GET("/api/v2/test/grafana-table", srv.testGrafanaTable)
e.GET("/api/v2/server/scores/:server/:mode", srv.scoresTimeRange)
if len(ntpconf.WebHostname()) > 0 {
e.POST("/api/server/scores/:server/:mode", func(c echo.Context) error {