Private
Public Access
1
0

2 Commits

Author SHA1 Message Date
a22d5ebc7e feat(api): add RTT data to history endpoints
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
- Add RTT column to CSV output (before leap column)
- Add RTT field to JSON ScoresEntry
- Add avg_rtt field to JSON MonitorEntry
- Convert RTT from microseconds to milliseconds
- Calculate average RTT per monitor from history data
2025-07-04 09:41:17 -07:00
42ce22e83e adjust cache-control for history api
All checks were successful
continuous-integration/drone/push Build is passing
it seems like there's a bug in the data calculations, so many
servers get the maximum cache time, which is too long; make it shorter
while we debug
2025-06-27 17:48:40 +08:00
2 changed files with 46 additions and 12 deletions

View File

@@ -469,14 +469,17 @@ CREATE TABLE `server_scores` (
`score_ts` datetime DEFAULT NULL,
`score_raw` double NOT NULL DEFAULT '0',
`stratum` tinyint unsigned DEFAULT NULL,
`status` enum('new','testing','active') NOT NULL DEFAULT 'new',
`status` enum('new','candidate','testing','active') NOT NULL DEFAULT 'new',
`queue_ts` datetime DEFAULT NULL,
`created_on` datetime NOT NULL,
`modified_on` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`constraint_violation_type` varchar(50) DEFAULT NULL,
`constraint_violation_since` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `server_id` (`server_id`,`monitor_id`),
KEY `monitor_id` (`monitor_id`,`server_id`),
KEY `monitor_id_2` (`monitor_id`,`score_ts`),
KEY `idx_constraint_violation` (`constraint_violation_type`,`constraint_violation_since`),
CONSTRAINT `server_score_monitor_fk` FOREIGN KEY (`monitor_id`) REFERENCES `monitors` (`id`),
CONSTRAINT `server_score_server_id` FOREIGN KEY (`server_id`) REFERENCES `servers` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3;
@@ -873,4 +876,4 @@ CREATE TABLE `zones` (
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*M!100616 SET NOTE_VERBOSITY=@OLD_NOTE_VERBOSITY */;
-- Dump completed on 2025-06-21 2:40:11
-- Dump completed on 2025-06-27 9:46:22

View File

@@ -237,6 +237,7 @@ func (srv *Server) historyJSON(ctx context.Context, c echo.Context, server ntpdb
Step float64 `json:"step"`
Score float64 `json:"score"`
MonitorID int `json:"monitor_id"`
Rtt *float64 `json:"rtt,omitempty"`
}
type MonitorEntry struct {
@@ -246,6 +247,7 @@ func (srv *Server) historyJSON(ctx context.Context, c echo.Context, server ntpdb
Ts string `json:"ts"`
Score float64 `json:"score"`
Status string `json:"status"`
AvgRtt *float64 `json:"avg_rtt,omitempty"`
}
res := struct {
History []ScoresEntry `json:"history"`
@@ -280,11 +282,23 @@ func (srv *Server) historyJSON(ctx context.Context, c echo.Context, server ntpdb
// log.InfoContext(ctx, "got logScoreMonitors", "count", len(logScoreMonitors))
// Calculate average RTT per monitor
monitorRttSums := make(map[uint32]float64)
monitorRttCounts := make(map[uint32]int)
for _, ls := range history.LogScores {
if ls.MonitorID.Valid && ls.Rtt.Valid {
monitorID := uint32(ls.MonitorID.Int32)
monitorRttSums[monitorID] += float64(ls.Rtt.Int32) / 1000.0
monitorRttCounts[monitorID]++
}
}
for _, lsm := range logScoreMonitors {
score := math.Round(lsm.ScoreRaw*10) / 10 // round to one decimal
tempMon := ntpdb.Monitor{
Hostname: lsm.Hostname,
// Hostname: lsm.Hostname,
TlsName: lsm.TlsName,
Location: lsm.Location,
ID: lsm.ID,
@@ -299,6 +313,13 @@ func (srv *Server) historyJSON(ctx context.Context, c echo.Context, server ntpdb
Score: score,
Status: string(lsm.Status),
}
// Add average RTT if available
if count, exists := monitorRttCounts[lsm.ID]; exists && count > 0 {
avgRtt := monitorRttSums[lsm.ID] / float64(count)
me.AvgRtt = &avgRtt
}
res.Monitors = append(res.Monitors, me)
}
@@ -315,6 +336,10 @@ func (srv *Server) historyJSON(ctx context.Context, c echo.Context, server ntpdb
offset := ls.Offset.Float64
res.History[i].Offset = &offset
}
if ls.Rtt.Valid {
rtt := float64(ls.Rtt.Int32) / 1000.0
res.History[i].Rtt = &rtt
}
}
setHistoryCacheControl(c, history)
@@ -337,7 +362,7 @@ func (srv *Server) historyCSV(ctx context.Context, c echo.Context, history *logs
return s
}
err := w.Write([]string{"ts_epoch", "ts", "offset", "step", "score", "monitor_id", "monitor_name", "leap", "error"})
err := w.Write([]string{"ts_epoch", "ts", "offset", "step", "score", "monitor_id", "monitor_name", "rtt", "leap", "error"})
if err != nil {
log.ErrorContext(ctx, "could not write csv header", "err", err)
return err
@@ -361,6 +386,11 @@ func (srv *Server) historyCSV(ctx context.Context, c echo.Context, history *logs
leap = fmt.Sprintf("%d", l.Attributes.Leap)
}
var rtt string
if l.Rtt.Valid {
rtt = ff(float64(l.Rtt.Int32) / 1000.0)
}
err := w.Write([]string{
strconv.Itoa(int(l.Ts.Unix())),
// l.Ts.Format(time.RFC3339),
@@ -370,6 +400,7 @@ func (srv *Server) historyCSV(ctx context.Context, c echo.Context, history *logs
score,
fmt.Sprintf("%d", l.MonitorID.Int32),
monName,
rtt,
leap,
l.Attributes.Error,
})
@@ -399,12 +430,12 @@ func setHistoryCacheControl(c echo.Context, history *logscores.LogScoreHistory)
// cache for longer if data hasn't updated for a while; or we didn't
// find any.
(time.Now().Add(-8 * time.Hour).After(history.LogScores[len(history.LogScores)-1].Ts)) {
hdr.Set("Cache-Control", "s-maxage=3600,max-age=1800")
hdr.Set("Cache-Control", "s-maxage=260,max-age=360")
} else {
if len(history.LogScores) == 1 {
hdr.Set("Cache-Control", "s-maxage=60,max-age=35")
} else {
hdr.Set("Cache-Control", "s-maxage=240,max-age=120")
hdr.Set("Cache-Control", "s-maxage=90,max-age=120")
}
}
}