fix: store new metrics in buffer before resetting

Gathering Borg repo information can take a long time.
Instead of resetting the old metrics up front, we now collect the new
values into a buffer and apply them all at once at the end of the scan,
so scrapes that happen during collection do not see a partially reset
metric set.
Valentin Doreau 2024-04-21 15:22:29 +02:00
parent 064444db38
commit f0f68e485f
Signed by: vdoreau
GPG key ID: F3E456CF9A14098B
3 changed files with 46 additions and 11 deletions
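
In outline, the loop now gathers values into a slice first and only swaps the live gauges once the whole scan is done. Below is a minimal sketch of that reset-then-repopulate pattern using the same prometheus client the exporter already imports; the gauge name, help text, and the sample type are illustrative only, while the exporter's real container and updater are the MetricsBuffer struct and Metrics.Update shown in the diffs that follow.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    )

    // sample holds one repo's values until the whole scan has finished.
    // The type and field names are illustrative; the exporter's real
    // container is the MetricsBuffer struct added in buffer.go below.
    type sample struct {
    	repo  string
    	count float64
    }

    // apply resets the gauge vector and repopulates it in a single pass,
    // so a scrape never observes the gauges while they are empty.
    func apply(g *prometheus.GaugeVec, samples []sample) {
    	g.Reset()
    	for _, s := range samples {
    		g.With(prometheus.Labels{"repo_name": s.repo}).Set(s.count)
    	}
    }

    func main() {
    	// Hypothetical gauge, registered the same way the exporter's gauges are.
    	archiveCount := prometheus.NewGaugeVec(
    		prometheus.GaugeOpts{Name: "borg_archive_count", Help: "Number of archives per repo."},
    		[]string{"repo_name"},
    	)
    	prometheus.MustRegister(archiveCount)

    	// The slow gathering phase would fill this slice; here it is hard-coded.
    	buffered := []sample{{repo: "example-repo", count: 3}}
    	apply(archiveCount, buffered)
    	fmt.Println("gauges updated in one pass")
    }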

buffer.go (new file, 14 additions)

@@ -0,0 +1,14 @@
+package main
+
+type MetricsBuffer struct {
+	RepoName          string
+	ArchiveCount      float64
+	LastArchiveTime   float64
+	LastModified      float64
+	TotalChunks       float64
+	TotalCsize        float64
+	TotalSize         float64
+	TotalUniqueChunks float64
+	UniqueCsize       float64
+	UniqueSize        float64
+}

main.go (25 changed lines)

@@ -57,13 +57,12 @@ func main() {
 func RecordMetrics(m Metrics) {
 	for {
-		m.Reset()
 		entries, err := os.ReadDir(*backupDir)
 		if err != nil {
 			log.Fatalln(err)
 		}
+		buffer := []MetricsBuffer{}
 		for _, entry := range entries {
 			if !entry.IsDir() || strings.HasPrefix(entry.Name(), ".") {
 				log.Printf(">> Ignoring %v\n", entry.Name())
@@ -86,17 +85,21 @@ func RecordMetrics(m Metrics) {
 			stats := info.Cache.Stats
 			log.Printf("> Got info for: %v\n", path)
-			m.ArchiveCount.With(prometheus.Labels{"repo_name": entry.Name()}).Set(float64(len(list.Archives)))
-			m.LastArchiveTime.With(prometheus.Labels{"repo_name": entry.Name()}).Set(list.LastArchiveUnix())
-			m.LastModified.With(prometheus.Labels{"repo_name": entry.Name()}).Set(info.LastmodUnix())
-			m.TotalChunks.With(prometheus.Labels{"repo_name": entry.Name()}).Set(stats.Total_chunks)
-			m.TotalCsize.With(prometheus.Labels{"repo_name": entry.Name()}).Set(stats.Total_csize)
-			m.TotalSize.With(prometheus.Labels{"repo_name": entry.Name()}).Set(stats.Total_size)
-			m.TotalUniqueChunks.With(prometheus.Labels{"repo_name": entry.Name()}).Set(stats.Total_unique_chunks)
-			m.UniqueCsize.With(prometheus.Labels{"repo_name": entry.Name()}).Set(stats.Unique_csize)
-			m.UniqueSize.With(prometheus.Labels{"repo_name": entry.Name()}).Set(stats.Unique_size)
+			buffer = append(buffer, MetricsBuffer{
+				RepoName:          entry.Name(),
+				ArchiveCount:      float64(len(list.Archives)),
+				LastArchiveTime:   list.LastArchiveUnix(),
+				LastModified:      info.LastmodUnix(),
+				TotalChunks:       stats.Total_chunks,
+				TotalCsize:        stats.Total_csize,
+				TotalSize:         stats.Total_size,
+				TotalUniqueChunks: stats.Total_unique_chunks,
+				UniqueCsize:       stats.Unique_csize,
+				UniqueSize:        stats.Unique_size,
+			})
 		}
+		m.Update(buffer)
 		log.Printf("> Waiting %v\n", INTERVAL)
 		time.Sleep(INTERVAL)
 	}

(third changed file; filename not shown in this view)

@@ -56,3 +56,21 @@ func (m *Metrics) Reset() {
 	m.UniqueCsize.Reset()
 	m.UniqueSize.Reset()
 }
+
+// Update these metrics with the given buffer.
+//
+// Resets the metrics beforehand. Expected to be called on every loop iteration.
+func (m *Metrics) Update(buffer []MetricsBuffer) {
+	m.Reset()
+	for _, buf := range buffer {
+		m.ArchiveCount.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.ArchiveCount)
+		m.LastArchiveTime.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.LastArchiveTime)
+		m.LastModified.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.LastModified)
+		m.TotalChunks.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.TotalChunks)
+		m.TotalCsize.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.TotalCsize)
+		m.TotalSize.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.TotalSize)
+		m.TotalUniqueChunks.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.TotalUniqueChunks)
+		m.UniqueCsize.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.UniqueCsize)
+		m.UniqueSize.With(prometheus.Labels{"repo_name": buf.RepoName}).Set(buf.UniqueSize)
+	}
+}