This repository was archived by the owner on Aug 9, 2023. It is now read-only.

Commit f3ae662

Author: Bjørn
Log duration on scrape failures (#54)
Currently we only log the duration of successful data collections, not of failures. This makes it hard to understand the nature of errors such as "context deadline exceeded". This change moves the duration tracking up so that the elapsed time is also available for the error log, giving better insight into how long a failed collection took.
1 parent ae0e38e commit f3ae662

File tree

1 file changed (+2, -2 lines)


main.go (+2, -2)
@@ -287,8 +287,9 @@ func collect(ctx context.Context, client *client, organization org) ([]gaugeResu
 	for _, project := range projects.Projects {
 		start := time.Now()
 		issues, err := client.getIssues(organization.ID, project.ID)
+		duration := time.Since(start)
 		if err != nil {
-			log.Errorf("Failed to get issues for organization %s (%s) and project %s (%s): %v", organization.Name, organization.ID, project.Name, project.ID, err)
+			log.Errorf("Failed to get issues for organization %s (%s) and project %s (%s): duration %v: %v", organization.Name, organization.ID, project.Name, project.ID, duration, err)
 			continue
 		}
 		results := aggregateIssues(issues.Issues)
@@ -298,7 +299,6 @@ func collect(ctx context.Context, client *client, organization org) ([]gaugeResu
 			results: results,
 			isMonitored: project.IsMonitored,
 		})
-		duration := time.Since(start)
 		log.Debugf("Collected data in %v for %s %s", duration, project.ID, project.Name)
 		// stop right away in case of the context being cancelled. This ensures that
 		// we don't wait for a complete collect run for all projects before
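For illustration, here is a minimal, self-contained sketch of the pattern this commit introduces: the elapsed time is captured immediately after the request, so it can be logged on both the failure and the success path. The fakeClient type, the simplified getIssues signature, and the use of the standard library log package instead of the exporter's log.Errorf/Debugf are assumptions for the example; only the duration-tracking pattern is taken from the diff above.

package main

import (
	"errors"
	"log"
	"time"
)

// fakeClient is a hypothetical stand-in for the exporter's client type; only
// the duration-tracking pattern around getIssues mirrors the real code.
type fakeClient struct{}

func (fakeClient) getIssues(orgID, projectID string) ([]string, error) {
	time.Sleep(25 * time.Millisecond) // simulate an API call
	if projectID == "broken" {
		return nil, errors.New("context deadline exceeded")
	}
	return []string{"ISSUE-1", "ISSUE-2"}, nil
}

// collectProject captures the duration immediately after the call so it is
// available to both the error log and the success log, as in the commit.
func collectProject(c fakeClient, orgID, projectID string) {
	start := time.Now()
	issues, err := c.getIssues(orgID, projectID)
	duration := time.Since(start)
	if err != nil {
		// The failure path now reports how long the call took, which helps
		// when diagnosing errors such as "context deadline exceeded".
		log.Printf("failed to get issues for org %s, project %s: duration %v: %v", orgID, projectID, duration, err)
		return
	}
	log.Printf("collected %d issues in %v for project %s", len(issues), duration, projectID)
}

func main() {
	c := fakeClient{}
	collectProject(c, "org-1", "healthy")
	collectProject(c, "org-1", "broken")
}

Computing time.Since(start) unconditionally before the error check costs nothing on the success path and avoids measuring the elapsed time in two places.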
