Fix sentinel error aliasing, hot-path allocations, and resource leaks

- Deduplicate sentinel errors: httpx.ErrNoHealthy, ErrCircuitOpen, and
  ErrRetryExhausted are now aliases to the canonical sub-package values
  so errors.Is works across package boundaries
- Retry transport returns ErrRetryExhausted only when all attempts are
  actually exhausted, not on early policy exit
- Balancer: pre-parse endpoint URLs at construction, replace req.Clone
  with cheap shallow struct copy to avoid per-request allocations
- Circuit breaker: Load before LoadOrStore to avoid allocating a Breaker
  on every request for known hosts
- Health checker: drain response body before close for connection reuse,
  probe endpoints concurrently, run initial probe synchronously in Start
- Client: add Close() to shut down health checker goroutine, propagate
  URL resolution errors instead of silently discarding them
- MockClock: fix lock ordering in Reset (clock.mu before t.mu), fix
  timer slice compaction to avoid backing-array aliasing, extract
  fireExpired to deduplicate Advance/Set
This commit is contained in:
2026-03-20 15:21:32 +03:00
parent f9a05f5c57
commit 5cfd1a7400
8 changed files with 155 additions and 48 deletions

View File

@@ -2,6 +2,7 @@ package balancer
import (
"context"
"io"
"net/http"
"sync"
"time"
@@ -70,8 +71,10 @@ func newHealthChecker(opts ...HealthOption) *HealthChecker {
}
// Start begins the background health checking loop for the given endpoints.
// All endpoints are initially considered healthy.
// An initial probe is run synchronously so that unhealthy endpoints are
// detected before the first request.
func (h *HealthChecker) Start(endpoints []Endpoint) {
// Mark all healthy as a safe default, then immediately probe.
h.mu.Lock()
for _, ep := range endpoints {
h.status[ep.URL] = true
// NOTE(review): the hunk below elides the matching h.mu.Unlock() and the
// context.WithCancel setup that produce the ctx/cancel used next —
// verify against the full file.
@@ -82,6 +85,9 @@ func (h *HealthChecker) Start(endpoints []Endpoint) {
h.cancel = cancel
h.stopped = make(chan struct{})
// Run initial probe synchronously so callers don't hit stale state.
h.probe(ctx, endpoints)
go h.loop(ctx, endpoints)
}
@@ -111,7 +117,7 @@ func (h *HealthChecker) Healthy(endpoints []Endpoint) []Endpoint {
h.mu.RLock()
defer h.mu.RUnlock()
// NOTE(review): the next line is the removed pre-image in this diff; the
// make(...) line after it is its replacement, pre-sizing the slice to
// avoid append regrowth. Only one of the two exists in the real file.
var result []Endpoint
result := make([]Endpoint, 0, len(endpoints))
// Keep only the endpoints currently marked healthy in the status map.
for _, ep := range endpoints {
if h.status[ep.URL] {
result = append(result, ep)
@@ -137,13 +143,18 @@ func (h *HealthChecker) loop(ctx context.Context, endpoints []Endpoint) {
}
// probe checks every endpoint concurrently and records each result in the
// shared status map under h.mu. It blocks until all checks have completed,
// so callers (Start, loop) observe a fully refreshed view when it returns.
func (h *HealthChecker) probe(ctx context.Context, endpoints []Endpoint) {
	var wg sync.WaitGroup
	wg.Add(len(endpoints))
	for _, ep := range endpoints {
		ep := ep // per-iteration copy: the goroutine must not share the loop variable (pre-Go 1.22 semantics)
		go func() {
			defer wg.Done()
			healthy := h.check(ctx, ep)
			h.mu.Lock()
			h.status[ep.URL] = healthy
			h.mu.Unlock()
		}()
	}
	wg.Wait()
}
func (h *HealthChecker) check(ctx context.Context, ep Endpoint) bool {
@@ -156,6 +167,7 @@ func (h *HealthChecker) check(ctx context.Context, ep Endpoint) bool {
// NOTE(review): the hunk above elides building and executing the probe
// request, which yields resp and err — verify against the full file.
if err != nil {
return false
}
// Drain the body before closing so the transport can reuse the underlying
// TCP/TLS connection (net/http keep-alive requires a fully read body).
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
return resp.StatusCode >= 200 && resp.StatusCode < 300