Add graceful stopping to server, and extend middleware and pipeline logic
@@ -9,7 +9,16 @@ import (
 	"github.com/urfave/negroni"
 )
 
-func RequestLogger(next http.Handler) http.Handler {
+type RequestLogger struct{}
+
+func (_ *RequestLogger) Stop() {
+	log.Info().Msg("Stopped Logging")
+}
+
+func (_ *RequestLogger) Manage() {
+}
+
+func (_ *RequestLogger) Use(next http.Handler) http.Handler {
 	log.Info().Msg("Enabling Logging")
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		start := time.Now()
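The hunk above cuts off inside the handler closure, so the part that actually calls the next handler and logs the elapsed time is not visible. Purely for context, a timing middleware of this shape usually looks like the sketch below; the body after `start := time.Now()`, the package name, and the zerolog import are assumptions, not the project's code.

```go
package middleware // hypothetical package name

import (
	"net/http"
	"time"

	"github.com/rs/zerolog/log" // assumed: the log.Info().Msg chaining in the diff matches zerolog
)

type timingLogger struct{}

// Use wraps the next handler, times each request, and logs it afterwards.
// This is an illustrative sketch, not the commit's RequestLogger implementation.
func (*timingLogger) Use(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r) // run the rest of the chain
		log.Info().
			Str("method", r.Method).
			Str("path", r.URL.Path).
			Dur("duration", time.Since(start)).
			Msg("Handled request")
	})
}
```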
@@ -17,6 +17,7 @@ type Metrics struct {
 	endpointMetrics []EndpointMetrics
 	ticker          *time.Ticker
 	file            string
+	stop            chan bool
 }
 
 type EndpointMetrics struct {
@@ -44,7 +45,7 @@ func NewMetrics(bufferSize int, flushTimeout time.Duration, file string) *Metric
 	}
 }
 
-func (m *Metrics) RequestMetrics(next http.Handler) http.Handler {
+func (m *Metrics) Use(next http.Handler) http.Handler {
 	log.Info().Msg("Enabling Request Metrics")
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		start := time.Now()
@@ -71,6 +72,8 @@ func (m *Metrics) Manage() {
 			m.calculateDuration(rm)
 		case <-m.ticker.C:
 			m.Flush()
+		case <-m.stop:
+			return
 		}
 	}
 }
@@ -129,3 +132,13 @@ func (m *Metrics) Flush() {
 
 	log.Info().Str("file", m.file).Int("count", len(a)).Msg("Completed Metrics flush")
 }
+
+func (m *Metrics) Stop() {
+	log.Info().Msg("Stopping Request Metrics")
+	for len(m.c) > 0 {
+		rm := <- m.c
+		m.calculateDuration(rm)
+	}
+	m.Flush()
+	log.Info().Msg("Stopped Request Metrics")
+}
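The new `Stop` drains whatever is still sitting in the buffered metrics channel `m.c` before the final `Flush`, so measurements recorded just before shutdown still make it into the aggregates. Checking `len(m.c)` this way assumes the producers (the wrapped handlers) have already stopped sending, which is the case once the server has drained its in-flight requests. A generic sketch of the same drain pattern, with hypothetical names:

```go
// drain empties an already-quiesced buffered channel. If senders could still
// be active, closing the channel and ranging over it would be the safer pattern;
// at shutdown, as in Metrics.Stop, sends have ceased, so len() is reliable.
func drain[T any](c chan T, handle func(T)) {
	for len(c) > 0 {
		handle(<-c)
	}
}
```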
@@ -5,14 +5,52 @@ import (
 	"slices"
 )
 
-type Middleware func(http.Handler) http.Handler
+type Middleware interface {
+	Use(http.Handler) http.Handler
+	Manage()
+	Stop()
+}
 
-func Pipeline(funcs ...Middleware) Middleware {
+type Pipeline struct {
+	middleware []Middleware
+}
+
+func NewPipeline() *Pipeline {
+	return &Pipeline{}
+}
+
+func (p *Pipeline) AddMiddleware(m Middleware) {
+	p.middleware = append(p.middleware, m)
+}
+
+func (p *Pipeline) Use() func(http.Handler) http.Handler {
 	return func(next http.Handler) http.Handler {
-		for _, m := range slices.Backward(funcs) {
-			next = m(next)
+		for _, m := range slices.Backward(p.middleware) {
+			next = m.Use(next)
 		}
 
 		return next
 	}
 }
+
+func (p *Pipeline) Stop() {
+	for _, m := range p.middleware {
+		m.Stop()
+	}
+}
+
+func (p *Pipeline) Manage() {
+	for _, m := range p.middleware {
+		go m.Manage()
+	}
+}
+
+// func Pipeline(funcs ...Middleware) func(http.Handler) http.Handler {
+// return func(next http.Handler) http.Handler {
+// for _, m := range slices.Backward(funcs) {
+// next = m.Use(next)
+// }
+//
+// return next
+// }
+// }
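Taken together, the interface and the new `Pipeline` type let the server own the whole middleware lifecycle: build the chain once, start every `Manage` loop, and tear everything down in order on shutdown. The commit title mentions graceful stopping of the server itself, which is not part of this diff; the sketch below shows one plausible wiring under that assumption. The import path, port, handler, and constructor arguments are placeholders; only the `Pipeline` and `Middleware` methods come from the code above.

```go
package main

import (
	"context"
	"net/http"
	"os/signal"
	"syscall"
	"time"

	mw "example.com/app/middleware" // placeholder import path for the package in this diff
)

func main() {
	p := mw.NewPipeline()
	p.AddMiddleware(&mw.RequestLogger{})
	p.AddMiddleware(mw.NewMetrics(1024, 30*time.Second, "metrics.json"))
	p.AddMiddleware(mw.NewLimiter(100, 10, time.Second, time.Minute))
	p.Manage() // one Manage goroutine per middleware

	handler := p.Use()(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	srv := &http.Server{Addr: ":8080", Handler: handler}

	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	go srv.ListenAndServe() // error handling omitted in this sketch
	<-ctx.Done()            // block until SIGINT/SIGTERM

	shutdownCtx, release := context.WithTimeout(context.Background(), 10*time.Second)
	defer release()
	srv.Shutdown(shutdownCtx) // stop accepting and drain in-flight requests first
	p.Stop()                  // then stop each middleware: limiter loop, metrics drain + flush, logger
}
```

Because `Use` iterates the registered middleware with `slices.Backward`, the first middleware added ends up outermost, so in this ordering a request passes through the logger, then metrics, then the rate limiter before reaching the handler.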
@@ -18,10 +18,11 @@ type Limiter struct {
 	bucketRefill   int
 	rwLock         *sync.RWMutex
 	rateChannel    chan string
+	stop           chan struct{}
 }
 
-func NewLimiter(maxRequests int, refills int, refillInterval time.Duration, cleanupInterval time.Duration) Limiter {
-	return Limiter{
+func NewLimiter(maxRequests int, refills int, refillInterval time.Duration, cleanupInterval time.Duration) *Limiter {
+	return &Limiter{
 		currentBuckets: make(map[string]*atomic.Int64),
 		bucketSize:     maxRequests,
 		refillTicker:   time.NewTicker(refillInterval),
@@ -32,14 +33,15 @@ func NewLimiter(maxRequests int, refills int, refillInterval time.Duration, clea
 	}
 }
 
-func (l *Limiter) Start() {
-	go l.Manage()
-}
-
 func (l *Limiter) UpdateCleanupTime(new time.Duration) {
 	l.cleanupTicker.Reset(new)
 }
 
+func (l *Limiter) Stop() {
+	l.stop <- struct{}{}
+	log.Info().Msg("Stopped Ratelimits")
+}
+
 func (l *Limiter) Manage() {
 	for {
 		select {
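With the Limiter on the same `Manage`/`Stop` contract, stopping it is a send on the new `stop` channel. If that channel is created unbuffered (its initialization is outside this hunk), the send doubles as a handshake: `Stop` only returns after the `Manage` loop has received the value and is on its way out of the select. A generic sketch of that pattern, with hypothetical names:

```go
package worker // hypothetical package, illustrating the stop-channel handshake only

import "time"

type looper struct {
	ticker *time.Ticker
	stop   chan struct{} // assumed unbuffered: make(chan struct{})
}

func (l *looper) Manage() {
	for {
		select {
		case <-l.ticker.C:
			// periodic work (refills, cleanup, flushes, ...)
		case <-l.stop:
			return
		}
	}
}

// Stop blocks until Manage has received the signal, so when it returns the
// loop is guaranteed to have stopped selecting.
func (l *looper) Stop() {
	l.stop <- struct{}{}
}
```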
@@ -77,6 +79,8 @@ func (l *Limiter) Manage() {
 			l.rwLock.Unlock()
 			duration := time.Since(start)
 			log.Debug().Str("duration", duration.String()).Int("deleted_buckets", deletedBuckets).Msg("Cleaned up Buckets")
+		case <- l.stop:
+			return
 		}
 	}
 }
@@ -93,7 +97,7 @@ func (l *Limiter) AddIfExists(ip string) bool {
 	return false
 }
 
-func (l *Limiter) RateLimiter(next http.Handler) http.Handler {
+func (l *Limiter) Use(next http.Handler) http.Handler {
 	log.Info().Int("bucket_size", l.bucketSize).Int("bucket_refill", l.bucketRefill).Msg("Enabling Ratelimits")
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		addr := strings.Split(r.RemoteAddr, ":")[0]
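One note on the unchanged first line of the handler: splitting `r.RemoteAddr` on `":"` and taking the first element yields the client IP for IPv4 addresses like `203.0.113.7:1234`, but for an IPv6 peer such as `[::1]:54321` it returns `[`. The standard library's `net.SplitHostPort` handles both forms; a possible alternative, offered as a suggestion rather than part of the commit:

```go
package middleware // hypothetical package name; a suggestion, not part of the commit

import (
	"net"
	"net/http"
)

// clientIP extracts the client host from RemoteAddr, handling IPv6 literals
// ("[::1]:54321") as well as IPv4 ("203.0.113.7:1234").
func clientIP(r *http.Request) string {
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return r.RemoteAddr // no port present; fall back to the raw value
	}
	return host
}
```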