Skip to content

Commit ff278b8

Browse files
authored
redis: add the option to use a separate redis pool for per second limits (#41)
1 parent 1189b6a commit ff278b8

File tree

8 files changed

+390
-202
lines changed

8 files changed

+390
-202
lines changed

.travis.yml

+1-2
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ sudo: required
22
language: go
33
go: "1.10"
44
services: redis-server
5-
env:
6-
- REDIS_SOCKET_TYPE=tcp REDIS_URL="localhost:6379"
75
install: make bootstrap
6+
before_script: redis-server --port 6380 &
87
script: make check_format tests

README.md

+40-5
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@
2020
- [Request Fields](#request-fields)
2121
- [Statistics](#statistics)
2222
- [Debug Port](#debug-port)
23+
- [Redis](#redis)
24+
- [One Redis Instance](#one-redis-instance)
25+
- [Two Redis Instances](#two-redis-instances)
2326
- [Contact](#contact)
2427

2528
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -28,7 +31,7 @@
2831

2932
The rate limit service is a Go/gRPC service designed to enable generic rate limit scenarios from different types of
3033
applications. Applications request a rate limit decision based on a domain and a set of descriptors. The service
31-
reads the configuration from disk via [runtime](https://github.com/lyft/goruntime), composes a cache key, and talks to the redis cache. A
34+
reads the configuration from disk via [runtime](https://github.com/lyft/goruntime), composes a cache key, and talks to the Redis cache. A
3235
decision is then returned to the caller.
3336

3437
# Deprecation of Legacy Ratelimit Proto
@@ -55,13 +58,13 @@ to give time to community members running ratelimit off of `master`.
5558

5659
# Building and Testing
5760

58-
* Install redis-server.
61+
* Install redis-server.
5962
* Make sure go is setup correctly and checkout rate limit service into your go path. More information about installing
6063
go [here](https://golang.org/doc/install).
61-
* In order to run the integration tests using a local default redis install you will also need these environment variables set:
64+
* In order to run the integration tests using a local Redis server, run two redis-server instances: one on port `6379` and another on port `6380`:
6265
```bash
63-
export REDIS_SOCKET_TYPE=tcp
64-
export REDIS_URL=localhost:6379
66+
redis-server --port 6379 &
67+
redis-server --port 6380 &
6568
```
6669
* To setup for the first time (only done once):
6770
```bash
@@ -352,6 +355,38 @@ $ curl 0:6070/
352355

353356
You can specify the debug port with the `DEBUG_PORT` environment variable. It defaults to `6070`.
354357

358+
# Redis
359+
360+
Ratelimit uses Redis as its caching layer. Ratelimit supports two operation modes:
361+
362+
1. One Redis instance for all limits.
363+
1. Two Redis instances: one for per second limits and another one for all other limits.
364+
365+
## One Redis Instance
366+
367+
To configure one Redis instance use the following environment variables:
368+
369+
1. `REDIS_SOCKET_TYPE`
370+
1. `REDIS_URL`
371+
1. `REDIS_POOL_SIZE`
372+
373+
This setup will use the same Redis server for all limits.
374+
375+
## Two Redis Instances
376+
377+
To configure two Redis instances use the following environment variables:
378+
379+
1. `REDIS_SOCKET_TYPE`
380+
1. `REDIS_URL`
381+
1. `REDIS_POOL_SIZE`
382+
1. `REDIS_PERSECOND`: set this to `"true"`.
383+
1. `REDIS_PERSECOND_SOCKET_TYPE`
384+
1. `REDIS_PERSECOND_URL`
385+
1. `REDIS_PERSECOND_POOL_SIZE`
386+
387+
This setup will use the Redis server configured with the `_PERSECOND_` vars for
388+
per second limits, and the other Redis server for all other limits.
389+
355390
# Contact
356391

357392
* [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing

src/redis/cache_impl.go

+99-20
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
package redis
22

33
import (
4+
"bytes"
45
"math"
56
"math/rand"
67
"strconv"
@@ -16,10 +17,17 @@ import (
1617
)
1718

1819
type rateLimitCacheImpl struct {
19-
pool Pool
20+
pool Pool
21+
// Optional Pool for a dedicated cache of per second limits.
22+
// If this pool is nil, then the Cache will use the pool for all
23+
// limits regardless of unit. If this pool is not nil, then it
24+
// is used for limits that have a SECOND unit.
25+
perSecondPool Pool
2026
timeSource TimeSource
2127
jitterRand *rand.Rand
2228
expirationJitterMaxSeconds int64
29+
// bytes.Buffer pool used to efficiently generate cache keys.
30+
bufferPool sync.Pool
2331
}
2432

2533
// Convert a rate limit into a time divider.
@@ -45,23 +53,41 @@ func unitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 {
4553
// @param descriptor supplies the descriptor to generate the key for.
4654
// @param limit supplies the rate limit to generate the key for (may be nil).
4755
// @param now supplies the current unix time.
48-
// @return the cache key.
56+
// @return cacheKey struct.
4957
func (this *rateLimitCacheImpl) generateCacheKey(
50-
domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) string {
58+
domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) cacheKey {
5159

5260
if limit == nil {
53-
return ""
61+
return cacheKey{
62+
key: "",
63+
perSecond: false,
64+
}
5465
}
5566

56-
var cacheKey string = domain + "_"
67+
b := this.bufferPool.Get().(*bytes.Buffer)
68+
defer this.bufferPool.Put(b)
69+
b.Reset()
70+
71+
b.WriteString(domain)
72+
b.WriteByte('_')
73+
5774
for _, entry := range descriptor.Entries {
58-
cacheKey += entry.Key + "_"
59-
cacheKey += entry.Value + "_"
75+
b.WriteString(entry.Key)
76+
b.WriteByte('_')
77+
b.WriteString(entry.Value)
78+
b.WriteByte('_')
6079
}
6180

6281
divider := unitToDivider(limit.Limit.Unit)
63-
cacheKey += strconv.FormatInt((now/divider)*divider, 10)
64-
return cacheKey
82+
b.WriteString(strconv.FormatInt((now/divider)*divider, 10))
83+
84+
return cacheKey{
85+
key: b.String(),
86+
perSecond: isPerSecondLimit(limit.Limit.Unit)}
87+
}
88+
89+
func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool {
90+
return unit == pb.RateLimitResponse_RateLimit_SECOND
6591
}
6692

6793
func max(a uint32, b uint32) uint32 {
@@ -71,22 +97,50 @@ func max(a uint32, b uint32) uint32 {
7197
return b
7298
}
7399

100+
// cacheKey pairs a generated Redis key with metadata about the limit it
// belongs to, so the caller can route commands to the correct Redis pool.
type cacheKey struct {
	// key is the Redis key for a descriptor/limit pair; empty when the
	// descriptor has no limit configured.
	key string
	// True if the key corresponds to a limit with a SECOND unit. False otherwise.
	perSecond bool
}
105+
106+
func pipelineAppend(conn Connection, key string, hitsAddend uint32, expirationSeconds int64) {
107+
conn.PipeAppend("INCRBY", key, hitsAddend)
108+
conn.PipeAppend("EXPIRE", key, expirationSeconds)
109+
}
110+
111+
func pipelineFetch(conn Connection) uint32 {
112+
ret := uint32(conn.PipeResponse().Int())
113+
// Pop off EXPIRE response and check for error.
114+
conn.PipeResponse()
115+
return ret
116+
}
117+
74118
func (this *rateLimitCacheImpl) DoLimit(
75119
ctx context.Context,
76120
request *pb.RateLimitRequest,
77121
limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus {
78122

79123
logger.Debugf("starting cache lookup")
124+
80125
conn := this.pool.Get()
81126
defer this.pool.Put(conn)
82127

128+
// Optional connection for per second limits. If the cache has a perSecondPool setup,
129+
// then use a connection from the pool for per second limits.
130+
var perSecondConn Connection = nil
131+
if this.perSecondPool != nil {
132+
perSecondConn = this.perSecondPool.Get()
133+
defer this.perSecondPool.Put(perSecondConn)
134+
}
135+
83136
// request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request.
84137
hitsAddend := max(1, request.HitsAddend)
85138

86139
// First build a list of all cache keys that we are actually going to hit. generateCacheKey()
87-
// returns "" if there is no limit so that we can keep the arrays all the same size.
140+
// returns an empty string in the key if there is no limit so that we can keep the arrays
141+
// all the same size.
88142
assert.Assert(len(request.Descriptors) == len(limits))
89-
cacheKeys := make([]string, len(request.Descriptors))
143+
cacheKeys := make([]cacheKey, len(request.Descriptors))
90144
now := this.timeSource.UnixNow()
91145
for i := 0; i < len(request.Descriptors); i++ {
92146
cacheKeys[i] = this.generateCacheKey(request.Domain, request.Descriptors[i], limits[i], now)
@@ -99,7 +153,7 @@ func (this *rateLimitCacheImpl) DoLimit(
99153

100154
// Now, actually setup the pipeline, skipping empty cache keys.
101155
for i, cacheKey := range cacheKeys {
102-
if cacheKey == "" {
156+
if cacheKey.key == "" {
103157
continue
104158
}
105159
logger.Debugf("looking up cache key: %s", cacheKey)
@@ -109,15 +163,19 @@ func (this *rateLimitCacheImpl) DoLimit(
109163
expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds)
110164
}
111165

112-
conn.PipeAppend("INCRBY", cacheKey, hitsAddend)
113-
conn.PipeAppend("EXPIRE", cacheKey, expirationSeconds)
166+
// Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit.
167+
if perSecondConn != nil && cacheKey.perSecond {
168+
pipelineAppend(perSecondConn, cacheKey.key, hitsAddend, expirationSeconds)
169+
} else {
170+
pipelineAppend(conn, cacheKey.key, hitsAddend, expirationSeconds)
171+
}
114172
}
115173

116174
// Now fetch the pipeline.
117175
responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus,
118176
len(request.Descriptors))
119177
for i, cacheKey := range cacheKeys {
120-
if cacheKey == "" {
178+
if cacheKey.key == "" {
121179
responseDescriptorStatuses[i] =
122180
&pb.RateLimitResponse_DescriptorStatus{
123181
Code: pb.RateLimitResponse_OK,
@@ -126,16 +184,22 @@ func (this *rateLimitCacheImpl) DoLimit(
126184
}
127185
continue
128186
}
129-
limitAfterIncrease := uint32(conn.PipeResponse().Int())
130-
conn.PipeResponse() // Pop off EXPIRE response and check for error.
187+
188+
var limitAfterIncrease uint32
189+
// Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit.
190+
if this.perSecondPool != nil && cacheKey.perSecond {
191+
limitAfterIncrease = pipelineFetch(perSecondConn)
192+
} else {
193+
limitAfterIncrease = pipelineFetch(conn)
194+
}
131195

132196
limitBeforeIncrease := limitAfterIncrease - hitsAddend
133197
overLimitThreshold := limits[i].Limit.RequestsPerUnit
134198
// The nearLimitThreshold is the number of requests that can be made before hitting the NearLimitRatio.
135199
// We need to know it in both the OK and OVER_LIMIT scenarios.
136200
nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * config.NearLimitRatio)))
137201

138-
logger.Debugf("cache key: %s current: %d", cacheKey, limitAfterIncrease)
202+
logger.Debugf("cache key: %s current: %d", cacheKey.key, limitAfterIncrease)
139203
if limitAfterIncrease > overLimitThreshold {
140204
responseDescriptorStatuses[i] =
141205
&pb.RateLimitResponse_DescriptorStatus{
@@ -184,8 +248,23 @@ func (this *rateLimitCacheImpl) DoLimit(
184248
return responseDescriptorStatuses
185249
}
186250

187-
func NewRateLimitCacheImpl(pool Pool, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) RateLimitCache {
188-
return &rateLimitCacheImpl{pool, timeSource, jitterRand, expirationJitterMaxSeconds}
251+
func NewRateLimitCacheImpl(pool Pool, perSecondPool Pool, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) RateLimitCache {
252+
return &rateLimitCacheImpl{
253+
pool: pool,
254+
perSecondPool: perSecondPool,
255+
timeSource: timeSource,
256+
jitterRand: jitterRand,
257+
expirationJitterMaxSeconds: expirationJitterMaxSeconds,
258+
bufferPool: newBufferPool(),
259+
}
260+
}
261+
262+
func newBufferPool() sync.Pool {
263+
return sync.Pool{
264+
New: func() interface{} {
265+
return new(bytes.Buffer)
266+
},
267+
}
189268
}
190269

191270
type timeSourceImpl struct{}

src/redis/driver_impl.go

+6-7
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ package redis
33
import (
44
"github.com/lyft/gostats"
55
"github.com/lyft/ratelimit/src/assert"
6-
"github.com/lyft/ratelimit/src/settings"
76
"github.com/mediocregopher/radix.v2/pool"
87
"github.com/mediocregopher/radix.v2/redis"
98
logger "github.com/sirupsen/logrus"
@@ -65,13 +64,13 @@ func (this *poolImpl) Put(c Connection) {
6564
}
6665
}
6766

68-
func NewPoolImpl(scope stats.Scope) Pool {
69-
s := settings.NewSettings()
70-
71-
logger.Warnf("connecting to redis on %s %s with pool size %d", s.RedisSocketType, s.RedisUrl, s.RedisPoolSize)
72-
pool, err := pool.New(s.RedisSocketType, s.RedisUrl, s.RedisPoolSize)
67+
func NewPoolImpl(scope stats.Scope, socketType string, url string, poolSize int) Pool {
68+
logger.Warnf("connecting to redis on %s %s with pool size %d", socketType, url, poolSize)
69+
pool, err := pool.New(socketType, url, poolSize)
7370
checkError(err)
74-
return &poolImpl{pool, newPoolStats(scope)}
71+
return &poolImpl{
72+
pool: pool,
73+
stats: newPoolStats(scope)}
7574
}
7675

7776
func (this *connectionImpl) PipeAppend(cmd string, args ...interface{}) {

src/service_cmd/runner/runner.go

+9-1
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,18 @@ func Run() {
2020
srv := server.NewServer("ratelimit", settings.GrpcUnaryInterceptor(nil))
2121

2222
s := settings.NewSettings()
23+
24+
var perSecondPool redis.Pool
25+
if s.RedisPerSecond {
26+
perSecondPool = redis.NewPoolImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondSocketType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize)
27+
28+
}
29+
2330
service := ratelimit.NewService(
2431
srv.Runtime(),
2532
redis.NewRateLimitCacheImpl(
26-
redis.NewPoolImpl(srv.Scope().Scope("redis_pool")),
33+
redis.NewPoolImpl(srv.Scope().Scope("redis_pool"), s.RedisSocketType, s.RedisUrl, s.RedisPoolSize),
34+
perSecondPool,
2735
redis.NewTimeSourceImpl(),
2836
rand.New(redis.NewLockedSource(time.Now().Unix())),
2937
s.ExpirationJitterMaxSeconds),

src/settings/settings.go

+4
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,10 @@ type Settings struct {
2121
RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"`
2222
RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"`
2323
RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"`
24+
RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"`
25+
RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"`
26+
RedisPerSecondUrl string `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"`
27+
RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"`
2428
ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"`
2529
}
2630

0 commit comments

Comments
 (0)