@@ -46,15 +46,24 @@ func benchmarkGridRequests(b *testing.B, n int) {
 			b.Fatal(err)
 		}
 	}
+	rpc := NewSingleHandler[*testRequest, *testResponse](handlerTest2, newTestRequest, newTestResponse)
 	grid, err := SetupTestGrid(n)
 	errFatal(err)
 	b.Cleanup(grid.Cleanup)
 	// Create n managers.
 	for _, remote := range grid.Managers {
 		// Register a single handler which echos the payload.
 		errFatal(remote.RegisterSingleHandler(handlerTest, func(payload []byte) ([]byte, *RemoteErr) {
+			defer PutByteBuffer(payload)
 			return append(GetByteBuffer()[:0], payload...), nil
 		}))
+		errFatal(rpc.Register(remote, func(req *testRequest) (resp *testResponse, err *RemoteErr) {
+			return &testResponse{
+				OrgNum:    req.Num,
+				OrgString: req.String,
+				Embedded:  *req,
+			}, nil
+		}))
 		errFatal(err)
 	}
 	const payloadSize = 512
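
The added lines above layer a typed request/response RPC (NewSingleHandler / Register / Call) on top of the byte-level echo handler that the existing benchmark exercises. The following is a minimal, self-contained sketch of that layering idea only, not the grid package itself: typedHandler, byteRequester, and the JSON encoding are assumptions made purely to keep the example runnable; the real package uses its own handler IDs, pooled buffers, and serialization, and its testResponse also carries an Embedded copy of the request.

// sketch.go - hypothetical illustration of a typed wrapper over a byte-level request.
package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// byteRequester stands in for the byte-level call (conn.Request in the diff):
// bytes in, bytes out.
type byteRequester func(ctx context.Context, payload []byte) ([]byte, error)

// typedHandler wraps a byteRequester with typed (de)serialization, mirroring
// what the generic single handler provides to callers in the benchmark.
type typedHandler[Req any, Resp any] struct {
	send byteRequester
}

// Call marshals the request, performs the byte-level call, and unmarshals
// the reply into a freshly allocated Resp.
func (h *typedHandler[Req, Resp]) Call(ctx context.Context, req *Req) (*Resp, error) {
	payload, err := json.Marshal(req)
	if err != nil {
		return nil, err
	}
	raw, err := h.send(ctx, payload)
	if err != nil {
		return nil, err
	}
	resp := new(Resp)
	if err := json.Unmarshal(raw, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Local stand-ins for the test types referenced by the diff.
type testRequest struct {
	Num    int
	String string
}

type testResponse struct {
	OrgNum    int
	OrgString string
}

func main() {
	// Echo-style byte handler, comparable to the RegisterSingleHandler callback.
	echo := func(_ context.Context, payload []byte) ([]byte, error) {
		var req testRequest
		if err := json.Unmarshal(payload, &req); err != nil {
			return nil, err
		}
		return json.Marshal(testResponse{OrgNum: req.Num, OrgString: req.String})
	}
	h := &typedHandler[testRequest, testResponse]{send: echo}
	resp, err := h.Call(context.Background(), &testRequest{Num: 42, String: "hello"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *resp)
}

The point of the typed wrapper, as in the benchmark below, is that callers work with concrete request/response structs while reusing the same byte-level transport.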
@@ -65,61 +74,124 @@ func benchmarkGridRequests(b *testing.B, n int) {
 
 	// Wait for all to connect
 	// Parallel writes per server.
-	for par := 1; par <= 32; par *= 2 {
-		b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) {
-			defer timeout(30 * time.Second)()
-			b.ReportAllocs()
-			b.SetBytes(int64(len(payload) * 2))
-			b.ResetTimer()
-			t := time.Now()
-			var ops int64
-			var lat int64
-			b.SetParallelism(par)
-			b.RunParallel(func(pb *testing.PB) {
-				rng := rand.New(rand.NewSource(time.Now().UnixNano()))
-				n := 0
-				var latency int64
-				managers := grid.Managers
-				hosts := grid.Hosts
-				for pb.Next() {
-					// Pick a random manager.
-					src, dst := rng.Intn(len(managers)), rng.Intn(len(managers))
-					if src == dst {
-						dst = (dst + 1) % len(managers)
+	b.Run("bytes", func(b *testing.B) {
+		for par := 1; par <= 32; par *= 2 {
+			b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) {
+				defer timeout(60 * time.Second)()
+				ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+				defer cancel()
+				b.ReportAllocs()
+				b.SetBytes(int64(len(payload) * 2))
+				b.ResetTimer()
+				t := time.Now()
+				var ops int64
+				var lat int64
+				b.SetParallelism(par)
+				b.RunParallel(func(pb *testing.PB) {
+					rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+					n := 0
+					var latency int64
+					managers := grid.Managers
+					hosts := grid.Hosts
+					for pb.Next() {
+						// Pick a random manager.
+						src, dst := rng.Intn(len(managers)), rng.Intn(len(managers))
+						if src == dst {
+							dst = (dst + 1) % len(managers)
+						}
+						local := managers[src]
+						conn := local.Connection(hosts[dst])
+						if conn == nil {
+							b.Fatal("No connection")
+						}
+						// Send the payload.
+						t := time.Now()
+						resp, err := conn.Request(ctx, handlerTest, payload)
+						latency += time.Since(t).Nanoseconds()
+						if err != nil {
+							if debugReqs {
+								fmt.Println(err.Error())
+							}
+							b.Fatal(err.Error())
+						}
+						PutByteBuffer(resp)
+						n++
 					}
-					local := managers[src]
-					conn := local.Connection(hosts[dst])
-					if conn == nil {
-						b.Fatal("No connection")
+					atomic.AddInt64(&ops, int64(n))
+					atomic.AddInt64(&lat, latency)
+				})
+				spent := time.Since(t)
+				if spent > 0 && n > 0 {
+					// Since we are benchmarking n parallel servers we need to multiply by n.
+					// This will give an estimate of the total ops/s.
+					latency := float64(atomic.LoadInt64(&lat)) / float64(time.Millisecond)
+					b.ReportMetric(float64(n)*float64(ops)/spent.Seconds(), "vops/s")
+					b.ReportMetric(latency/float64(ops), "ms/op")
+				}
+			})
+		}
+	})
+	return
+	b.Run("rpc", func(b *testing.B) {
+		for par := 1; par <= 32; par *= 2 {
+			b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) {
+				defer timeout(60 * time.Second)()
+				ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+				defer cancel()
+				b.ReportAllocs()
+				b.ResetTimer()
+				t := time.Now()
+				var ops int64
+				var lat int64
+				b.SetParallelism(par)
+				b.RunParallel(func(pb *testing.PB) {
+					rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+					n := 0
+					var latency int64
+					managers := grid.Managers
+					hosts := grid.Hosts
+					req := testRequest{
+						Num:    rng.Int(),
+						String: "hello",
 					}
-					ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-					// Send the payload.
-					t := time.Now()
-					resp, err := conn.Request(ctx, handlerTest, payload)
-					latency += time.Since(t).Nanoseconds()
-					cancel()
-					if err != nil {
-						if debugReqs {
-							fmt.Println(err.Error())
+					for pb.Next() {
+						// Pick a random manager.
+						src, dst := rng.Intn(len(managers)), rng.Intn(len(managers))
+						if src == dst {
+							dst = (dst + 1) % len(managers)
 						}
-						b.Fatal(err.Error())
+						local := managers[src]
+						conn := local.Connection(hosts[dst])
+						if conn == nil {
+							b.Fatal("No connection")
+						}
+						// Send the payload.
+						t := time.Now()
+						resp, err := rpc.Call(ctx, conn, &req)
+						latency += time.Since(t).Nanoseconds()
+						if err != nil {
+							if debugReqs {
+								fmt.Println(err.Error())
+							}
+							b.Fatal(err.Error())
+						}
+						rpc.PutResponse(resp)
+						n++
 					}
-					PutByteBuffer(resp)
-					n++
+					atomic.AddInt64(&ops, int64(n))
+					atomic.AddInt64(&lat, latency)
+				})
+				spent := time.Since(t)
+				if spent > 0 && n > 0 {
+					// Since we are benchmarking n parallel servers we need to multiply by n.
+					// This will give an estimate of the total ops/s.
+					latency := float64(atomic.LoadInt64(&lat)) / float64(time.Millisecond)
+					b.ReportMetric(float64(n)*float64(ops)/spent.Seconds(), "vops/s")
+					b.ReportMetric(latency/float64(ops), "ms/op")
 				}
-				atomic.AddInt64(&ops, int64(n))
-				atomic.AddInt64(&lat, latency)
 			})
-			spent := time.Since(t)
-			if spent > 0 && n > 0 {
-				// Since we are benchmarking n parallel servers we need to multiply by n.
-				// This will give an estimate of the total ops/s.
-				latency := float64(atomic.LoadInt64(&lat)) / float64(time.Millisecond)
-				b.ReportMetric(float64(n)*float64(ops)/spent.Seconds(), "vops/s")
-				b.ReportMetric(latency/float64(ops), "ms/op")
-			}
-		})
-	}
+		}
+	})
 }
 
 func BenchmarkStream(b *testing.B) {
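
Both sub-benchmarks report the same custom metrics: each RunParallel worker counts its own operations and summed latency, publishes them atomically, and the parent benchmark scales the totals by the server count n to estimate virtual ops/s plus mean milliseconds per op. Below is a distilled, runnable sketch of just that aggregation pattern; doWork and the fixed servers constant are hypothetical stand-ins for the grid request and the n parameter, not part of the package.

// metrics_sketch_test.go - hypothetical illustration of the metric aggregation.
package bench

import (
	"sync/atomic"
	"testing"
	"time"
)

// doWork stands in for conn.Request / rpc.Call in the real benchmark.
func doWork() { time.Sleep(10 * time.Microsecond) }

func BenchmarkAggregatedMetrics(b *testing.B) {
	const servers = 4 // corresponds to n in benchmarkGridRequests
	var ops, lat int64
	start := time.Now()
	b.RunParallel(func(pb *testing.PB) {
		var n int
		var latency int64
		for pb.Next() {
			t := time.Now()
			doWork()
			latency += time.Since(t).Nanoseconds()
			n++
		}
		// Publish this worker's totals; many workers add concurrently.
		atomic.AddInt64(&ops, int64(n))
		atomic.AddInt64(&lat, latency)
	})
	if spent := time.Since(start); spent > 0 && ops > 0 {
		// Scale by the number of servers to estimate total virtual ops/s,
		// and report mean latency per op in milliseconds.
		latencyMS := float64(atomic.LoadInt64(&lat)) / float64(time.Millisecond)
		b.ReportMetric(float64(servers)*float64(ops)/spent.Seconds(), "vops/s")
		b.ReportMetric(latencyMS/float64(ops), "ms/op")
	}
}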