Skip to content

Commit f8b2314

Browse files
committed Sep 26, 2016
runtime: optimize defer code
This optimizes deferproc and deferreturn in various ways. The most important optimization is that it more carefully arranges to prevent preemption or stack growth. Currently we do this by switching to the system stack on every deferproc and every deferreturn. While we need to be on the system stack for the slow path of allocating and freeing defers, in the common case we can fit in the nosplit stack. Hence, this change pushes the system stack switch down into the slow paths and makes everything now exposed to the user stack nosplit. This also eliminates the need for various acquirem/releasem pairs, since we are now preventing preemption by preventing stack split checks. As another smaller optimization, we special case the common cases of zero-sized and pointer-sized defer frames to respectively skip the copy and perform the copy in line instead of calling memmove. This speeds up the runtime defer benchmark by 42%: name old time/op new time/op delta Defer-4 75.1ns ± 1% 43.3ns ± 1% -42.31% (p=0.000 n=8+10) In reality, this speeds up defer by about 2.2X. The two benchmarks below compare a Lock/defer Unlock pair (DeferLock) with a Lock/Unlock pair (NoDeferLock). NoDeferLock establishes a baseline cost, so these two benchmarks together show that this change reduces the overhead of defer from 61.4ns to 27.9ns. name old time/op new time/op delta DeferLock-4 77.4ns ± 1% 43.9ns ± 1% -43.31% (p=0.000 n=10+10) NoDeferLock-4 16.0ns ± 0% 15.9ns ± 0% -0.39% (p=0.000 n=9+8) This also shaves 34ns off cgo calls: name old time/op new time/op delta CgoNoop-4 122ns ± 1% 88.3ns ± 1% -27.72% (p=0.000 n=8+9) Updates #14939, #16051. Change-Id: I2baa0dea378b7e4efebbee8fca919a97d5e15f38 Reviewed-on: https://go-review.googlesource.com/29656 Reviewed-by: Keith Randall <[email protected]>
1 parent d211c2d commit f8b2314

File tree

1 file changed

+80
-54
lines changed

1 file changed

+80
-54
lines changed
 

src/runtime/panic.go

+80-54
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ package runtime
66

77
import (
88
"runtime/internal/atomic"
9+
"runtime/internal/sys"
910
"unsafe"
1011
)
1112

@@ -84,16 +85,21 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
8485
argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
8586
callerpc := getcallerpc(unsafe.Pointer(&siz))
8687

87-
systemstack(func() {
88-
d := newdefer(siz)
89-
if d._panic != nil {
90-
throw("deferproc: d.panic != nil after newdefer")
91-
}
92-
d.fn = fn
93-
d.pc = callerpc
94-
d.sp = sp
95-
memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
96-
})
88+
d := newdefer(siz)
89+
if d._panic != nil {
90+
throw("deferproc: d.panic != nil after newdefer")
91+
}
92+
d.fn = fn
93+
d.pc = callerpc
94+
d.sp = sp
95+
switch siz {
96+
case 0:
97+
// Do nothing.
98+
case sys.PtrSize:
99+
*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
100+
default:
101+
memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
102+
}
97103

98104
// deferproc returns 0 normally.
99105
// a deferred func that stops a panic
@@ -175,22 +181,30 @@ func init() {
175181

176182
// Allocate a Defer, usually using per-P pool.
177183
// Each defer must be released with freedefer.
178-
// Note: runs on g0 stack
184+
//
185+
// This must not grow the stack because there may be a frame without
186+
// stack map information when this is called.
187+
//
188+
//go:nosplit
179189
func newdefer(siz int32) *_defer {
180190
var d *_defer
181191
sc := deferclass(uintptr(siz))
182-
mp := acquirem()
192+
gp := getg()
183193
if sc < uintptr(len(p{}.deferpool)) {
184-
pp := mp.p.ptr()
194+
pp := gp.m.p.ptr()
185195
if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
186-
lock(&sched.deferlock)
187-
for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
188-
d := sched.deferpool[sc]
189-
sched.deferpool[sc] = d.link
190-
d.link = nil
191-
pp.deferpool[sc] = append(pp.deferpool[sc], d)
192-
}
193-
unlock(&sched.deferlock)
196+
// Take the slow path on the system stack so
197+
// we don't grow newdefer's stack.
198+
systemstack(func() {
199+
lock(&sched.deferlock)
200+
for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
201+
d := sched.deferpool[sc]
202+
sched.deferpool[sc] = d.link
203+
d.link = nil
204+
pp.deferpool[sc] = append(pp.deferpool[sc], d)
205+
}
206+
unlock(&sched.deferlock)
207+
})
194208
}
195209
if n := len(pp.deferpool[sc]); n > 0 {
196210
d = pp.deferpool[sc][n-1]
@@ -200,19 +214,24 @@ func newdefer(siz int32) *_defer {
200214
}
201215
if d == nil {
202216
// Allocate new defer+args.
203-
total := roundupsize(totaldefersize(uintptr(siz)))
204-
d = (*_defer)(mallocgc(total, deferType, true))
217+
systemstack(func() {
218+
total := roundupsize(totaldefersize(uintptr(siz)))
219+
d = (*_defer)(mallocgc(total, deferType, true))
220+
})
205221
}
206222
d.siz = siz
207-
gp := mp.curg
208223
d.link = gp._defer
209224
gp._defer = d
210-
releasem(mp)
211225
return d
212226
}
213227

214228
// Free the given defer.
215229
// The defer cannot be used after this call.
230+
//
231+
// This must not grow the stack because there may be a frame without a
232+
// stack map when this is called.
233+
//
234+
//go:nosplit
216235
func freedefer(d *_defer) {
217236
if d._panic != nil {
218237
freedeferpanic()
@@ -222,31 +241,34 @@ func freedefer(d *_defer) {
222241
}
223242
sc := deferclass(uintptr(d.siz))
224243
if sc < uintptr(len(p{}.deferpool)) {
225-
mp := acquirem()
226-
pp := mp.p.ptr()
244+
pp := getg().m.p.ptr()
227245
if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
228246
// Transfer half of local cache to the central cache.
229-
var first, last *_defer
230-
for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
231-
n := len(pp.deferpool[sc])
232-
d := pp.deferpool[sc][n-1]
233-
pp.deferpool[sc][n-1] = nil
234-
pp.deferpool[sc] = pp.deferpool[sc][:n-1]
235-
if first == nil {
236-
first = d
237-
} else {
238-
last.link = d
247+
//
248+
// Take this slow path on the system stack so
249+
// we don't grow freedefer's stack.
250+
systemstack(func() {
251+
var first, last *_defer
252+
for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
253+
n := len(pp.deferpool[sc])
254+
d := pp.deferpool[sc][n-1]
255+
pp.deferpool[sc][n-1] = nil
256+
pp.deferpool[sc] = pp.deferpool[sc][:n-1]
257+
if first == nil {
258+
first = d
259+
} else {
260+
last.link = d
261+
}
262+
last = d
239263
}
240-
last = d
241-
}
242-
lock(&sched.deferlock)
243-
last.link = sched.deferpool[sc]
244-
sched.deferpool[sc] = first
245-
unlock(&sched.deferlock)
264+
lock(&sched.deferlock)
265+
last.link = sched.deferpool[sc]
266+
sched.deferpool[sc] = first
267+
unlock(&sched.deferlock)
268+
})
246269
}
247270
*d = _defer{}
248271
pp.deferpool[sc] = append(pp.deferpool[sc], d)
249-
releasem(mp)
250272
}
251273
}
252274

@@ -288,19 +310,23 @@ func deferreturn(arg0 uintptr) {
288310
}
289311

290312
// Moving arguments around.
291-
// Do not allow preemption here, because the garbage collector
292-
// won't know the form of the arguments until the jmpdefer can
293-
// flip the PC over to fn.
294-
mp := acquirem()
295-
memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
313+
//
314+
// Everything called after this point must be recursively
315+
// nosplit because the garbage collector won't know the form
316+
// of the arguments until the jmpdefer can flip the PC over to
317+
// fn.
318+
switch d.siz {
319+
case 0:
320+
// Do nothing.
321+
case sys.PtrSize:
322+
*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
323+
default:
324+
memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
325+
}
296326
fn := d.fn
297327
d.fn = nil
298328
gp._defer = d.link
299-
// Switch to systemstack merely to save nosplit stack space.
300-
systemstack(func() {
301-
freedefer(d)
302-
})
303-
releasem(mp)
329+
freedefer(d)
304330
jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
305331
}
306332

0 commit comments

Comments (0)
Please sign in to comment.