@@ -24,39 +24,17 @@
 
 Base.unlock(nrl::NonReentrantLock) = unlock(nrl.rl)
 
-# the above lock is taken around code that might gc, which might reenter through finalizers.
-# avoid that by temporarily disabling finalizers running concurrently on this thread.
-enable_finalizers(on::Bool) = ccall(:jl_gc_enable_finalizers, Cvoid,
-                                    (Ptr{Cvoid}, Int32,), Core.getptls(), on)
-macro safe_lock(l, ex)
-    quote
-        temp = $(esc(l))
-        lock(temp)
-        enable_finalizers(false)
-        try
-            $(esc(ex))
-        finally
-            unlock(temp)
-            enable_finalizers(true)
-        end
-    end
-end
-
-# if we actually want to acquire these locks from a finalizer, we can't just wait on them
-# (which might cause a task switch). as the lock can only be taken by another thread that
-# should be running, and not a concurrent task we'd need to switch to, we can safely spin.
-macro safe_lock_spin(l, ex)
+# a safe way to acquire locks from finalizers, where we can't wait (which switches tasks)
+macro spinlock(l, ex)
     quote
         temp = $(esc(l))
         while !trylock(temp)
            # we can't yield here
         end
-        enable_finalizers(false) # retains compatibility with non-finalizer callers
        try
            $(esc(ex))
        finally
            unlock(temp)
-            enable_finalizers(true)
        end
    end
 end
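(For context, a minimal usage sketch of the renamed `@spinlock` macro when called from a finalizer; this is not part of the diff. The lock and dictionary names are placeholders, and a plain `ReentrantLock` stands in for the pool's `NonReentrantLock`.)

const demo_lock = ReentrantLock()           # placeholder; the pool guards its state with a NonReentrantLock
const demo_refs = Dict{Ptr{Nothing},Int}()  # placeholder bookkeeping

function finalize_buffer(ptr::Ptr{Nothing})
    # a finalizer must not switch tasks, so spin on the lock instead of waiting on it
    @spinlock demo_lock begin
        delete!(demo_refs, ptr)
    end
    return
end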
@@ -299,7 +277,7 @@ a [`OutOfGPUMemoryError`](@ref) if the allocation request cannot be satisfied.
 
     # record the memory block
     ptr = pointer(block)
-    @safe_lock allocated_lock begin
+    @lock allocated_lock begin
         @assert !haskey(allocated[dev], ptr)
         allocated[dev][ptr] = block, 1
     end
@@ -337,7 +315,7 @@ multiple calls to `free` before this buffer is put back into the memory pool.
     dev = device()
 
     # look up the memory block
-    @safe_lock_spin allocated_lock begin
+    @spinlock allocated_lock begin
         block, refcount = allocated[dev][ptr]
         allocated[dev][ptr] = block, refcount+1
     end
@@ -365,7 +343,7 @@ Releases a buffer pointed to by `ptr` to the memory pool.
     # so perform our own error handling.
     try
         # look up the memory block, and bail out if its refcount isn't 1
-        block = @safe_lock_spin allocated_lock begin
+        block = @spinlock allocated_lock begin
             block, refcount = allocated[dev][ptr]
             if refcount == 1
                 delete!(allocated[dev], ptr)
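(A toy illustration, with placeholder names and not part of the diff, of the `(block, refcount)` convention the hunks above rely on: an entry is recorded with count 1 at allocation, aliasing bumps the count, and freeing only removes the entry once the count is back to 1, presumably decrementing it otherwise.)

refs = Dict{Symbol,Tuple{String,Int}}()
refs[:ptr] = ("block", 1)          # recorded at allocation time

blk, n = refs[:ptr]
refs[:ptr] = (blk, n + 1)          # alias: refcount 1 -> 2

blk, n = refs[:ptr]
if n == 1
    delete!(refs, :ptr)            # last reference: release the block
else
    refs[:ptr] = (blk, n - 1)      # still aliased: just drop one reference
end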
@@ -473,7 +451,7 @@
 
 ## utilities
 
-used_memory(dev=device()) = @safe_lock allocated_lock begin
+used_memory(dev=device()) = @lock allocated_lock begin
     mapreduce(sizeof, +, values(allocated[dev]); init=0)
 end
 
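(Since the custom `@safe_lock` is replaced by Base's `@lock` in these hunks, here is a rough sketch of what `@lock allocated_lock begin ... end` expands to; shown for context only, with the protected body elided.)

temp = allocated_lock    # the lock expression is evaluated once
lock(temp)
try
    # ... the protected body, e.g. the mapreduce over `allocated[dev]` above ...
finally
    unlock(temp)
end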