Skip to content

Commit 92122f9

Browse files
committed
WIP make shrinkFn optional in Allocator interface
This is work-in-progress because it's blocked by coroutines depending on the Allocator interface, which will be solved with the coroutine rewrite (#2377). Closes #2292.
1 parent 81767a6 commit 92122f9

File tree

2 files changed

+61
-80
lines changed

2 files changed

+61
-80
lines changed

std/heap.zig

+21-76
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@ var c_allocator_state = Allocator{
1818
};
1919

2020
fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
21-
assert(new_align <= @alignOf(c_longdouble));
21+
if (new_align > @alignOf(c_longdouble)) return error.OutOfMemory;
2222
const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
2323
const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
2424
return @ptrCast([*]u8, buf)[0..new_size];
@@ -143,7 +143,7 @@ pub const DirectAllocator = struct {
143143
const result = try alloc(allocator, new_size, new_align);
144144
if (old_mem.len != 0) {
145145
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
146-
_ = os.posix.munmap(@ptrToInt(old_mem.ptr), old_mem.len);
146+
_ = os.posix.munmap(@ptrToInt(old_mem.ptr), old_mem.len);
147147
}
148148
return result;
149149
},
@@ -155,12 +155,12 @@ pub const DirectAllocator = struct {
155155
const old_record_addr = old_adjusted_addr + old_mem.len;
156156
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
157157
const old_ptr = @intToPtr(*c_void, root_addr);
158-
159-
if(new_size == 0) {
158+
159+
if (new_size == 0) {
160160
if (os.windows.HeapFree(self.heap_handle.?, 0, old_ptr) == 0) unreachable;
161161
return old_mem[0..0];
162162
}
163-
163+
164164
const amt = new_size + new_align + @sizeOf(usize);
165165
const new_ptr = os.windows.HeapReAlloc(
166166
self.heap_handle.?,
@@ -178,11 +178,7 @@ pub const DirectAllocator = struct {
178178
// or the memory starting at the old offset would be outside of the new allocation,
179179
// then we need to copy the memory to a valid aligned address and use that
180180
const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
181-
@memcpy(
182-
@intToPtr([*]u8, new_aligned_addr),
183-
@intToPtr([*]u8, new_adjusted_addr),
184-
std.math.min(old_mem.len, new_size),
185-
);
181+
@memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size));
186182
new_adjusted_addr = new_aligned_addr;
187183
}
188184
const new_record_addr = new_adjusted_addr + new_size;
@@ -209,7 +205,7 @@ pub const ArenaAllocator = struct {
209205
return ArenaAllocator{
210206
.allocator = Allocator{
211207
.reallocFn = realloc,
212-
.shrinkFn = shrink,
208+
.shrinkFn = null,
213209
},
214210
.child_allocator = child_allocator,
215211
.buffer_list = std.LinkedList([]u8).init(),
@@ -231,8 +227,7 @@ pub const ArenaAllocator = struct {
231227
const actual_min_size = minimum_size + @sizeOf(BufNode);
232228
var len = prev_len;
233229
while (true) {
234-
len += len / 2;
235-
len += os.page_size - @rem(len, os.page_size);
230+
len = mem.alignForward(len + len / 2, os.page_size);
236231
if (len >= actual_min_size) break;
237232
}
238233
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
@@ -248,9 +243,11 @@ pub const ArenaAllocator = struct {
248243
return buf_node;
249244
}
250245

251-
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
246+
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, n: usize, alignment: u29) ![]u8 {
252247
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
253248

249+
assert(old_mem.len == 0); // because shrinkFn == null
250+
254251
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
255252
while (true) {
256253
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
@@ -267,21 +264,6 @@ pub const ArenaAllocator = struct {
267264
return result;
268265
}
269266
}
270-
271-
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
272-
if (new_size <= old_mem.len and new_align <= new_size) {
273-
// We can't do anything with the memory, so tell the client to keep it.
274-
return error.OutOfMemory;
275-
} else {
276-
const result = try alloc(allocator, new_size, new_align);
277-
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
278-
return result;
279-
}
280-
}
281-
282-
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
283-
return old_mem[0..new_size];
284-
}
285267
};
286268

287269
pub const FixedBufferAllocator = struct {
@@ -293,15 +275,17 @@ pub const FixedBufferAllocator = struct {
293275
return FixedBufferAllocator{
294276
.allocator = Allocator{
295277
.reallocFn = realloc,
296-
.shrinkFn = shrink,
278+
.shrinkFn = null,
297279
},
298280
.buffer = buffer,
299281
.end_index = 0,
300282
};
301283
}
302284

303-
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
285+
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, n: usize, alignment: u29) ![]u8 {
304286
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
287+
assert(old_mem.len == 0); // because shrinkFn == null
288+
305289
const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
306290
const adjusted_addr = mem.alignForward(addr, alignment);
307291
const adjusted_index = self.end_index + (adjusted_addr - addr);
@@ -314,39 +298,14 @@ pub const FixedBufferAllocator = struct {
314298

315299
return result;
316300
}
317-
318-
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
319-
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
320-
assert(old_mem.len <= self.end_index);
321-
if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
322-
mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
323-
{
324-
const start_index = self.end_index - old_mem.len;
325-
const new_end_index = start_index + new_size;
326-
if (new_end_index > self.buffer.len) return error.OutOfMemory;
327-
const result = self.buffer[start_index..new_end_index];
328-
self.end_index = new_end_index;
329-
return result;
330-
} else if (new_size <= old_mem.len and new_align <= old_align) {
331-
// We can't do anything with the memory, so tell the client to keep it.
332-
return error.OutOfMemory;
333-
} else {
334-
const result = try alloc(allocator, new_size, new_align);
335-
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
336-
return result;
337-
}
338-
}
339-
340-
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
341-
return old_mem[0..new_size];
342-
}
343301
};
344302

345-
// FIXME: Exposed LLVM intrinsics is a bug
303+
// TODO: Exposed LLVM intrinsics is a bug
346304
// See: https://github.com/ziglang/zig/issues/2291
347305
extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
348306
extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
349307

308+
/// TODO Currently this allocator does not actually reclaim memory, but it should.
350309
pub const wasm_allocator = &wasm_allocator_state.allocator;
351310
var wasm_allocator_state = WasmAllocator{
352311
.allocator = Allocator{
@@ -433,11 +392,11 @@ const WasmAllocator = struct {
433392
}
434393
};
435394

395+
/// Lock free
436396
pub const ThreadSafeFixedBufferAllocator = blk: {
437397
if (builtin.single_threaded) {
438398
break :blk FixedBufferAllocator;
439399
} else {
440-
// lock free
441400
break :blk struct {
442401
allocator: Allocator,
443402
end_index: usize,
@@ -447,14 +406,15 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
447406
return ThreadSafeFixedBufferAllocator{
448407
.allocator = Allocator{
449408
.reallocFn = realloc,
450-
.shrinkFn = shrink,
409+
.shrinkFn = null,
451410
},
452411
.buffer = buffer,
453412
.end_index = 0,
454413
};
455414
}
456415

457-
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
416+
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, n: usize, alignment: u29) ![]u8 {
417+
assert(old_mem.len == 0); // because shrinkFn == null
458418
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
459419
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
460420
while (true) {
@@ -468,21 +428,6 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
468428
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
469429
}
470430
}
471-
472-
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
473-
if (new_size <= old_mem.len and new_align <= old_align) {
474-
// We can't do anything useful with the memory, tell the client to keep it.
475-
return error.OutOfMemory;
476-
} else {
477-
const result = try alloc(allocator, new_size, new_align);
478-
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
479-
return result;
480-
}
481-
}
482-
483-
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
484-
return old_mem[0..new_size];
485-
}
486431
};
487432
}
488433
};

std/mem.zig

+40-4
Original file line number | Diff line number | Diff line change
@@ -38,6 +38,7 @@ pub const Allocator = struct {
3838
/// `reallocFn` or `shrinkFn`.
3939
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
4040
/// is guaranteed to be >= 1.
41+
/// If `shrinkFn` is `null` then it is guaranteed that `old_mem.len == 0`.
4142
old_mem: []u8,
4243
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
4344
/// Guaranteed to be the same as what was returned from most recent call to
@@ -55,7 +56,12 @@ pub const Allocator = struct {
5556
) Error![]u8,
5657

5758
/// This function deallocates memory. It must succeed.
58-
shrinkFn: fn (
59+
/// If this function is null, it means the allocator implementation cannot
60+
/// reclaim memory. The shrink functions of the
61+
/// Allocator interface will still work; they will trivially return the
62+
/// old memory with adjusted length. In this case, `reallocFn` with a smaller
63+
/// `new_byte_count` will always return `error.OutOfMemory`.
64+
shrinkFn: ?fn (
5965
self: *Allocator,
6066
/// Guaranteed to be the same as what was returned from most recent call to
6167
/// `reallocFn` or `shrinkFn`.
@@ -82,8 +88,9 @@ pub const Allocator = struct {
8288
pub fn destroy(self: *Allocator, ptr: var) void {
8389
const T = @typeOf(ptr).Child;
8490
if (@sizeOf(T) == 0) return;
91+
const shrinkFn = self.shrinkFn orelse return;
8592
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
86-
const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
93+
const shrink_result = shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
8794
assert(shrink_result.len == 0);
8895
}
8996

@@ -147,6 +154,20 @@ pub const Allocator = struct {
147154
self.free(old_mem);
148155
return ([*]align(new_alignment) T)(undefined)[0..0];
149156
}
157+
if (!self.canReclaimMemory()) {
158+
if (new_n <= old_mem.len and new_alignment <= Slice.alignment) {
159+
// Cannot reclaim memory; tell the client to keep it.
160+
return error.OutOfMemory;
161+
}
162+
const result = try self.alignedAlloc(T, new_alignment, new_n);
163+
const end_len = std.math.min(old_mem.len, new_n);
164+
mem.copy(T, result, old_mem[0..end_len]);
165+
// This loop gets optimized out in ReleaseFast mode
166+
for (result[end_len..]) |*elem| {
167+
elem.* = undefined;
168+
}
169+
return result;
170+
}
150171

151172
const old_byte_slice = @sliceToBytes(old_mem);
152173
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
@@ -185,6 +206,7 @@ pub const Allocator = struct {
185206
) []align(new_alignment) @typeInfo(@typeOf(old_mem)).Pointer.child {
186207
const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
187208
const T = Slice.child;
209+
const shrinkFn = self.shrinkFn orelse return old_mem[0..new_n];
188210

189211
if (new_n == 0) {
190212
self.free(old_mem);
@@ -199,19 +221,33 @@ pub const Allocator = struct {
199221
const byte_count = @sizeOf(T) * new_n;
200222

201223
const old_byte_slice = @sliceToBytes(old_mem);
202-
const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
224+
const byte_slice = shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
203225
assert(byte_slice.len == byte_count);
204226
return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
205227
}
206228

207229
pub fn free(self: *Allocator, memory: var) void {
230+
const shrinkFn = self.shrinkFn orelse return;
208231
const Slice = @typeInfo(@typeOf(memory)).Pointer;
209232
const bytes = @sliceToBytes(memory);
210233
if (bytes.len == 0) return;
211234
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
212-
const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes.len], Slice.alignment, 0, 1);
235+
const shrink_result = shrinkFn(self, non_const_ptr[0..bytes.len], Slice.alignment, 0, 1);
213236
assert(shrink_result.len == 0);
214237
}
238+
239+
/// If this returns `false`, it means that the allocator implementation
240+
/// will only ever increase memory usage. In this case, `free` and `shrink`
241+
/// are no-ops and will not make the freed bytes available for use.
242+
/// It also means using `realloc` to resize downwards will always result
243+
/// in `error.OutOfMemory`.
244+
/// When creating an arena allocator on top of a backing allocator, it is
245+
/// best practice to check if the backing allocator can reclaim memory.
246+
/// If it cannot, then the backing allocator should be used directly, to
247+
/// avoid pointless overhead.
248+
pub fn canReclaimMemory(self: *Allocator) bool {
249+
return self.shrinkFn != null;
250+
}
215251
};
216252

217253
pub const Compare = enum {

0 commit comments

Comments (0)