// This file is a part of Julia. License is MIT: http://julialang.org/license
#include "gc.h"
+#ifndef _OS_WINDOWS_
+#  include <sys/resource.h>
+#endif

#ifdef __cplusplus
extern "C" {
@@ -9,62 +12,112 @@ extern "C" {
// A region is contiguous storage for up to REGION_PG_COUNT naturally aligned GC_PAGE_SZ pages
// It uses a very naive allocator (see jl_gc_alloc_page & jl_gc_free_page)
#if defined(_P64) && !defined(_COMPILER_MICROSOFT_)
-#define REGION_PG_COUNT 16*8*4096 // 8G because virtual memory is cheap
+#define DEFAULT_REGION_PG_COUNT (16 * 8 * 4096) // 8 GB
#else
-#define REGION_PG_COUNT 8*4096 // 512M
+#define DEFAULT_REGION_PG_COUNT (8 * 4096) // 512 MB
#endif
+#define MIN_REGION_PG_COUNT 64 // 1 MB

+static int region_pg_cnt = DEFAULT_REGION_PG_COUNT;
static jl_mutex_t pagealloc_lock;
static size_t current_pg_count = 0;

+void jl_gc_init_page(void)
+{
+#ifndef _OS_WINDOWS_
+    struct rlimit rl;
+    if (getrlimit(RLIMIT_AS, &rl) == 0) {
+        // This is not 100% precise and not the most efficient implementation
+        // but should be close enough and fast enough for the normal case.
+        while (rl.rlim_cur < region_pg_cnt * sizeof(jl_gc_page_t) * 2 &&
+               region_pg_cnt >= MIN_REGION_PG_COUNT) {
+            region_pg_cnt /= 2;
+        }
+    }
+#endif
+}
+
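> Note: to make the shrink loop above concrete, here is a standalone sketch of the same arithmetic. The 16 kB page size is an assumption implied by the "8 GB" comment on `DEFAULT_REGION_PG_COUNT`; the constants and `main` harness are illustrative, not part of this file.

```c
#include <stdio.h>

#define SKETCH_PAGE_SZ 16384            // assumed GC page size
#define SKETCH_DEFAULT (16 * 8 * 4096)  // DEFAULT_REGION_PG_COUNT
#define SKETCH_MIN     64               // MIN_REGION_PG_COUNT

int main(void)
{
    unsigned long long rlim_cur = 4ULL << 30; // pretend RLIMIT_AS is 4 GB
    int pg_cnt = SKETCH_DEFAULT;
    // Halve until two full regions would fit inside the address-space limit.
    while (rlim_cur < (unsigned long long)pg_cnt * SKETCH_PAGE_SZ * 2 &&
           pg_cnt >= SKETCH_MIN)
        pg_cnt /= 2;
    // With a 4 GB limit this prints 131072 pages, i.e. a 2 GB region.
    printf("region shrunk to %d pages (%llu MB)\n", pg_cnt,
           (unsigned long long)pg_cnt * SKETCH_PAGE_SZ >> 20);
    return 0;
}
```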
+// Try to allocate a memory block for a region with `pg_cnt` pages.
+// Return `NULL` if allocation failed. Result is aligned to `GC_PAGE_SZ`.
+static char *jl_gc_try_alloc_region(int pg_cnt)
+{
+    const size_t pages_sz = sizeof(jl_gc_page_t) * pg_cnt;
+    const size_t freemap_sz = sizeof(uint32_t) * pg_cnt / 32;
+    const size_t meta_sz = sizeof(jl_gc_pagemeta_t) * pg_cnt;
+    size_t alloc_size = pages_sz + freemap_sz + meta_sz;
+#ifdef _OS_WINDOWS_
+    char *mem = (char*)VirtualAlloc(NULL, alloc_size + GC_PAGE_SZ,
+                                    MEM_RESERVE, PAGE_READWRITE);
+    if (mem == NULL)
+        return NULL;
+#else
+    if (GC_PAGE_SZ > jl_page_size)
+        alloc_size += GC_PAGE_SZ;
+    char *mem = (char*)mmap(0, alloc_size, PROT_READ | PROT_WRITE,
+                            MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (mem == MAP_FAILED)
+        return NULL;
+#endif
+    if (GC_PAGE_SZ > jl_page_size) {
+        // round data pointer up to the nearest gc_page_data-aligned
+        // boundary if mmap didn't already do so.
+        mem = (char*)gc_page_data(mem + GC_PAGE_SZ - 1);
+    }
+    return mem;
+}
+
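> Note: the single reservation made by `jl_gc_try_alloc_region` is carved into three consecutive arrays: page payloads, then a freemap with one bit per page, then per-page metadata. A minimal sketch of the offset arithmetic; only the size formulas come from the diff, while the 16 kB page and 32-byte metadata stand-ins are assumptions.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    // Stand-ins for sizeof(jl_gc_page_t) / sizeof(jl_gc_pagemeta_t).
    const size_t page_sz = 16384, meta_each = 32;
    size_t pg_cnt = 16 * 8 * 4096;                       // DEFAULT_REGION_PG_COUNT
    size_t pages_sz = page_sz * pg_cnt;                  // page payloads
    size_t freemap_sz = sizeof(uint32_t) * pg_cnt / 32;  // 1 bit per page
    size_t meta_sz = meta_each * pg_cnt;                 // per-page metadata
    printf("pages   at offset 0, %zu bytes\n", pages_sz);
    printf("freemap at offset %zu, %zu bytes\n", pages_sz, freemap_sz);
    printf("meta    at offset %zu, %zu bytes\n", pages_sz + freemap_sz, meta_sz);
    return 0;
}
```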
+// Allocate the memory for a `region_t`. Starts with `region_pg_cnt` number
+// of pages. Decrease 4x every time so that there is enough space for a few
+// more regions (or other allocations). The final page count is recorded
+// and will be used as the starting count next time. If the page count is
+// smaller than `MIN_REGION_PG_COUNT` a `jl_memory_exception` is thrown.
+// Assumes `pagealloc_lock` is acquired; the lock is released before the
+// exception is thrown.
+static void jl_gc_alloc_region(region_t *region)
+{
+    int pg_cnt = region_pg_cnt;
+    const size_t pages_sz = sizeof(jl_gc_page_t) * pg_cnt;
+    const size_t freemap_sz = sizeof(uint32_t) * pg_cnt / 32;
+    char *mem = NULL;
+    while (1) {
+        if (__likely((mem = jl_gc_try_alloc_region(pg_cnt))))
+            break;
+        if (pg_cnt >= MIN_REGION_PG_COUNT * 4) {
+            pg_cnt /= 4;
+            region_pg_cnt = pg_cnt;
+        }
+        else if (pg_cnt > MIN_REGION_PG_COUNT) {
+            region_pg_cnt = pg_cnt = MIN_REGION_PG_COUNT;
+        }
+        else {
+            JL_UNLOCK_NOGC(&pagealloc_lock);
+            jl_throw(jl_memory_exception);
+        }
+    }
+    region->pages = (jl_gc_page_t*)mem;
+    region->freemap = (uint32_t*)(mem + pages_sz);
+    region->meta = (jl_gc_pagemeta_t*)(mem + pages_sz + freemap_sz);
+    region->lb = 0;
+    region->ub = 0;
+    region->pg_cnt = pg_cnt;
+#ifdef _OS_WINDOWS_
+    VirtualAlloc(region->freemap, pg_cnt / 8, MEM_COMMIT, PAGE_READWRITE);
+    VirtualAlloc(region->meta, pg_cnt * sizeof(jl_gc_pagemeta_t),
+                 MEM_COMMIT, PAGE_READWRITE);
+#endif
+    memset(region->freemap, 0xff, pg_cnt / 8);
+}
+
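> Note: a hypothetical trace of the 4x backoff when every attempt fails, showing how the page count bottoms out at `MIN_REGION_PG_COUNT` before the exception is thrown. The loop harness is a sketch, not the real control flow around `jl_gc_try_alloc_region`; only the constants and the adjustment rules come from the diff.

```c
#include <stdio.h>

int main(void)
{
    int pg_cnt = 16 * 8 * 4096; // DEFAULT_REGION_PG_COUNT
    const int min_cnt = 64;     // MIN_REGION_PG_COUNT
    for (;;) {
        printf("try %d pages\n", pg_cnt); // pretend this attempt fails
        if (pg_cnt >= min_cnt * 4)
            pg_cnt /= 4;        // leave room for a few more regions
        else if (pg_cnt > min_cnt)
            pg_cnt = min_cnt;   // one last attempt at the floor
        else
            break;              // real code throws jl_memory_exception here
    }
    // Prints 524288, 131072, 32768, 8192, 2048, 512, 128, 64.
    return 0;
}
```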
NOINLINE void *jl_gc_alloc_page(void)
{
-    void *ptr = NULL;
    int i;
    region_t *region;
    int region_i = 0;
    JL_LOCK_NOGC(&pagealloc_lock);
-    while (region_i < REGION_COUNT) {
+    while (region_i < REGION_COUNT) {
        region = &regions[region_i];
-        if (region->pages == NULL) {
-            int pg_cnt = REGION_PG_COUNT;
-            const size_t pages_sz = sizeof(jl_gc_page_t) * pg_cnt;
-            const size_t freemap_sz = sizeof(uint32_t) * pg_cnt / 32;
-            const size_t meta_sz = sizeof(jl_gc_pagemeta_t) * pg_cnt;
-            size_t alloc_size = pages_sz + freemap_sz + meta_sz;
-#ifdef _OS_WINDOWS_
-            char *mem = (char*)VirtualAlloc(NULL, alloc_size + GC_PAGE_SZ,
-                                            MEM_RESERVE, PAGE_READWRITE);
-#else
-            if (GC_PAGE_SZ > jl_page_size)
-                alloc_size += GC_PAGE_SZ;
-            char *mem = (char*)mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-            mem = mem == MAP_FAILED ? NULL : mem;
-#endif
-            if (mem == NULL) {
-                jl_printf(JL_STDERR, "could not allocate pools\n");
-                gc_debug_critical_error();
-                abort();
-            }
-            if (GC_PAGE_SZ > jl_page_size) {
-                // round data pointer up to the nearest gc_page_data-aligned
-                // boundary if mmap didn't already do so.
-                mem = (char*)gc_page_data(mem + GC_PAGE_SZ - 1);
-            }
-            region->pages = (jl_gc_page_t*)mem;
-            region->freemap = (uint32_t*)(mem + pages_sz);
-            region->meta = (jl_gc_pagemeta_t*)(mem + pages_sz + freemap_sz);
-            region->lb = 0;
-            region->ub = 0;
-            region->pg_cnt = pg_cnt;
-#ifdef _OS_WINDOWS_
-            VirtualAlloc(region->freemap, region->pg_cnt / 8,
-                         MEM_COMMIT, PAGE_READWRITE);
-            VirtualAlloc(region->meta, region->pg_cnt * sizeof(jl_gc_pagemeta_t),
-                         MEM_COMMIT, PAGE_READWRITE);
-#endif
-            memset(region->freemap, 0xff, region->pg_cnt / 8);
-        }
+        if (region->pages == NULL)
+            jl_gc_alloc_region(region);
        for (i = region->lb; i < region->pg_cnt / 32; i++) {
            if (region->freemap[i])
                break;
@@ -76,10 +129,9 @@ NOINLINE void *jl_gc_alloc_page(void)
        }
        break;
    }
-    if (region_i >= REGION_COUNT) {
-        jl_printf(JL_STDERR, "increase REGION_COUNT or allocate less memory\n");
-        gc_debug_critical_error();
-        abort();
+    if (__unlikely(region_i >= REGION_COUNT)) {
+        JL_UNLOCK_NOGC(&pagealloc_lock);
+        jl_throw(jl_memory_exception);
    }
    if (region->lb < i)
        region->lb = i;
@@ -96,7 +148,7 @@ NOINLINE void *jl_gc_alloc_page(void)
#endif

    region->freemap[i] &= ~(uint32_t)(1 << j);
-    ptr = region->pages[i * 32 + j].data;
+    void *ptr = region->pages[i * 32 + j].data;
#ifdef _OS_WINDOWS_
    VirtualAlloc(ptr, GC_PAGE_SZ, MEM_COMMIT, PAGE_READWRITE);
#endif
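> Note: for reference, the freemap convention used in the last hunk: bit `j` of word `i` set means page `i*32 + j` is free, so clearing the bit marks it allocated. A self-contained sketch; the array and indices here are made up.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t freemap[1] = { 0xffffffffu }; // all 32 pages start free
    int i = 0, j = 5;                      // pretend the scan found bit 5 set
    freemap[i] &= ~(uint32_t)(1 << j);     // allocate page i*32 + j
    printf("word 0 is now 0x%08x\n", freemap[0]); // 0xffffffdf
    return 0;
}
```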