/* Copyright (C) 1998, 2000, 2002 Aladdin Enterprises.  All rights reserved.

  This software is provided AS-IS with no warranty, either express or
  implied.

  This software is distributed under license and may not be copied,
  modified or distributed except as expressly authorized under the terms
  of the license contained in the file LICENSE in this distribution.

  For more information about licensing, please refer to
  http://www.ghostscript.com/licensing/. For information on
  commercial licensing, go to http://www.artifex.com/licensing/ or
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
  San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: gsmalloc.c,v 1.13 2004/08/18 22:24:47 stefan Exp $ */
/* C heap allocator */
#include "malloc_.h"
#include "gdebug.h"
#include "gserror.h"
#include "gserrors.h"
#include "gstypes.h"
#include "gsmemory.h"
#include "gsmdebug.h"
#include "gsstruct.h"           /* for st_bytes */
#include "gsmalloc.h"
#include "gsmemlok.h"           /* locking (multithreading) wrapper */
#include "gsmemret.h"           /* retrying wrapper */

/* ------ Heap allocator ------ */

/*
 * An implementation of Ghostscript's memory manager interface
 * that works directly with the C heap.  We keep track of all allocated
 * blocks so we can free them at cleanup time.
 */

/* Raw memory procedures */
private gs_memory_proc_alloc_bytes(gs_heap_alloc_bytes);
private gs_memory_proc_resize_object(gs_heap_resize_object);
private gs_memory_proc_free_object(gs_heap_free_object);
private gs_memory_proc_stable(gs_heap_stable);
private gs_memory_proc_status(gs_heap_status);
private gs_memory_proc_free_all(gs_heap_free_all);

/* Object memory procedures */
private gs_memory_proc_alloc_struct(gs_heap_alloc_struct);
private gs_memory_proc_alloc_byte_array(gs_heap_alloc_byte_array);
private gs_memory_proc_alloc_struct_array(gs_heap_alloc_struct_array);
private gs_memory_proc_object_size(gs_heap_object_size);
private gs_memory_proc_object_type(gs_heap_object_type);
private gs_memory_proc_alloc_string(gs_heap_alloc_string);
private gs_memory_proc_resize_string(gs_heap_resize_string);
private gs_memory_proc_free_string(gs_heap_free_string);
private gs_memory_proc_register_root(gs_heap_register_root);
private gs_memory_proc_unregister_root(gs_heap_unregister_root);
private gs_memory_proc_enable_free(gs_heap_enable_free);

private const gs_memory_procs_t gs_malloc_memory_procs =
{
    /* Raw memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_resize_object,
    gs_heap_free_object,
    gs_heap_stable,
    gs_heap_status,
    gs_heap_free_all,
    gs_ignore_consolidate_free,
    /* Object memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_alloc_struct,
    gs_heap_alloc_struct,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_struct_array,
    gs_heap_alloc_struct_array,
    gs_heap_object_size,
    gs_heap_object_type,
    gs_heap_alloc_string,
    gs_heap_alloc_string,
    gs_heap_resize_string,
    gs_heap_free_string,
    gs_heap_register_root,
    gs_heap_unregister_root,
    gs_heap_enable_free
};
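/*
 * Editorial note: the repeated entries above fill the separate slots that
 * gs_memory_procs_t provides for the movable and "immovable" variants of
 * each allocator; since the C heap never moves blocks, this allocator
 * supplies the same routine for both members of each pair.
 */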
|
|
|
/* We must make sure that the malloc_block header leaves the client block aligned. */
/*typedef struct gs_malloc_block_s gs_malloc_block_t; */
#define malloc_block_data\
        gs_malloc_block_t *next;\
        gs_malloc_block_t *prev;\
        uint size;\
        gs_memory_type_ptr_t type;\
        client_name_t cname
struct malloc_block_data_s {
    malloc_block_data;
};
struct gs_malloc_block_s {
    malloc_block_data;
/* ANSI C does not allow zero-size arrays, so we need the following */
/* unnecessary and wasteful workaround: */
#define _npad (-size_of(struct malloc_block_data_s) & (arch_align_memory_mod - 1))
    byte _pad[(_npad == 0 ? arch_align_memory_mod : _npad)];
#undef _npad
};
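/*
 * Layout note: every allocation is preceded in memory by a gs_malloc_block_t
 * header; the pointer handed to the client is (byte *)(bp + 1), and the
 * headers are chained into the doubly linked 'allocated' list so that
 * gs_heap_free_all can release everything at cleanup time.
 */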
|
|
|
/* Initialize a malloc allocator. */
private long heap_available(void);
gs_malloc_memory_t *
gs_malloc_memory_init(void)
{
    gs_malloc_memory_t *mem =
        (gs_malloc_memory_t *)malloc(sizeof(gs_malloc_memory_t));

    mem->stable_memory = 0;     /* just for tidiness, never referenced */
    mem->procs = gs_malloc_memory_procs;
    mem->allocated = 0;
    mem->limit = max_long;
    mem->used = 0;
    mem->max_used = 0;
    mem->gs_lib_ctx = 0;
    mem->non_gc_memory = (gs_memory_t *)mem;

    return mem;
}
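/*
 * Usage sketch (illustrative only, not compiled here; assumes the standard
 * client macros from gsmemory.h and the release routine from gsmalloc.h;
 * "example buf" is just a hypothetical client name):
 *
 *      gs_malloc_memory_t *heap = gs_malloc_memory_init();
 *      byte *buf = gs_alloc_bytes((gs_memory_t *)heap, 1024, "example buf");
 *      ...use buf...
 *      gs_free_object((gs_memory_t *)heap, buf, "example buf");
 *      gs_malloc_memory_release(heap);    releases any blocks still allocated
 */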
|
|
|
/*
 * Estimate the amount of available memory by probing with mallocs.
 * We may under-estimate by a lot, but that's better than winding up with
 * a seriously inflated address space.  This is quite a hack!
 */
#define max_malloc_probes 20
#define malloc_probe_size 64000
private long
heap_available(void)
{
    long avail = 0;
    void *probes[max_malloc_probes];
    uint n;

    for (n = 0; n < max_malloc_probes; n++) {
        if ((probes[n] = malloc(malloc_probe_size)) == 0)
            break;
        if_debug2('a', "[a]heap_available probe[%d]=0x%lx\n",
                  n, (ulong) probes[n]);
        avail += malloc_probe_size;
    }
    while (n)
        free(probes[--n]);
    return avail;
}
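/*
 * Note: the probes are freed again immediately, so the estimate never holds
 * on to memory, and the reported figure is capped at
 * max_malloc_probes * malloc_probe_size (20 * 64000 = 1,280,000 bytes).
 */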
|
|
|
/* Allocate various kinds of blocks. */
private byte *
gs_heap_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    byte *ptr = 0;

#ifdef DEBUG
    const char *msg;
    static const char *const ok_msg = "OK";

# define set_msg(str) (msg = (str))
#else
# define set_msg(str) DO_NOTHING
#endif

    if (size > mmem->limit - sizeof(gs_malloc_block_t)) {
        /* Definitely too large to allocate; also avoids overflow. */
        set_msg("exceeded limit");
    } else {
        uint added = size + sizeof(gs_malloc_block_t);

        if (mmem->limit - added < mmem->used)
            set_msg("exceeded limit");
        else if ((ptr = (byte *) malloc(added)) == 0)
            set_msg("failed");
        else {
            gs_malloc_block_t *bp = (gs_malloc_block_t *) ptr;

            /*
             * We would like to check that malloc aligns blocks at least as
             * strictly as the compiler (as defined by arch_align_memory_mod).
             * However, Microsoft VC 6 does not satisfy this requirement.
             * See gsmemory.h for more explanation.
             */
            set_msg(ok_msg);
            if (mmem->allocated)
                mmem->allocated->prev = bp;
            bp->next = mmem->allocated;
            bp->prev = 0;
            bp->size = size;
            bp->type = &st_bytes;
            bp->cname = cname;
            mmem->allocated = bp;
            ptr = (byte *) (bp + 1);
            gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
            mmem->used += size + sizeof(gs_malloc_block_t);
            if (mmem->used > mmem->max_used)
                mmem->max_used = mmem->used;
        }
    }
#ifdef DEBUG
    if (gs_debug_c('a') || msg != ok_msg)
        dlprintf4("[a+]gs_malloc(%s)(%u) = 0x%lx: %s\n",
                  client_name_string(cname), size, (ulong) ptr, msg);
#endif
    return ptr;
#undef set_msg
}
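/*
 * Accounting note: each successful allocation is pushed onto the head of
 * mmem->allocated, and mmem->used grows by size + sizeof(gs_malloc_block_t),
 * i.e. the header is charged against the limit as well as the client data.
 */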
|
|
|
private void *
gs_heap_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
                     client_name_t cname)
{
    void *ptr =
        gs_heap_alloc_bytes(mem, gs_struct_type_size(pstype), cname);

    if (ptr == 0)
        return 0;
    ((gs_malloc_block_t *) ptr)[-1].type = pstype;
    return ptr;
}
private byte *
gs_heap_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
                         client_name_t cname)
{
    ulong lsize = (ulong) num_elements * elt_size;

    if (lsize != (uint) lsize)
        return 0;
    return gs_heap_alloc_bytes(mem, (uint) lsize, cname);
}
private void *
gs_heap_alloc_struct_array(gs_memory_t * mem, uint num_elements,
                           gs_memory_type_ptr_t pstype, client_name_t cname)
{
    void *ptr =
        gs_heap_alloc_byte_array(mem, num_elements,
                                 gs_struct_type_size(pstype), cname);

    if (ptr == 0)
        return 0;
    ((gs_malloc_block_t *) ptr)[-1].type = pstype;
    return ptr;
}
private void *
gs_heap_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
                      client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *ptr = (gs_malloc_block_t *) obj - 1;
    gs_memory_type_ptr_t pstype = ptr->type;
    uint old_size = gs_object_size(mem, obj) + sizeof(gs_malloc_block_t);
    uint new_size =
        gs_struct_type_size(pstype) * new_num_elements +
        sizeof(gs_malloc_block_t);
    gs_malloc_block_t *new_ptr;

    if (new_size == old_size)
        return obj;
    new_ptr = (gs_malloc_block_t *) gs_realloc(ptr, old_size, new_size);
    if (new_ptr == 0)
        return 0;
    if (new_ptr->prev)
        new_ptr->prev->next = new_ptr;
    else
        mmem->allocated = new_ptr;
    if (new_ptr->next)
        new_ptr->next->prev = new_ptr;
    new_ptr->size = new_size - sizeof(gs_malloc_block_t);
    mmem->used -= old_size;
    mmem->used += new_size;
    if (new_size > old_size)
        gs_alloc_fill((byte *) new_ptr + old_size,
                      gs_alloc_fill_alloc, new_size - old_size);
    return new_ptr + 1;
}
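/*
 * Resizing note: gs_realloc may move the block, so the neighbours' prev/next
 * pointers (or mmem->allocated, when the block is the list head) are
 * re-pointed at the new address before the accounting in mmem->used is
 * adjusted.
 */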
|
|
|
private uint
gs_heap_object_size(gs_memory_t * mem, const void *ptr)
{
    return ((const gs_malloc_block_t *)ptr)[-1].size;
}
private gs_memory_type_ptr_t
gs_heap_object_type(gs_memory_t * mem, const void *ptr)
{
    return ((const gs_malloc_block_t *)ptr)[-1].type;
}
private void
gs_heap_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *bp = mmem->allocated;
    gs_memory_type_ptr_t pstype;
    struct_proc_finalize((*finalize));

    if_debug3('a', "[a-]gs_free(%s) 0x%lx(%u)\n",
              client_name_string(cname), (ulong) ptr,
              (ptr == 0 ? 0 : ((gs_malloc_block_t *) ptr)[-1].size));
    if (ptr == 0)
        return;
    pstype = ((gs_malloc_block_t *) ptr)[-1].type;
    finalize = pstype->finalize;
    if (finalize != 0) {
        if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
                  struct_type_name_string(pstype),
                  (ulong) ptr, client_name_string(cname));
        (*finalize) (ptr);
    }
    if (ptr == bp + 1) {
        mmem->allocated = bp->next;
        mmem->used -= bp->size + sizeof(gs_malloc_block_t);

        if (mmem->allocated)
            mmem->allocated->prev = 0;
        gs_alloc_fill(bp, gs_alloc_fill_free,
                      bp->size + sizeof(gs_malloc_block_t));
        free(bp);
    } else {
        gs_malloc_block_t *np;

        /*
         * bp == 0 at this point is an error, but we'd rather have an
         * error message than an invalid access.
         */
        if (bp) {
            for (; (np = bp->next) != 0; bp = np) {
                if (ptr == np + 1) {
                    bp->next = np->next;
                    if (np->next)
                        np->next->prev = bp;
                    mmem->used -= np->size + sizeof(gs_malloc_block_t);
                    gs_alloc_fill(np, gs_alloc_fill_free,
                                  np->size + sizeof(gs_malloc_block_t));
                    free(np);
                    return;
                }
            }
        }
        lprintf2("%s: free 0x%lx not found!\n",
                 client_name_string(cname), (ulong) ptr);
        free((char *)((gs_malloc_block_t *) ptr - 1));
    }
}
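/*
 * Free path: the most recently allocated block (the head of the list) is
 * unlinked directly; any other block requires a linear walk of the allocated
 * list.  If the pointer is not found at all, the problem is reported via
 * lprintf2 and the storage is handed back to free() anyway.
 */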
|
|
|
private byte *
gs_heap_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    return gs_heap_alloc_bytes(mem, nbytes, cname);
}
private byte *
gs_heap_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
                      client_name_t cname)
{
    if (gs_heap_object_type(mem, data) != &st_bytes)
        lprintf2("%s: resizing non-string 0x%lx!\n",
                 client_name_string(cname), (ulong) data);
    return gs_heap_resize_object(mem, data, new_num, cname);
}
private void
gs_heap_free_string(gs_memory_t * mem, byte * data, uint nbytes,
                    client_name_t cname)
{
    /****** SHOULD CHECK SIZE IF DEBUGGING ******/
    gs_heap_free_object(mem, data, cname);
}
private int
gs_heap_register_root(gs_memory_t * mem, gs_gc_root_t * rp,
                      gs_ptr_type_t ptype, void **up, client_name_t cname)
{
    return 0;
}
private void
gs_heap_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp,
                        client_name_t cname)
{
}
private gs_memory_t *
gs_heap_stable(gs_memory_t *mem)
{
    return mem;                 /* heap memory is stable */
}
private void
gs_heap_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;

    pstat->allocated = mmem->used + heap_available();
    pstat->used = mmem->used;
}
private void
gs_heap_enable_free(gs_memory_t * mem, bool enable)
{
    if (enable)
        mem->procs.free_object = gs_heap_free_object,
            mem->procs.free_string = gs_heap_free_string;
    else
        mem->procs.free_object = gs_ignore_free_object,
            mem->procs.free_string = gs_ignore_free_string;
}
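/*
 * Note: gs_heap_enable_free(mem, false) swaps in gs_ignore_free_object and
 * gs_ignore_free_string, turning individual frees into no-ops; re-enabling
 * restores the real heap procedures.  Root registration is a no-op because
 * this allocator is never garbage collected.
 */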
|
|
|
/* Release all memory acquired by this allocator. */
private void
gs_heap_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_malloc_memory_t *const mmem = (gs_malloc_memory_t *) mem;

    if (free_mask & FREE_ALL_DATA) {
        gs_malloc_block_t *bp = mmem->allocated;
        gs_malloc_block_t *np;

        for (; bp != 0; bp = np) {
            np = bp->next;
            if_debug3('a', "[a]gs_heap_free_all(%s) 0x%lx(%u)\n",
                      client_name_string(bp->cname), (ulong) (bp + 1),
                      bp->size);
            gs_alloc_fill(bp + 1, gs_alloc_fill_free, bp->size);
            free(bp);
        }
    }
    if (free_mask & FREE_ALL_ALLOCATOR)
        free(mem);
}

/* ------ Wrapping ------ */

/* Create the retrying and the locked wrapper for the heap allocator. */
int
gs_malloc_wrap(gs_memory_t **wrapped, gs_malloc_memory_t *contents)
{
    gs_memory_t *cmem = (gs_memory_t *)contents;
    gs_memory_locked_t *lmem = (gs_memory_locked_t *)
        gs_alloc_bytes_immovable(cmem, sizeof(gs_memory_locked_t),
                                 "gs_malloc_wrap(locked)");
    gs_memory_retrying_t *rmem;
    int code;

    if (lmem == 0)
        return_error(gs_error_VMerror);
    code = gs_memory_locked_init(lmem, cmem);
    if (code < 0) {
        gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
        return code;
    }

    rmem = (gs_memory_retrying_t *)
        gs_alloc_bytes_immovable((gs_memory_t *)lmem,
                                 sizeof(gs_memory_retrying_t),
                                 "gs_malloc_wrap(retrying)");
    if (rmem == 0) {
        gs_memory_locked_release(lmem);
        gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
        return_error(gs_error_VMerror);
    }
    code = gs_memory_retrying_init(rmem, (gs_memory_t *)lmem);
    if (code < 0) {
        gs_free_object((gs_memory_t *)lmem, rmem, "gs_malloc_wrap(retrying)");
        gs_memory_locked_release(lmem);
        gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
        return code;
    }

    *wrapped = (gs_memory_t *)rmem;
    return 0;
}
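/*
 * The resulting wrapper chain is, outermost first:
 *
 *      retrying (gs_memory_retrying_t)
 *        -> locked (gs_memory_locked_t)
 *             -> gs_malloc_memory_t (the C heap allocator above)
 *
 * gs_malloc_wrapped_contents and gs_malloc_unwrap below peel this chain back
 * to the underlying gs_malloc_memory_t.
 */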
|
|
|
/* Get the wrapped contents. */
gs_malloc_memory_t *
gs_malloc_wrapped_contents(gs_memory_t *wrapped)
{
    gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;
    gs_memory_locked_t *lmem =
        (gs_memory_locked_t *)gs_memory_retrying_target(rmem);

    if (lmem)
        return (gs_malloc_memory_t *)gs_memory_locked_target(lmem);
    return (gs_malloc_memory_t *)wrapped;
}

/* Free the wrapper, and return the wrapped contents. */
gs_malloc_memory_t *
gs_malloc_unwrap(gs_memory_t *wrapped)
{
    gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;
    gs_memory_locked_t *lmem =
        (gs_memory_locked_t *)gs_memory_retrying_target(rmem);
    gs_memory_t *contents = gs_memory_locked_target(lmem);

    gs_free_object((gs_memory_t *)lmem, rmem, "gs_malloc_unwrap(retrying)");
    gs_memory_locked_release(lmem);
    gs_free_object(contents, lmem, "gs_malloc_unwrap(locked)");
    return (gs_malloc_memory_t *)contents;
}

/* Create the default allocator, and return it. */
gs_memory_t *
gs_malloc_init(const gs_memory_t *parent)
{
    gs_malloc_memory_t *malloc_memory_default = gs_malloc_memory_init();
    gs_memory_t *memory_t_default;

    if (parent)
        malloc_memory_default->gs_lib_ctx = parent->gs_lib_ctx;
    else
        gs_lib_ctx_init((gs_memory_t *)malloc_memory_default);

    gs_malloc_wrap(&memory_t_default, malloc_memory_default);
    memory_t_default->stable_memory = memory_t_default;
    return memory_t_default;
}
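/*
 * Usage sketch (illustrative only, not compiled here): the default allocator
 * is created once at startup and released at shutdown, e.g.
 *
 *      gs_memory_t *mem = gs_malloc_init(NULL);    creates a new library context
 *      ...run the library with mem as its heap allocator...
 *      gs_malloc_release(mem);
 *
 * Passing an existing allocator as 'parent' shares its gs_lib_ctx instead of
 * initializing a new one.
 */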
|
|
|
/* Release the default allocator. */
void
gs_malloc_release(gs_memory_t *mem)
{
    gs_malloc_memory_t *malloc_memory_default = gs_malloc_unwrap(mem);

    gs_malloc_memory_release(malloc_memory_default);
}