/* Copyright (C) 1993, 1995, 1996, 1998, 1999 Aladdin Enterprises. All rights reserved.

  This software is provided AS-IS with no warranty, either express or
  implied.

  This software is distributed under license and may not be copied,
  modified or distributed except as expressly authorized under the terms
  of the license contained in the file LICENSE in this distribution.

  For more information about licensing, please refer to
  http://www.ghostscript.com/licensing/. For information on
  commercial licensing, go to http://www.artifex.com/licensing/ or
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
  San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: ialloc.c,v 1.8 2005/10/12 10:45:21 leonardo Exp $ */
/* Memory allocator for Ghostscript interpreter */
#include "gx.h"
#include "memory_.h"
#include "ierrors.h"
#include "gsstruct.h"
#include "iref.h"		/* must precede iastate.h */
#include "iastate.h"
#include "igc.h"		/* for gs_gc_reclaim */
#include "ipacked.h"
#include "iutil.h"
#include "ivmspace.h"
#include "store.h"

/*
 * Define global and local instances.
 */
public_st_gs_dual_memory();

/* Initialize the allocator */
int
ialloc_init(gs_dual_memory_t *dmem, gs_memory_t * rmem, uint chunk_size,
            bool level2)
{
    gs_ref_memory_t *ilmem = ialloc_alloc_state(rmem, chunk_size);
    gs_ref_memory_t *ilmem_stable = ialloc_alloc_state(rmem, chunk_size);
    gs_ref_memory_t *igmem = 0;
    gs_ref_memory_t *igmem_stable = 0;
    gs_ref_memory_t *ismem = ialloc_alloc_state(rmem, chunk_size);
    int i;

    if (ilmem == 0 || ilmem_stable == 0 || ismem == 0)
        goto fail;
    ilmem->stable_memory = (gs_memory_t *)ilmem_stable;
    if (level2) {
        igmem = ialloc_alloc_state(rmem, chunk_size);
        igmem_stable = ialloc_alloc_state(rmem, chunk_size);
        if (igmem == 0 || igmem_stable == 0)
            goto fail;
        igmem->stable_memory = (gs_memory_t *)igmem_stable;
    } else
        igmem = ilmem, igmem_stable = ilmem_stable;
    for (i = 0; i < countof(dmem->spaces_indexed); i++)
        dmem->spaces_indexed[i] = 0;
    dmem->space_local = ilmem;
    dmem->space_global = igmem;
    dmem->space_system = ismem;
    dmem->spaces.vm_reclaim = gs_gc_reclaim;	/* real GC */
    dmem->reclaim = 0;		/* no interpreter GC yet */
    /* Level 1 systems have only local VM. */
    igmem->space = avm_global;
    igmem_stable->space = avm_global;
    ilmem->space = avm_local;	/* overrides if ilmem == igmem */
    ilmem_stable->space = avm_local;	/* ditto */
    ismem->space = avm_system;
#   if IGC_PTR_STABILITY_CHECK
    igmem->space_id = (i_vm_global << 1) + 1;
    igmem_stable->space_id = i_vm_global << 1;
    ilmem->space_id = (i_vm_local << 1) + 1;	/* overrides if ilmem == igmem */
    ilmem_stable->space_id = i_vm_local << 1;	/* ditto */
    ismem->space_id = (i_vm_system << 1);
#   endif
    ialloc_set_space(dmem, avm_global);
    return 0;
 fail:
    gs_free_object(rmem, igmem_stable, "ialloc_init failure");
    gs_free_object(rmem, igmem, "ialloc_init failure");
    gs_free_object(rmem, ismem, "ialloc_init failure");
    gs_free_object(rmem, ilmem_stable, "ialloc_init failure");
    gs_free_object(rmem, ilmem, "ialloc_init failure");
    return_error(e_VMerror);
}
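
/*
 * Illustrative use (a sketch only; the real interpreter start-up code
 * supplies its own raw allocator and chunk size):
 *
 *	gs_dual_memory_t dmem;
 *	int code = ialloc_init(&dmem, rmem, chunk_size, true);
 *	if (code < 0)
 *	    return code;	(e_VMerror; the fail path frees partial state)
 *
 * On success, dmem->space_system, dmem->space_global and dmem->space_local
 * are set up (global and local are the same allocator when level2 is false),
 * the local/global allocators have their stable counterparts attached via
 * ->stable_memory, and the current allocation space is global VM.
 */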

/* ================ Local/global VM ================ */

/* Get the space attribute of an allocator */
uint
imemory_space(const gs_ref_memory_t * iimem)
{
    return iimem->space;
}

/* Select the allocation space. */
void
ialloc_set_space(gs_dual_memory_t * dmem, uint space)
{
    gs_ref_memory_t *mem = dmem->spaces_indexed[space >> r_space_shift];

    dmem->current = mem;
    dmem->current_space = mem->space;
}
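
/*
 * Note: 'space' above is one of the avm_* values (the same values stored in
 * each allocator's ->space field by ialloc_init), and space >> r_space_shift
 * gives the index into spaces_indexed.  This is how ialloc_init makes global
 * VM the initial allocation space via ialloc_set_space(dmem, avm_global).
 */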

/* Get the l_new attribute of a current allocator. */
/* (A copy of the new_mask in the gs_dual_memory_t.) */
uint
imemory_new_mask(const gs_ref_memory_t *imem)
{
    return imem->new_mask;
}

/* Get the save level of an allocator. */
int
imemory_save_level(const gs_ref_memory_t *imem)
{
    return imem->save_level;
}

/* Reset the requests. */
void
ialloc_reset_requested(gs_dual_memory_t * dmem)
{
    dmem->space_system->gc_status.requested = 0;
    dmem->space_global->gc_status.requested = 0;
    dmem->space_local->gc_status.requested = 0;
}

/* ================ Refs ================ */

#ifdef DEBUG
private int
ialloc_trace_space(const gs_ref_memory_t *imem)
{
    return imem->space + (imem->stable_memory == (const gs_memory_t *)imem);
}
#endif

/* Register a ref root. */
int
gs_register_ref_root(gs_memory_t *mem, gs_gc_root_t *root,
                     void **pp, client_name_t cname)
{
    return gs_register_root(mem, root, ptr_ref_type, pp, cname);
}

/*
 * As noted in iastate.h, every run of refs has an extra ref at the end
 * to hold relocation information for the garbage collector;
 * since sizeof(ref) % obj_align_mod == 0, we never need to
 * allocate any additional padding space at the end of the block.
 */
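
/*
 * Concretely (see gs_alloc_ref_array below): a request for num_refs refs
 * actually allocates num_refs + 1 of them, and the final ref is turned into
 * a mark with make_mark() so the garbage collector has a place to record
 * relocation information for the run.
 */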

/* Allocate an array of refs. */
int
gs_alloc_ref_array(gs_ref_memory_t * mem, ref * parr, uint attrs,
                   uint num_refs, client_name_t cname)
{
    ref *obj;

    /* If we're allocating a run of refs already, */
    /* and we aren't about to overflow the maximum run length, use it. */
    if (mem->cc.rtop == mem->cc.cbot &&
        num_refs < (mem->cc.ctop - mem->cc.cbot) / sizeof(ref) &&
        mem->cc.rtop - (byte *) mem->cc.rcur + num_refs * sizeof(ref) <
        max_size_st_refs
        ) {
        ref *end;

        obj = (ref *) mem->cc.rtop - 1;		/* back up over last ref */
        if_debug4('A', "[a%d:+$ ]%s(%u) = 0x%lx\n",
                  ialloc_trace_space(mem), client_name_string(cname),
                  num_refs, (ulong) obj);
        mem->cc.rcur[-1].o_size += num_refs * sizeof(ref);
        end = (ref *) (mem->cc.rtop = mem->cc.cbot +=
                       num_refs * sizeof(ref));
        make_mark(end - 1);
    } else {
        /*
         * Allocate a new run. We have to distinguish 3 cases:
         *	- Same chunk: pcc unchanged, end == cc.cbot.
         *	- Large chunk: pcc unchanged, end != cc.cbot.
         *	- New chunk: pcc changed.
         */
        chunk_t *pcc = mem->pcc;
        ref *end;

        obj = gs_alloc_struct_array((gs_memory_t *) mem, num_refs + 1,
                                    ref, &st_refs, cname);
        if (obj == 0)
            return_error(e_VMerror);
        /* Set the terminating ref now. */
        end = (ref *) obj + num_refs;
        make_mark(end);
        /* Set has_refs in the chunk. */
        if (mem->pcc != pcc || mem->cc.cbot == (byte *) (end + 1)) {
            /* Ordinary chunk. */
            mem->cc.rcur = (obj_header_t *) obj;
            mem->cc.rtop = (byte *) (end + 1);
            mem->cc.has_refs = true;
        } else {
            /* Large chunk. */
            /* This happens only for very large arrays, */
            /* so it doesn't need to be cheap. */
            chunk_locator_t cl;

            cl.memory = mem;
            cl.cp = mem->clast;
            chunk_locate_ptr(obj, &cl);
            cl.cp->has_refs = true;
        }
    }
    make_array(parr, attrs | mem->space, num_refs, obj);
    return 0;
}
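
/*
 * Note on the fast path above: when the current ref run ends exactly at
 * cc.cbot, new refs are appended in place (the run's o_size grows and cbot
 * and rtop advance together).  The same rtop == cbot test is what
 * gs_resize_ref_array and gs_free_ref_array below use to recognize the most
 * recently allocated run and give its space back immediately.
 */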

/* Resize an array of refs. Currently this is only implemented */
/* for shrinking, not for growing. */
int
gs_resize_ref_array(gs_ref_memory_t * mem, ref * parr,
                    uint new_num_refs, client_name_t cname)
{
    uint old_num_refs = r_size(parr);
    uint diff;
    ref *obj = parr->value.refs;

    if (new_num_refs > old_num_refs || !r_has_type(parr, t_array))
        return_error(e_Fatal);
    diff = old_num_refs - new_num_refs;
    /* Check for LIFO. See gs_free_ref_array for more details. */
    if (mem->cc.rtop == mem->cc.cbot &&
        (byte *) (obj + (old_num_refs + 1)) == mem->cc.rtop
        ) {
        /* Shorten the refs object. */
        ref *end = (ref *) (mem->cc.cbot = mem->cc.rtop -=
                            diff * sizeof(ref));

        if_debug4('A', "[a%d:<$ ]%s(%u) 0x%lx\n",
                  ialloc_trace_space(mem), client_name_string(cname), diff,
                  (ulong) obj);
        mem->cc.rcur[-1].o_size -= diff * sizeof(ref);
        make_mark(end - 1);
    } else {
        /* Punt. */
        if_debug4('A', "[a%d:<$#]%s(%u) 0x%lx\n",
                  ialloc_trace_space(mem), client_name_string(cname), diff,
                  (ulong) obj);
        mem->lost.refs += diff * sizeof(ref);
    }
    r_set_size(parr, new_num_refs);
    return 0;
}
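
/*
 * If the array being shrunk is not the most recent allocation, the freed
 * tail cannot be recovered here: it is only counted in mem->lost.refs,
 * i.e. ref storage that remains unusable until a garbage collection.
 */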

/* Deallocate an array of refs. Only do this if LIFO, or if */
/* the array occupies an entire chunk by itself. */
void
gs_free_ref_array(gs_ref_memory_t * mem, ref * parr, client_name_t cname)
{
    uint num_refs = r_size(parr);
    ref *obj = parr->value.refs;

    /*
     * Compute the storage size of the array, and check for LIFO
     * freeing or a separate chunk. Note that the array might be packed;
     * for the moment, if it's anything but a t_array, punt.
     * The +1s are for the extra ref for the GC.
     */
    if (!r_has_type(parr, t_array))
        DO_NOTHING;		/* don't look for special cases */
    else if (mem->cc.rtop == mem->cc.cbot &&
             (byte *) (obj + (num_refs + 1)) == mem->cc.rtop
        ) {
        if ((obj_header_t *) obj == mem->cc.rcur) {
            /* Deallocate the entire refs object. */
            gs_free_object((gs_memory_t *) mem, obj, cname);
            mem->cc.rcur = 0;
            mem->cc.rtop = 0;
        } else {
            /* Deallocate it at the end of the refs object. */
            if_debug4('A', "[a%d:-$ ]%s(%u) 0x%lx\n",
                      ialloc_trace_space(mem), client_name_string(cname),
                      num_refs, (ulong) obj);
            mem->cc.rcur[-1].o_size -= num_refs * sizeof(ref);
            mem->cc.rtop = mem->cc.cbot = (byte *) (obj + 1);
            make_mark(obj);
        }
        return;
    } else if (num_refs >= (mem->large_size / arch_sizeof_ref - 1)) {
        /* See if this array has a chunk all to itself. */
        /* We only make this check when freeing very large objects, */
        /* so it doesn't need to be cheap. */
        chunk_locator_t cl;

        cl.memory = mem;
        cl.cp = mem->clast;
        if (chunk_locate_ptr(obj, &cl) &&
            obj == (ref *) ((obj_header_t *) (cl.cp->cbase) + 1) &&
            (byte *) (obj + (num_refs + 1)) == cl.cp->cend
            ) {
            /* Free the chunk. */
            if_debug4('a', "[a%d:-$L]%s(%u) 0x%lx\n",
                      ialloc_trace_space(mem), client_name_string(cname),
                      num_refs, (ulong) obj);
            alloc_free_chunk(cl.cp, mem);
            return;
        }
    }
    /* Punt, but fill the array with nulls so that there won't be */
    /* dangling references to confuse the garbage collector. */
    if_debug4('A', "[a%d:-$#]%s(%u) 0x%lx\n",
              ialloc_trace_space(mem), client_name_string(cname), num_refs,
              (ulong) obj);
    {
        uint size;

        switch (r_type(parr)) {
            case t_shortarray:
                size = num_refs * sizeof(ref_packed);
                break;
            case t_mixedarray: {
                /* We have to parse the array to compute the storage size. */
                uint i = 0;
                const ref_packed *p = parr->value.packed;

                for (; i < num_refs; ++i)
                    p = packed_next(p);
                size = (const byte *)p - (const byte *)parr->value.packed;
                break;
            }
            case t_array:
                size = num_refs * sizeof(ref);
                break;
            default:
                lprintf3("Unknown type 0x%x in free_ref_array(%u,0x%lx)!",
                         r_type(parr), num_refs, (ulong) obj);
                return;
        }
        /*
         * If there are any leftover packed elements, we don't
         * worry about them, since they can't be dangling references.
         */
        refset_null_new(obj, size / sizeof(ref), 0);
        mem->lost.refs += size;
    }
}
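
/*
 * The num_refs >= large_size / arch_sizeof_ref - 1 test above reflects that
 * large_size is used elsewhere in this allocator as the size above which an
 * object is given a chunk of its own, so only arrays at least that big can
 * possibly pass the whole-chunk check; smaller (or packed) non-LIFO arrays
 * always take the null-filling punt path.
 */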

/* Allocate a string ref. */
int
gs_alloc_string_ref(gs_ref_memory_t * mem, ref * psref,
                    uint attrs, uint nbytes, client_name_t cname)
{
    byte *str = gs_alloc_string((gs_memory_t *) mem, nbytes, cname);

    if (str == 0)
        return_error(e_VMerror);
    make_string(psref, attrs | mem->space, nbytes, str);
    return 0;
}