Subversion Repositories planix.SVN

Rev

Rev 2 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2 - 1
/* Copyright (C) 1993, 2000 Aladdin Enterprises.  All rights reserved.
2
 
3
  This software is provided AS-IS with no warranty, either express or
4
  implied.
5
 
6
  This software is distributed under license and may not be copied,
7
  modified or distributed except as expressly authorized under the terms
8
  of the license contained in the file LICENSE in this distribution.
9
 
10
  For more information about licensing, please refer to
11
  http://www.ghostscript.com/licensing/. For information on
12
  commercial licensing, go to http://www.artifex.com/licensing/ or
13
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
14
  San Rafael, CA  94903, U.S.A., +1(415)492-9861.
15
*/
16
 
17
/* $Id: isave.c,v 1.14 2005/06/23 07:35:30 igor Exp $ */
18
/* Save/restore manager for Ghostscript interpreter */
19
#include "ghost.h"
20
#include "memory_.h"
21
#include "ierrors.h"
22
#include "gsexit.h"
23
#include "gsstruct.h"
24
#include "stream.h"		/* for linking for forgetsave */
25
#include "iastate.h"
26
#include "inamedef.h"
27
#include "iname.h"
28
#include "ipacked.h"
29
#include "isave.h"
30
#include "isstate.h"
31
#include "store.h"		/* for ref_assign */
32
#include "ivmspace.h"
33
#include "gsutil.h"		/* gs_next_ids prototype */
34
 
35
 
36
/* Structure descriptor for alloc_save_t (declared in isave.h). */
private_st_alloc_save();

/* Define the maximum amount of data we are willing to scan repeatedly -- */
/* see the "invisible save" discussion below for details. */
private const long max_repeated_scan = 100000;

/* Define the minimum space for creating an inner chunk. */
/* Must be at least sizeof(chunk_head_t). */
private const long min_inner_chunk_space = sizeof(chunk_head_t) + 500;
46
 
47
/*
48
 * The logic for saving and restoring the state is complex.
49
 * Both the changes to individual objects, and the overall state
50
 * of the memory manager, must be saved and restored.
51
 */
52
 
53
/*
54
 * To save the state of the memory manager:
55
 *      Save the state of the current chunk in which we are allocating.
56
 *      Shrink all chunks to their inner unallocated region.
57
 *      Save and reset the free block chains.
58
 * By doing this, we guarantee that no object older than the save
59
 * can be freed.
60
 *
61
 * To restore the state of the memory manager:
62
 *      Free all chunks newer than the save, and the descriptors for
63
 *        the inner chunks created by the save.
64
 *      Make current the chunk that was current at the time of the save.
65
 *      Restore the state of the current chunk.
66
 *
67
 * In addition to save ("start transaction") and restore ("abort transaction"),
68
 * we support forgetting a save ("commit transaction").  To forget a save:
69
 *      Reassign to the next outer save all chunks newer than the save.
70
 *      Free the descriptors for the inner chunks, updating their outer
71
 *        chunks to reflect additional allocations in the inner chunks.
72
 *      Concatenate the free block chains with those of the outer save.
73
 */
74
 
75
/*
76
 * For saving changes to individual objects, we add an "attribute" bit
77
 * (l_new) that logically belongs to the slot where the ref is stored,
78
 * not to the ref itself.  The bit means "the contents of this slot
79
 * have been changed, or the slot was allocated, since the last save."
80
 * To keep track of changes since the save, we associate a chain of
81
 * <slot, old_contents> pairs that remembers the old contents of slots.
82
 *
83
 * When creating an object, if the save level is non-zero:
84
 *      Set l_new in all slots.
85
 *
86
 * When storing into a slot, if the save level is non-zero:
87
 *      If l_new isn't set, save the address and contents of the slot
88
 *        on the current contents chain.
89
 *      Set l_new after storing the new value.
90
 *
91
 * To do a save:
92
 *      If the save level is non-zero:
93
 *              Reset l_new in all slots on the contents chain, and in all
94
 *                objects created since the previous save.
95
 *      Push the head of the contents chain, and reset the chain to empty.
96
 *
97
 * To do a restore:
98
 *      Check all the stacks to make sure they don't contain references
99
 *        to objects created since the save.
100
 *      Restore all the slots on the contents chain.
101
 *      Pop the contents chain head.
102
 *      If the save level is now non-zero:
103
 *              Scan the newly restored contents chain, and set l_new in all
104
 *                the slots it references.
105
 *              Scan all objects created since the previous save, and set
106
 *                l_new in all the slots of each object.
107
 *
108
 * To forget a save:
109
 *      If the save level is greater than 1:
110
 *              Set l_new as for a restore, per the next outer save.
111
 *              Concatenate the next outer contents chain to the end of
112
 *                the current one.
113
 *      If the save level is 1:
114
 *              Reset l_new as for a save.
115
 *              Free the contents chain.
116
 */
117
 
118
/*
119
 * A consequence of the foregoing algorithms is that the cost of a save is
120
 * proportional to the total amount of data allocated since the previous
121
 * save.  If a PostScript program reads in a large amount of setup code and
122
 * then uses save/restore heavily, each save/restore will be expensive.  To
123
 * mitigate this, we check to see how much data we have scanned at this save
124
 * level: if it is large, we do a second, invisible save.  This greatly
125
 * reduces the cost of inner saves, at the expense of possibly saving some
126
 * changes twice that otherwise would only have to be saved once.
127
 */
128
 
129
/*
130
 * The presence of global and local VM complicates the situation further.
131
 * There is a separate save chain and contents chain for each VM space.
132
 * When multiple contexts are fully implemented, save and restore will have
133
 * the following effects, according to the privacy status of the current
134
 * context's global and local VM:
135
 *      Private global, private local:
136
 *              The outermost save saves both global and local VM;
137
 *                otherwise, save only saves local VM.
138
 *      Shared global, private local:
139
 *              Save only saves local VM.
140
 *      Shared global, shared local:
141
 *              Save only saves local VM, and suspends all other contexts
142
 *                sharing the same local VM until the matching restore.
143
 * Since we do not currently implement multiple contexts, only the first
144
 * case is relevant.
145
 *
146
 * Note that when saving the contents of a slot, the choice of chain
147
 * is determined by the VM space in which the slot is allocated,
148
 * not by the current allocation mode.
149
 */
150
 
151
/* Tracing printout */
152
private void
153
print_save(const char *str, uint spacen, const alloc_save_t *sav)
154
{
155
  if_debug5('u', "[u]%s space %u 0x%lx: cdata = 0x%lx, id = %lu\n",\
156
	    str, spacen, (ulong)sav, (ulong)sav->client_data, (ulong)sav->id);
157
}
158
 
159
/*
160
 * Structure for saved change chain for save/restore.  Because of the
161
 * garbage collector, we need to distinguish the cases where the change
162
 * is in a static object, a dynamic ref, or a dynamic struct.
163
 */
164
typedef struct alloc_change_s alloc_change_t;
struct alloc_change_s {
    alloc_change_t *next;	/* next (older) change record on the chain */
    ref_packed *where;		/* address of the changed slot */
    ref contents;		/* old contents of the slot (may hold a packed ref) */
#define AC_OFFSET_STATIC (-2)	/* static object */
#define AC_OFFSET_REF (-1)	/* dynamic ref */
    short offset;		/* if >= 0, offset within struct */
};
173
 
174
/* Clear the GC mark bit on the saved contents of a change record. */
private 
CLEAR_MARKS_PROC(change_clear_marks)
{
    alloc_change_t *const ptr = (alloc_change_t *)vptr;

    /* The saved contents may be either a packed ref or a full ref. */
    if (r_is_packed(&ptr->contents))
	r_clear_pmark((ref_packed *) & ptr->contents);
    else
	r_clear_attrs(&ptr->contents, l_mark);
}
184
/* Enumerate the pointers in a change record, for the garbage collector. */
private 
ENUM_PTRS_WITH(change_enum_ptrs, alloc_change_t *ptr) return 0;
ENUM_PTR(0, alloc_change_t, next);
case 1:
    /* 'where' may point into the interior of a struct (offset >= 0); */
    /* in that case enumerate the containing object instead. */
    if (ptr->offset >= 0)
	ENUM_RETURN((byte *) ptr->where - ptr->offset);
    else
	ENUM_RETURN_REF(ptr->where);
case 2:
    ENUM_RETURN_REF(&ptr->contents);
ENUM_PTRS_END
195
/* Relocate the pointers in a change record after the GC has moved objects. */
private RELOC_PTRS_WITH(change_reloc_ptrs, alloc_change_t *ptr)
{
    RELOC_VAR(ptr->next);
    switch (ptr->offset) {
	case AC_OFFSET_STATIC:
	    /* Slot lives in a static object: nothing to relocate. */
	    break;
	case AC_OFFSET_REF:
	    RELOC_REF_PTR_VAR(ptr->where);
	    break;
	default:
	    /* Slot is inside a dynamic struct: relocate the containing */
	    /* object, then rebuild the slot address from the offset. */
	    {
		byte *obj = (byte *) ptr->where - ptr->offset;

		RELOC_VAR(obj);
		ptr->where = (ref_packed *) (obj + ptr->offset);
	    }
	    break;
    }
    /* Clear any mark left on the saved contents by the GC. */
    if (r_is_packed(&ptr->contents))
	r_clear_pmark((ref_packed *) & ptr->contents);
    else {
	RELOC_REF_VAR(ptr->contents);
	r_clear_attrs(&ptr->contents, l_mark);
    }
}
RELOC_PTRS_END
221
/* GC descriptor for change records, wiring in the three procedures above. */
gs_private_st_complex_only(st_alloc_change, alloc_change_t, "alloc_change",
		change_clear_marks, change_enum_ptrs, change_reloc_ptrs, 0);
223
 
224
/* Debugging printout of one change record (debug builds only). */
/* If print_current is true, also print the slot's current value */
/* alongside the saved one. */
#ifdef DEBUG
private void
alloc_save_print(alloc_change_t * cp, bool print_current)
{
    dprintf2(" 0x%lx: 0x%lx: ", (ulong) cp, (ulong) cp->where);
    if (r_is_packed(&cp->contents)) {
	if (print_current)
	    dprintf2("saved=%x cur=%x\n", *(ref_packed *) & cp->contents,
		     *cp->where);
	else
	    dprintf1("%x\n", *(ref_packed *) & cp->contents);
    } else {
	/* Full ref: print type/attrs, size, and the value word. */
	if (print_current)
	    dprintf6("saved=%x %x %lx cur=%x %x %lx\n",
		     r_type_attrs(&cp->contents), r_size(&cp->contents),
		     (ulong) cp->contents.value.intval,
		     r_type_attrs((ref *) cp->where),
		     r_size((ref *) cp->where),
		     (ulong) ((ref *) cp->where)->value.intval);
	else
	    dprintf3("%x %x %lx\n",
		     r_type_attrs(&cp->contents), r_size(&cp->contents),
		     (ulong) cp->contents.value.intval);
    }
}
#endif
251
 
252
/* Forward references */
private void restore_resources(alloc_save_t *, gs_ref_memory_t *);  /* release non-memory resources */
private void restore_free(gs_ref_memory_t *);  /* free chunks allocated since a save */
private long save_set_new(gs_ref_memory_t *, bool);  /* set/reset l_new on changed slots */
private void save_set_new_changes(gs_ref_memory_t *, bool);
257
 
258
/* Initialize the save/restore machinery: we begin outside any save. */
void
alloc_save_init(gs_dual_memory_t * dmem)
{
    alloc_set_not_in_save(dmem);
}
264
 
265
/* Record that we are in a save. */
266
private void
267
alloc_set_masks(gs_dual_memory_t *dmem, uint new_mask, uint test_mask)
268
{
269
    int i;
270
    gs_ref_memory_t *mem;
271
 
272
    dmem->new_mask = new_mask;
273
    dmem->test_mask = test_mask;
274
    for (i = 0; i < countof(dmem->spaces.memories.indexed); ++i)
275
	if ((mem = dmem->spaces.memories.indexed[i]) != 0) {
276
	    mem->new_mask = new_mask, mem->test_mask = test_mask;
277
	    if (mem->stable_memory != (gs_memory_t *)mem) {
278
		mem = (gs_ref_memory_t *)mem->stable_memory;
279
		mem->new_mask = new_mask, mem->test_mask = test_mask;
280
	    }
281
	}
282
}
283
void
alloc_set_in_save(gs_dual_memory_t *dmem)
{
    /* In a save: mark new slots with l_new and test for it on stores. */
    alloc_set_masks(dmem, l_new, l_new);
}
288
 
289
/* Record that we are not in a save. */
290
void
alloc_set_not_in_save(gs_dual_memory_t *dmem)
{
    /* Not in a save: set no bits on stores; test mask ~0 matches all. */
    alloc_set_masks(dmem, 0, ~0);
}
295
 
296
/* Save the state. */
297
/* Forward declaration: save the state of one space (defined below). */
private alloc_save_t *alloc_save_space(gs_ref_memory_t *mem,
				       gs_dual_memory_t *dmem,
				       ulong sid);
/* Undo a partially completed save: free the save record and any */
/* inner chunks created for it (error-recovery helper). */
private void
alloc_free_save(gs_ref_memory_t *mem, alloc_save_t *save, const char *scn)
{
    gs_free_object((gs_memory_t *)mem, save, scn);
    /* Free any inner chunk structures.  This is the easiest way to do it. */
    restore_free(mem);
}
307
/*
 * Save the state of the interpreter's VM.  Returns the save id
 * (usable with alloc_find_save), or 0 on allocation failure.
 * Global VM is saved only by the outermost save (save_level == 0)
 * when it is distinct from local VM and not shared between contexts.
 */
ulong
alloc_save_state(gs_dual_memory_t * dmem, void *cdata)
{
    gs_ref_memory_t *lmem = dmem->space_local;
    gs_ref_memory_t *gmem = dmem->space_global;
    /* Allocate two ids: sid for local VM, sid + 1 for global VM. */
    ulong sid = gs_next_ids((const gs_memory_t *)lmem->stable_memory, 2);
    bool global =
	lmem->save_level == 0 && gmem != lmem &&
	gmem->num_contexts == 1;
    alloc_save_t *gsave =
	(global ? alloc_save_space(gmem, dmem, sid + 1) : (alloc_save_t *) 0);
    alloc_save_t *lsave = alloc_save_space(lmem, dmem, sid);

    if (lsave == 0 || (global && gsave == 0)) {
	/* Allocation failed: undo whichever half succeeded. */
	if (lsave != 0)
	    alloc_free_save(lmem, lsave, "alloc_save_state(local save)");
	if (gsave != 0)
	    alloc_free_save(gmem, gsave, "alloc_save_state(global save)");
	return 0;
    }
    if (gsave != 0) {
	gsave->client_data = 0;
	print_save("save", gmem->space, gsave);
	/* Restore names when we do the local restore. */
	lsave->restore_names = gsave->restore_names;
	gsave->restore_names = false;
    }
    lsave->id = sid;
    lsave->client_data = cdata;
    print_save("save", lmem->space, lsave);
    /* Reset the l_new attribute in all slots.  The only slots that */
    /* can have the attribute set are the ones on the changes chain, */
    /* and ones in objects allocated since the last save. */
    if (lmem->save_level > 1) {
	long scanned = save_set_new(&lsave->state, false);

	if ((lsave->state.total_scanned += scanned) > max_repeated_scan) {
	    /* Do a second, invisible save.  This caps the amount of */
	    /* data that later saves must rescan (see comment above). */
	    alloc_save_t *rsave;

	    rsave = alloc_save_space(lmem, dmem, 0L);
	    if (rsave != 0) {
		rsave->client_data = cdata;
#if 0 /* Bug 688153 */
		rsave->id = lsave->id;
		print_save("save", lmem->space, rsave);
		lsave->id = 0;	/* mark as invisible */
		rsave->state.save_level--; /* ditto */
		lsave->client_data = 0;
#else
		rsave->id = 0;  /* mark as invisible */
		print_save("save", lmem->space, rsave);
		rsave->state.save_level--; /* ditto */
		rsave->client_data = 0;
#endif
		/* Inherit the allocated space count -- */
		/* we need this for triggering a GC. */
		rsave->state.inherited =
		    lsave->state.allocated + lsave->state.inherited;
		lmem->inherited = rsave->state.inherited;
		print_save("save", lmem->space, lsave);
	    }
	}
    }
    alloc_set_in_save(dmem);
    return sid;
}
374
/*
 * Save the state of one space (global or local).
 * Captures the allocator state in a new alloc_save_t, resets the
 * allocator, and creates "inner" chunks covering the unallocated tails
 * of existing chunks so no pre-save object can be freed.
 * sid == 0 marks an invisible save (save_level is not incremented).
 * Returns 0 on allocation failure, leaving *mem as it was.
 */
private alloc_save_t *
alloc_save_space(gs_ref_memory_t * mem, gs_dual_memory_t * dmem, ulong sid)
{
    gs_ref_memory_t save_mem;	/* copy of the pre-save allocator state */
    alloc_save_t *save;
    chunk_t *cp;
    chunk_t *new_pcc = 0;

    save_mem = *mem;
    alloc_close_chunk(mem);
    mem->pcc = 0;
    gs_memory_status((gs_memory_t *) mem, &mem->previous_status);
    ialloc_reset(mem);

    /* Create inner chunks wherever it's worthwhile. */

    for (cp = save_mem.cfirst; cp != 0; cp = cp->cnext) {
	if (cp->ctop - cp->cbot > min_inner_chunk_space) {
	    /* Create an inner chunk to cover only the unallocated part. */
	    chunk_t *inner =
		gs_raw_alloc_struct_immovable(mem->non_gc_memory, &st_chunk,
					      "alloc_save_space(inner)");

	    if (inner == 0)
		break;		/* maybe should fail */
	    alloc_init_chunk(inner, cp->cbot, cp->ctop, cp->sreloc != 0, cp);
	    alloc_link_chunk(inner, mem);
	    if_debug2('u', "[u]inner chunk: cbot=0x%lx ctop=0x%lx\n",
		      (ulong) inner->cbot, (ulong) inner->ctop);
	    /* Keep the "current chunk" property on its inner cover. */
	    if (cp == save_mem.pcc)
		new_pcc = inner;
	}
    }
    mem->pcc = new_pcc;
    alloc_open_chunk(mem);

    save = gs_alloc_struct((gs_memory_t *) mem, alloc_save_t,
			   &st_alloc_save, "alloc_save_space(save)");
    if_debug2('u', "[u]save space %u at 0x%lx\n",
	      mem->space, (ulong) save);
    if (save == 0) {
	/* Free the inner chunk structures.  This is the easiest way. */
	restore_free(mem);
	*mem = save_mem;
	return 0;
    }
    save->state = save_mem;
    save->spaces = dmem->spaces;
    save->restore_names = (name_memory(mem) == (gs_memory_t *) mem);
    save->is_current = (dmem->current == mem);
    save->id = sid;
    mem->saved = save;
    if_debug2('u', "[u%u]file_save 0x%lx\n",
	      mem->space, (ulong) mem->streams);
    /* The open-stream list was captured in save->state; start fresh. */
    mem->streams = 0;
    mem->total_scanned = 0;
    if (sid)
	mem->save_level++;
    return save;
}
435
 
436
/* Record a state change that must be undone for restore, */
/* and mark it as having been saved. */
/* Returns 0 on success (or when not in a save), -1 if the change */
/* record cannot be allocated. */
int
alloc_save_change_in(gs_ref_memory_t *mem, const ref * pcont,
		  ref_packed * where, client_name_t cname)
{
    register alloc_change_t *cp;

    if (mem->new_mask == 0)
	return 0;		/* no saving */
    cp = gs_alloc_struct((gs_memory_t *)mem, alloc_change_t,
			 &st_alloc_change, "alloc_save_change");
    if (cp == 0)
	return -1;
    cp->next = mem->changes;
    cp->where = where;
    /* Classify the container so the GC can relocate 'where' later. */
    if (pcont == NULL)
	cp->offset = AC_OFFSET_STATIC;
    else if (r_is_array(pcont) || r_has_type(pcont, t_dictionary))
	cp->offset = AC_OFFSET_REF;
    else if (r_is_struct(pcont))
	cp->offset = (byte *) where - (byte *) pcont->value.pstruct;
    else {
	lprintf3("Bad type %u for save!  pcont = 0x%lx, where = 0x%lx\n",
		 r_type(pcont), (ulong) pcont, (ulong) where);
	gs_abort((const gs_memory_t *)mem);
    }
    /* Save the old contents; for a full ref, also mark the slot l_new */
    /* so it is not saved again before the next save/restore. */
    if (r_is_packed(where))
	*(ref_packed *)&cp->contents = *where;
    else {
	ref_assign_inline(&cp->contents, (ref *) where);
	r_set_attrs((ref *) where, l_new);
    }
    mem->changes = cp;
#ifdef DEBUG
    if (gs_debug_c('U')) {
	dlprintf1("[U]save(%s)", client_name_string(cname));
	alloc_save_print(cp, false);
    }
#endif
    return 0;
}
478
int
479
alloc_save_change(gs_dual_memory_t * dmem, const ref * pcont,
480
		  ref_packed * where, client_name_t cname)
481
{
482
    gs_ref_memory_t *mem =
483
	(pcont == NULL ? dmem->space_local :
484
	 dmem->spaces_indexed[r_space(pcont) >> r_space_shift]);
485
 
486
    return alloc_save_change_in(mem, pcont, where, cname);
487
}
488
 
489
/* Return (the id of) the innermost externally visible save object, */
490
/* i.e., the innermost save with a non-zero ID. */
491
ulong
492
alloc_save_current_id(const gs_dual_memory_t * dmem)
493
{
494
    const alloc_save_t *save = dmem->space_local->saved;
495
 
496
    while (save != 0 && save->id == 0)
497
	save = save->state.saved;
498
    return save->id;
499
}
500
/* Return the innermost externally visible save object itself. */
alloc_save_t *
alloc_save_current(const gs_dual_memory_t * dmem)
{
    return alloc_find_save(dmem, alloc_save_current_id(dmem));
}
505
 
506
/* Test whether a reference would be invalidated by a restore. */
bool
alloc_is_since_save(const void *vptr, const alloc_save_t * save)
{
    /* A reference postdates a save iff it is in a chunk allocated */
    /* since the save (including any carried-over inner chunks). */

    const char *const ptr = (const char *)vptr;
    register const gs_ref_memory_t *mem = save->space_local;

    if_debug2('U', "[U]is_since_save 0x%lx, 0x%lx:\n",
	      (ulong) ptr, (ulong) save);
    if (mem->saved == 0) {	/* This is a special case, the final 'restore' from */
	/* alloc_restore_all. */
	return true;
    }
    /* Check against chunks allocated since the save. */
    /* (There may have been intermediate saves as well.) */
    for (;; mem = &mem->saved->state) {
	const chunk_t *cp;

	if_debug1('U', "[U]checking mem=0x%lx\n", (ulong) mem);
	for (cp = mem->cfirst; cp != 0; cp = cp->cnext) {
	    if (ptr_is_within_chunk(ptr, cp)) {
		if_debug3('U', "[U+]in new chunk 0x%lx: 0x%lx, 0x%lx\n",
			  (ulong) cp,
			  (ulong) cp->cbase, (ulong) cp->cend);
		return true;
	    }
	    if_debug1('U', "[U-]not in 0x%lx\n", (ulong) cp);
	}
	if (mem->saved == save) {	/* We've checked all the more recent saves, */
	    /* must be OK. */
	    break;
	}
    }

    /*
     * If we're about to do a global restore (a restore to the level 0),
     * and there is only one context using this global VM
     * (the normal case, in which global VM is saved by the
     * outermost save), we also have to check the global save.
     * Global saves can't be nested, which makes things easy.
     */
    if (save->state.save_level == 0 /* Restoring to save level 0 - see bug 688157, 688161 */ &&
	(mem = save->space_global) != save->space_local &&
	save->space_global->num_contexts == 1
	) {
	const chunk_t *cp;

	if_debug1('U', "[U]checking global mem=0x%lx\n", (ulong) mem);
	for (cp = mem->cfirst; cp != 0; cp = cp->cnext)
	    if (ptr_is_within_chunk(ptr, cp)) {
		if_debug3('U', "[U+]  new chunk 0x%lx: 0x%lx, 0x%lx\n",
			  (ulong) cp, (ulong) cp->cbase, (ulong) cp->cend);
		return true;
	    }
    }
    return false;

    /* NOTE(review): no matching '#define ptr' is visible in this */
    /* function; this #undef looks like a leftover -- confirm. */
#undef ptr
}
568
 
569
/* Test whether a name would be invalidated by a restore. */
570
bool
571
alloc_name_is_since_save(const gs_memory_t *mem,
572
			 const ref * pnref, const alloc_save_t * save)
573
{
574
    const name_string_t *pnstr;
575
 
576
    if (!save->restore_names)
577
	return false;
578
    pnstr = names_string_inline(mem->gs_lib_ctx->gs_name_table, pnref);
579
    if (pnstr->foreign_string)
580
	return false;
581
    return alloc_is_since_save(pnstr->string_bytes, save);
582
}
583
bool
584
alloc_name_index_is_since_save(const gs_memory_t *mem,
585
			       uint nidx, const alloc_save_t *save)
586
{
587
    const name_string_t *pnstr;
588
 
589
    if (!save->restore_names)
590
	return false;
591
    pnstr = names_index_string_inline(mem->gs_lib_ctx->gs_name_table, nidx);
592
    if (pnstr->foreign_string)
593
	return false;
594
    return alloc_is_since_save(pnstr->string_bytes, save);
595
}
596
 
597
/* Check whether any names have been created since a given save */
598
/* that might be released by the restore. */
599
bool
alloc_any_names_since_save(const alloc_save_t * save)
{
    /* Names can only need releasing if this save is the one */
    /* responsible for restoring the name table. */
    return save->restore_names;
}
604
 
605
/* Get the saved state with a given ID. */
606
alloc_save_t *
607
alloc_find_save(const gs_dual_memory_t * dmem, ulong sid)
608
{
609
    alloc_save_t *sprev = dmem->space_local->saved;
610
 
611
    if (sid == 0)
612
	return 0;		/* invalid id */
613
    while (sprev != 0) {
614
	if (sprev->id == sid)
615
	    return sprev;
616
	sprev = sprev->state.saved;
617
    }
618
    return 0;
619
}
620
 
621
/* Get the client data from a saved state. */
622
void *
alloc_save_client_data(const alloc_save_t * save)
{
    /* Return the client data pointer recorded by alloc_save_state. */
    return save->client_data;
}
627
 
628
/*
629
 * Do one step of restoring the state.  The client is responsible for
630
 * calling alloc_find_save to get the save object, and for ensuring that
631
 * there are no surviving pointers for which alloc_is_since_save is true.
632
 * Return true if the argument was the innermost save, in which case
633
 * this is the last (or only) step.
634
 * Note that "one step" may involve multiple internal steps,
635
 * if this is the outermost restore (which requires restoring both local
636
 * and global VM) or if we created extra save levels to reduce scanning.
637
 */
638
private void restore_finalize(gs_ref_memory_t *);
private void restore_space(gs_ref_memory_t *, gs_dual_memory_t *);

/*
 * Do one externally visible step of restoring the state; returns true
 * if 'save' was the innermost save.  Any invisible saves (id == 0)
 * between the innermost save and 'save' are restored in the same step.
 * Two passes are made: all finalizers run first, then resources are
 * released and memory is restored.
 */
bool
alloc_restore_step_in(gs_dual_memory_t *dmem, alloc_save_t * save)
{
    /* Get save->space_* now, because the save object will be freed. */
    gs_ref_memory_t *lmem = save->space_local;
    gs_ref_memory_t *gmem = save->space_global;
    gs_ref_memory_t *mem = lmem;
    alloc_save_t *sprev;

    /* Finalize all objects before releasing resources or undoing changes. */
    do {
	ulong sid;

	sprev = mem->saved;
	sid = sprev->id;
	restore_finalize(mem);	/* finalize objects */
	mem = &sprev->state;
	if (sid != 0)		/* stop at the first visible save */
	    break;
    }
    while (sprev != save);
    if (mem->save_level == 0) {
	/* This is the outermost save, which might also */
	/* need to restore global VM. */
	mem = gmem;
	if (mem != lmem && mem->saved != 0)
	    restore_finalize(mem);
    }

    /* Do one (externally visible) step of restoring the state. */
    mem = lmem;
    do {
	ulong sid;

	sprev = mem->saved;
	sid = sprev->id;
	restore_resources(sprev, mem);	/* release other resources */
	restore_space(mem, dmem);	/* release memory */
	if (sid != 0)
	    break;
    }
    while (sprev != save);

    if (mem->save_level == 0) {
	/* This is the outermost save, which might also */
	/* need to restore global VM. */
	mem = gmem;
	if (mem != lmem && mem->saved != 0) {
	    restore_resources(mem->saved, mem);
	    restore_space(mem, dmem);
	}
	alloc_set_not_in_save(dmem);
    } else {			/* Set the l_new attribute in all slots that are now new. */
	save_set_new(mem, true);
    }

    return sprev == save;
}
699
/* Restore the memory of one space, by undoing changes and freeing */
/* memory allocated since the save. */
private void
restore_space(gs_ref_memory_t * mem, gs_dual_memory_t *dmem)
{
    alloc_save_t *save = mem->saved;
    alloc_save_t saved;

    print_save("restore", mem->space, save);

    /* Undo changes since the save, most recent change first */
    /* (records were prepended to the chain as they were made). */
    {
	register alloc_change_t *cp = mem->changes;

	while (cp) {
#ifdef DEBUG
	    if (gs_debug_c('U')) {
		dlputs("[U]restore");
		alloc_save_print(cp, true);
	    }
#endif
	    if (r_is_packed(&cp->contents))
		*cp->where = *(ref_packed *) & cp->contents;
	    else
		ref_assign_inline((ref *) cp->where, &cp->contents);
	    cp = cp->next;
	}
    }

    /* Free memory allocated since the save. */
    /* Note that this frees all chunks except the inner ones */
    /* belonging to this level. */
    saved = *save;		/* copy: the save record itself is freed next */
    restore_free(mem);

    /* Restore the allocator state. */
    {
	int num_contexts = mem->num_contexts;	/* don't restore */

	*mem = saved.state;
	mem->num_contexts = num_contexts;
    }
    alloc_open_chunk(mem);

    /* Make the allocator current if it was current before the save. */
    if (saved.is_current) {
	dmem->current = mem;
	dmem->current_space = mem->space;
    }
}
749
 
750
/* Restore to the initial state, releasing all resources. */
/* The allocator is no longer usable after calling this routine! */
void
alloc_restore_all(gs_dual_memory_t * dmem)
{
    /*
     * Save the memory pointers, since freeing space_local will also
     * free dmem itself.
     */
    gs_ref_memory_t *lmem = dmem->space_local;
    gs_ref_memory_t *gmem = dmem->space_global;
    gs_ref_memory_t *smem = dmem->space_system;
    gs_ref_memory_t *mem;

    /* Restore to a state outside any saves. */
    while (lmem->save_level != 0)
	discard(alloc_restore_step_in(dmem, lmem->saved));

    /* Finalize memory, including each distinct stable allocator. */
    restore_finalize(lmem);
    if ((mem = (gs_ref_memory_t *)lmem->stable_memory) != lmem)
	restore_finalize(mem);
    if (gmem != lmem && gmem->num_contexts == 1) {
	restore_finalize(gmem);
	if ((mem = (gs_ref_memory_t *)gmem->stable_memory) != gmem)
	    restore_finalize(mem);
    }
    restore_finalize(smem);

    /* Release resources other than memory, using fake */
    /* save and memory objects. */
    {
	alloc_save_t empty_save;

	empty_save.spaces = dmem->spaces;
	empty_save.restore_names = false;	/* don't bother to release */
	restore_resources(&empty_save, NULL);
    }

    /* Finally, release memory. */
    restore_free(lmem);
    if ((mem = (gs_ref_memory_t *)lmem->stable_memory) != lmem)
	restore_free(mem);
    if (gmem != lmem) {
	/* Only free global VM once its last context releases it. */
	if (!--(gmem->num_contexts)) {
	    restore_free(gmem);
	    if ((mem = (gs_ref_memory_t *)gmem->stable_memory) != gmem)
		restore_free(mem);
	}
    }
    restore_free(smem);

}
803
 
804
/*
805
 * Finalize objects that will be freed by a restore.
806
 * Note that we must temporarily disable the freeing operations
807
 * of the allocator while doing this.
808
 */
809
private void
restore_finalize(gs_ref_memory_t * mem)
{
    chunk_t *cp;

    alloc_close_chunk(mem);
    /* Disable freeing so finalizers cannot free objects out from */
    /* under this scan; re-enabled below. */
    gs_enable_free((gs_memory_t *) mem, false);
    /* Walk chunks newest-first (clast back through cprev). */
    for (cp = mem->clast; cp != 0; cp = cp->cprev) {
	SCAN_CHUNK_OBJECTS(cp)
	    DO_ALL
	    struct_proc_finalize((*finalize)) =
	    pre->o_type->finalize;
	if (finalize != 0) {
	    if_debug2('u', "[u]restore finalizing %s 0x%lx\n",
		      struct_type_name_string(pre->o_type),
		      (ulong) (pre + 1));
	    (*finalize) (pre + 1);
	}
	END_OBJECTS_SCAN
    }
    gs_enable_free((gs_memory_t *) mem, true);
}
831
 
832
/* Release resources for a restore.  mem may be NULL (alloc_restore_all */
/* passes NULL with a fake save whose restore_names is false, so the */
/* name-table branch is never reached in that case). */
private void
restore_resources(alloc_save_t * sprev, gs_ref_memory_t * mem)
{
#ifdef DEBUG
    if (mem) {
	/* Note restoring of the file list. */
	if_debug4('u', "[u%u]file_restore 0x%lx => 0x%lx for 0x%lx\n",
		  mem->space, (ulong)mem->streams,
		  (ulong)sprev->state.streams, (ulong) sprev);
    }
#endif

    /* Remove entries from font and character caches. */
    font_restore(sprev);

    /* Adjust the name table. */
    if (sprev->restore_names)
	names_restore(mem->gs_lib_ctx->gs_name_table, sprev);
}
852
 
853
/* Release memory for a restore.  Also used as error-cleanup in */
/* alloc_save_space and alloc_free_save to discard inner chunks. */
private void
restore_free(gs_ref_memory_t * mem)
{
    /* Free chunks allocated since the save. */
    gs_free_all((gs_memory_t *) mem);
}
860
 
861
/* Forget a save, by merging this level with the next outer one. */
private void file_forget_save(gs_ref_memory_t *);
private void combine_space(gs_ref_memory_t *);
private void forget_changes(gs_ref_memory_t *);
void
alloc_forget_save_in(gs_dual_memory_t *dmem, alloc_save_t * save)
{
    gs_ref_memory_t *mem = save->space_local;
    alloc_save_t *sprev;

    print_save("forget_save", mem->space, save);

    /* Iteratively combine the current level with the previous one. */
    /* (Invisible saves, id == 0, do not count as a level.) */
    do {
	sprev = mem->saved;
	if (sprev->id != 0)
	    mem->save_level--;
	if (mem->save_level != 0) {
	    alloc_change_t *chp = mem->changes;

	    /* Mark the outer level's slots l_new again. */
	    save_set_new(&sprev->state, true);
	    /* Concatenate the changes chains. */
	    if (chp == 0)
		mem->changes = sprev->state.changes;
	    else {
		while (chp->next != 0)
		    chp = chp->next;
		chp->next = sprev->state.changes;
	    }
	    file_forget_save(mem);
	    combine_space(mem);	/* combine memory */
	} else {
	    /* Outermost level: no outer save to keep changes for. */
	    forget_changes(mem);
	    save_set_new(mem, false);
	    file_forget_save(mem);
	    combine_space(mem);	/* combine memory */
	    /* This is the outermost save, which might also */
	    /* need to combine global VM. */
	    mem = save->space_global;
	    if (mem != save->space_local && mem->saved != 0) {
		forget_changes(mem);
		save_set_new(mem, false);
		file_forget_save(mem);
		combine_space(mem);
	    }
	    alloc_set_not_in_save(dmem);
	    break;		/* must be outermost */
	}
    }
    while (sprev != save);
}
912
/* Combine the chunks of the next outer level with those of the current one, */
913
/* and free the bookkeeping structures. */
914
/*
 * Merge the chunks of the next outer save level (mem->saved->state) with
 * those of the current level, then free the save's bookkeeping structure.
 * Inner chunks (carved out of an outer chunk at save time) are dissolved
 * back into their outer chunk; free lists and accounting totals of the
 * two levels are combined.
 */
private void
combine_space(gs_ref_memory_t * mem)
{
    alloc_save_t *saved = mem->saved;
    gs_ref_memory_t *omem = &saved->state;
    chunk_t *cp;
    chunk_t *csucc;

    alloc_close_chunk(mem);
    for (cp = mem->cfirst; cp != 0; cp = csucc) {
	csucc = cp->cnext;	/* save before relinking */
	if (cp->outer == 0)
	    /* A free-standing chunk: just move it to the outer level. */
	    alloc_link_chunk(cp, omem);
	else {
	    /* An inner chunk: give its space back to the outer chunk. */
	    chunk_t *outer = cp->outer;

	    outer->inner_count--;
	    /* Re-point allocator caches that referenced the inner chunk. */
	    if (mem->pcc == cp)
		mem->pcc = outer;
	    if (mem->cfreed.cp == cp)
		mem->cfreed.cp = outer;
	    /* "Free" the header of the inner chunk, */
	    /* and any immediately preceding gap left by */
	    /* the GC having compacted the outer chunk. */
	    {
		/* Turn the gap into a dummy byte object so the outer
		 * chunk's object scan stays well-formed. */
		obj_header_t *hp = (obj_header_t *) outer->cbot;

		hp->o_alone = 0;
		hp->o_size = (char *)(cp->chead + 1)
		    - (char *)(hp + 1);
		hp->o_type = &st_bytes;
		/* The following call is probably not safe. */
#if 0				/* **************** */
		gs_free_object((gs_memory_t *) mem,
			       hp + 1, "combine_space(header)");
#endif /* **************** */
	    }
	    /* Update the outer chunk's allocation pointers. */
	    outer->cbot = cp->cbot;
	    outer->rcur = cp->rcur;
	    outer->rtop = cp->rtop;
	    outer->ctop = cp->ctop;
	    outer->has_refs |= cp->has_refs;
	    /* Only the chunk_t descriptor itself is freed; its space
	     * now belongs to the outer chunk. */
	    gs_free_object(mem->non_gc_memory, cp,
			   "combine_space(inner)");
	}
    }
    /* Update relevant parts of allocator state. */
    mem->cfirst = omem->cfirst;
    mem->clast = omem->clast;
    mem->allocated += omem->allocated;
    mem->gc_allocated += omem->allocated;
    mem->lost.objects += omem->lost.objects;
    mem->lost.refs += omem->lost.refs;
    mem->lost.strings += omem->lost.strings;
    mem->saved = omem->saved;
    mem->previous_status = omem->previous_status;
    {				/* Concatenate free lists. */
	int i;

	for (i = 0; i < num_freelists; i++) {
	    obj_header_t *olist = omem->freelists[i];
	    obj_header_t *list = mem->freelists[i];

	    if (olist == 0);	/* nothing to merge: intentionally empty */
	    else if (list == 0)
		mem->freelists[i] = olist;
	    else {
		/* Walk to the tail of the current list (each free object
		 * stores the next-pointer in its first word). */
		while (*(obj_header_t **) list != 0)
		    list = *(obj_header_t **) list;
		*(obj_header_t **) list = olist;
	    }
	}
	if (omem->largest_free_size > mem->largest_free_size)
	    mem->largest_free_size = omem->largest_free_size;
    }
    gs_free_object((gs_memory_t *) mem, saved, "combine_space(saved)");
    alloc_open_chunk(mem);
}
993
/* Free the changes chain for a level 0 .forgetsave, */
994
/* resetting the l_new flag in the changed refs. */
995
private void
996
forget_changes(gs_ref_memory_t * mem)
997
{
998
    register alloc_change_t *chp = mem->changes;
999
    alloc_change_t *next;
1000
 
1001
    for (; chp; chp = next) {
1002
	ref_packed *prp = chp->where;
1003
 
1004
	if_debug1('U', "[U]forgetting change 0x%lx\n", (ulong) chp);
1005
	if (!r_is_packed(prp))
1006
	    r_clear_attrs((ref *) prp, l_new);
1007
	next = chp->next;
1008
	gs_free_object((gs_memory_t *) mem, chp, "forget_changes");
1009
    }
1010
    mem->changes = 0;
1011
}
1012
/* Update the streams list when forgetting a save. */
1013
private void
1014
file_forget_save(gs_ref_memory_t * mem)
1015
{
1016
    const alloc_save_t *save = mem->saved;
1017
    stream *streams = mem->streams;
1018
    stream *saved_streams = save->state.streams;
1019
 
1020
    if_debug4('u', "[u%d]file_forget_save 0x%lx + 0x%lx for 0x%lx\n",
1021
	      mem->space, (ulong) streams, (ulong) saved_streams,
1022
	      (ulong) save);
1023
    if (streams == 0)
1024
	mem->streams = saved_streams;
1025
    else if (saved_streams != 0) {
1026
	while (streams->next != 0)
1027
	    streams = streams->next;
1028
	streams->next = saved_streams;
1029
	saved_streams->prev = streams;
1030
    }
1031
}
1032
 
1033
/* ------ Internal routines ------ */
1034
 
1035
/* Set or reset the l_new attribute in every relevant slot. */
/* This includes every slot on the current change chain, */
/* and every (ref) slot allocated at this save level. */
/* Return the number of bytes of data scanned. */
private long
save_set_new(gs_ref_memory_t * mem, bool to_new)
{
    long scanned = 0;

    /* Handle the change chain. */
    save_set_new_changes(mem, to_new);

    /* Handle newly allocated ref objects. */
    SCAN_MEM_CHUNKS(mem, cp) {
	if (cp->has_refs) {
	    /* Recompute has_refs while scanning: it stays true only if
	     * the chunk still actually contains ref objects. */
	    bool has_refs = false;

	    SCAN_CHUNK_OBJECTS(cp)
		DO_ALL
		if_debug3('U', "[U]set_new scan(0x%lx(%u), %d)\n",
			  (ulong) pre, size, to_new);
	    if (pre->o_type == &st_refs) {
		/* These are refs, scan them. */
		ref_packed *prp = (ref_packed *) (pre + 1);
		ref_packed *next = (ref_packed *) ((char *)prp + size);
#ifdef ALIGNMENT_ALIASING_BUG
		/* Route the ref access through a variable to avoid the
		 * compiler's aliasing assumptions on the direct cast. */
		ref *rpref;
# define RP_REF(rp) (rpref = (ref *)rp, rpref)
#else
# define RP_REF(rp) ((ref *)rp)
#endif

		if_debug2('U', "[U]refs 0x%lx to 0x%lx\n",
			  (ulong) prp, (ulong) next);
		has_refs = true;
		scanned += size;
		/* We know that every block of refs ends with */
		/* a full-size ref, so we only need the end check */
		/* when we encounter one of those. */
		if (to_new)
		    while (1) {
			if (r_is_packed(prp))
			    prp++;
			else {
			    RP_REF(prp)->tas.type_attrs |= l_new;
			    prp += packed_per_ref;
			    if (prp >= next)
				break;
			}
		} else
		    while (1) {
			if (r_is_packed(prp))
			    prp++;
			else {
			    RP_REF(prp)->tas.type_attrs &= ~l_new;
			    prp += packed_per_ref;
			    if (prp >= next)
				break;
			}
		    }
#undef RP_REF
	    } else
		scanned += sizeof(obj_header_t);
	    END_OBJECTS_SCAN
		cp->has_refs = has_refs;
	}
    }
    END_CHUNKS_SCAN
	if_debug2('u', "[u]set_new (%s) scanned %ld\n",
		  (to_new ? "restore" : "save"), scanned);
    return scanned;
}
1107
 
1108
/* Set or reset the l_new attribute on the changes chain. */
1109
private void
1110
save_set_new_changes(gs_ref_memory_t * mem, bool to_new)
1111
{
1112
    register alloc_change_t *chp = mem->changes;
1113
    register uint new = (to_new ? l_new : 0);
1114
 
1115
    for (; chp; chp = chp->next) {
1116
	ref_packed *prp = chp->where;
1117
 
1118
	if_debug3('U', "[U]set_new 0x%lx: (0x%lx, %d)\n",
1119
		  (ulong)chp, (ulong)prp, new);
1120
	if (!r_is_packed(prp)) {
1121
	    ref *const rp = (ref *) prp;
1122
 
1123
	    rp->tas.type_attrs =
1124
		(rp->tas.type_attrs & ~l_new) + new;
1125
	}
1126
    }
1127
}