/* Copyright (C) 1989, 2000, 2001 Aladdin Enterprises. All rights reserved.

  This software is provided AS-IS with no warranty, either express or
  implied.

  This software is distributed under license and may not be copied,
  modified or distributed except as expressly authorized under the terms
  of the license contained in the file LICENSE in this distribution.

  For more information about licensing, please refer to
  http://www.ghostscript.com/licensing/. For information on
  commercial licensing, go to http://www.artifex.com/licensing/ or
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
  San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: interp.c,v 1.20 2004/09/03 20:23:10 ray Exp $ */
/* Ghostscript language interpreter */
#include "memory_.h"
#include "string_.h"
#include "ghost.h"
#include "gsstruct.h"           /* for iastruct.h */
#include "stream.h"
#include "ierrors.h"
#include "estack.h"
#include "ialloc.h"
#include "iastruct.h"
#include "icontext.h"
#include "icremap.h"
#include "idebug.h"
#include "igstate.h"            /* for handling e_RemapColor */
#include "inamedef.h"
#include "iname.h"              /* for the_name_table */
#include "interp.h"
#include "ipacked.h"
#include "ostack.h"             /* must precede iscan.h */
#include "strimpl.h"            /* for sfilter.h */
#include "sfilter.h"            /* for iscan.h */
#include "iscan.h"
#include "iddict.h"
#include "isave.h"
#include "istack.h"
#include "itoken.h"
#include "iutil.h"              /* for array_get */
#include "ivmspace.h"
#include "dstack.h"
#include "files.h"              /* for file_check_read */
#include "oper.h"
#include "store.h"
#include "gpcheck.h"

/*
 * We may or may not optimize the handling of the special fast operators
 * in packed arrays.  If we do this, they run much faster when packed, but
 * slightly slower when not packed.
 */
#define PACKED_SPECIAL_OPS 1

/*
 * Pseudo-operators (procedures of type t_oparray) record
 * the operand and dictionary stack pointers, and restore them if an error
 * occurs during the execution of the procedure and if the procedure hasn't
 * (net) decreased the depth of the stack.  While this obviously doesn't
 * do all the work of restoring the state if a pseudo-operator gets an
 * error, it's a big help.  The only downside is that pseudo-operators run
 * a little slower.
 */
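/*
 * Illustrative example (editor's sketch, not from the original source):
 * if a pseudo-operator is entered with an o-stack depth of 2 and fails
 * midway after pushing three intermediate values, the depths recorded on
 * the e-stack let the cleanup procedure cut the o-stack back to depth 2,
 * so the error handler sees the operands as the caller left them.
 */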

/* GC descriptors for stacks */
extern_st(st_ref_stack);
public_st_dict_stack();
public_st_exec_stack();
public_st_op_stack();

/*
 * The procedure to call if an operator requests rescheduling.
 * This causes an error unless the context machinery has been installed.
 */
private int
no_reschedule(i_ctx_t **pi_ctx_p)
{
    return_error(e_invalidcontext);
}
int (*gs_interp_reschedule_proc)(i_ctx_t **) = no_reschedule;

/*
 * The procedure to call for time-slicing.
 * This is a no-op unless the context machinery has been installed.
 */
int (*gs_interp_time_slice_proc)(i_ctx_t **) = 0;

/*
 * The number of interpreter "ticks" between calls on the time_slice_proc.
 * Currently, the clock ticks before each operator, and at each
 * procedure return.
 */
int gs_interp_time_slice_ticks = 0x7fff;
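/* (0x7fff = 32767, so the time-slice procedure runs at most about once
   per 32K operator executions / procedure returns.) */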

/*
 * Apply an operator.  When debugging, we route all operator calls
 * through a procedure.
 */
#ifdef DEBUG
private int
call_operator(op_proc_t op_proc, i_ctx_t *i_ctx_p)
{
    int code = op_proc(i_ctx_p);

    return code;
}
#else
# define call_operator(proc, p) ((*(proc))(p))
#endif

/* Define debugging statistics. */
#ifdef DEBUG
struct stats_interp_s {
    long top;
    long lit, lit_array, exec_array, exec_operator, exec_name;
    long x_add, x_def, x_dup, x_exch, x_if, x_ifelse,
         x_index, x_pop, x_roll, x_sub;
    long find_name, name_lit, name_proc, name_oparray, name_operator;
    long p_full, p_exec_operator, p_exec_oparray, p_exec_non_x_operator,
         p_integer, p_lit_name, p_exec_name;
    long p_find_name, p_name_lit, p_name_proc;
} stats_interp;
# define INCR(v) (++(stats_interp.v))
#else
# define INCR(v) DO_NOTHING
#endif

/* Forward references */
private int estack_underflow(i_ctx_t *);
private int interp(i_ctx_t **, const ref *, ref *);
private int interp_exit(i_ctx_t *);
private void set_gc_signal(i_ctx_t *, int *, int);
private int copy_stack(i_ctx_t *, const ref_stack_t *, ref *);
private int oparray_pop(i_ctx_t *);
private int oparray_cleanup(i_ctx_t *);
private int zsetstackprotect(i_ctx_t *);
private int zcurrentstackprotect(i_ctx_t *);

/* Stack sizes */

/* The maximum stack sizes may all be set in the makefile. */

/*
 * Define the initial maximum size of the operand stack (MaxOpStack
 * user parameter).
 */
#ifndef MAX_OSTACK
# define MAX_OSTACK 800
#endif
/*
 * The minimum block size for extending the operand stack is the larger of:
 *   - the maximum number of parameters to an operator
 *     (currently setcolorscreen, with 12 parameters);
 *   - the maximum number of values pushed by an operator
 *     (currently setcolortransfer, which calls zcolor_remap_one 4 times
 *     and therefore pushes 16 values).
 */
#define MIN_BLOCK_OSTACK 16
const int gs_interp_max_op_num_args = MIN_BLOCK_OSTACK;  /* for iinit.c */

/*
 * Define the initial maximum size of the execution stack (MaxExecStack
 * user parameter).
 */
#ifndef MAX_ESTACK
# define MAX_ESTACK 5000
#endif
/*
 * The minimum block size for extending the execution stack is the largest
 * size of a contiguous block surrounding an e-stack mark.  (At least,
 * that's what the minimum value would be if we supported multi-block
 * estacks, which we currently don't.)  Currently, the largest such block is
 * the one created for text processing, which is 8 (snumpush) slots.
 */
#define MIN_BLOCK_ESTACK 8
/*
 * If we get an e-stack overflow, we need to cut it back far enough to
 * have some headroom for executing the error procedure.
 */
#define ES_HEADROOM 20
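/* (For example, with MAX_ESTACK = 5000 the e_execstackoverflow recovery
   below cuts the e-stack back to at most 5000 - 20 = 4980 entries before
   running the error procedure.) */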

/*
 * Define the initial maximum size of the dictionary stack (MaxDictStack
 * user parameter).  Again, this is also currently the block size for
 * extending the d-stack.
 */
#ifndef MAX_DSTACK
# define MAX_DSTACK 20
#endif
/*
 * The minimum block size for extending the dictionary stack is the number
 * of permanent entries on the dictionary stack, currently 3.
 */
#define MIN_BLOCK_DSTACK 3

/* See estack.h for a description of the execution stack. */

/* The logic for managing icount and iref below assumes that */
/* there are no control operators which pop and then push */
/* information on the execution stack. */

/* Stacks */
extern_st(st_ref_stack);
#define OS_GUARD_UNDER 10
#define OS_GUARD_OVER 10
#define OS_REFS_SIZE(body_size)\
  (stack_block_refs + OS_GUARD_UNDER + (body_size) + OS_GUARD_OVER)

#define ES_GUARD_UNDER 1
#define ES_GUARD_OVER 10
#define ES_REFS_SIZE(body_size)\
  (stack_block_refs + ES_GUARD_UNDER + (body_size) + ES_GUARD_OVER)

#define DS_REFS_SIZE(body_size)\
  (stack_block_refs + (body_size))
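/* (Worked example: OS_REFS_SIZE(MAX_OSTACK) expands to
   stack_block_refs + 10 + 800 + 10 refs -- the stack body plus guard
   slots under and over it that catch small under/overruns cheaply.) */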

/* Extended types.  The interpreter may replace the type of operators */
/* in procedures with these, to speed up the interpretation loop. */
/****** NOTE: If you add or change entries in this list, */
/****** you must change the three dispatches in the interpreter loop. */
/* The operator procedures are declared in opextern.h. */
#define tx_op t_next_index
typedef enum {
    tx_op_add = tx_op,
    tx_op_def,
    tx_op_dup,
    tx_op_exch,
    tx_op_if,
    tx_op_ifelse,
    tx_op_index,
    tx_op_pop,
    tx_op_roll,
    tx_op_sub,
    tx_next_op
} special_op_types;

#define num_special_ops ((int)tx_next_op - tx_op)
const int gs_interp_num_special_ops = num_special_ops;  /* for iinit.c */
const int tx_next_index = tx_next_op;

/*
 * Define the interpreter operators, which include the extended-type
 * operators defined in the list above.  NOTE: if the size of this table
 * ever exceeds 15 real entries, it will have to be split.
 */
const op_def interp_op_defs[] = {
    /*
     * The very first entry, which corresponds to operator index 0,
     * must not contain an actual operator.
     */
    op_def_begin_dict("systemdict"),
    /*
     * The next entries must be the extended-type operators, in the
     * correct order.
     */
    {"2add", zadd},
    {"2def", zdef},
    {"1dup", zdup},
    {"2exch", zexch},
    {"2if", zif},
    {"3ifelse", zifelse},
    {"1index", zindex},
    {"1pop", zpop},
    {"2roll", zroll},
    {"2sub", zsub},
    /*
     * The remaining entries are internal operators.
     */
    {"0.currentstackprotect", zcurrentstackprotect},
    {"1.setstackprotect", zsetstackprotect},
    {"0%interp_exit", interp_exit},
    {"0%oparray_pop", oparray_pop},
    op_def_end(0)
};
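/* (Reading aid: in op_def name strings the leading digit records the
   number of operands the operator expects, e.g. "3ifelse" takes a
   boolean and two procedures; names containing '%' are internal
   operators not intended to be called from ordinary PostScript.) */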

#define make_null_proc(pref)\
  make_empty_const_array(pref, a_executable + a_readonly)

/* Initialize the interpreter. */
int
gs_interp_init(i_ctx_t **pi_ctx_p, const ref *psystem_dict,
               gs_dual_memory_t *dmem)
{
    /* Create and initialize a context state. */
    gs_context_state_t *pcst = 0;
    int code = context_state_alloc(&pcst, psystem_dict, dmem);

    if (code >= 0)
        code = context_state_load(pcst);
    if (code < 0)
        lprintf1("Fatal error %d in gs_interp_init!", code);
    *pi_ctx_p = pcst;
    return code;
}
/*
 * Create initial stacks for the interpreter.
 * We export this for creating new contexts.
 */
int
gs_interp_alloc_stacks(gs_ref_memory_t *mem, gs_context_state_t * pcst)
{
    gs_ref_memory_t *smem =
        (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)mem);
    ref stk;

#define REFS_SIZE_OSTACK OS_REFS_SIZE(MAX_OSTACK)
#define REFS_SIZE_ESTACK ES_REFS_SIZE(MAX_ESTACK)
#define REFS_SIZE_DSTACK DS_REFS_SIZE(MAX_DSTACK)
    gs_alloc_ref_array(smem, &stk, 0,
                       REFS_SIZE_OSTACK + REFS_SIZE_ESTACK +
                       REFS_SIZE_DSTACK, "gs_interp_alloc_stacks");

    {
        ref_stack_t *pos = &pcst->op_stack.stack;

        r_set_size(&stk, REFS_SIZE_OSTACK);
        ref_stack_init(pos, &stk, OS_GUARD_UNDER, OS_GUARD_OVER, NULL,
                       smem, NULL);
        ref_stack_set_error_codes(pos, e_stackunderflow, e_stackoverflow);
        ref_stack_set_max_count(pos, MAX_OSTACK);
        stk.value.refs += REFS_SIZE_OSTACK;
    }

    {
        ref_stack_t *pes = &pcst->exec_stack.stack;
        ref euop;

        r_set_size(&stk, REFS_SIZE_ESTACK);
        make_oper(&euop, 0, estack_underflow);
        ref_stack_init(pes, &stk, ES_GUARD_UNDER, ES_GUARD_OVER, &euop,
                       smem, NULL);
        ref_stack_set_error_codes(pes, e_ExecStackUnderflow,
                                  e_execstackoverflow);
        /**************** E-STACK EXPANSION IS NYI. ****************/
        ref_stack_allow_expansion(pes, false);
        ref_stack_set_max_count(pes, MAX_ESTACK);
        stk.value.refs += REFS_SIZE_ESTACK;
    }

    {
        ref_stack_t *pds = &pcst->dict_stack.stack;

        r_set_size(&stk, REFS_SIZE_DSTACK);
        ref_stack_init(pds, &stk, 0, 0, NULL, smem, NULL);
        ref_stack_set_error_codes(pds, e_dictstackunderflow,
                                  e_dictstackoverflow);
        ref_stack_set_max_count(pds, MAX_DSTACK);
    }

#undef REFS_SIZE_OSTACK
#undef REFS_SIZE_ESTACK
#undef REFS_SIZE_DSTACK
    return 0;
}
/*
 * Free the stacks when destroying a context.  This is the inverse of
 * gs_interp_alloc_stacks.
 */
void
gs_interp_free_stacks(gs_ref_memory_t * smem, gs_context_state_t * pcst)
{
    /* Free the stacks in inverse order of allocation. */
    ref_stack_release(&pcst->dict_stack.stack);
    ref_stack_release(&pcst->exec_stack.stack);
    ref_stack_release(&pcst->op_stack.stack);
}
void
gs_interp_reset(i_ctx_t *i_ctx_p)
{   /* Reset the stacks. */
    ref_stack_clear(&o_stack);
    ref_stack_clear(&e_stack);
    esp++;
    make_oper(esp, 0, interp_exit);
    ref_stack_pop_to(&d_stack, min_dstack_size);
    dict_set_top();
}
/* Report an e-stack block underflow.  The bottom guard slots of */
/* e-stack blocks contain a pointer to this procedure. */
private int
estack_underflow(i_ctx_t *i_ctx_p)
{
    return e_ExecStackUnderflow;
}

/*
 * Create an operator during initialization.
 * If the operator is hard-coded into the interpreter,
 * assign it a special type and index.
 */
void
gs_interp_make_oper(ref * opref, op_proc_t proc, int idx)
{
    int i;

    for (i = num_special_ops; i > 0 && proc != interp_op_defs[i].proc; --i)
        DO_NOTHING;
    if (i > 0)
        make_tasv(opref, tx_op + (i - 1), a_executable, i, opproc, proc);
    else
        make_tasv(opref, t_operator, a_executable, idx, opproc, proc);
}

/*
 * Call the garbage collector, updating the context pointer properly.
 */
int
interp_reclaim(i_ctx_t **pi_ctx_p, int space)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;
    gs_gc_root_t ctx_root;
    int code;

    gs_register_struct_root(imemory_system, &ctx_root,
                            (void **)pi_ctx_p, "interp_reclaim(pi_ctx_p)");
    code = (*idmemory->reclaim)(idmemory, space);
    i_ctx_p = *pi_ctx_p;        /* may have moved */
    gs_unregister_root(imemory_system, &ctx_root, "interp_reclaim(pi_ctx_p)");
    return code;
}

/*
 * Invoke the interpreter.  If execution completes normally, return 0.
 * If an error occurs, the action depends on user_errors as follows:
 *    user_errors < 0: always return an error code.
 *    user_errors >= 0: let the PostScript machinery handle all errors.
 *      (This will eventually result in a fatal error if no 'stopped'
 *      is active.)
 * In case of a quit or a fatal error, also store the exit code.
 * Set *perror_object to null or the error object.
 */
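/*
 * Illustrative call pattern (editor's sketch, not from the original
 * source):
 *
 *     int exit_code;
 *     ref error_object;
 *     code = gs_interpret(&i_ctx_p, &initial_object, -1,
 *                         &exit_code, &error_object);
 *
 * With user_errors < 0 any PostScript error comes back to the caller as
 * a negative code instead of being handed to errordict.
 */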
private int gs_call_interp(i_ctx_t **, ref *, int, int *, ref *);
int
gs_interpret(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, int *pexit_code,
             ref * perror_object)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;
    gs_gc_root_t error_root;
    int code;

    gs_register_ref_root(imemory_system, &error_root,
                         (void **)&perror_object, "gs_interpret");
    code = gs_call_interp(pi_ctx_p, pref, user_errors, pexit_code,
                          perror_object);
    i_ctx_p = *pi_ctx_p;
    gs_unregister_root(imemory_system, &error_root, "gs_interpret");
    /* Avoid a dangling reference to a stack-allocated GC signal. */
    set_gc_signal(i_ctx_p, NULL, 0);
    return code;
}
private int
gs_call_interp(i_ctx_t **pi_ctx_p, ref * pref, int user_errors,
               int *pexit_code, ref * perror_object)
{
    ref *epref = pref;
    ref doref;
    ref *perrordict;
    ref error_name;
    int code, ccode;
    ref saref;
    int gc_signal = 0;
    i_ctx_t *i_ctx_p = *pi_ctx_p;

    *pexit_code = 0;
    ialloc_reset_requested(idmemory);
  again:
    /* Avoid a dangling error object that might get traced by a future GC. */
    make_null(perror_object);
    o_stack.requested = e_stack.requested = d_stack.requested = 0;
    while (gc_signal) {         /* Some routine below triggered a GC. */
        gs_gc_root_t epref_root;

        gc_signal = 0;
        /* Make sure that doref will get relocated properly if */
        /* a garbage collection happens with epref == &doref. */
        gs_register_ref_root(imemory_system, &epref_root,
                             (void **)&epref, "gs_call_interp(epref)");
        code = interp_reclaim(pi_ctx_p, -1);
        i_ctx_p = *pi_ctx_p;
        gs_unregister_root(imemory_system, &epref_root,
                           "gs_call_interp(epref)");
        if (code < 0)
            return code;
    }
    code = interp(pi_ctx_p, epref, perror_object);
    i_ctx_p = *pi_ctx_p;
    /* Prevent a dangling reference to the GC signal in ticks_left */
    /* in the frame of interp, but be prepared to do a GC if */
    /* an allocation in this routine asks for it. */
    set_gc_signal(i_ctx_p, &gc_signal, 1);
    if (esp < esbot)            /* popped guard entry */
        esp = esbot;
    switch (code) {
        case e_Fatal:
            *pexit_code = 255;
            return code;
        case e_Quit:
            *perror_object = osp[-1];
            *pexit_code = code = osp->value.intval;
            osp -= 2;
            return
                (code == 0 ? e_Quit :
                 code < 0 && code > -100 ? code : e_Fatal);
        case e_InterpreterExit:
            return 0;
        case e_ExecStackUnderflow:
            /****** WRONG -- must keep mark blocks intact ******/
            ref_stack_pop_block(&e_stack);
            doref = *perror_object;
            epref = &doref;
            goto again;
        case e_VMreclaim:
            /* Do the GC and continue. */
            code = interp_reclaim(pi_ctx_p,
                                  (osp->value.intval == 2 ?
                                   avm_global : avm_local));
            i_ctx_p = *pi_ctx_p;
            /****** What if code < 0? ******/
            make_oper(&doref, 0, zpop);
            epref = &doref;
            goto again;
        case e_NeedInput:
        case e_NeedStdin:
        case e_NeedStdout:
        case e_NeedStderr:
            return code;
    }
    /* Adjust osp in case of operand stack underflow */
    if (osp < osbot - 1)
        osp = osbot - 1;
    /* We have to handle stack over/underflow specially, because */
    /* we might be able to recover by adding or removing a block. */
    switch (code) {
        case e_dictstackoverflow:
            if (ref_stack_extend(&d_stack, d_stack.requested) >= 0) {
                dict_set_top();
                doref = *perror_object;
                epref = &doref;
                goto again;
            }
            if (osp >= ostop) {
                if ((ccode = ref_stack_extend(&o_stack, 1)) < 0)
                    return ccode;
            }
            ccode = copy_stack(i_ctx_p, &d_stack, &saref);
            if (ccode < 0)
                return ccode;
            ref_stack_pop_to(&d_stack, min_dstack_size);
            dict_set_top();
            *++osp = saref;
            break;
        case e_dictstackunderflow:
            if (ref_stack_pop_block(&d_stack) >= 0) {
                dict_set_top();
                doref = *perror_object;
                epref = &doref;
                goto again;
            }
            break;
        case e_execstackoverflow:
            /* We don't have to handle this specially: */
            /* The only places that could generate it */
            /* use check_estack, which does a ref_stack_extend, */
            /* so if we get this error, it's a real one. */
            if (osp >= ostop) {
                if ((ccode = ref_stack_extend(&o_stack, 1)) < 0)
                    return ccode;
            }
            ccode = copy_stack(i_ctx_p, &e_stack, &saref);
            if (ccode < 0)
                return ccode;
            {
                uint count = ref_stack_count(&e_stack);
                uint limit = ref_stack_max_count(&e_stack) - ES_HEADROOM;

                if (count > limit) {
                    /*
                     * If there is an e-stack mark within MIN_BLOCK_ESTACK of
                     * the new top, cut the stack back to remove the mark.
                     */
                    int skip = count - limit;
                    int i;

                    for (i = skip; i < skip + MIN_BLOCK_ESTACK; ++i) {
                        const ref *ep = ref_stack_index(&e_stack, i);

                        if (r_has_type_attrs(ep, t_null, a_executable)) {
                            skip = i + 1;
                            break;
                        }
                    }
                    pop_estack(i_ctx_p, skip);
                }
            }
            *++osp = saref;
            break;
        case e_stackoverflow:
            if (ref_stack_extend(&o_stack, o_stack.requested) >= 0) {
                /* We can't just re-execute the object, because */
                /* it might be a procedure being pushed as a */
                /* literal.  We check for this case specially. */
                doref = *perror_object;
                if (r_is_proc(&doref)) {
                    *++osp = doref;
                    make_null_proc(&doref);
                }
                epref = &doref;
                goto again;
            }
            ccode = copy_stack(i_ctx_p, &o_stack, &saref);
            if (ccode < 0)
                return ccode;
            ref_stack_clear(&o_stack);
            *++osp = saref;
            break;
        case e_stackunderflow:
            if (ref_stack_pop_block(&o_stack) >= 0) {
                doref = *perror_object;
                epref = &doref;
                goto again;
            }
            break;
    }
    if (user_errors < 0)
        return code;
    if (gs_errorname(i_ctx_p, code, &error_name) < 0)
        return code;            /* out-of-range error code! */
    if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 ||
        dict_find(perrordict, &error_name, &epref) <= 0
        )
        return code;            /* error name not in errordict??? */
    doref = *epref;
    epref = &doref;
    /* Push the error object on the operand stack if appropriate. */
    if (!ERROR_IS_INTERRUPT(code))
        *++osp = *perror_object;
    goto again;
}
private int
interp_exit(i_ctx_t *i_ctx_p)
{
    return e_InterpreterExit;
}

/* Set the GC signal for all VMs. */
private void
set_gc_signal(i_ctx_t *i_ctx_p, int *psignal, int value)
{
    gs_memory_gc_status_t stat;
    int i;

    for (i = 0; i < countof(idmemory->spaces_indexed); i++) {
        gs_ref_memory_t *mem = idmemory->spaces_indexed[i];
        gs_ref_memory_t *mem_stable;

        if (mem == 0)
            continue;
        for (;; mem = mem_stable) {
            mem_stable = (gs_ref_memory_t *)
                gs_memory_stable((gs_memory_t *)mem);
            gs_memory_gc_status(mem, &stat);
            stat.psignal = psignal;
            stat.signal_value = value;
            gs_memory_set_gc_status(mem, &stat);
            if (mem_stable == mem)
                break;
        }
    }
}

/* Copy the contents of an overflowed stack into a (local) array. */
private int
copy_stack(i_ctx_t *i_ctx_p, const ref_stack_t * pstack, ref * arr)
{
    uint size = ref_stack_count(pstack);
    uint save_space = ialloc_space(idmemory);
    int code;

    ialloc_set_space(idmemory, avm_local);
    code = ialloc_ref_array(arr, a_all, size, "copy_stack");
    if (code >= 0)
        code = ref_stack_store(pstack, arr, size, 0, 1, true, idmemory,
                               "copy_stack");
    ialloc_set_space(idmemory, save_space);
    return code;
}

/* Get the name corresponding to an error number. */
int
gs_errorname(i_ctx_t *i_ctx_p, int code, ref * perror_name)
{
    ref *perrordict, *pErrorNames;

    if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 ||
        dict_find_string(systemdict, "ErrorNames", &pErrorNames) <= 0
        )
        return_error(e_undefined);  /* errordict or ErrorNames not found?! */
    return array_get(imemory, pErrorNames, (long)(-code - 1), perror_name);
}
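/* (Example: for error code -16 the lookup above fetches ErrorNames
   element (-(-16) - 1) = 15, i.e. the sixteenth name in the array,
   since PostScript error codes are small negative integers.) */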

/* Store an error string in $error.errorinfo. */
/* This routine is here because of the proximity to the error handler. */
int
gs_errorinfo_put_string(i_ctx_t *i_ctx_p, const char *str)
{
    ref rstr;
    ref *pderror;
    int code = string_to_ref(str, &rstr, iimemory, "gs_errorinfo_put_string");

    if (code < 0)
        return code;
    if (dict_find_string(systemdict, "$error", &pderror) <= 0 ||
        !r_has_type(pderror, t_dictionary) ||
        idict_put_string(pderror, "errorinfo", &rstr) < 0
        )
        return_error(e_Fatal);
    return 0;
}

/* Main interpreter. */
/* If execution terminates normally, return e_InterpreterExit. */
/* If an error occurs, leave the current object in *perror_object */
/* and return a (negative) error code. */
private int
interp(i_ctx_t **pi_ctx_p /* context for execution, updated if resched */,
       const ref * pref /* object to interpret */,
       ref * perror_object)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;
    /*
     * Note that iref may actually be either a ref * or a ref_packed *.
     * Certain DEC compilers assume that a ref * is ref-aligned even if it
     * is cast to a short *, and generate code on this assumption, leading
     * to "unaligned access" errors.  For this reason, we declare
     * iref_packed, and use a macro to cast it to the more aligned type
     * where necessary (which is almost everywhere it is used).  This may
     * lead to compiler warnings about "cast increases alignment
     * requirements", but this is less harmful than expensive traps at run
     * time.
     */
    register const ref_packed *iref_packed = (const ref_packed *)pref;
    /*
     * To make matters worse, some versions of gcc/egcs have a bug that
     * leads them to assume that if iref_packed is EVER cast to a ref *,
     * it is ALWAYS ref-aligned.  We detect this in stdpre.h and provide
     * the following workaround:
     */
#ifdef ALIGNMENT_ALIASING_BUG
    const ref *iref_temp;
# define IREF (iref_temp = (const ref *)iref_packed, iref_temp)
#else
# define IREF ((const ref *)iref_packed)
#endif
#define SET_IREF(rp) (iref_packed = (const ref_packed *)(rp))
    register int icount = 0;    /* # of consecutive tokens at iref */
    register os_ptr iosp = osp; /* private copy of osp */
    register es_ptr iesp = esp; /* private copy of esp */
    int code;
    ref token;                  /* token read from file or string, */
                                /* must be declared in this scope */
    register const ref *pvalue;
    os_ptr whichp;

    /*
     * We have to make the error information into a struct;
     * otherwise, the Watcom compiler will assign it to registers
     * strictly on the basis of textual frequency.
     * We also have to use ref_assign_inline everywhere, and
     * avoid direct assignments of refs, so that esi and edi
     * will remain available on Intel processors.
     */
    struct interp_error_s {
        int code;
        int line;
        const ref *obj;
        ref full;
    } ierror;

    /*
     * Get a pointer to the name table so that we can use the
     * inline version of name_index_ref.
     */
    const name_table *const int_nt = imemory->gs_lib_ctx->gs_name_table;

#define set_error(ecode)\
  { ierror.code = ecode; ierror.line = __LINE__; }
#define return_with_error(ecode, objp)\
  { set_error(ecode); ierror.obj = objp; goto rwe; }
#define return_with_error_iref(ecode)\
  { set_error(ecode); goto rwei; }
#define return_with_code_iref()\
  { ierror.line = __LINE__; goto rweci; }
#define return_with_error_code_op(nargs)\
  return_with_code_iref()
#define return_with_stackoverflow(objp)\
  { o_stack.requested = 1; return_with_error(e_stackoverflow, objp); }
#define return_with_stackoverflow_iref()\
  { o_stack.requested = 1; return_with_error_iref(e_stackoverflow); }
    int ticks_left = gs_interp_time_slice_ticks;

    /*
     * If we exceed the VMThreshold, set ticks_left to -100
     * to alert the interpreter that we need to garbage collect.
     */
    set_gc_signal(i_ctx_p, &ticks_left, -100);

    esfile_clear_cache();
    /*
     * From here on, if icount > 0, iref and icount correspond
     * to the top entry on the execution stack: icount is the count
     * of sequential entries remaining AFTER the current one.
     */
#define IREF_NEXT(ip)\
  ((const ref_packed *)((const ref *)(ip) + 1))
#define IREF_NEXT_EITHER(ip)\
  ( r_is_packed(ip) ? (ip) + 1 : IREF_NEXT(ip) )
#define store_state(ep)\
  ( icount > 0 ? (ep->value.const_refs = IREF + 1, r_set_size(ep, icount)) : 0 )
#define store_state_short(ep)\
  ( icount > 0 ? (ep->value.packed = iref_packed + 1, r_set_size(ep, icount)) : 0 )
#define store_state_either(ep)\
  ( icount > 0 ? (ep->value.packed = IREF_NEXT_EITHER(iref_packed), r_set_size(ep, icount)) : 0 )
#define next()\
  if ( --icount > 0 ) { iref_packed = IREF_NEXT(iref_packed); goto top; } else goto out
#define next_short()\
  if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\
  ++iref_packed; goto top
#define next_either()\
  if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\
  iref_packed = IREF_NEXT_EITHER(iref_packed); goto top

#if !PACKED_SPECIAL_OPS
# undef next_either
# define next_either() next()
# undef store_state_either
# define store_state_either(ep) store_state(ep)
#endif
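/*
 * Illustrative walkthrough (editor's sketch, not from the original
 * source): while executing a 3-element procedure {a b c}, the
 * interpreter sets icount = 2 with iref at a.  Each next() decrements
 * icount and advances iref, so b and c run without touching the
 * e-stack; only when an element pushes new execution state does
 * store_state() materialize the remaining tail (iref + 1, size icount)
 * as an array on the e-stack.
 */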

    /* We want to recognize executable arrays here, */
    /* so we push the argument on the estack and enter */
    /* the loop at the bottom. */
    if (iesp >= estop)
        return_with_error(e_execstackoverflow, pref);
    ++iesp;
    ref_assign_inline(iesp, pref);
    goto bot;
  top:
    /*
     * This is the top of the interpreter loop.
     * iref points to the ref being interpreted.
     * Note that this might be an element of a packed array,
     * not a real ref: we carefully arranged the first 16 bits of
     * a ref and of a packed array element so they could be distinguished
     * from each other.  (See ghost.h and packed.h for more detail.)
     */
    INCR(top);
#ifdef DEBUG
    /* Do a little validation on the top o-stack entry. */
    if (iosp >= osbot &&
        (r_type(iosp) == t__invalid || r_type(iosp) >= tx_next_op)
        ) {
        lprintf("Invalid value on o-stack!\n");
        return_with_error_iref(e_Fatal);
    }
    if (gs_debug['I'] ||
        (gs_debug['i'] &&
         (r_is_packed(iref_packed) ?
          r_packed_is_name(iref_packed) :
          r_has_type(IREF, t_name)))
        ) {
        os_ptr save_osp = osp;  /* avoid side-effects */
        es_ptr save_esp = esp;

        osp = iosp;
        esp = iesp;
        dlprintf5("d%u,e%u<%u>0x%lx(%d): ",
                  ref_stack_count(&d_stack), ref_stack_count(&e_stack),
                  ref_stack_count(&o_stack), (ulong)IREF, icount);
        debug_print_ref(imemory, IREF);
        if (iosp >= osbot) {
            dputs(" // ");
            debug_print_ref(imemory, iosp);
        }
        dputc('\n');
        osp = save_osp;
        esp = save_esp;
        fflush(dstderr);
    }
#endif
/* Objects that have attributes (arrays, dictionaries, files, and strings) */
/* use lit and exec; other objects use plain and plain_exec. */
#define lit(t) type_xe_value(t, a_execute)
#define exec(t) type_xe_value(t, a_execute + a_executable)
#define nox(t) type_xe_value(t, 0)
#define nox_exec(t) type_xe_value(t, a_executable)
#define plain(t) type_xe_value(t, 0)
#define plain_exec(t) type_xe_value(t, a_executable)
    /*
     * We have to populate enough cases of the switch statement to force
     * some compilers to use a dispatch rather than a testing loop.
     * What a nuisance!
     */
    switch (r_type_xe(iref_packed)) {
            /* Access errors. */
#define cases_invalid()\
  case plain(t__invalid): case plain_exec(t__invalid)
        cases_invalid():
            return_with_error_iref(e_Fatal);
#define cases_nox()\
  case nox_exec(t_array): case nox_exec(t_dictionary):\
  case nox_exec(t_file): case nox_exec(t_string):\
  case nox_exec(t_mixedarray): case nox_exec(t_shortarray)
        cases_nox():
            return_with_error_iref(e_invalidaccess);
            /*
             * Literal objects.  We have to enumerate all the types.
             * In fact, we have to include some extra plain_exec entries
             * just to populate the switch.  We break them up into groups
             * to avoid overflowing some preprocessors.
             */
#define cases_lit_1()\
  case lit(t_array): case nox(t_array):\
  case plain(t_boolean): case plain_exec(t_boolean):\
  case lit(t_dictionary): case nox(t_dictionary)
#define cases_lit_2()\
  case lit(t_file): case nox(t_file):\
  case plain(t_fontID): case plain_exec(t_fontID):\
  case plain(t_integer): case plain_exec(t_integer):\
  case plain(t_mark): case plain_exec(t_mark)
#define cases_lit_3()\
  case plain(t_name):\
  case plain(t_null):\
  case plain(t_oparray):\
  case plain(t_operator)
#define cases_lit_4()\
  case plain(t_real): case plain_exec(t_real):\
  case plain(t_save): case plain_exec(t_save):\
  case lit(t_string): case nox(t_string)
#define cases_lit_5()\
  case lit(t_mixedarray): case nox(t_mixedarray):\
  case lit(t_shortarray): case nox(t_shortarray):\
  case plain(t_device): case plain_exec(t_device):\
  case plain(t_struct): case plain_exec(t_struct):\
  case plain(t_astruct): case plain_exec(t_astruct)
            /* Executable arrays are treated as literals in direct execution. */
#define cases_lit_array()\
  case exec(t_array): case exec(t_mixedarray): case exec(t_shortarray)
        cases_lit_1():
        cases_lit_2():
        cases_lit_3():
        cases_lit_4():
        cases_lit_5():
            INCR(lit);
            break;
        cases_lit_array():
            INCR(lit_array);
            break;
            /* Special operators. */
        case plain_exec(tx_op_add):
          x_add: INCR(x_add);
            if ((code = zop_add(iosp)) < 0)
                return_with_error_code_op(2);
            iosp--;
            next_either();
        case plain_exec(tx_op_def):
          x_def: INCR(x_def);
            osp = iosp;         /* sync o_stack */
            if ((code = zop_def(i_ctx_p)) < 0)
                return_with_error_code_op(2);
            iosp -= 2;
            next_either();
        case plain_exec(tx_op_dup):
          x_dup: INCR(x_dup);
            if (iosp < osbot)
                return_with_error_iref(e_stackunderflow);
            if (iosp >= ostop)
                return_with_stackoverflow_iref();
            iosp++;
            ref_assign_inline(iosp, iosp - 1);
            next_either();
        case plain_exec(tx_op_exch):
          x_exch: INCR(x_exch);
            if (iosp <= osbot)
                return_with_error_iref(e_stackunderflow);
            ref_assign_inline(&token, iosp);
            ref_assign_inline(iosp, iosp - 1);
            ref_assign_inline(iosp - 1, &token);
            next_either();
        case plain_exec(tx_op_if):
          x_if: INCR(x_if);
            if (!r_has_type(iosp - 1, t_boolean))
                return_with_error_iref((iosp <= osbot ?
                                        e_stackunderflow : e_typecheck));
            if (!r_is_proc(iosp))
                return_with_error_iref(check_proc_failed(iosp));
            if (!iosp[-1].value.boolval) {
                iosp -= 2;
                next_either();
            }
            if (iesp >= estop)
                return_with_error_iref(e_execstackoverflow);
            store_state_either(iesp);
            whichp = iosp;
            iosp -= 2;
            goto ifup;
        case plain_exec(tx_op_ifelse):
          x_ifelse: INCR(x_ifelse);
            if (!r_has_type(iosp - 2, t_boolean))
                return_with_error_iref((iosp < osbot + 2 ?
                                        e_stackunderflow : e_typecheck));
            if (!r_is_proc(iosp - 1))
                return_with_error_iref(check_proc_failed(iosp - 1));
            if (!r_is_proc(iosp))
                return_with_error_iref(check_proc_failed(iosp));
            if (iesp >= estop)
                return_with_error_iref(e_execstackoverflow);
            store_state_either(iesp);
            whichp = (iosp[-2].value.boolval ? iosp - 1 : iosp);
            iosp -= 3;
            /* Open code "up" for the array case(s) */
          ifup: if ((icount = r_size(whichp) - 1) <= 0) {
                if (icount < 0)
                    goto up;    /* 0-element proc */
                SET_IREF(whichp->value.refs);   /* 1-element proc */
                if (--ticks_left > 0)
                    goto top;
            }
            ++iesp;
            /* Do a ref_assign, but also set iref. */
            iesp->tas = whichp->tas;
            SET_IREF(iesp->value.refs = whichp->value.refs);
            if (--ticks_left > 0)
                goto top;
            goto slice;
        case plain_exec(tx_op_index):
          x_index: INCR(x_index);
            osp = iosp;         /* zindex references o_stack */
            if ((code = zindex(i_ctx_p)) < 0)
                return_with_error_code_op(1);
            next_either();
        case plain_exec(tx_op_pop):
          x_pop: INCR(x_pop);
            if (iosp < osbot)
                return_with_error_iref(e_stackunderflow);
            iosp--;
            next_either();
        case plain_exec(tx_op_roll):
          x_roll: INCR(x_roll);
            osp = iosp;         /* zroll references o_stack */
            if ((code = zroll(i_ctx_p)) < 0)
                return_with_error_code_op(2);
            iosp -= 2;
            next_either();
        case plain_exec(tx_op_sub):
          x_sub: INCR(x_sub);
            if ((code = zop_sub(iosp)) < 0)
                return_with_error_code_op(2);
            iosp--;
            next_either();
            /* Executable types. */
        case plain_exec(t_null):
            goto bot;
        case plain_exec(t_oparray):
            /* Replace with the definition and go again. */
            INCR(exec_array);
            pvalue = IREF->value.const_refs;
          opst:                 /* Prepare to call a t_oparray procedure in *pvalue. */
            store_state(iesp);
          oppr:                 /* Record the stack depths in case of failure. */
            if (iesp >= estop - 3)
                return_with_error_iref(e_execstackoverflow);
            iesp += 4;
            osp = iosp;         /* ref_stack_count_inline needs this */
            make_mark_estack(iesp - 3, es_other, oparray_cleanup);
            make_int(iesp - 2, ref_stack_count_inline(&o_stack));
            make_int(iesp - 1, ref_stack_count_inline(&d_stack));
            make_op_estack(iesp, oparray_pop);
            goto pr;
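            /*
             * E-stack layout built above for a pseudo-operator call
             * (editor's summary of the four slots just written):
             *   iesp - 3:  es_other mark, cleanup proc = oparray_cleanup
             *   iesp - 2:  integer, o-stack depth at entry
             *   iesp - 1:  integer, d-stack depth at entry
             *   iesp:      operator, oparray_pop (removes these slots
             *              when the procedure finishes)
             */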
1081 |
prst: /* Prepare to call the procedure (array) in *pvalue. */
|
|
|
1082 |
store_state(iesp);
|
|
|
1083 |
pr: /* Call the array in *pvalue. State has been stored. */
|
|
|
1084 |
if ((icount = r_size(pvalue) - 1) <= 0) {
|
|
|
1085 |
if (icount < 0)
|
|
|
1086 |
goto up; /* 0-element proc */
|
|
|
1087 |
SET_IREF(pvalue->value.refs); /* 1-element proc */
|
|
|
1088 |
if (--ticks_left > 0)
|
|
|
1089 |
goto top;
|
|
|
1090 |
}
|
|
|
1091 |
if (iesp >= estop)
|
|
|
1092 |
return_with_error_iref(e_execstackoverflow);
|
|
|
1093 |
++iesp;
|
|
|
1094 |
/* Do a ref_assign, but also set iref. */
|
|
|
1095 |
iesp->tas = pvalue->tas;
|
|
|
1096 |
SET_IREF(iesp->value.refs = pvalue->value.refs);
|
|
|
1097 |
if (--ticks_left > 0)
|
|
|
1098 |
goto top;
|
|
|
1099 |
goto slice;
|
|
|
1100 |
case plain_exec(t_operator):
|
|
|
1101 |
INCR(exec_operator);
|
|
|
1102 |
if (--ticks_left <= 0) { /* The following doesn't work, */
|
|
|
1103 |
/* and I can't figure out why. */
|
|
|
1104 |
/****** goto sst; ******/
|
|
|
1105 |
}
|
|
|
1106 |
esp = iesp; /* save for operator */
|
|
|
1107 |
osp = iosp; /* ditto */
|
|
|
1108 |
/* Operator routines take osp as an argument. */
|
|
|
1109 |
/* This is just a convenience, since they adjust */
|
|
|
1110 |
/* osp themselves to reflect the results. */
|
|
|
1111 |
/* Operators that (net) push information on the */
|
|
|
1112 |
/* operand stack must check for overflow: */
|
|
|
1113 |
/* this normally happens automatically through */
|
|
|
1114 |
/* the push macro (in oper.h). */
|
|
|
1115 |
/* Operators that do not typecheck their operands, */
|
|
|
1116 |
/* or take a variable number of arguments, */
|
|
|
1117 |
/* must check explicitly for stack underflow. */
|
|
|
1118 |
/* (See oper.h for more detail.) */
|
|
|
1119 |
/* Note that each case must set iosp = osp: */
|
|
|
1120 |
/* this is so we can switch on code without having to */
|
|
|
1121 |
/* store it and reload it (for dumb compilers). */
|
|
|
1122 |
switch (code = call_operator(real_opproc(IREF), i_ctx_p)) {
|
|
|
1123 |
case 0: /* normal case */
|
|
|
1124 |
case 1: /* alternative success case */
|
|
|
1125 |
iosp = osp;
|
|
|
1126 |
next();
|
|
|
1127 |
case o_push_estack: /* store the state and go to up */
|
|
|
1128 |
store_state(iesp);
|
|
|
1129 |
opush:iosp = osp;
|
|
|
1130 |
iesp = esp;
|
|
|
1131 |
if (--ticks_left > 0)
|
|
|
1132 |
goto up;
|
|
|
1133 |
goto slice;
|
|
|
1134 |
case o_pop_estack: /* just go to up */
|
|
|
1135 |
opop:iosp = osp;
|
|
|
1136 |
if (esp == iesp)
|
|
|
1137 |
goto bot;
|
|
|
1138 |
iesp = esp;
|
|
|
1139 |
goto up;
|
|
|
1140 |
case o_reschedule:
|
|
|
1141 |
store_state(iesp);
|
|
|
1142 |
goto res;
|
|
|
1143 |
case e_RemapColor:
|
|
|
1144 |
oe_remap: store_state(iesp);
|
|
|
1145 |
remap: if (iesp + 2 >= estop) {
|
|
|
1146 |
esp = iesp;
|
|
|
1147 |
code = ref_stack_extend(&e_stack, 2);
|
|
|
1148 |
if (code < 0)
|
|
|
1149 |
return_with_error_iref(code);
|
|
|
1150 |
iesp = esp;
|
|
|
1151 |
}
|
|
|
1152 |
packed_get(imemory, iref_packed, iesp + 1);
|
|
|
1153 |
make_oper(iesp + 2, 0,
|
|
|
1154 |
r_ptr(&istate->remap_color_info,
|
|
|
1155 |
int_remap_color_info_t)->proc);
|
|
|
1156 |
iesp += 2;
|
|
|
1157 |
goto up;
|
|
|
1158 |
}
|
|
|
1159 |
iosp = osp;
|
|
|
1160 |
iesp = esp;
|
|
|
1161 |
return_with_code_iref();
|
|
|
1162 |
case plain_exec(t_name):
|
|
|
1163 |
INCR(exec_name);
|
|
|
1164 |
pvalue = IREF->value.pname->pvalue;
|
|
|
1165 |
if (!pv_valid(pvalue)) {
|
|
|
1166 |
uint nidx = names_index(int_nt, IREF);
|
|
|
1167 |
uint htemp;
|
|
|
1168 |
|
|
|
1169 |
INCR(find_name);
|
|
|
1170 |
if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0)
|
|
|
1171 |
return_with_error_iref(e_undefined);
|
|
|
1172 |
}
|
|
|
1173 |
/* Dispatch on the type of the value. */
|
|
|
1174 |
/* Again, we have to over-populate the switch. */
|
|
|
1175 |
switch (r_type_xe(pvalue)) {
|
|
|
1176 |
cases_invalid():
|
|
|
1177 |
return_with_error_iref(e_Fatal);
|
|
|
1178 |
cases_nox(): /* access errors */
|
|
|
1179 |
return_with_error_iref(e_invalidaccess);
|
|
|
1180 |
cases_lit_1():
|
|
|
1181 |
cases_lit_2():
|
|
|
1182 |
cases_lit_3():
|
|
|
1183 |
cases_lit_4():
|
|
|
1184 |
cases_lit_5():
|
|
|
1185 |
INCR(name_lit);
|
|
|
1186 |
/* Just push the value */
|
|
|
1187 |
if (iosp >= ostop)
|
|
|
1188 |
return_with_stackoverflow(pvalue);
|
|
|
1189 |
++iosp;
|
|
|
1190 |
ref_assign_inline(iosp, pvalue);
|
|
|
1191 |
next();
|
|
|
1192 |
case exec(t_array):
|
|
|
1193 |
case exec(t_mixedarray):
|
|
|
1194 |
case exec(t_shortarray):
|
|
|
1195 |
INCR(name_proc);
|
|
|
1196 |
/* This is an executable procedure, execute it. */
|
|
|
1197 |
goto prst;
|
|
|
1198 |
case plain_exec(tx_op_add):
|
|
|
1199 |
goto x_add;
|
|
|
1200 |
case plain_exec(tx_op_def):
|
|
|
1201 |
goto x_def;
|
|
|
1202 |
case plain_exec(tx_op_dup):
|
|
|
1203 |
goto x_dup;
|
|
|
1204 |
case plain_exec(tx_op_exch):
|
|
|
1205 |
goto x_exch;
|
|
|
1206 |
case plain_exec(tx_op_if):
|
|
|
1207 |
goto x_if;
|
|
|
1208 |
case plain_exec(tx_op_ifelse):
|
|
|
1209 |
goto x_ifelse;
|
|
|
1210 |
case plain_exec(tx_op_index):
|
|
|
1211 |
goto x_index;
|
|
|
1212 |
case plain_exec(tx_op_pop):
|
|
|
1213 |
goto x_pop;
|
|
|
1214 |
case plain_exec(tx_op_roll):
|
|
|
1215 |
goto x_roll;
|
|
|
1216 |
case plain_exec(tx_op_sub):
|
|
|
1217 |
goto x_sub;
|
|
|
1218 |
case plain_exec(t_null):
|
|
|
1219 |
goto bot;
|
|
|
1220 |
case plain_exec(t_oparray):
|
|
|
1221 |
INCR(name_oparray);
|
|
|
1222 |
pvalue = (const ref *)pvalue->value.const_refs;
|
|
|
1223 |
goto opst;
|
|
|
1224 |
case plain_exec(t_operator):
|
|
|
1225 |
INCR(name_operator);
|
|
|
1226 |
{ /* Shortcut for operators. */
|
|
|
1227 |
/* See above for the logic. */
|
|
|
1228 |
if (--ticks_left <= 0) { /* The following doesn't work, */
|
|
|
1229 |
/* and I can't figure out why. */
|
|
|
1230 |
/****** goto sst; ******/
|
|
|
1231 |
}
|
|
|
1232 |
esp = iesp;
|
|
|
1233 |
osp = iosp;
|
|
|
1234 |
switch (code = call_operator(real_opproc(pvalue),
|
|
|
1235 |
i_ctx_p)
|
|
|
1236 |
) {
|
|
|
1237 |
case 0: /* normal case */
|
|
|
1238 |
case 1: /* alternative success case */
|
|
|
1239 |
iosp = osp;
|
|
|
1240 |
next();
|
|
|
1241 |
case o_push_estack:
|
|
|
1242 |
store_state(iesp);
|
|
|
1243 |
goto opush;
|
|
|
1244 |
case o_pop_estack:
|
|
|
1245 |
goto opop;
|
|
|
1246 |
case o_reschedule:
|
|
|
1247 |
store_state(iesp);
|
|
|
1248 |
goto res;
|
|
|
1249 |
case e_RemapColor:
|
|
|
1250 |
goto oe_remap;
|
|
|
1251 |
}
|
|
|
1252 |
iosp = osp;
|
|
|
1253 |
iesp = esp;
|
|
|
1254 |
return_with_error(code, pvalue);
|
|
|
1255 |
}
|
|
|
1256 |
case plain_exec(t_name):
|
|
|
1257 |
case exec(t_file):
|
|
|
1258 |
case exec(t_string):
|
|
|
1259 |
default:
|
|
|
1260 |
/* Not a procedure, reinterpret it. */
|
|
|
1261 |
store_state(iesp);
|
|
|
1262 |
icount = 0;
|
|
|
1263 |
SET_IREF(pvalue);
|
|
|
1264 |
goto top;
|
|
|
1265 |
}
|
|
|
1266 |
case exec(t_file):
|
|
|
1267 |
{ /* Executable file. Read the next token and interpret it. */
|
|
|
1268 |
stream *s;
|
|
|
1269 |
scanner_state sstate;
|
|
|
1270 |
|
|
|
1271 |
check_read_known_file(s, IREF, return_with_error_iref);
|
|
|
1272 |
rt:
|
|
|
1273 |
if (iosp >= ostop) /* check early */
|
|
|
1274 |
return_with_stackoverflow_iref();
|
|
|
1275 |
osp = iosp; /* scan_token uses ostack */
|
|
|
1276 |
scanner_state_init_options(&sstate, i_ctx_p->scanner_options);
|
|
|
1277 |
again:
|
|
|
1278 |
code = scan_token(i_ctx_p, s, &token, &sstate);
|
|
|
1279 |
iosp = osp; /* ditto */
|
|
|
1280 |
switch (code) {
|
|
|
1281 |
case 0: /* read a token */
|
|
|
1282 |
/* It's worth checking for literals, which make up */
|
|
|
1283 |
/* the majority of input tokens, before storing the */
|
|
|
1284 |
/* state on the e-stack. Note that because of //, */
|
|
|
1285 |
/* the token may have *any* type and attributes. */
|
|
|
1286 |
/* Note also that executable arrays aren't executed */
|
|
|
1287 |
/* at the top level -- they're treated as literals. */
|
|
|
1288 |
if (!r_has_attr(&token, a_executable) ||
|
|
|
1289 |
r_is_array(&token)
|
|
|
1290 |
) { /* If scan_token used the o-stack, */
|
|
|
1291 |
/* we know we can do a push now; if not, */
|
|
|
1292 |
/* the pre-check is still valid. */
|
|
|
1293 |
iosp++;
|
|
|
1294 |
ref_assign_inline(iosp, &token);
|
|
|
1295 |
goto rt;
|
|
|
1296 |
}
|
|
|
1297 |
store_state(iesp);
|
|
|
1298 |
/* Push the file on the e-stack */
|
|
|
1299 |
if (iesp >= estop)
|
|
|
1300 |
return_with_error_iref(e_execstackoverflow);
|
|
|
1301 |
esfile_set_cache(++iesp);
|
|
|
1302 |
ref_assign_inline(iesp, IREF);
|
|
|
1303 |
SET_IREF(&token);
|
|
|
1304 |
icount = 0;
|
|
|
1305 |
goto top;
|
|
|
1306 |
case e_undefined: /* //name undefined */
|
|
|
1307 |
return_with_error(code, &token);
|
|
|
1308 |
case scan_EOF: /* end of file */
|
|
|
1309 |
esfile_clear_cache();
|
|
|
1310 |
goto bot;
|
|
|
1311 |
case scan_BOS:
|
|
|
1312 |
/* Binary object sequences */
|
|
|
1313 |
/* ARE executed at the top level. */
|
|
|
1314 |
store_state(iesp);
                /* Push the file on the e-stack */
                if (iesp >= estop)
                    return_with_error_iref(e_execstackoverflow);
                esfile_set_cache(++iesp);
                ref_assign_inline(iesp, IREF);
                pvalue = &token;
                goto pr;
            case scan_Refill:
                store_state(iesp);
                /* iref may point into the exec stack; */
                /* save its referent now. */
                ref_assign_inline(&token, IREF);
                /* Push the file on the e-stack */
                if (iesp >= estop)
                    return_with_error_iref(e_execstackoverflow);
                ++iesp;
                ref_assign_inline(iesp, &token);
                esp = iesp;
                osp = iosp;
                code = scan_handle_refill(i_ctx_p, &token, &sstate,
                                          true, true,
                                          ztokenexec_continue);
          scan_cont:
                iosp = osp;
                iesp = esp;
                switch (code) {
                case 0:
                    iesp--;             /* don't push the file */
                    goto again;         /* stacks are unchanged */
                case o_push_estack:
                    esfile_clear_cache();
                    if (--ticks_left > 0)
                        goto up;
                    goto slice;
                }
                /* must be an error */
                iesp--;                 /* don't push the file */
                return_with_code_iref();
            case scan_Comment:
            case scan_DSC_Comment: {
                /* See scan_Refill above for comments. */
                ref file_token;

                store_state(iesp);
                ref_assign_inline(&file_token, IREF);
                if (iesp >= estop)
                    return_with_error_iref(e_execstackoverflow);
                ++iesp;
                ref_assign_inline(iesp, &file_token);
                esp = iesp;
                osp = iosp;
                code = ztoken_handle_comment(i_ctx_p, &file_token,
                                             &sstate, &token,
                                             code, true, true,
                                             ztokenexec_continue);
            }
                goto scan_cont;
            default:            /* error */
                return_with_code_iref();
            }
        }
    case exec(t_string):
        {                       /* Executable string.  Read a token and interpret it. */
            stream ss;
            scanner_state sstate;

            scanner_state_init_options(&sstate, SCAN_FROM_STRING);
            s_init(&ss, NULL);
            sread_string(&ss, IREF->value.bytes, r_size(IREF));
            osp = iosp;         /* scan_token uses ostack */
            code = scan_token(i_ctx_p, &ss, &token, &sstate);
            iosp = osp;         /* ditto */
            switch (code) {
            case 0:             /* read a token */
            case scan_BOS:      /* binary object sequence */
                store_state(iesp);
                /* If the updated string isn't empty, push it back */
                /* on the e-stack. */
                {
                    uint size = sbufavailable(&ss);

                    if (size) {
                        if (iesp >= estop)
                            return_with_error_iref(e_execstackoverflow);
                        ++iesp;
                        iesp->tas.type_attrs = IREF->tas.type_attrs;
                        iesp->value.const_bytes = sbufptr(&ss);
                        r_set_size(iesp, size);
                    }
                }
                if (code == 0) {
                    SET_IREF(&token);
                    icount = 0;
                    goto top;
                }
                /* Handle BOS specially */
                pvalue = &token;
                goto pr;
            case scan_EOF:      /* end of string */
                goto bot;
            case scan_Refill:   /* error */
                code = gs_note_error(e_syntaxerror);
            default:            /* error */
                return_with_code_iref();
            }
        }
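
        /*
         * Illustrative sketch (not from the original source): at the
         * PostScript level, this case is what makes constructs such as
         *
         *     (3 4 add) cvx exec    % leaves 7 on the operand stack
         *
         * work -- the string is wrapped in a temporary stream, one token
         * is scanned, any unread remainder is pushed back on the e-stack
         * as an executable string, and the scanned token is interpreted.
         */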
        /* Handle packed arrays here by re-dispatching. */
        /* This also picks up some anomalous cases of non-packed arrays. */
    default:
        {
            uint index;

            switch (*iref_packed >> r_packed_type_shift) {
            case pt_full_ref:
            case pt_full_ref + 1:
                INCR(p_full);
                if (iosp >= ostop)
                    return_with_stackoverflow_iref();
                /* We know this can't be an executable object */
                /* requiring special handling, so we just push it. */
                ++iosp;
                /* We know that refs are properly aligned: */
                /* see packed.h for details. */
                ref_assign_inline(iosp, IREF);
                next();
            case pt_executable_operator:
                index = *iref_packed & packed_value_mask;
                if (--ticks_left <= 0) {        /* The following doesn't work, */
                    /* and I can't figure out why. */
                    /****** goto sst_short; ******/
                }
                if (!op_index_is_operator(index)) {
                    INCR(p_exec_oparray);
                    store_state_short(iesp);
                    /* Call the operator procedure. */
                    index -= op_def_count;
                    pvalue = (const ref *)
                        (index < r_size(&op_array_table_global.table) ?
                         op_array_table_global.table.value.const_refs +
                             index :
                         op_array_table_local.table.value.const_refs +
                             (index - r_size(&op_array_table_global.table)));
                    goto oppr;
                }
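                /*
                 * Illustrative note: indices below op_def_count name real
                 * (C-coded) operators; larger indices name oparrays --
                 * PostScript procedures installed as operators.  The
                 * selection above relies on the global oparray table
                 * occupying the low part of the oparray index range and
                 * the local table the part immediately after it.
                 */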
                INCR(p_exec_operator);
                /* See the main plain_exec(t_operator) case */
                /* for details of what happens here. */
#if PACKED_SPECIAL_OPS
                /*
                 * We arranged in iinit.c that the special ops
                 * have operator indices starting at 1.
                 *
                 * The (int) cast in the next line is required
                 * because some compilers don't allow arithmetic
                 * involving two different enumerated types.
                 */
# define case_xop(xop) case xop - (int)tx_op + 1
                switch (index) {
                case_xop(tx_op_add):goto x_add;
                case_xop(tx_op_def):goto x_def;
                case_xop(tx_op_dup):goto x_dup;
                case_xop(tx_op_exch):goto x_exch;
                case_xop(tx_op_if):goto x_if;
                case_xop(tx_op_ifelse):goto x_ifelse;
                case_xop(tx_op_index):goto x_index;
                case_xop(tx_op_pop):goto x_pop;
                case_xop(tx_op_roll):goto x_roll;
                case_xop(tx_op_sub):goto x_sub;
                case 0:         /* for dumb compilers */
                default:
                    ;
                }
# undef case_xop
#endif
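                /*
                 * Illustrative expansion of case_xop, assuming tx_op_add
                 * is the first special op (tx_op_add == tx_op):
                 *
                 *     case_xop(tx_op_add)  =>  case tx_op_add - (int)tx_op + 1:
                 *                              i.e. case 1:
                 *
                 * which matches the arrangement in iinit.c that gives the
                 * special ops operator indices starting at 1.
                 */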
                INCR(p_exec_non_x_operator);
                esp = iesp;
                osp = iosp;
                switch (code = call_operator(op_index_proc(index), i_ctx_p)) {
                case 0:
                case 1:
                    iosp = osp;
                    next_short();
                case o_push_estack:
                    store_state_short(iesp);
                    goto opush;
                case o_pop_estack:
                    iosp = osp;
                    if (esp == iesp) {
                        next_short();
                    }
                    iesp = esp;
                    goto up;
                case o_reschedule:
                    store_state_short(iesp);
                    goto res;
                case e_RemapColor:
                    store_state_short(iesp);
                    goto remap;
                }
                iosp = osp;
                iesp = esp;
                return_with_code_iref();
            case pt_integer:
                INCR(p_integer);
                if (iosp >= ostop)
                    return_with_stackoverflow_iref();
                ++iosp;
                make_int(iosp,
                         ((int)*iref_packed & packed_int_mask) +
                         packed_min_intval);
                next_short();
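                /*
                 * Worked example with made-up numbers (the real constants
                 * live in ipacked.h): if packed_min_intval were -5000 and
                 * the masked bits of *iref_packed held 5007, the integer
                 * pushed above would be 5007 + (-5000) = 7; small integers
                 * are thus encoded directly in the packed ref.
                 */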
            case pt_literal_name:
                INCR(p_lit_name);
                {
                    uint nidx = *iref_packed & packed_value_mask;

                    if (iosp >= ostop)
                        return_with_stackoverflow_iref();
                    ++iosp;
                    name_index_ref_inline(int_nt, nidx, iosp);
                    next_short();
                }
            case pt_executable_name:
                INCR(p_exec_name);
                {
                    uint nidx = *iref_packed & packed_value_mask;

                    pvalue = name_index_ptr_inline(int_nt, nidx)->pvalue;
                    if (!pv_valid(pvalue)) {
                        uint htemp;

                        INCR(p_find_name);
                        if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) {
                            names_index_ref(int_nt, nidx, &token);
                            return_with_error(e_undefined, &token);
                        }
                    }
                    if (r_has_masked_attrs(pvalue, a_execute, a_execute + a_executable)) {
                        /* Literal, push it. */
                        INCR(p_name_lit);
                        if (iosp >= ostop)
                            return_with_stackoverflow_iref();
                        ++iosp;
                        ref_assign_inline(iosp, pvalue);
                        next_short();
                    }
                    if (r_is_proc(pvalue)) {    /* This is an executable procedure, */
                        /* execute it. */
                        INCR(p_name_proc);
                        store_state_short(iesp);
                        goto pr;
                    }
                    /* Not a literal or procedure, reinterpret it. */
                    store_state_short(iesp);
                    icount = 0;
                    SET_IREF(pvalue);
                    goto top;
                }
                /* default can't happen here */
            }
        }
    }
    /* Literal type, just push it. */
    if (iosp >= ostop)
        return_with_stackoverflow_iref();
    ++iosp;
    ref_assign_inline(iosp, IREF);
  bot:next();
  out:                          /* At most 1 more token in the current procedure. */
    /* (We already decremented icount.) */
    if (!icount) {
        /* Pop the execution stack for tail recursion. */
        iesp--;
        iref_packed = IREF_NEXT(iref_packed);
        goto top;
    }
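    /*
     * Illustrative note: the icount bookkeeping above is tail-call
     * elimination.  For a procedure such as {3 4 add}, "add" is the last
     * element, so the procedure's e-stack entry is popped before "add"
     * runs rather than after, and tail-recursive PostScript procedures
     * can therefore loop without growing the e-stack.
     */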
  up:if (--ticks_left < 0)
        goto slice;
    /* See if there is anything left on the execution stack. */
    if (!r_is_proc(iesp)) {
        SET_IREF(iesp--);
        icount = 0;
        goto top;
    }
    SET_IREF(iesp->value.refs); /* next element of array */
    icount = r_size(iesp) - 1;
    if (icount <= 0) {          /* <= 1 more elements */
        iesp--;                 /* pop, or tail recursion */
        if (icount < 0)
            goto up;
    }
    goto top;
  res:
    /* Some operator has asked for context rescheduling. */
    /* We've done a store_state. */
    *pi_ctx_p = i_ctx_p;
    code = (*gs_interp_reschedule_proc)(pi_ctx_p);
    i_ctx_p = *pi_ctx_p;
  sched:                        /* We've just called a scheduling procedure. */
    /* The interpreter state is in memory; iref is not current. */
    if (code < 0) {
        set_error(code);
        /*
         * We need a real object to return as the error object.
         * (It only has to last long enough to store in
         * *perror_object.)
         */
        make_null_proc(&ierror.full);
        SET_IREF(ierror.obj = &ierror.full);
        goto error_exit;
    }
    /* Reload state information from memory. */
    iosp = osp;
    iesp = esp;
    goto up;
#if 0                           /****** ****** ***** */
  sst:                          /* Time-slice, but push the current object first. */
    store_state(iesp);
    if (iesp >= estop)
        return_with_error_iref(e_execstackoverflow);
    iesp++;
    ref_assign_inline(iesp, iref);
#endif                          /****** ****** ***** */
  slice:                        /* It's time to time-slice or garbage collect. */
    /* iref is not live, so we don't need to do a store_state. */
    osp = iosp;
    esp = iesp;
    /* If ticks_left <= -100, we need to GC now. */
    if (ticks_left <= -100) {   /* We need to garbage collect now. */
        *pi_ctx_p = i_ctx_p;
        code = interp_reclaim(pi_ctx_p, -1);
        i_ctx_p = *pi_ctx_p;
    } else if (gs_interp_time_slice_proc) {
        *pi_ctx_p = i_ctx_p;
        code = (*gs_interp_time_slice_proc)(pi_ctx_p);
        i_ctx_p = *pi_ctx_p;
    } else
        code = 0;
    ticks_left = gs_interp_time_slice_ticks;
    set_code_on_interrupt(imemory, &code);
    goto sched;

    /* Error exits. */

  rweci:
    ierror.code = code;
  rwei:
    ierror.obj = IREF;
  rwe:
    if (!r_is_packed(iref_packed))
        store_state(iesp);
    else {
        /*
         * We need a real object to return as the error object.
         * (It only has to last long enough to store in *perror_object.)
         */
        packed_get(imemory, (const ref_packed *)ierror.obj, &ierror.full);
        store_state_short(iesp);
        if (IREF == ierror.obj)
            SET_IREF(&ierror.full);
        ierror.obj = &ierror.full;
    }
  error_exit:
    if (ERROR_IS_INTERRUPT(ierror.code)) {
        /* We must push the current object being interpreted */
        /* back on the e-stack so it will be re-executed. */
        /* Currently, this is always an executable operator, */
        /* but it might be something else someday if we check */
        /* for interrupts in the interpreter loop itself. */
        if (iesp >= estop)
            code = e_execstackoverflow;
        else {
            iesp++;
            ref_assign_inline(iesp, IREF);
        }
    }
    esp = iesp;
    osp = iosp;
    ref_assign_inline(perror_object, ierror.obj);
    return gs_log_error(ierror.code, __FILE__, ierror.line);
}

/* Pop the bookkeeping information for a normal exit from a t_oparray. */
private int
oparray_pop(i_ctx_t *i_ctx_p)
{
    esp -= 3;
    return o_pop_estack;
}
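
/*
 * Sketch of the bookkeeping assumed above, inferred from oparray_cleanup
 * below (which reads ep[2] and ep[3] relative to the e-stack mark):
 * invoking a t_oparray pushes three e-stack slots -- a mark carrying the
 * cleanup procedure, the saved o-stack depth, and the saved d-stack
 * depth -- and "esp -= 3" on a normal exit simply discards them.
 */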

/* Restore the stack pointers after an error inside a t_oparray procedure. */
/* This procedure is called only from pop_estack. */
private int
oparray_cleanup(i_ctx_t *i_ctx_p)
{                               /* esp points just below the cleanup procedure. */
    es_ptr ep = esp;
    uint ocount_old = (uint) ep[2].value.intval;
    uint dcount_old = (uint) ep[3].value.intval;
    uint ocount = ref_stack_count(&o_stack);
    uint dcount = ref_stack_count(&d_stack);

    if (ocount > ocount_old)
        ref_stack_pop(&o_stack, ocount - ocount_old);
    if (dcount > dcount_old) {
        ref_stack_pop(&d_stack, dcount - dcount_old);
        dict_set_top();
    }
    return 0;
}

/* Don't restore the stack pointers. */
private int
oparray_no_cleanup(i_ctx_t *i_ctx_p)
{
    return 0;
}

/* Find the innermost oparray. */
private ref *
oparray_find(i_ctx_t *i_ctx_p)
{
    long i;
    ref *ep;

    for (i = 0; (ep = ref_stack_index(&e_stack, i)) != 0; ++i) {
        if (r_is_estack_mark(ep) &&
            (ep->value.opproc == oparray_cleanup ||
             ep->value.opproc == oparray_no_cleanup)
            )
            return ep;
    }
    return 0;
}

/* <bool> .setstackprotect - */
/* Set whether to protect the stack for the innermost oparray. */
private int
zsetstackprotect(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    ref *ep = oparray_find(i_ctx_p);

    check_type(*op, t_boolean);
    if (ep == 0)
        return_error(e_rangecheck);
    ep->value.opproc =
        (op->value.boolval ? oparray_cleanup : oparray_no_cleanup);
    pop(1);
    return 0;
}
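
/*
 * Hedged usage sketch (PostScript level), assuming the operators are
 * registered under the names shown in the comments: inside a procedure
 * running as an oparray,
 *
 *     false .setstackprotect    % errors no longer restore the stacks
 *     .currentstackprotect      % => false
 *
 * first disables and then reads back stack protection for the innermost
 * oparray.
 */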

/* - .currentstackprotect <bool> */
/* Return the stack protection status. */
private int
zcurrentstackprotect(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    ref *ep = oparray_find(i_ctx_p);

    if (ep == 0)
        return_error(e_rangecheck);
    push(1);
    make_bool(op, ep->value.opproc == oparray_cleanup);
    return 0;
}