/*
 * gxclbits.c -- extracted from the planix.SVN Subversion repository
 * web viewer (Rev 2); viewer chrome and per-line revision numbers removed.
 */
/* Copyright (C) 1995, 1996, 1997, 1998, 1999 Aladdin Enterprises.  All rights reserved.
2
 
3
  This software is provided AS-IS with no warranty, either express or
4
  implied.
5
 
6
  This software is distributed under license and may not be copied,
7
  modified or distributed except as expressly authorized under the terms
8
  of the license contained in the file LICENSE in this distribution.
9
 
10
  For more information about licensing, please refer to
11
  http://www.ghostscript.com/licensing/. For information on
12
  commercial licensing, go to http://www.artifex.com/licensing/ or
13
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
14
  San Rafael, CA  94903, U.S.A., +1(415)492-9861.
15
*/
16
 
17
/* $Id: gxclbits.c,v 1.9 2004/08/04 19:36:12 stefan Exp $ */
18
/* Halftone and bitmap writing for command lists */
19
#include "memory_.h"
20
#include "gx.h"
21
#include "gpcheck.h"
22
#include "gserrors.h"
23
#include "gsbitops.h"
24
#include "gxdevice.h"
25
#include "gxdevmem.h"		/* must precede gxcldev.h */
26
#include "gxcldev.h"
27
#include "gxfmap.h"
28
 
29
/*
 * Define when, if ever, to write character bitmaps in all bands.
 * Set this to:
 *      0 to always write in all bands;
 *      N to write in all bands when the character has been seen in N+1
 *         bands on a page;
 *      max_ushort to never write in all bands.
 */
/* (Used by clist_change_bits below to promote heavily-reused bitmaps.) */
#define CHAR_ALL_BANDS_COUNT max_ushort
38
 
39
/* ------ Writing ------ */
40
 
41
/*
42
 * Determine the (possibly unpadded) width in bytes for writing a bitmap,
43
 * per the algorithm in gxcldev.h.  If compression_mask has any of the
44
 * cmd_mask_compress_any bits set, we assume the bitmap will be compressed.
45
 * Return the total size of the bitmap.
46
 */
47
uint
48
clist_bitmap_bytes(uint width_bits, uint height, int compression_mask,
49
		   uint * width_bytes, uint * raster)
50
{
51
    uint full_raster = *raster = bitmap_raster(width_bits);
52
    uint short_raster = (width_bits + 7) >> 3;
53
    uint width_bytes_last;
54
 
55
    if (compression_mask & cmd_mask_compress_any)
56
	*width_bytes = width_bytes_last = full_raster;
57
    else if (short_raster <= cmd_max_short_width_bytes ||
58
	     height <= 1 ||
59
	     (compression_mask & decompress_spread) != 0
60
	)
61
	*width_bytes = width_bytes_last = short_raster;
62
    else
63
	*width_bytes = full_raster, width_bytes_last = short_raster;
64
    return
65
	(height == 0 ? 0 : *width_bytes * (height - 1) + width_bytes_last);
66
}
67
 
68
/*
 * Compress a bitmap, skipping extra padding bytes at the end of each row if
 * necessary.  We require height >= 1, raster >= bitmap_raster(width_bits).
 * Returns the stream process status: 0 on success, non-zero (including the
 * locally-produced -1) if the compressor failed or could not consume a
 * whole row at a time.
 */
private int
cmd_compress_bitmap(stream_state * st, const byte * data, uint width_bits,
		    uint raster, uint height, stream_cursor_write * pw)
{
    uint width_bytes = bitmap_raster(width_bits);
    int status = 0;
    stream_cursor_read r;

    /* Stream cursors point one byte before the next datum. */
    r.ptr = data - 1;
    if (raster == width_bytes) {
	/* No per-row padding: compress the whole bitmap in one call. */
	r.limit = r.ptr + raster * height;
	status = (*st->template->process) (st, &r, pw, true);
    } else {			/* Compress row-by-row. */
	uint y;

	/* Note: the comma expression sets r.limit for every row,
	   including the final one processed after the loop exits. */
	for (y = 1; (r.limit = r.ptr + width_bytes), y < height; ++y) {
	    status = (*st->template->process) (st, &r, pw, false);
	    if (status)
		break;
	    if (r.ptr != r.limit) {	/* We don't attempt to handle compressors that */
		/* require >1 input byte to make progress. */
		status = -1;
		break;
	    }
	    /* Skip the padding bytes at the end of the row. */
	    r.ptr += raster - width_bytes;
	}
	/* Final row: pass true to signal end-of-data to the compressor. */
	if (status == 0)
	    status = (*st->template->process) (st, &r, pw, true);
    }
    if (st->template->release)
	(*st->template->release) (st);
    return status;
}
105
 
106
/*
 * Put a bitmap in the buffer, compressing if appropriate.
 * pcls == 0 means put the bitmap in all bands.
 * Return <0 if error, otherwise the compression method.
 * A return value of gs_error_limitcheck means that the bitmap was too big
 * to fit in the command reading buffer.
 * Note that this leaves room for the command and initial arguments,
 * but doesn't fill them in.
 */
int
cmd_put_bits(gx_device_clist_writer * cldev, gx_clist_state * pcls,
  const byte * data, uint width_bits, uint height, uint raster, int op_size,
	     int compression_mask, byte ** pdp, uint * psize)
{
    uint short_raster, full_raster;
    /* Size if written uncompressed with short (unpadded) rasters. */
    uint short_size =
    clist_bitmap_bytes(width_bits, height,
		       compression_mask & ~cmd_mask_compress_any,
		       &short_raster, &full_raster);
    uint uncompressed_raster;
    /* Size of the data that would feed the compressor (padded rasters). */
    uint uncompressed_size =
    clist_bitmap_bytes(width_bits, height, compression_mask,
		       &uncompressed_raster, &full_raster);
    uint max_size = cbuf_size - op_size;
    gs_memory_t *mem = cldev->memory;
    byte *dp;
    int compress = 0;

    /*
     * See if compressing the bits is possible and worthwhile.
     * Currently we can't compress if the compressed data won't fit in
     * the command reading buffer, or if the decompressed data won't fit
     * in the buffer and decompress_elsewhere isn't set.
     */
    if (short_size >= 50 &&
	(compression_mask & cmd_mask_compress_any) != 0 &&
	(uncompressed_size <= max_size ||
	 (compression_mask & decompress_elsewhere) != 0)
	) {
	/* One state object big enough for whichever compressor we pick. */
	union ss_ {
	    stream_state ss;
	    stream_CFE_state cf;
	    stream_RLE_state rl;
	} sstate;
	int code;
	/* Reserve worst-case space; we shorten the op afterwards. */
	int try_size = op_size + min(uncompressed_size, max_size);

	*psize = try_size;
	code = (pcls != 0 ?
		set_cmd_put_op(dp, cldev, pcls, 0, try_size) :
		set_cmd_put_all_op(dp, cldev, 0, try_size));
	if (code < 0)
	    return code;
	cmd_uncount_op(0, try_size);
	/*
	 * Note that we currently keep all the padding if we are
	 * compressing.  This is ridiculous, but it's too hard to
	 * change right now.
	 */
	if (compression_mask & (1 << cmd_compress_cfe)) {
	    /* Try CCITTFax compression. */
	    clist_cfe_init(&sstate.cf,
			   uncompressed_raster << 3 /*width_bits*/,
			   mem);
	    compress = cmd_compress_cfe;
	} else if (compression_mask & (1 << cmd_compress_rle)) {
	    /* Try RLE compression. */
	    clist_rle_init(&sstate.rl);
	    compress = cmd_compress_rle;
	}
	if (compress) {
	    byte *wbase = dp + (op_size - 1);
	    stream_cursor_write w;

	    /*
	     * We can give up on compressing if we generate too much
	     * output to fit in the command reading buffer, or too
	     * much to make compression worthwhile.
	     */
	    uint wmax = min(uncompressed_size, max_size);
	    int status;

	    w.ptr = wbase;
	    /* Cap output at half the uncompressed size: otherwise
	       compression isn't worth the decompression cost. */
	    w.limit = w.ptr + min(wmax, short_size >> 1);
	    status = cmd_compress_bitmap((stream_state *) & sstate, data,
				  uncompressed_raster << 3 /*width_bits */ ,
					 raster, height, &w);
	    if (status == 0) {	/* Use compressed representation. */
		uint wcount = w.ptr - wbase;

		/* Give back the unused tail of the reserved space. */
		cmd_shorten_list_op(cldev,
			     (pcls ? &pcls->list : &cldev->band_range_list),
				    try_size - (op_size + wcount));
		*psize = op_size + wcount;
		goto out;
	    }
	}
	/* Compression failed or was unavailable: fall back to raw bits. */
	if (uncompressed_size > max_size) {
	    /* Shorten to zero, erasing the operation altogether */
	    if_debug1 ('L', "[L]Uncompressed bits %u too large for buffer\n",
		       uncompressed_size);
	    cmd_shorten_list_op(cldev,
			     (pcls ? &pcls->list : &cldev->band_range_list),
				try_size);
	    return_error(gs_error_limitcheck);
	}
	if (uncompressed_size != short_size) {
	    if_debug2 ('L', "[L]Shortening bits from %u to %u\n",
		       try_size, op_size + short_size);
	    cmd_shorten_list_op(cldev,
			     (pcls ? &pcls->list : &cldev->band_range_list),
				try_size - (op_size + short_size));
	    *psize = op_size + short_size;
	}
	compress = 0;
    } else if (uncompressed_size > max_size)
	return_error(gs_error_limitcheck);
    else {
	/* Small bitmap: just reserve exactly the short-raster size. */
	int code;

	*psize = op_size + short_size;
	code = (pcls != 0 ?
		set_cmd_put_op(dp, cldev, pcls, 0, *psize) :
		set_cmd_put_all_op(dp, cldev, 0, *psize));
	if (code < 0)
	    return code;
	cmd_uncount_op(0, *psize);
    }
    /* Copy the raw bits (uncompressed paths only). */
    bytes_copy_rectangle(dp + op_size, short_raster, data, raster,
			 short_raster, height);
out:
    *pdp = dp;
    return compress;
}
240
 
241
/* Add a command to set the tile size and depth. */
242
private uint
243
cmd_size_tile_params(const gx_strip_bitmap * tile)
244
{
245
    return 2 + cmd_size_w(tile->rep_width) + cmd_size_w(tile->rep_height) +
246
	(tile->rep_width == tile->size.x ? 0 :
247
	 cmd_size_w(tile->size.x / tile->rep_width)) +
248
	(tile->rep_height == tile->size.y ? 0 :
249
	 cmd_size_w(tile->size.y / tile->rep_height)) +
250
	(tile->rep_shift == 0 ? 0 : cmd_size_w(tile->rep_shift));
251
}
252
private void
253
cmd_store_tile_params(byte * dp, const gx_strip_bitmap * tile, int depth,
254
		      uint csize)
255
{
256
    byte *p = dp + 2;
257
    byte bd = cmd_depth_to_code(depth);
258
 
259
    *dp = cmd_count_op(cmd_opv_set_tile_size, csize);
260
    p = cmd_put_w(tile->rep_width, p);
261
    p = cmd_put_w(tile->rep_height, p);
262
    if (tile->rep_width != tile->size.x) {
263
	p = cmd_put_w(tile->size.x / tile->rep_width, p);
264
	bd |= 0x20;
265
    }
266
    if (tile->rep_height != tile->size.y) {
267
	p = cmd_put_w(tile->size.y / tile->rep_height, p);
268
	bd |= 0x40;
269
    }
270
    if (tile->rep_shift != 0) {
271
	cmd_put_w(tile->rep_shift, p);
272
	bd |= 0x80;
273
    }
274
    dp[1] = bd;
275
}
276
 
277
/* Add a command to set the tile index. */
278
/* This is a relatively high-frequency operation, so we declare it `inline'. */
279
inline private int
280
cmd_put_tile_index(gx_device_clist_writer *cldev, gx_clist_state *pcls,
281
		   uint indx)
282
{
283
    int idelta = indx - pcls->tile_index + 8;
284
    byte *dp;
285
    int code;
286
 
287
    if (!(idelta & ~15)) {
288
	code = set_cmd_put_op(dp, cldev, pcls,
289
			      cmd_op_delta_tile_index + idelta, 1);
290
	if (code < 0)
291
	    return code;
292
    } else {
293
	code = set_cmd_put_op(dp, cldev, pcls,
294
			      cmd_op_set_tile_index + (indx >> 8), 2);
295
	if (code < 0)
296
	    return code;
297
	dp[1] = indx & 0xff;
298
    }
299
    if_debug2('L', "[L]writing index=%u, offset=%lu\n",
300
	      indx, cldev->tile_table[indx].offset);
301
    return 0;
302
}
303
 
304
/* If necessary, write out data for a single color map. */
305
int
306
cmd_put_color_map(gx_device_clist_writer * cldev, cmd_map_index map_index,
307
	int comp_num, const gx_transfer_map * map, gs_id * pid)
308
{
309
    byte *dp;
310
    int code;
311
 
312
    if (map == 0) {
313
	if (pid && *pid == gs_no_id)
314
	    return 0;	/* no need to write */
315
	code = set_cmd_put_all_op(dp, cldev, cmd_opv_set_misc, 3);
316
	if (code < 0)
317
	    return code;
318
	dp[1] = cmd_set_misc_map + (cmd_map_none << 4) + map_index;
319
	dp[2] = comp_num;
320
	if (pid)
321
	    *pid = gs_no_id;
322
    } else {
323
	if (pid && map->id == *pid)
324
	    return 0;	/* no need to write */
325
	if (map->proc == gs_identity_transfer) {
326
	    code = set_cmd_put_all_op(dp, cldev, cmd_opv_set_misc, 3);
327
	    if (code < 0)
328
		return code;
329
	    dp[1] = cmd_set_misc_map + (cmd_map_identity << 4) + map_index;
330
	    dp[2] = comp_num;
331
	} else {
332
	    code = set_cmd_put_all_op(dp, cldev, cmd_opv_set_misc,
333
				      3 + sizeof(map->values));
334
	    if (code < 0)
335
		return code;
336
	    dp[1] = cmd_set_misc_map + (cmd_map_other << 4) + map_index;
337
	    dp[2] = comp_num;
338
	    memcpy(dp + 3, map->values, sizeof(map->values));
339
	}
340
	if (pid)
341
	    *pid = map->id;
342
    }
343
    return 0;
344
}
345
 
346
/* ------ Tile cache management ------ */

/* We want consecutive ids to map to consecutive hash slots if possible, */
/* so we can use a delta representation when setting the index. */
/* NB that we cannot emit 'delta' style tile indices if VM error recovery */
/* is in effect, since reader & writer's tile indices may get out of phase */
/* as a consequence of error recovery occurring. */
#define tile_id_hash(id) (id)
#define tile_hash_next(index) ((index) + 413)	/* arbitrary large odd # */
/* Result of a tile-cache lookup (see clist_find_bits). */
typedef struct tile_loc_s {
    uint index;			/* hash table slot (insertion point if not found) */
    tile_slot *tile;		/* cached tile; only set when the lookup succeeds */
} tile_loc;
359
 
360
/* Look up a tile or character in the cache.  If found, set the index and */
361
/* pointer; if not, set the index to the insertion point. */
362
private bool
363
clist_find_bits(gx_device_clist_writer * cldev, gx_bitmap_id id, tile_loc * ploc)
364
{
365
    uint index = tile_id_hash(id);
366
    const tile_hash *table = cldev->tile_table;
367
    uint mask = cldev->tile_hash_mask;
368
    ulong offset;
369
 
370
    for (; (offset = table[index &= mask].offset) != 0;
371
	 index = tile_hash_next(index)
372
	) {
373
	tile_slot *tile = (tile_slot *) (cldev->data + offset);
374
 
375
	if (tile->id == id) {
376
	    ploc->index = index;
377
	    ploc->tile = tile;
378
	    return true;
379
	}
380
    }
381
    ploc->index = index;
382
    return false;
383
}
384
 
385
/* Delete a tile from the cache. */
/* Frees the slot's storage and clears its hash entry, then repairs the */
/* probe chain that follows it so later lookups still terminate correctly. */
private void
clist_delete_tile(gx_device_clist_writer * cldev, tile_slot * slot)
{
    tile_hash *table = cldev->tile_table;
    uint mask = cldev->tile_hash_mask;
    uint index = slot->index;
    ulong offset;

    if_debug2('L', "[L]deleting index=%u, offset=%lu\n",
	      index, (ulong) ((byte *) slot - cldev->data));
    gx_bits_cache_free(&cldev->bits, (gx_cached_bits_head *) slot,
		       &cldev->chunk);
    table[index].offset = 0;
    /* Delete the entry from the hash table. */
    /* We'd like to move up any later entries, so that we don't need */
    /* a deleted mark, but it's too difficult to note this in the */
    /* band list, so instead, we just delete any entries that */
    /* would need to be moved. */
    while ((offset = table[index = tile_hash_next(index) & mask].offset) != 0) {
	tile_slot *tile = (tile_slot *) (cldev->data + offset);
	tile_loc loc;

	if (!clist_find_bits(cldev, tile->id, &loc)) {	/* We didn't find it, so it should be moved into a slot */
	    /* that we just vacated; instead, delete it. */
	    if_debug2('L', "[L]move-deleting index=%u, offset=%lu\n",
		      index, offset);
	    gx_bits_cache_free(&cldev->bits,
			     (gx_cached_bits_head *) (cldev->data + offset),
			       &cldev->chunk);
	    table[index].offset = 0;
	}
    }
}
419
 
420
/* Add a tile to the cache. */
/* tile->raster holds the raster for the replicated tile; */
/* we pass the raster of the actual data separately. */
/* Evicts existing entries as needed to make room; returns 0 on success, */
/* gs_error_limitcheck if the tile cannot fit even in an empty cache. */
private int
clist_add_tile(gx_device_clist_writer * cldev, const gx_strip_bitmap * tiles,
	       uint sraster, int depth)
{
    uint raster = tiles->raster;
    uint size_bytes = raster * tiles->size.y;
    /* Total allocation: slot header + per-band "known" mask + bits. */
    uint tsize =
    sizeof(tile_slot) + cldev->tile_band_mask_size + size_bytes;
    gx_cached_bits_head *slot_head;

#define slot ((tile_slot *)slot_head)

    if (cldev->bits.csize == cldev->tile_max_count) {	/* Don't let the hash table get too full: delete an entry. */
	/* Since gx_bits_cache_alloc returns an entry to delete when */
	/* it fails, just force it to fail. */
	gx_bits_cache_alloc(&cldev->bits, (ulong) cldev->chunk.size,
			    &slot_head);
	if (slot_head == 0) {	/* Wrap around and retry. */
	    cldev->bits.cnext = 0;
	    gx_bits_cache_alloc(&cldev->bits, (ulong) cldev->chunk.size,
				&slot_head);
#ifdef DEBUG
	    if (slot_head == 0) {
		lprintf("No entry to delete!\n");
		return_error(gs_error_Fatal);
	    }
#endif
	}
	clist_delete_tile(cldev, slot);
    }
    /* Allocate the space for the new entry, deleting entries as needed. */
    while (gx_bits_cache_alloc(&cldev->bits, (ulong) tsize, &slot_head) < 0) {
	if (slot_head == 0) {	/* Wrap around. */
	    if (cldev->bits.cnext == 0) {	/* Too big to fit.  We should probably detect this */
		/* sooner, since if we get here, we've cleared the */
		/* cache. */
		return_error(gs_error_limitcheck);
	    }
	    cldev->bits.cnext = 0;
	} else
	    clist_delete_tile(cldev, slot);
    }
    /* Fill in the entry. */
    slot->cb_depth = depth;
    slot->cb_raster = raster;
    slot->width = tiles->rep_width;
    slot->height = tiles->rep_height;
    slot->shift = slot->rep_shift = tiles->rep_shift;
    slot->x_reps = slot->y_reps = 1;
    slot->id = tiles->id;
    /* No band knows about this tile yet: clear the band mask. */
    memset(ts_mask(slot), 0, cldev->tile_band_mask_size);
    bytes_copy_rectangle(ts_bits(cldev, slot), raster,
			 tiles->data, sraster,
			 (tiles->rep_width * depth + 7) >> 3,
			 tiles->rep_height);
    /* Make the hash table entry. */
    {
	tile_loc loc;

#ifdef DEBUG
	if (clist_find_bits(cldev, tiles->id, &loc))
	    lprintf1("clist_find_bits(0x%lx) should have failed!\n",
		     (ulong) tiles->id);
#else
	clist_find_bits(cldev, tiles->id, &loc);	/* always fails */
#endif
	slot->index = loc.index;
	cldev->tile_table[loc.index].offset =
	    (byte *) slot_head - cldev->data;
	if_debug2('L', "[L]adding index=%u, offset=%lu\n",
		  loc.index, cldev->tile_table[loc.index].offset);
    }
    slot->num_bands = 0;
    return 0;
}
498
 
499
/* ------ Driver procedure support ------ */
500
 
501
/* Change the tile parameters (size and depth). */
/* Currently we do this for all bands at once. */
private void
clist_new_tile_params(gx_strip_bitmap * new_tile, const gx_strip_bitmap * tiles,
		      int depth, const gx_device_clist_writer * cldev)
{				/*
				 * Adjust the replication factors.  If we can, we replicate
				 * the tile in X up to 32 bytes, and then in Y up to 4 copies,
				 * as long as we don't exceed a total tile size of 256 bytes,
				 * or more than 255 repetitions in X or Y, or make the tile so
				 * large that not all possible tiles will fit in the cache.
				 * Also, don't attempt Y replication if shifting is required. 
				 */
#define max_tile_reps_x 255
#define max_tile_bytes_x 32
#define max_tile_reps_y 4
#define max_tile_bytes 256
    uint rep_width = tiles->rep_width;
    uint rep_height = tiles->rep_height;
    uint rep_width_bits = rep_width * depth;
    uint tile_overhead =
    sizeof(tile_slot) + cldev->tile_band_mask_size;
    /* Budget per tile so that all possible tiles still fit in the cache
       chunk; NOTE(review): the divisor mixes bits * rows -- presumably
       intentional scaling, confirm against gxcldev.h before changing. */
    uint max_bytes = cldev->chunk.size / (rep_width_bits * rep_height);

    max_bytes -= min(max_bytes, tile_overhead);
    if (max_bytes > max_tile_bytes)
	max_bytes = max_tile_bytes;
    *new_tile = *tiles;
    {
	uint max_bits_x = max_bytes * 8 / rep_height;
	uint reps_x =
	min(max_bits_x, max_tile_bytes_x * 8) / rep_width_bits;
	uint reps_y;

	/* Halve reps_x until it fits in the 255-rep limit. */
	while (reps_x > max_tile_reps_x)
	    reps_x >>= 1;
	new_tile->size.x = max(reps_x, 1) * rep_width;
	new_tile->raster = bitmap_raster(new_tile->size.x * depth);
	if (tiles->shift != 0)
	    reps_y = 1;		/* no Y replication when shifting */
	else {
	    reps_y = max_bytes / (new_tile->raster * rep_height);
	    if (reps_y > max_tile_reps_y)
		reps_y = max_tile_reps_y;
	    else if (reps_y < 1)
		reps_y = 1;
	}
	new_tile->size.y = reps_y * rep_height;
    }
#undef max_tile_reps_x
#undef max_tile_bytes_x
#undef max_tile_reps_y
#undef max_tile_bytes
}
555
 
556
/* Change tile for clist_tile_rectangle. */
/* Ensures the band's reader will know this tile: looks the tile up in the */
/* cache (adding it on a miss and retrying), writes the tile parameters the */
/* first time a band needs them, and writes the bits to any band that has */
/* not yet seen this tile.  Returns 0 or a negative error code. */
int
clist_change_tile(gx_device_clist_writer * cldev, gx_clist_state * pcls,
		  const gx_strip_bitmap * tiles, int depth)
{
    tile_loc loc;
    int code;

#define tile_params_differ(cldev, tiles, depth)\
  ((tiles)->rep_width != (cldev)->tile_params.rep_width ||\
   (tiles)->rep_height != (cldev)->tile_params.rep_height ||\
   (tiles)->rep_shift != (cldev)->tile_params.rep_shift ||\
   (depth) != (cldev)->tile_depth)

  top:if (clist_find_bits(cldev, tiles->id, &loc)) {	/* The bitmap is in the cache.  Check whether this band */
	/* knows about it. */
	int band_index = pcls - cldev->states;
	byte *bptr = ts_mask(loc.tile) + (band_index >> 3);
	byte bmask = 1 << (band_index & 7);

	if (*bptr & bmask) {	/* Already known.  Just set the index. */
	    if (pcls->tile_index == loc.index)
		return 0;
	    if ((code = cmd_put_tile_index(cldev, pcls, loc.index)) < 0)
	    	return code;
	} else {
	    uint extra = 0;

	    /* (The macro expansion supplies the parentheses for this if.) */
	    if tile_params_differ
		(cldev, tiles, depth) {		/*
						 * We have a cached tile whose parameters differ from
						 * the current ones.  Because of the way tile IDs are
						 * managed, this is currently only possible when mixing
						 * Patterns and halftones, but if we didn't generate new
						 * IDs each time the main halftone cache needed to be
						 * refreshed, this could also happen simply from
						 * switching screens.
						 */
		int band;

		clist_new_tile_params(&cldev->tile_params, tiles, depth,
				      cldev);
		cldev->tile_depth = depth;
		/* No band knows about the new parameters. */
		for (band = cldev->tile_known_min;
		     band <= cldev->tile_known_max;
		     ++band
		    )
		    cldev->states[band].known &= ~tile_params_known;
		/* Reset the known range to empty (min > max). */
		cldev->tile_known_min = cldev->nbands;
		cldev->tile_known_max = -1;
		}
	    if (!(pcls->known & tile_params_known)) {	/* We're going to have to write the tile parameters. */
		extra = cmd_size_tile_params(&cldev->tile_params);
	    } {			/*
				 * This band doesn't know this tile yet, so output the
				 * bits.  Note that the offset we write is the one used by
				 * the reading phase, not the writing phase.  Note also
				 * that the size of the cached and written tile may differ
				 * from that of the client's tile.  Finally, note that
				 * this tile's size parameters are guaranteed to be
				 * compatible with those stored in the device
				 * (cldev->tile_params).
				 */
		ulong offset = (byte *) loc.tile - cldev->chunk.data;
		uint rsize =
		    extra + 1 + cmd_size_w(loc.index) + cmd_size_w(offset);
		byte *dp;
		uint csize;
		int code =
		cmd_put_bits(cldev, pcls, ts_bits(cldev, loc.tile),
			     tiles->rep_width * depth, tiles->rep_height,
			     loc.tile->cb_raster, rsize,
			     (cldev->tile_params.size.x > tiles->rep_width ?
			      decompress_elsewhere | decompress_spread :
			      decompress_elsewhere),
			     &dp, &csize);

		if (code < 0)
		    return code;
		if (extra) {	/* Write the tile parameters before writing the bits. */
		    cmd_store_tile_params(dp, &cldev->tile_params, depth,
					  extra);
		    dp += extra;
		    /* This band now knows the parameters. */
		    pcls->known |= tile_params_known;
		    if (band_index < cldev->tile_known_min)
			cldev->tile_known_min = band_index;
		    if (band_index > cldev->tile_known_max)
			cldev->tile_known_max = band_index;
		}
		*dp = cmd_count_op(cmd_opv_set_tile_bits, csize - extra);
		dp++;
		dp = cmd_put_w(loc.index, dp);
		cmd_put_w(offset, dp);
		/* Mark this band as knowing the tile. */
		*bptr |= bmask;
		loc.tile->num_bands++;
	    }
	}
	pcls->tile_index = loc.index;
	pcls->tile_id = loc.tile->id;
	return 0;
    }
    /* The tile is not in the cache, add it. */
    {
	gx_strip_bitmap new_tile;
	gx_strip_bitmap *ptile;

	/* Ensure that the tile size is compatible. */
	if (tile_params_differ(cldev, tiles, depth)) {	/* We'll reset cldev->tile_params when we write the bits. */
	    clist_new_tile_params(&new_tile, tiles, depth, cldev);
	    ptile = &new_tile;
	} else {
	    cldev->tile_params.id = tiles->id;
	    cldev->tile_params.data = tiles->data;
	    ptile = &cldev->tile_params;
	}
	code = clist_add_tile(cldev, ptile, tiles->raster, depth);
	if (code < 0)
	    return code;
    }
    /* Retry: the lookup at top will now succeed. */
    goto top;
#undef tile_params_differ
}
680
 
681
/* Change "tile" for clist_copy_*.  tiles->[rep_]shift must be zero. */
682
int
683
clist_change_bits(gx_device_clist_writer * cldev, gx_clist_state * pcls,
684
		  const gx_strip_bitmap * tiles, int depth)
685
{
686
    tile_loc loc;
687
    int code;
688
 
689
  top:if (clist_find_bits(cldev, tiles->id, &loc)) {	/* The bitmap is in the cache.  Check whether this band */
690
	/* knows about it. */
691
	uint band_index = pcls - cldev->states;
692
	byte *bptr = ts_mask(loc.tile) + (band_index >> 3);
693
	byte bmask = 1 << (band_index & 7);
694
 
695
	if (*bptr & bmask) {	/* Already known.  Just set the index. */
696
	    if (pcls->tile_index == loc.index)
697
		return 0;
698
	    cmd_put_tile_index(cldev, pcls, loc.index);
699
	} else {		/* Not known yet.  Output the bits. */
700
	    /* Note that the offset we write is the one used by */
701
	    /* the reading phase, not the writing phase. */
702
	    ulong offset = (byte *) loc.tile - cldev->chunk.data;
703
	    uint rsize = 2 + cmd_size_w(loc.tile->width) +
704
	    cmd_size_w(loc.tile->height) + cmd_size_w(loc.index) +
705
	    cmd_size_w(offset);
706
	    byte *dp;
707
	    uint csize;
708
	    gx_clist_state *bit_pcls = pcls;
709
	    int code;
710
 
711
	    if (loc.tile->num_bands == CHAR_ALL_BANDS_COUNT)
712
		bit_pcls = NULL;
713
	    code = cmd_put_bits(cldev, bit_pcls, ts_bits(cldev, loc.tile),
714
				loc.tile->width * depth,
715
				loc.tile->height, loc.tile->cb_raster,
716
				rsize,
717
			     (1 << cmd_compress_cfe) | decompress_elsewhere,
718
				&dp, &csize);
719
 
720
	    if (code < 0)
721
		return code;
722
	    *dp = cmd_count_op(cmd_opv_set_bits, csize);
723
	    dp[1] = (depth << 2) + code;
724
	    dp += 2;
725
	    dp = cmd_put_w(loc.tile->width, dp);
726
	    dp = cmd_put_w(loc.tile->height, dp);
727
	    dp = cmd_put_w(loc.index, dp);
728
	    cmd_put_w(offset, dp);
729
	    if (bit_pcls == NULL) {
730
		memset(ts_mask(loc.tile), 0xff,
731
		       cldev->tile_band_mask_size);
732
		loc.tile->num_bands = cldev->nbands;
733
	    } else {
734
		*bptr |= bmask;
735
		loc.tile->num_bands++;
736
	    }
737
	}
738
	pcls->tile_index = loc.index;
739
	pcls->tile_id = loc.tile->id;
740
	return 0;
741
    }
742
    /* The tile is not in the cache. */
743
    code = clist_add_tile(cldev, tiles, tiles->raster, depth);
744
    if (code < 0)
745
	return code;
746
    goto top;
747
}