/* Copyright (C) 1997, 1998 Aladdin Enterprises. All rights reserved.

  This software is provided AS-IS with no warranty, either express or
  implied.

  This software is distributed under license and may not be copied,
  modified or distributed except as expressly authorized under the terms
  of the license contained in the file LICENSE in this distribution.

  For more information about licensing, please refer to
  http://www.ghostscript.com/licensing/. For information on
  commercial licensing, go to http://www.artifex.com/licensing/ or
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
  San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: gxbitops.h,v 1.4 2002/02/21 22:24:52 giles Exp $ */
/* Internal definitions for bitmap operations */

#ifndef gxbitops_INCLUDED
# define gxbitops_INCLUDED

#include "gsbitops.h"

/*
 * Macros for processing bitmaps in the largest possible chunks.
 * Bits within a byte are always stored big-endian;
 * bytes are likewise stored in left-to-right order, i.e., big-endian.
 * Note that this is the format used for the source of copy_mono.
 * It used to be the case that bytes were stored in the natural
 * platform order, and the client had to force them into big-endian order
 * by calling gdev_mem_ensure_byte_order, but this is no longer necessary.
 *
 * Note that we use type uint for register variables holding a chunk:
 * for this reason, the chunk size cannot be larger than uint.
 */
/* Generic macros for chunk accessing. */
#define cbytes(ct) size_of(ct) /* sizeof may be unsigned */
# define chunk_bytes cbytes(chunk)
/* The clog2_bytes macro assumes that ints are 2, 4, or 8 bytes in size. */
#define clog2_bytes(ct) (size_of(ct) == 8 ? 3 : size_of(ct)>>1)
# define chunk_log2_bytes clog2_bytes(chunk)
#define cbits(ct) (size_of(ct)*8) /* sizeof may be unsigned */
# define chunk_bits cbits(chunk)
#define clog2_bits(ct) (clog2_bytes(ct)+3)
# define chunk_log2_bits clog2_bits(chunk)
#define cbit_mask(ct) (cbits(ct)-1)
# define chunk_bit_mask cbit_mask(chunk)
#define calign_bytes(ct)\
  (sizeof(ct) == 1 ? 1:\
   sizeof(ct) == sizeof(short) ? arch_align_short_mod :\
   sizeof(ct) == sizeof(int) ? arch_align_int_mod : arch_align_long_mod)
# define chunk_align_bytes calign_bytes(chunk)
#define calign_bit_mask(ct) (calign_bytes(ct)*8-1)
# define chunk_align_bit_mask calign_bit_mask(chunk)
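/*
 * Illustration (assuming, for concreteness, a 32-bit uint chunk):
 * chunk_bytes == 4, chunk_log2_bytes == 2, chunk_bits == 32,
 * chunk_log2_bits == 5, and chunk_bit_mask == 31 (0x1f).
 */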
/*
 * The obvious definition for cmask is:
 *      #define cmask(ct) ((ct)~(ct)0)
 * but this doesn't work on the VAX/VMS compiler, which fails to truncate
 * the value to 16 bits when ct is ushort.
 * Instead, we have to generate the mask with no extra 1-bits.
 * We can't do this in the obvious way:
 *      #define cmask(ct) ((1 << (size_of(ct) * 8)) - 1)
 * because some compilers won't allow a shift of the full type size.
 * Instead, we have to do something really awkward:
 */
#define cmask(ct) ((ct) (((((ct)1 << (size_of(ct)*8-2)) - 1) << 2) + 3))
# define chunk_all_bits cmask(chunk)
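/*
 * Worked example (assuming a 16-bit ushort): cmask(ushort) becomes
 * ((ushort)(((((ushort)1 << 14) - 1) << 2) + 3)), i.e.
 * ((0x3fff << 2) + 3) == 0xffff, all 16 bits set, computed without ever
 * shifting by the full width of the type.
 */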
/*
 * The obvious definition for chi_bits is:
 *      #define chi_bits(ct,n) (cmask(ct)-(cmask(ct)>>(n)))
 * but this doesn't work on the DEC/MIPS compilers.
 * Instead, we have to restrict chi_bits to only working for values of n
 * between 0 and cbits(ct)-1, and use
 */
#define chi_bits(ct,n) (ct)(~(ct)1 << (cbits(ct)-1 - (n)))
# define chunk_hi_bits(n) chi_bits(chunk,n)
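/*
 * Worked example (assuming a 32-bit uint chunk): chunk_hi_bits(8) is
 * (uint)(~(uint)1 << 23) == 0xff000000, i.e. the 8 most significant bits;
 * chunk_hi_bits(0) == 0.  n must lie in the range 0 .. chunk_bits-1.
 */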

/* Define whether this is a machine where chunks are long, */
/* but the machine can't shift a long by its full width. */
#define arch_cant_shift_full_chunk\
  (arch_is_big_endian && !arch_ints_are_short && !arch_can_shift_full_long)

/* Pointer arithmetic macros. */
#define inc_ptr(ptr,delta)\
  (ptr = (void *)((byte *)ptr + (delta)))
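/*
 * Usage sketch (illustrative names only): because inc_ptr casts through
 * byte *, delta is a byte count; e.g. inc_ptr(dest, raster) advances a
 * chunk pointer dest by raster bytes, i.e. one scan line.
 */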

/* Define macros for setting up left- and right-end masks. */
/* These are used for monobit operations, and for filling */
/* with 2- and 4-bit-per-pixel patterns. */

/*
 * Define the chunk size for monobit copying operations.
 */
#if arch_is_big_endian
# define mono_copy_chunk uint
# define set_mono_right_mask(var, w)\
    (var = ((w) == chunk_bits ? chunk_all_bits : chunk_hi_bits(w)))
/*
 * We have to split the following statement because of a bug in the Xenix C
 * compiler (it produces a signed rather than an unsigned shift if we don't
 * split).
 */
# define set_mono_thin_mask(var, w, bit)\
    set_mono_right_mask(var, w), var >>= (bit)
/*
 * We have to split the following statement in two because of a bug
 * in the DEC VAX/VMS C compiler.
 */
# define set_mono_left_mask(var, bit)\
    (var = chunk_all_bits, var >>= (bit))
#else
# define mono_copy_chunk bits16
extern const bits16 mono_copy_masks[17];

# if mono_fill_chunk_bytes == 2
#  define mono_fill_masks mono_copy_masks
# else
extern const bits32 mono_fill_masks[33];

# endif
/*
 * We define mono_masks as either mono_fill_masks or
 * mono_copy_masks before using the following macros.
 */
# define set_mono_left_mask(var, bit)\
    (var = mono_masks[bit])
# define set_mono_thin_mask(var, w, bit)\
    (var = ~mono_masks[(w) + (bit)] & mono_masks[bit])
# define set_mono_right_mask(var, ebit)\
    (var = ~mono_masks[ebit])
#endif
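/*
 * Mask semantics, illustrated for the big-endian branch above (assuming a
 * 32-bit uint chunk): set_mono_left_mask(m, 4) gives 0x0fffffff (skip the
 * first 4 bits of a chunk), set_mono_right_mask(m, 4) gives 0xf0000000
 * (keep only the first 4 bits), and set_mono_thin_mask(m, 8, 4) gives
 * 0x0ff00000 (an 8-bit run starting 4 bits into the chunk).  The
 * table-driven branch constructs the corresponding masks by indexing
 * mono_masks.
 */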

#endif /* gxbitops_INCLUDED */