Commit | Line | Data |
---|---|---|
a69ba577 WJ |
1 | /* Copyright (C) 1989, 1992 Aladdin Enterprises. All rights reserved. |
2 | Distributed by Free Software Foundation, Inc. | |
3 | ||
4 | This file is part of Ghostscript. | |
5 | ||
6 | Ghostscript is distributed in the hope that it will be useful, but | |
7 | WITHOUT ANY WARRANTY. No author or distributor accepts responsibility | |
8 | to anyone for the consequences of using it or for whether it serves any | |
9 | particular purpose or works at all, unless he says so in writing. Refer | |
10 | to the Ghostscript General Public License for full details. | |
11 | ||
12 | Everyone is granted permission to copy, modify and redistribute | |
13 | Ghostscript, but only under the conditions described in the Ghostscript | |
14 | General Public License. A copy of this license is supposed to have been | |
15 | given to you along with Ghostscript so you can know your rights and | |
16 | responsibilities. It should be in a file named COPYING. Among other | |
17 | things, the copyright notice and this notice must be preserved on all | |
18 | copies. */ | |
19 | ||
20 | /* gdevmem1.c */ | |
21 | /* Generic and monobit "memory" (stored bitmap) device */ | |
22 | /* for Ghostscript library. */ | |
23 | #include "memory_.h" | |
24 | #include "gs.h" | |
25 | #include "gxdevice.h" | |
26 | #include "gxdevmem.h" /* semi-public definitions */ | |
27 | #include "gdevmem.h" /* private definitions */ | |
28 | ||
/* Define the chunk size for monobit operations. */
/* On big-endian machines a full int can be used, since the bytes of a */
/* stored chunk already appear in memory in big-endian order.  On */
/* little-endian machines the chunk is a ushort: mem_open and */
/* mem_mono_get_bits rely on sizeof(mono_chunk) == 2 there (see the */
/* NOTE in mem_open below). */
#if arch_is_big_endian
# define mono_chunk uint
#else
# define mono_chunk ushort
#endif
35 | ||
36 | /* ------ Generic code ------ */ | |
37 | ||
38 | /* Return the appropriate memory device for a given */ | |
39 | /* number of bits per pixel (0 if none suitable). */ | |
40 | gx_device_memory * | |
41 | gdev_mem_device_for_bits(int bits_per_pixel) | |
42 | { switch ( bits_per_pixel ) | |
43 | { | |
44 | case 1: return &mem_mono_device; | |
45 | case 2: return &mem_mapped2_color_device; | |
46 | case 4: return &mem_mapped4_color_device; | |
47 | case 8: return &mem_mapped8_color_device; | |
48 | case 16: return &mem_true16_color_device; | |
49 | case 24: return &mem_true24_color_device; | |
50 | case 32: return &mem_true32_color_device; | |
51 | default: return 0; | |
52 | } | |
53 | } | |
54 | ||
55 | /* Compute the size of the bitmap storage, */ | |
56 | /* including the space for the scan line pointer table. */ | |
57 | /* Note that scan lines are padded to a multiple of 4 bytes. */ | |
58 | ulong | |
59 | gdev_mem_bitmap_size(gx_device_memory *dev) | |
60 | { unsigned raster = | |
61 | ((dev->width * dev->color_info.depth + 31) >> 5) << 2; | |
62 | mdev->raster = raster; | |
63 | return (ulong)dev->height * (raster + sizeof(byte *)); | |
64 | } | |
65 | ||
/* 'Open' the memory device and create the scan line table. */
/* Assumes mdev->base points at the bitmap storage and mdev->raster */
/* has already been set (presumably by gdev_mem_bitmap_size -- */
/* TODO confirm callers guarantee this). */
int
mem_open(gx_device *dev)
{	byte *scan_line = mdev->base;
	uint raster = mdev->raster;
	/* The scan line pointer table lives immediately after the */
	/* height * raster bytes of bitmap data. */
	byte **pptr = (byte **)(scan_line + (uint)dev->height * raster);
	byte **pend = pptr + dev->height;
	mdev->line_ptrs = pptr;
	/* Fill the table: entry i points at scan line i. */
	while ( pptr < pend )
	   {	*pptr++ = scan_line;
		scan_line += raster;
	   }
	/* bytes_le records whether the bitmap bytes are (still) in */
	/* little-endian chunk order; always 0 on big-endian machines. */
	mdev->bytes_le =
#if arch_is_big_endian
		0
#else
		/* NOTE: mem_mono_get_bits relies on the fact that */
		/* sizeof(mono_chunk) == 2! */
		(mdev->color_info.depth < 8 ? sizeof(mono_chunk) : 0);
#endif
		;
	return 0;
}
89 | ||
90 | /* Return the initial transformation matrix */ | |
91 | void | |
92 | mem_get_initial_matrix(gx_device *dev, gs_matrix *pmat) | |
93 | { *pmat = mdev->initial_matrix; | |
94 | } | |
95 | ||
96 | /* Test whether a device is a memory device */ | |
97 | int | |
98 | gs_device_is_memory(const gx_device *dev) | |
99 | { /* We can't just compare the procs, or even an individual proc, */ | |
100 | /* because we might be tracing. Compare the device name, */ | |
101 | /* and hope for the best. */ | |
102 | const char *name = dev->dname; | |
103 | int i; | |
104 | for ( i = 0; i < 6; i++ ) | |
105 | if ( name[i] != "image("[i] ) return 0; | |
106 | return 1; | |
107 | } | |
108 | ||
109 | /* Ensure that the data bytes are in big-endian order. */ | |
110 | /* This is never called on big-endian platforms; */ | |
111 | /* on little-endian platforms, the chunk size is ushort, */ | |
112 | /* regardless of the size of an int. */ | |
113 | void | |
114 | gdev_mem_ensure_byte_order(gx_device_memory *dev) | |
115 | { | |
116 | #if !arch_is_big_endian | |
117 | if ( !dev->bytes_le ) return; /* already in order */ | |
118 | memswab(dev->base, dev->base, dev->raster * dev->height); | |
119 | dev->bytes_le = 0; | |
120 | #endif | |
121 | } | |
122 | ||
/* Copy one or more scan lines to a client. */
/* str receives up to size bytes of scan line data starting at line y; */
/* pad_to_word selects the client's line padding convention.  Returns */
/* nonzero only when the data are left in little-endian order (the */
/* pad_to_word < 0 case below). */
#undef chunk
#define chunk byte
int
mem_get_bits(gx_device *dev, int y, byte *str, uint size, int pad_to_word)
{	uint bytes_per_line =
		gx_device_bytes_per_scan_line(dev, pad_to_word);
	/* NOTE(review): src is fetched before the raster is (re)computed */
	/* below -- presumably scan_line_base uses line_ptrs, not raster; */
	/* confirm against gdevmem.h. */
	byte *src = scan_line_base(mdev, y);
	byte *dest = str;
	/* Copy as many whole lines as fit in the buffer, but never */
	/* past the bottom of the bitmap. */
	uint count = min(size / bytes_per_line, dev->height - y);
	int swap = mdev->bytes_le;
	if ( !mdev->raster )	/* compute it now */
		(void)gdev_mem_bitmap_size(mdev);
	if ( bytes_per_line == mdev->raster )
	   {	/* Client layout matches ours: one bulk copy (or one */
		/* bulk byte-swap when the data are little-endian and */
		/* the client wants big-endian order). */
		if ( swap && pad_to_word >= 0 )
			memswab(src, dest, bytes_per_line * count);
		else
			memcpy(dest, src, bytes_per_line * count);
	   }
	else		/* know pad_to_word == 0 */
	   {	/* Line-by-line copy, dropping our padding. */
		uint c;
		for ( c = count; c-- != 0; )
		   {	if ( swap )
			   {	/* We have to take extra care if */
				/* bytes_per_line is odd. */
				if ( bytes_per_line & 1 )
				   {	memswab(src, dest, bytes_per_line - 1);
					/* The last (even-indexed) byte's swap */
					/* partner is the following byte of the */
					/* raster, which exists because the */
					/* raster is padded to 4 bytes. */
					dest[bytes_per_line - 1] =
						src[bytes_per_line];
				   }
				else
					memswab(src, dest, bytes_per_line);
			   }
			else
				memcpy(dest, src, bytes_per_line);
			src += mdev->raster;
			dest += bytes_per_line;
		   }
	   }
	return (swap && pad_to_word < 0 ? swap : 0);
}
164 | ||
/* ------ Monochrome ------ */

/* Procedures */
private dev_proc_copy_mono(mem_mono_copy_mono);
private dev_proc_fill_rectangle(mem_mono_fill_rectangle);

/* The device descriptor. */
/* Only copy_mono and fill_rectangle are specialized; everything else */
/* uses the gx_default_* implementations. */
private gx_device_procs mem_mono_procs =
  mem_procs(gx_default_map_rgb_color, gx_default_map_color_rgb,
    mem_mono_copy_mono, gx_default_copy_color, mem_mono_fill_rectangle);

/* The instance is public. */
/* The name must begin with "image(": gs_device_is_memory above */
/* recognizes memory devices by that prefix. */
gx_device_memory mem_mono_device =
  mem_device("image(mono)", 1, mem_mono_procs);

/* Convert x coordinate to byte offset in scan line. */
/* At 1 bit per pixel, 8 pixels fit in each byte. */
#define x_to_byte(x) ((x) >> 3)
182 | ||
/* Fill a rectangle with a color. */
/* color is 0, 1, or gx_no_color_index (transparent); any other value */
/* is rejected.  Works a chunk (mono_chunk) at a time: a masked write */
/* on the left edge, memset over the whole-byte middle, and a masked */
/* write on the right edge. */
#undef chunk
#define chunk mono_chunk
private int
mem_mono_fill_rectangle(gx_device *dev, int x, int y, int w, int h,
  gx_color_index color)
{	uint bit;
	chunk right_mask;
	byte fill;
	declare_scan_ptr(dest);
	check_rect();
	setup_rect(dest);
/* Run stat once per scan line, with ptr walking down the rectangle. */
#define write_loop(stat)\
{ int line_count = h;\
  chunk *ptr = dest;\
  do { stat; inc_chunk_ptr(ptr, draster); }\
  while ( --line_count );\
}
/* Set or clear the masked bits of each line, per the fill value. */
#define write_partial(msk)\
if ( fill ) write_loop(*ptr |= msk)\
else write_loop(*ptr &= ~msk)
	switch ( color )
	   {
	case 0: fill = mdev->invert; break;
	case 1: fill = ~mdev->invert; break;
	case gx_no_color_index: return 0;	/* transparent */
	default: return -1;	/* invalid */
	   }
	bit = x & chunk_bit_mask;
	if ( bit + w <= chunk_bits )
	   {	/* Only one word. */
		right_mask =
			(w == chunk_bits ? chunk_all_bits : chunk_hi_bits(w))
			>> bit;
	   }
	else
	   {	int byte_count;
		if ( bit )
		   {	/* Unaligned left edge: mask off the first */
			/* chunk's leading bits. */
			/* We have to split the following statement */
			/* into two because of a bug in the DEC */
			/* VAX/VMS C compiler. */
			chunk mask = chunk_all_bits;
			mask >>= bit;
			write_partial(mask);
			dest++;
			w += bit - chunk_bits;
		   }
		right_mask = chunk_hi_bits(w & chunk_bit_mask);
		/* Fill the whole chunks in the middle with memset; */
		/* byte_count is w/8 rounded down to whole chunks. */
		if ( (byte_count = (w >> 3) & -chunk_bytes) != 0 )
		   {	write_loop(memset(ptr, fill, byte_count));
			inc_chunk_ptr(dest, byte_count);
		   }
	   }
	if ( right_mask )
		write_partial(right_mask);
	return 0;
}
240 | ||
/* Copy a monochrome bitmap. */

/* Fetch a chunk from the source. */
/* Note that the source data are always stored big-endian. */
/* Note also that the macros always cast cptr, */
/* so it doesn't matter what the type of cptr is. */
#undef chunk
#if arch_is_big_endian
#  define chunk uint
#  define cfetch(cptr) (*(chunk *)(cptr))
#else
	/* Little-endian: assemble a 16-bit big-endian chunk a byte */
	/* at a time. */
#  define chunk ushort
#  define cfetch(cptr) (((chunk)*(byte *)(cptr) << 8) + ((byte *)(cptr))[1])
#endif
/* Fetch a chunk that straddles a chunk boundary. */
/* (The commented-out variant below was a little-endian special case; */
/* the generic form is used unconditionally.) */
/***
#if arch_is_big_endian
***/
# define cfetch2(cptr, cskew, skew)\
  ((cfetch(cptr) << cskew) + (cfetch((chunk *)(cptr) + 1) >> skew))
/***
#else
# define cfetch2(cptr, cskew, skew)\
  (cskew <= 8 ?\
   (cfetch(cptr) << cskew) + (((byte *)(cptr))[2] >> (skew - 8)) :\
   (((byte *)(cptr))[1] << cskew) + (cfetch((chunk *)(cptr) + 1) >> skew))
#endif
***/

/* copy_function and copy_shift get added together for dispatch */
typedef enum {
	copy_or = 0, copy_store, copy_and, copy_funny
} copy_function;
typedef enum {
	copy_right = 0, copy_left = 4
} copy_shift;
/* invert is XORed into the source bits; op selects the raster op. */
typedef struct {
	short invert;
	ushort op;	/* copy_function */
} copy_mode;
/* Map from <c0,c1,invert> to copy_mode. */
/* Indexed in mem_mono_copy_mono as */
/* (mdev->invert & 9) + 3*zero + one + 4, where zero/one are -1 */
/* (gx_no_color_index), 0, or 1; the second group of nine entries is */
/* the inverted-device half.  Entry comments give <zero,one> with N */
/* standing for "none" (transparent). */
#define cm(i,op) { i, (ushort)op }
private copy_mode copy_modes[9*2] = {
	cm(-1, copy_funny),	/* NN */
	cm(-1, copy_and),	/* N0 */
	cm(0, copy_or),		/* N1 */
	cm(0, copy_and),	/* 0N */
	cm(0, copy_funny),	/* 00 */
	cm(0, copy_store),	/* 01 */
	cm(-1, copy_or),	/* 1N */
	cm(-1, copy_store),	/* 10 */
	cm(0, copy_funny),	/* 11 */
	cm(-1, copy_funny),	/* NNi */
	cm(0, copy_or),		/* N1i */
	cm(-1, copy_and),	/* N0i */
	cm(-1, copy_or),	/* 1Ni */
	cm(0, copy_funny),	/* 11i */
	cm(-1, copy_store),	/* 10i */
	cm(0, copy_and),	/* 0Ni */
	cm(0, copy_store),	/* 01i */
	cm(0, copy_funny)	/* 00i */
};
/* Copy a monochrome bitmap onto the device, mapping source bit */
/* values to the colors zero and one (gx_no_color_index = leave */
/* alone).  Dispatches on a (raster-op, shift-direction) pair derived */
/* from copy_modes, then handles three layout cases: everything in */
/* one destination chunk, one source chunk into two destination */
/* chunks, and the general multi-chunk case (aligned or skewed). */
private int
mem_mono_copy_mono(gx_device *dev,
  byte *base, int sourcex, int sraster, gx_bitmap_id id,
  int x, int y, int w, int h, gx_color_index zero, gx_color_index one)
{	register byte *bptr;	/* actually chunk * */
	int dbit, wleft;
	uint mask;
	copy_mode mode;
#define function (copy_function)(mode.op)
	declare_scan_ptr_as(dbptr, byte *);
#define optr ((chunk *)dbptr)
	register int skew;
	register uint invert;
	check_rect();
	/* Normalize gx_no_color_index to -1 so the copy_modes index */
	/* arithmetic below works. */
#if gx_no_color_value != -1	/* hokey! */
	if ( zero == gx_no_color_index ) zero = -1;
	if ( one == gx_no_color_index ) one = -1;
#endif
#define izero (int)zero
#define ione (int)one
	/* Index = (invert & 9) + 3*zero + one + 4; see the table's */
	/* comment above. */
	mode =
		copy_modes[(mdev->invert & 9) + izero + izero + izero + ione + 4];
#undef izero
#undef ione
	invert = (uint)(int)mode.invert;	/* load register */
	setup_rect_as(dbptr, byte *);
	/* Point bptr at the chunk-aligned byte containing sourcex. */
	bptr = base + ((sourcex & ~chunk_bit_mask) >> 3);
	dbit = x & chunk_bit_mask;
	skew = dbit - (sourcex & chunk_bit_mask);
	/* We have to split the following statement */
	/* into two because of a bug in the DEC */
	/* VAX/VMS C compiler. */
	mask = chunk_all_bits;
	mask >>= dbit;
	/* Macros for writing partial chunks. */
	/* The destination pointer is always named optr, */
	/* and must be declared as chunk *. */
	/* cinvert may be temporarily redefined. */
#define cinvert(bits) ((bits) ^ invert)
#define write_or_masked(bits, mask, off)\
  optr[off] |= (cinvert(bits) & mask)
#define write_store_masked(bits, mask, off)\
  optr[off] = ((optr[off] & ~mask) | (cinvert(bits) & mask))
#define write_and_masked(bits, mask, off)\
  optr[off] &= (cinvert(bits) | ~mask)
	/* Macros for writing full chunks. */
#define write_or(bits)  *optr |= cinvert(bits)
#define write_store(bits)  *optr = cinvert(bits)
#define write_and(bits)  *optr &= cinvert(bits)
	/* Macro for incrementing to next chunk. */
#define next_x_chunk\
  bptr += chunk_bytes; dbptr += chunk_bytes
	/* Common macro for the end of each scan line. */
#define end_y_loop(sdelta, ddelta)\
  if ( --h == 0 ) break;\
  bptr += sdelta; dbptr += ddelta
	if ( (wleft = w + dbit - chunk_bits) <= 0 )
	   {	/* The entire operation fits in one (destination) chunk. */
		/* Some machines can't handle w == chunk_bits! */
#if arch_cant_shift_full_chunk
		if ( w != chunk_bits )
#endif
			mask -= mask >> w;
#define write_single(wr_op, src)\
  for ( ; ; )\
   { wr_op(src, mask, 0);\
     end_y_loop(sraster, draster);\
   }
#define write1_loop(src)\
  switch ( function ) {\
    case copy_or: write_single(write_or_masked, src); break;\
    case copy_store: write_single(write_store_masked, src); break;\
    case copy_and: write_single(write_and_masked, src); break;\
    default: goto funny;\
  }
		if ( skew >= 0 )	/* single -> single, right/no shift */
		   {	write1_loop(cfetch(bptr) >> skew);
		   }
		else if ( wleft <= skew )	/* single -> single, left shift */
		   {	skew = -skew;
			write1_loop(cfetch(bptr) << skew);
		   }
		else			/* double -> single */
		   {	int cskew = -skew;
			skew += chunk_bits;
			write1_loop(cfetch2(bptr, cskew, skew));
		   }
#undef write1_loop
#undef write_single
	   }
	else if ( wleft <= skew )
	   {	/* 1 source chunk -> 2 destination chunks. */
		/* This is an important special case for */
		/* both characters and halftone tiles. */
		register uint bits;
		uint rmask = chunk_hi_bits(wleft);
		int cskew = chunk_bits - skew;
#define write_1to2(wr_op)\
  for ( ; ; )\
   { bits = cfetch(bptr) ^ invert;\
     wr_op(bits >> skew, mask, 0);\
     wr_op(bits << cskew, rmask, 1);\
     end_y_loop(sraster, draster);\
   }
		/* The source is inverted once up front, so disable the */
		/* per-write inversion while write_1to2 is in use. */
#undef cinvert
#define cinvert(bits) (bits)	/* pre-inverted here */
		switch ( function )
		   {
		case copy_or: write_1to2(write_or_masked); break;
		case copy_store: write_1to2(write_store_masked); break;
		case copy_and: write_1to2(write_and_masked); break;
		default: goto funny;
		   }
#undef cinvert
#define cinvert(bits) ((bits) ^ invert)
#undef write_1to2
	   }
	else
	   {	/* More than one source chunk and more than one */
		/* destination chunk are involved. */
		uint rmask = chunk_hi_bits(wleft & chunk_bit_mask);
		int words = (wleft & ~chunk_bit_mask) >> 3;
		uint sskip = sraster - words;
		uint dskip = draster - words;
		register uint bits;
		if ( skew == 0 )	/* optimize the aligned case */
		   {
#define write_aligned(wr_op, wr_op_masked)\
  for ( ; ; )\
   { int count = wleft;\
     /* Do first partial chunk. */\
     wr_op_masked(cfetch(bptr), mask, 0);\
     /* Do full chunks. */\
     while ( (count -= chunk_bits) >= 0 )\
      { next_x_chunk; wr_op(cfetch(bptr)); }\
     /* Do last chunk */\
     if ( count > -chunk_bits )\
      { wr_op_masked(cfetch(bptr + chunk_bytes), rmask, 1); }\
     end_y_loop(sskip, dskip);\
   }
			switch ( function )
			   {
			case copy_or:
				write_aligned(write_or, write_or_masked);
				break;
			case copy_store:
				write_aligned(write_store, write_store_masked);
				break;
			case copy_and:
				write_aligned(write_and, write_and_masked);
				break;
			default:
				goto funny;
			   }
#undef write_aligned
		   }
		else			/* not aligned */
		   {	/* ccase combines shift direction and raster op */
			/* for a single switch; copy_left also pre-advances */
			/* bptr one chunk. */
			int ccase =
				(skew >= 0 ? copy_right :
				 ((bptr += chunk_bytes), copy_left)) + function;
			int cskew = -skew & chunk_bit_mask;
			skew &= chunk_bit_mask;
			for ( ; ; )
			   {	int count = wleft;
#define prefetch_right\
  bits = cfetch(bptr) >> skew
#define prefetch_left\
  bits = cfetch2(bptr - chunk_bytes, cskew, skew)
#define write_unaligned(wr_op, wr_op_masked)\
  wr_op_masked(bits, mask, 0);\
  /* Do full chunks. */\
  while ( count >= chunk_bits )\
   { bits = cfetch2(bptr, cskew, skew);\
     next_x_chunk; wr_op(bits); count -= chunk_bits;\
   }\
  /* Do last chunk */\
  if ( count > 0 )\
   { bits = cfetch(bptr) << cskew;\
     if ( count > skew ) bits += cfetch(bptr + chunk_bytes) >> skew;\
     wr_op_masked(bits, rmask, 1);\
   }
				switch ( ccase )
				   {
				case copy_or + copy_left:
					prefetch_left; goto uor;
				case copy_or + copy_right:
					prefetch_right;
uor:					write_unaligned(write_or, write_or_masked);
					break;
				case copy_store + copy_left:
					prefetch_left; goto ustore;
				case copy_store + copy_right:
					prefetch_right;
ustore:					write_unaligned(write_store, write_store_masked);
					break;
				case copy_and + copy_left:
					prefetch_left; goto uand;
				case copy_and + copy_right:
					prefetch_right;
uand:					write_unaligned(write_and, write_and_masked);
					break;
				default:
					goto funny;
				   }
				end_y_loop(sskip, dskip);
#undef write_unaligned
#undef prefetch_left
#undef prefetch_right
			   }
		   }
	   }
#undef end_y_loop
#undef next_x_chunk
	return 0;
	/* Handle the funny cases that aren't supposed to happen. */
funny:	return (invert ? -1 : mem_mono_fill_rectangle(dev, x, y, w, h, zero));
#undef optr
}