Subversion Repositories Kolibri OS

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
6725 siemargl 1
/*
2
  Copyright (c) 1990-2008 Info-ZIP.  All rights reserved.
3
 
4
  See the accompanying file LICENSE, version 2007-Mar-04 or later
5
  (the contents of which are also included in unzip.h) for terms of use.
6
  If, for some reason, all these files are missing, the Info-ZIP license
7
  also may be found at:  ftp://ftp.info-zip.org/pub/infozip/license.html
8
*/
9
/* inflate.c -- by Mark Adler
10
   version c17e, 30 Mar 2007 */
11
 
12
 
13
/* Copyright history:
14
   - Starting with UnZip 5.41 of 16-April-2000, this source file
15
     is covered by the Info-Zip LICENSE cited above.
16
   - Prior versions of this source file, found in UnZip source packages
17
     up to UnZip 5.40, were put in the public domain.
18
     The original copyright note by Mark Adler was:
19
         "You can do whatever you like with this source file,
20
         though I would prefer that if you modify it and
21
         redistribute it that you include comments to that effect
22
         with your name and the date.  Thank you."
23
 
24
   History:
25
   vers    date          who           what
26
   ----  ---------  --------------  ------------------------------------
27
    a    ~~ Feb 92  M. Adler        used full (large, one-step) lookup table
28
    b1   21 Mar 92  M. Adler        first version with partial lookup tables
29
    b2   21 Mar 92  M. Adler        fixed bug in fixed-code blocks
30
    b3   22 Mar 92  M. Adler        sped up match copies, cleaned up some
31
    b4   25 Mar 92  M. Adler        added prototypes; removed window[] (now
32
                                    is the responsibility of unzip.h--also
33
                                    changed name to slide[]), so needs diffs
34
                                    for unzip.c and unzip.h (this allows
35
                                    compiling in the small model on MSDOS);
36
                                    fixed cast of q in huft_build();
37
    b5   26 Mar 92  M. Adler        got rid of unintended macro recursion.
38
    b6   27 Mar 92  M. Adler        got rid of nextbyte() routine.  fixed
39
                                    bug in inflate_fixed().
40
    c1   30 Mar 92  M. Adler        removed lbits, dbits environment variables.
41
                                    changed BMAX to 16 for explode.  Removed
42
                                    OUTB usage, and replaced it with flush()--
43
                                    this was a 20% speed improvement!  Added
44
                                    an explode.c (to replace unimplod.c) that
45
                                    uses the huft routines here.  Removed
46
                                    register union.
47
    c2    4 Apr 92  M. Adler        fixed bug for file sizes a multiple of 32k.
48
    c3   10 Apr 92  M. Adler        reduced memory of code tables made by
49
                                    huft_build significantly (factor of two to
50
                                    three).
51
    c4   15 Apr 92  M. Adler        added NOMEMCPY do kill use of memcpy().
52
                                    worked around a Turbo C optimization bug.
53
    c5   21 Apr 92  M. Adler        added the WSIZE #define to allow reducing
54
                                    the 32K window size for specialized
55
                                    applications.
56
    c6   31 May 92  M. Adler        added some typecasts to eliminate warnings
57
    c7   27 Jun 92  G. Roelofs      added some more typecasts (444:  MSC bug).
58
    c8    5 Oct 92  J-l. Gailly     added ifdef'd code to deal with PKZIP bug.
59
    c9    9 Oct 92  M. Adler        removed a memory error message (~line 416).
60
    c10  17 Oct 92  G. Roelofs      changed ULONG/UWORD/byte to ulg/ush/uch,
61
                                    removed old inflate, renamed inflate_entry
62
                                    to inflate, added Mark's fix to a comment.
63
   c10.5 14 Dec 92  M. Adler        fix up error messages for incomplete trees.
64
    c11   2 Jan 93  M. Adler        fixed bug in detection of incomplete
65
                                    tables, and removed assumption that EOB is
66
                                    the longest code (bad assumption).
67
    c12   3 Jan 93  M. Adler        make tables for fixed blocks only once.
68
    c13   5 Jan 93  M. Adler        allow all zero length codes (pkzip 2.04c
69
                                    outputs one zero length code for an empty
70
                                    distance tree).
71
    c14  12 Mar 93  M. Adler        made inflate.c standalone with the
72
                                    introduction of inflate.h.
73
   c14b  16 Jul 93  G. Roelofs      added (unsigned) typecast to w at 470.
74
   c14c  19 Jul 93  J. Bush         changed v[N_MAX], l[288], ll[28x+3x] arrays
75
                                    to static for Amiga.
76
   c14d  13 Aug 93  J-l. Gailly     de-complicatified Mark's c[*p++]++ thing.
77
   c14e   8 Oct 93  G. Roelofs      changed memset() to memzero().
78
   c14f  22 Oct 93  G. Roelofs      renamed quietflg to qflag; made Trace()
79
                                    conditional; added inflate_free().
80
   c14g  28 Oct 93  G. Roelofs      changed l/(lx+1) macro to pointer (Cray bug)
81
   c14h   7 Dec 93  C. Ghisler      huft_build() optimizations.
82
   c14i   9 Jan 94  A. Verheijen    set fixed_t{d,l} to NULL after freeing;
83
                    G. Roelofs      check NEXTBYTE macro for EOF.
84
   c14j  23 Jan 94  G. Roelofs      removed Ghisler "optimizations"; ifdef'd
85
                                    EOF check.
86
   c14k  27 Feb 94  G. Roelofs      added some typecasts to avoid warnings.
87
   c14l   9 Apr 94  G. Roelofs      fixed split comments on preprocessor lines
88
                                    to avoid bug in Encore compiler.
89
   c14m   7 Jul 94  P. Kienitz      modified to allow assembler version of
90
                                    inflate_codes() (define ASM_INFLATECODES)
91
   c14n  22 Jul 94  G. Roelofs      changed fprintf to macro for DLL versions
92
   c14o  23 Aug 94  C. Spieler      added a newline to a debug statement;
93
                    G. Roelofs      added another typecast to avoid MSC warning
94
   c14p   4 Oct 94  G. Roelofs      added (voidp *) cast to free() argument
95
   c14q  30 Oct 94  G. Roelofs      changed fprintf macro to MESSAGE()
96
   c14r   1 Nov 94  G. Roelofs      fixed possible redefinition of CHECK_EOF
97
   c14s   7 May 95  S. Maxwell      OS/2 DLL globals stuff incorporated;
98
                    P. Kienitz      "fixed" ASM_INFLATECODES macro/prototype
99
   c14t  18 Aug 95  G. Roelofs      added UZinflate() to use zlib functions;
100
                                    changed voidp to zvoid; moved huft_build()
101
                                    and huft_free() to end of file
102
   c14u   1 Oct 95  G. Roelofs      moved G into definition of MESSAGE macro
103
   c14v   8 Nov 95  P. Kienitz      changed ASM_INFLATECODES to use a regular
104
                                    call with __G__ instead of a macro
105
    c15   3 Aug 96  M. Adler        fixed bomb-bug on random input data (Adobe)
106
   c15b  24 Aug 96  M. Adler        more fixes for random input data
107
   c15c  28 Mar 97  G. Roelofs      changed USE_ZLIB fatal exit code from
108
                                    PK_MEM2 to PK_MEM3
109
    c16  20 Apr 97  J. Altman       added memzero(v[]) in huft_build()
110
   c16b  29 Mar 98  C. Spieler      modified DLL code for slide redirection
111
   c16c  04 Apr 99  C. Spieler      fixed memory leaks when processing gets
112
                                    stopped because of input data errors
113
   c16d  05 Jul 99  C. Spieler      take care of FLUSH() return values and
114
                                    stop processing in case of errors
115
    c17  31 Dec 00  C. Spieler      added preliminary support for Deflate64
116
   c17a  04 Feb 01  C. Spieler      complete integration of Deflate64 support
117
   c17b  16 Feb 02  C. Spieler      changed type of "extra bits" arrays and
118
                                    corresponding huft_build() parameter e from
119
                                    ush into uch, to save space
120
   c17c   9 Mar 02  C. Spieler      fixed NEEDBITS() "read beyond EOF" problem
121
                                    with CHECK_EOF enabled
122
   c17d  23 Jul 05  C. Spieler      fixed memory leaks in inflate_dynamic()
123
                                    when processing invalid compressed literal/
124
                                    distance table data
125
   c17e  30 Mar 07  C. Spieler      in inflate_dynamic(), initialize tl and td
126
                                    to prevent freeing unallocated huft tables
127
                                    when processing invalid compressed data and
128
                                    hitting premature EOF, do not reuse td as
129
                                    temp work ptr during tables decoding
130
 */
131
 
132
 
133
/*
134
   Inflate deflated (PKZIP's method 8 compressed) data.  The compression
135
   method searches for as much of the current string of bytes (up to a
136
   length of 258) in the previous 32K bytes.  If it doesn't find any
137
   matches (of at least length 3), it codes the next byte.  Otherwise, it
138
   codes the length of the matched string and its distance backwards from
139
   the current position.  There is a single Huffman code that codes both
140
   single bytes (called "literals") and match lengths.  A second Huffman
141
   code codes the distance information, which follows a length code.  Each
142
   length or distance code actually represents a base value and a number
143
   of "extra" (sometimes zero) bits to get to add to the base value.  At
144
   the end of each deflated block is a special end-of-block (EOB) literal/
145
   length code.  The decoding process is basically: get a literal/length
146
   code; if EOB then done; if a literal, emit the decoded byte; if a
147
   length then get the distance and emit the referred-to bytes from the
148
   sliding window of previously emitted data.
149
 
150
   There are (currently) three kinds of inflate blocks: stored, fixed, and
151
   dynamic.  The compressor outputs a chunk of data at a time and decides
152
   which method to use on a chunk-by-chunk basis.  A chunk might typically
153
   be 32K to 64K, uncompressed.  If the chunk is uncompressible, then the
154
   "stored" method is used.  In this case, the bytes are simply stored as
155
   is, eight bits per byte, with none of the above coding.  The bytes are
156
   preceded by a count, since there is no longer an EOB code.
157
 
158
   If the data are compressible, then either the fixed or dynamic methods
159
   are used.  In the dynamic method, the compressed data are preceded by
160
   an encoding of the literal/length and distance Huffman codes that are
161
   to be used to decode this block.  The representation is itself Huffman
162
   coded, and so is preceded by a description of that code.  These code
163
   descriptions take up a little space, and so for small blocks, there is
164
   a predefined set of codes, called the fixed codes.  The fixed method is
165
   used if the block ends up smaller that way (usually for quite small
166
   chunks); otherwise the dynamic method is used.  In the latter case, the
167
   codes are customized to the probabilities in the current block and so
168
   can code it much better than the pre-determined fixed codes can.
169
 
170
   The Huffman codes themselves are decoded using a multi-level table
171
   lookup, in order to maximize the speed of decoding plus the speed of
172
   building the decoding tables.  See the comments below that precede the
173
   lbits and dbits tuning parameters.
174
 
175
   GRR:  return values(?)
176
 
177
           1  incomplete table
178
           2  bad input
179
           3  not enough memory
180
         the following return codes are passed through from FLUSH() errors
181
           50 (PK_DISK)   "overflow of output space"
182
           80 (IZ_CTRLC)  "canceled by user's request"
183
 */
184
 
185
 
186
/*
187
   Notes beyond the 1.93a appnote.txt:
188
 
189
   1. Distance pointers never point before the beginning of the output
190
      stream.
191
   2. Distance pointers can point back across blocks, up to 32k away.
192
   3. There is an implied maximum of 7 bits for the bit length table and
193
      15 bits for the actual data.
194
   4. If only one code exists, then it is encoded using one bit.  (Zero
195
      would be more efficient, but perhaps a little confusing.)  If two
196
      codes exist, they are coded using one bit each (0 and 1).
197
   5. There is no way of sending zero distance codes--a dummy must be
198
      sent if there are none.  (History: a pre 2.0 version of PKZIP would
199
      store blocks with no distance codes, but this was discovered to be
200
      too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
201
      zero distance codes, which is sent as one code of zero bits in
202
      length.
203
   6. There are up to 286 literal/length codes.  Code 256 represents the
204
      end-of-block.  Note however that the static length tree defines
205
      288 codes just to fill out the Huffman codes.  Codes 286 and 287
206
      cannot be used though, since there is no length base or extra bits
207
      defined for them.  Similarily, there are up to 30 distance codes.
208
      However, static trees define 32 codes (all 5 bits) to fill out the
209
      Huffman codes, but the last two had better not show up in the data.
210
   7. Unzip can check dynamic Huffman blocks for complete code sets.
211
      The exception is that a single code would not be complete (see #4).
212
   8. The five bits following the block type is really the number of
213
      literal codes sent minus 257.
214
   9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
215
      (1+6+6).  Therefore, to output three times the length, you output
216
      three codes (1+1+1), whereas to output four times the same length,
217
      you only need two codes (1+3).  Hmm.
218
  10. In the tree reconstruction algorithm, Code = Code + Increment
219
      only if BitLength(i) is not zero.  (Pretty obvious.)
220
  11. Correction: 4 Bits: # of Bit Length codes - 4     (4 - 19)
221
  12. Note: length code 284 can represent 227-258, but length code 285
222
      really is 258.  The last length deserves its own, short code
223
      since it gets used a lot in very redundant files.  The length
224
      258 is special since 258 - 3 (the min match length) is 255.
225
  13. The literal/length and distance code bit lengths are read as a
226
      single stream of lengths.  It is possible (and advantageous) for
227
      a repeat code (16, 17, or 18) to go across the boundary between
228
      the two sets of lengths.
229
  14. The Deflate64 (PKZIP method 9) variant of the compression algorithm
230
      differs from "classic" deflate in the following 3 aspect:
231
      a) The size of the sliding history window is expanded to 64 kByte.
232
      b) The previously unused distance codes #30 and #31 code distances
233
         from 32769 to 49152 and 49153 to 65536.  Both codes take 14 bits
234
         of extra data to determine the exact position in their 16 kByte
235
         range.
236
      c) The last lit/length code #285 gets a different meaning. Instead
237
         of coding a fixed maximum match length of 258, it is used as a
238
         "generic" match length code, capable of coding any length from
239
         3 (min match length + 0) to 65538 (min match length + 65535).
240
         This means that the length code #285 takes 16 bits (!) of uncoded
241
         extra data, added to a fixed min length of 3.
242
      Changes a) and b) would have been transparent for valid deflated
243
      data, but change c) requires to switch decoder configurations between
244
      Deflate and Deflate64 modes.
245
 */
246
 
247
 
248
#define PKZIP_BUG_WORKAROUND    /* PKZIP 1.93a problem--live with it */
249
 
250
/*
251
    inflate.h must supply the uch slide[WSIZE] array, the zvoid typedef
252
    (void if (void *) is accepted, else char) and the NEXTBYTE,
253
    FLUSH() and memzero macros.  If the window size is not 32K, it
254
    should also define WSIZE.  If INFMOD is defined, it can include
255
    compiled functions to support the NEXTBYTE and/or FLUSH() macros.
256
    There are defaults for NEXTBYTE and FLUSH() below for use as
257
    examples of what those functions need to do.  Normally, you would
258
    also want FLUSH() to compute a crc on the data.  inflate.h also
259
    needs to provide these typedefs:
260
 
261
        typedef unsigned char uch;
262
        typedef unsigned short ush;
263
        typedef unsigned long ulg;
264
 
265
    This module uses the external functions malloc() and free() (and
266
    probably memset() or bzero() in the memzero() macro).  Their
267
    prototypes are normally found in  and .
268
 */
269
 
270
#define __INFLATE_C     /* identifies this source module */
271
 
272
/* #define DEBUG */
273
#define INFMOD          /* tell inflate.h to include code to be compiled */
274
#include "inflate.h"
275
 
276
 
277
/* marker for "unused" huft code, and corresponding check macro */
278
#define INVALID_CODE 99
279
#define IS_INVALID_CODE(c)  ((c) == INVALID_CODE)
280
 
281
#ifndef WSIZE               /* default is 32K resp. 64K */
282
#  ifdef USE_DEFLATE64
283
#    define WSIZE   65536L  /* window size--must be a power of two, and */
284
#  else                     /*  at least 64K for PKZip's deflate64 method */
285
#    define WSIZE   0x8000  /* window size--must be a power of two, and */
286
#  endif                    /*  at least 32K for zip's deflate method */
287
#endif
288
 
289
/* some buffer counters must be capable of holding 64k for Deflate64 */
290
#if (defined(USE_DEFLATE64) && defined(INT_16BIT))
291
#  define UINT_D64 ulg
292
#else
293
#  define UINT_D64 unsigned
294
#endif
295
 
296
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
297
#  define wsize G._wsize    /* wsize is a variable */
298
#else
299
#  define wsize WSIZE       /* wsize is a constant */
300
#endif
301
 
302
 
303
#ifndef NEXTBYTE        /* default is to simply get a byte from stdin */
304
#  define NEXTBYTE getchar()
305
#endif
306
 
307
#ifndef MESSAGE   /* only used twice, for fixed strings--NOT general-purpose */
308
#  define MESSAGE(str,len,flag)  fprintf(stderr,(char *)(str))
309
#endif
310
 
311
#ifndef FLUSH           /* default is to simply write the buffer to stdout */
   /* Evaluates to 0 on a complete write, PKDISK on a short write.  The
      final "0 : PKDISK" continuation line was lost in this copy of the
      file and has been restored from the Info-ZIP UnZip sources
      (inflate.c, version c17e); without it the conditional expression is
      unterminated and the translation unit cannot compile. */
#  define FLUSH(n) \
    (((extent)fwrite(redirSlide, 1, (extent)(n), stdout) == (extent)(n)) ? \
     0 : PKDISK)
#endif
316
/* Warning: the fwrite above might not work on 16-bit compilers, since
317
   0x8000 might be interpreted as -32,768 by the library function.  When
318
   support for Deflate64 is enabled, the window size is 64K and the
319
   simple fwrite statement is definitely broken for 16-bit compilers. */
320
 
321
#ifndef Trace
322
#  ifdef DEBUG
323
#    define Trace(x) fprintf x
324
#  else
325
#    define Trace(x)
326
#  endif
327
#endif
328
 
329
 
330
/*---------------------------------------------------------------------------*/
331
#ifdef USE_ZLIB
332
 
333
/* Beginning with zlib version 1.2.0, a new inflate callback interface is
334
   provided that allows tighter integration of the zlib inflate service
335
   into unzip's extraction framework.
336
   The advantages are:
337
   - uses the windows buffer supplied by the unzip code; this saves one
338
     copy process between zlib's internal decompression buffer and unzip's
339
     post-decompression output buffer and improves performance.
340
   - does not pull in unused checksum code (adler32).
341
   The preprocessor flag NO_ZLIBCALLBCK can be set to force usage of the
342
   old zlib 1.1.x interface, for testing purpose.
343
 */
344
#ifdef USE_ZLIB_INFLATCB
345
#  undef USE_ZLIB_INFLATCB
346
#endif
347
#if (defined(ZLIB_VERNUM) && ZLIB_VERNUM >= 0x1200 && !defined(NO_ZLIBCALLBCK))
348
#  define USE_ZLIB_INFLATCB 1
349
#else
350
#  define USE_ZLIB_INFLATCB 0
351
#endif
352
 
353
/* Check for incompatible combinations of zlib and Deflate64 support. */
354
#if defined(USE_DEFLATE64)
355
# if !USE_ZLIB_INFLATCB
356
  #error Deflate64 is incompatible with traditional (pre-1.2.x) zlib interface!
357
# else
358
   /* The Deflate64 callback function in the framework of zlib 1.2.x requires
359
      the inclusion of the unsupported infback9 header file:
360
    */
361
#  include "infback9.h"
362
# endif
363
#endif /* USE_DEFLATE64 */
364
 
365
 
366
#if USE_ZLIB_INFLATCB
367
 
368
static unsigned zlib_inCB OF((void FAR *pG, unsigned char FAR * FAR * pInbuf));
369
static int zlib_outCB OF((void FAR *pG, unsigned char FAR *outbuf,
370
                          unsigned outcnt));
371
 
372
static unsigned zlib_inCB(pG, pInbuf)
373
    void FAR *pG;
374
    unsigned char FAR * FAR * pInbuf;
375
{
376
    *pInbuf = G.inbuf;
377
    return fillinbuf(__G);
378
}
379
 
380
static int zlib_outCB(pG, outbuf, outcnt)
381
    void FAR *pG;
382
    unsigned char FAR *outbuf;
383
    unsigned outcnt;
384
{
385
#ifdef FUNZIP
386
    return flush(__G__ (ulg)(outcnt));
387
#else
388
    return ((G.mem_mode) ? memflush(__G__ outbuf, (ulg)(outcnt))
389
                         : flush(__G__ outbuf, (ulg)(outcnt), 0));
390
#endif
391
}
392
#endif /* USE_ZLIB_INFLATCB */
393
 
394
 
395
/*
396
   GRR:  return values for both original inflate() and UZinflate()
397
 
398
           1  incomplete table(?)
399
           2  bad input
400
           3  not enough memory
401
 */
402
 
403
/**************************/
404
/*  Function UZinflate()  */
405
/**************************/
406
 
407
int UZinflate(__G__ is_defl64)
408
    __GDEF
409
    int is_defl64;
410
/* decompress an inflated entry using the zlib routines */
411
{
412
    int retval = 0;     /* return code: 0 = "no error" */
413
    int err=Z_OK;
414
#if USE_ZLIB_INFLATCB
415
 
416
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
417
    if (G.redirect_slide)
418
        wsize = G.redirect_size, redirSlide = G.redirect_buffer;
419
    else
420
        wsize = WSIZE, redirSlide = slide;
421
#endif
422
 
423
    if (!G.inflInit) {
424
        /* local buffer for efficiency */
425
        ZCONST char *zlib_RtVersion = zlibVersion();
426
 
427
        /* only need to test this stuff once */
428
        if ((zlib_RtVersion[0] != ZLIB_VERSION[0]) ||
429
            (zlib_RtVersion[2] != ZLIB_VERSION[2])) {
430
            Info(slide, 0x21, ((char *)slide,
431
              "error:  incompatible zlib version (expected %s, found %s)\n",
432
              ZLIB_VERSION, zlib_RtVersion));
433
            return 3;
434
        } else if (strcmp(zlib_RtVersion, ZLIB_VERSION) != 0)
435
            Info(slide, 0x21, ((char *)slide,
436
              "warning:  different zlib version (expected %s, using %s)\n",
437
              ZLIB_VERSION, zlib_RtVersion));
438
 
439
        G.dstrm.zalloc = (alloc_func)Z_NULL;
440
        G.dstrm.zfree = (free_func)Z_NULL;
441
 
442
        G.inflInit = 1;
443
    }
444
 
445
#ifdef USE_DEFLATE64
446
    if (is_defl64)
447
    {
448
        Trace((stderr, "initializing inflate9()\n"));
449
        err = inflateBack9Init(&G.dstrm, redirSlide);
450
 
451
        if (err == Z_MEM_ERROR)
452
            return 3;
453
        else if (err != Z_OK) {
454
            Trace((stderr, "oops!  (inflateBack9Init() err = %d)\n", err));
455
            return 2;
456
        }
457
 
458
        G.dstrm.next_in = G.inptr;
459
        G.dstrm.avail_in = G.incnt;
460
 
461
        err = inflateBack9(&G.dstrm, zlib_inCB, &G, zlib_outCB, &G);
462
        if (err != Z_STREAM_END) {
463
            if (err == Z_DATA_ERROR || err == Z_STREAM_ERROR) {
464
                Trace((stderr, "oops!  (inflateBack9() err = %d)\n", err));
465
                retval = 2;
466
            } else if (err == Z_MEM_ERROR) {
467
                retval = 3;
468
            } else if (err == Z_BUF_ERROR) {
469
                Trace((stderr, "oops!  (inflateBack9() err = %d)\n", err));
470
                if (G.dstrm.next_in == Z_NULL) {
471
                    /* input failure */
472
                    Trace((stderr, "  inflateBack9() input failure\n"));
473
                    retval = 2;
474
                } else {
475
                    /* output write failure */
476
                    retval = (G.disk_full != 0 ? PK_DISK : IZ_CTRLC);
477
                }
478
            } else {
479
                Trace((stderr, "oops!  (inflateBack9() err = %d)\n", err));
480
                retval = 2;
481
            }
482
        }
483
        if (G.dstrm.next_in != NULL) {
484
            G.inptr = (uch *)G.dstrm.next_in;
485
            G.incnt = G.dstrm.avail_in;
486
        }
487
 
488
        err = inflateBack9End(&G.dstrm);
489
        if (err != Z_OK) {
490
            Trace((stderr, "oops!  (inflateBack9End() err = %d)\n", err));
491
            if (retval == 0)
492
                retval = 2;
493
        }
494
    }
495
    else
496
#endif /* USE_DEFLATE64 */
497
    {
498
        /* For the callback interface, inflate initialization has to
499
           be called before each decompression call.
500
         */
501
        {
502
            unsigned i;
503
            int windowBits;
504
            /* windowBits = log2(wsize) */
505
            for (i = (unsigned)wsize, windowBits = 0;
506
                 !(i & 1);  i >>= 1, ++windowBits);
507
            if ((unsigned)windowBits > (unsigned)15)
508
                windowBits = 15;
509
            else if (windowBits < 8)
510
                windowBits = 8;
511
 
512
            Trace((stderr, "initializing inflate()\n"));
513
            err = inflateBackInit(&G.dstrm, windowBits, redirSlide);
514
 
515
            if (err == Z_MEM_ERROR)
516
                return 3;
517
            else if (err != Z_OK) {
518
                Trace((stderr, "oops!  (inflateBackInit() err = %d)\n", err));
519
                return 2;
520
            }
521
        }
522
 
523
        G.dstrm.next_in = G.inptr;
524
        G.dstrm.avail_in = G.incnt;
525
 
526
        err = inflateBack(&G.dstrm, zlib_inCB, &G, zlib_outCB, &G);
527
        if (err != Z_STREAM_END) {
528
            if (err == Z_DATA_ERROR || err == Z_STREAM_ERROR) {
529
                Trace((stderr, "oops!  (inflateBack() err = %d)\n", err));
530
                retval = 2;
531
            } else if (err == Z_MEM_ERROR) {
532
                retval = 3;
533
            } else if (err == Z_BUF_ERROR) {
534
                Trace((stderr, "oops!  (inflateBack() err = %d)\n", err));
535
                if (G.dstrm.next_in == Z_NULL) {
536
                    /* input failure */
537
                    Trace((stderr, "  inflateBack() input failure\n"));
538
                    retval = 2;
539
                } else {
540
                    /* output write failure */
541
                    retval = (G.disk_full != 0 ? PK_DISK : IZ_CTRLC);
542
                }
543
            } else {
544
                Trace((stderr, "oops!  (inflateBack() err = %d)\n", err));
545
                retval = 2;
546
            }
547
        }
548
        if (G.dstrm.next_in != NULL) {
549
            G.inptr = (uch *)G.dstrm.next_in;
550
            G.incnt = G.dstrm.avail_in;
551
        }
552
 
553
        err = inflateBackEnd(&G.dstrm);
554
        if (err != Z_OK) {
555
            Trace((stderr, "oops!  (inflateBackEnd() err = %d)\n", err));
556
            if (retval == 0)
557
                retval = 2;
558
        }
559
    }
560
 
561
#else /* !USE_ZLIB_INFLATCB */
562
    int repeated_buf_err;
563
 
564
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
565
    if (G.redirect_slide)
566
        wsize = G.redirect_size, redirSlide = G.redirect_buffer;
567
    else
568
        wsize = WSIZE, redirSlide = slide;
569
#endif
570
 
571
    G.dstrm.next_out = redirSlide;
572
    G.dstrm.avail_out = wsize;
573
 
574
    G.dstrm.next_in = G.inptr;
575
    G.dstrm.avail_in = G.incnt;
576
 
577
    if (!G.inflInit) {
578
        unsigned i;
579
        int windowBits;
580
        /* local buffer for efficiency */
581
        ZCONST char *zlib_RtVersion = zlibVersion();
582
 
583
        /* only need to test this stuff once */
584
        if (zlib_RtVersion[0] != ZLIB_VERSION[0]) {
585
            Info(slide, 0x21, ((char *)slide,
586
              "error:  incompatible zlib version (expected %s, found %s)\n",
587
              ZLIB_VERSION, zlib_RtVersion));
588
            return 3;
589
        } else if (strcmp(zlib_RtVersion, ZLIB_VERSION) != 0)
590
            Info(slide, 0x21, ((char *)slide,
591
              "warning:  different zlib version (expected %s, using %s)\n",
592
              ZLIB_VERSION, zlib_RtVersion));
593
 
594
        /* windowBits = log2(wsize) */
595
        for (i = (unsigned)wsize, windowBits = 0;
596
             !(i & 1);  i >>= 1, ++windowBits);
597
        if ((unsigned)windowBits > (unsigned)15)
598
            windowBits = 15;
599
        else if (windowBits < 8)
600
            windowBits = 8;
601
 
602
        G.dstrm.zalloc = (alloc_func)Z_NULL;
603
        G.dstrm.zfree = (free_func)Z_NULL;
604
 
605
        Trace((stderr, "initializing inflate()\n"));
606
        err = inflateInit2(&G.dstrm, -windowBits);
607
 
608
        if (err == Z_MEM_ERROR)
609
            return 3;
610
        else if (err != Z_OK)
611
            Trace((stderr, "oops!  (inflateInit2() err = %d)\n", err));
612
        G.inflInit = 1;
613
    }
614
 
615
#ifdef FUNZIP
616
    while (err != Z_STREAM_END) {
617
#else /* !FUNZIP */
618
    while (G.csize > 0) {
619
        Trace((stderr, "first loop:  G.csize = %ld\n", G.csize));
620
#endif /* ?FUNZIP */
621
        while (G.dstrm.avail_out > 0) {
622
            err = inflate(&G.dstrm, Z_PARTIAL_FLUSH);
623
 
624
            if (err == Z_DATA_ERROR) {
625
                retval = 2; goto uzinflate_cleanup_exit;
626
            } else if (err == Z_MEM_ERROR) {
627
                retval = 3; goto uzinflate_cleanup_exit;
628
            } else if (err != Z_OK && err != Z_STREAM_END)
629
                Trace((stderr, "oops!  (inflate(first loop) err = %d)\n", err));
630
 
631
#ifdef FUNZIP
632
            if (err == Z_STREAM_END)    /* "END-of-entry-condition" ? */
633
#else /* !FUNZIP */
634
            if (G.csize <= 0L)          /* "END-of-entry-condition" ? */
635
#endif /* ?FUNZIP */
636
                break;
637
 
638
            if (G.dstrm.avail_in == 0) {
639
                if (fillinbuf(__G) == 0) {
640
                    /* no "END-condition" yet, but no more data */
641
                    retval = 2; goto uzinflate_cleanup_exit;
642
                }
643
 
644
                G.dstrm.next_in = G.inptr;
645
                G.dstrm.avail_in = G.incnt;
646
            }
647
            Trace((stderr, "     avail_in = %u\n", G.dstrm.avail_in));
648
        }
649
        /* flush slide[] */
650
        if ((retval = FLUSH(wsize - G.dstrm.avail_out)) != 0)
651
            goto uzinflate_cleanup_exit;
652
        Trace((stderr, "inside loop:  flushing %ld bytes (ptr diff = %ld)\n",
653
          (long)(wsize - G.dstrm.avail_out),
654
          (long)(G.dstrm.next_out-(Bytef *)redirSlide)));
655
        G.dstrm.next_out = redirSlide;
656
        G.dstrm.avail_out = wsize;
657
    }
658
 
659
    /* no more input, so loop until we have all output */
660
    Trace((stderr, "beginning final loop:  err = %d\n", err));
661
    repeated_buf_err = FALSE;
662
    while (err != Z_STREAM_END) {
663
        err = inflate(&G.dstrm, Z_PARTIAL_FLUSH);
664
        if (err == Z_DATA_ERROR) {
665
            retval = 2; goto uzinflate_cleanup_exit;
666
        } else if (err == Z_MEM_ERROR) {
667
            retval = 3; goto uzinflate_cleanup_exit;
668
        } else if (err == Z_BUF_ERROR) {                /* DEBUG */
669
#ifdef FUNZIP
670
            Trace((stderr,
671
                   "zlib inflate() did not detect stream end\n"));
672
#else
673
            Trace((stderr,
674
                   "zlib inflate() did not detect stream end (%s, %s)\n",
675
                   G.zipfn, G.filename));
676
#endif
677
            if ((!repeated_buf_err) && (G.dstrm.avail_in == 0)) {
678
                /* when detecting this problem for the first time,
679
                   try to provide one fake byte beyond "EOF"... */
680
                G.dstrm.next_in = "";
681
                G.dstrm.avail_in = 1;
682
                repeated_buf_err = TRUE;
683
            } else
684
                break;
685
        } else if (err != Z_OK && err != Z_STREAM_END) {
686
            Trace((stderr, "oops!  (inflate(final loop) err = %d)\n", err));
687
            DESTROYGLOBALS();
688
            EXIT(PK_MEM3);
689
        }
690
        /* final flush of slide[] */
691
        if ((retval = FLUSH(wsize - G.dstrm.avail_out)) != 0)
692
            goto uzinflate_cleanup_exit;
693
        Trace((stderr, "final loop:  flushing %ld bytes (ptr diff = %ld)\n",
694
          (long)(wsize - G.dstrm.avail_out),
695
          (long)(G.dstrm.next_out-(Bytef *)redirSlide)));
696
        G.dstrm.next_out = redirSlide;
697
        G.dstrm.avail_out = wsize;
698
    }
699
    Trace((stderr, "total in = %lu, total out = %lu\n", G.dstrm.total_in,
700
      G.dstrm.total_out));
701
 
702
    G.inptr = (uch *)G.dstrm.next_in;
703
    G.incnt = (G.inbuf + INBUFSIZ) - G.inptr;  /* reset for other routines */
704
 
705
uzinflate_cleanup_exit:
706
    err = inflateReset(&G.dstrm);
707
    if (err != Z_OK)
708
        Trace((stderr, "oops!  (inflateReset() err = %d)\n", err));
709
 
710
#endif /* ?USE_ZLIB_INFLATCB */
711
    return retval;
712
}
713
 
714
 
715
/*---------------------------------------------------------------------------*/
716
#else /* !USE_ZLIB */
717
 
718
 
719
/* Function prototypes */
720
#ifndef OF
721
#  ifdef __STDC__
722
#    define OF(a) a
723
#  else
724
#    define OF(a) ()
725
#  endif
726
#endif /* !OF */
727
int inflate_codes OF((__GPRO__ struct huft *tl, struct huft *td,
728
                      unsigned bl, unsigned bd));
729
static int inflate_stored OF((__GPRO));
730
static int inflate_fixed OF((__GPRO));
731
static int inflate_dynamic OF((__GPRO));
732
static int inflate_block OF((__GPRO__ int *e));
733
 
734
 
735
/* The inflate algorithm uses a sliding 32K byte window on the uncompressed
736
   stream to find repeated byte strings.  This is implemented here as a
737
   circular buffer.  The index is updated simply by incrementing and then
738
   and'ing with 0x7fff (32K-1). */
739
/* It is left to other modules to supply the 32K area.  It is assumed
740
   to be usable as if it were declared "uch slide[32768];" or as just
741
   "uch *slide;" and then malloc'ed in the latter case.  The definition
742
   must be in unzip.h, included above. */
743
 
744
 
745
/* unsigned wp;  moved to globals.h */     /* current position in slide */
746
 
747
/* Tables for deflate from PKZIP's appnote.txt. */
748
/* - Order of the bit length code lengths */
749
static ZCONST unsigned border[] = {
750
        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
751
 
752
/* - Copy lengths for literal codes 257..285 */
753
#ifdef USE_DEFLATE64
754
static ZCONST ush cplens64[] = {
755
        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
756
        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 3, 0, 0};
757
        /* For Deflate64, the code 285 is defined differently. */
758
#else
759
#  define cplens32 cplens
760
#endif
761
static ZCONST ush cplens32[] = {
762
        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
763
        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
764
        /* note: see note #13 above about the 258 in this list. */
765
/* - Extra bits for literal codes 257..285 */
766
#ifdef USE_DEFLATE64
767
static ZCONST uch cplext64[] = {
768
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
769
        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 16, INVALID_CODE, INVALID_CODE};
770
#else
771
#  define cplext32 cplext
772
#endif
773
static ZCONST uch cplext32[] = {
774
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
775
        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, INVALID_CODE, INVALID_CODE};
776
 
777
/* - Copy offsets for distance codes 0..29 (0..31 for Deflate64) */
778
static ZCONST ush cpdist[] = {
779
        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
780
        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
781
#if (defined(USE_DEFLATE64) || defined(PKZIP_BUG_WORKAROUND))
782
        8193, 12289, 16385, 24577, 32769, 49153};
783
#else
784
        8193, 12289, 16385, 24577};
785
#endif
786
 
787
/* - Extra bits for distance codes 0..29 (0..31 for Deflate64) */
788
#ifdef USE_DEFLATE64
789
static ZCONST uch cpdext64[] = {
790
        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
791
        7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
792
        12, 12, 13, 13, 14, 14};
793
#else
794
#  define cpdext32 cpdext
795
#endif
796
static ZCONST uch cpdext32[] = {
797
        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
798
        7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
799
#ifdef PKZIP_BUG_WORKAROUND
800
        12, 12, 13, 13, INVALID_CODE, INVALID_CODE};
801
#else
802
        12, 12, 13, 13};
803
#endif
804
 
805
#ifdef PKZIP_BUG_WORKAROUND
806
#  define MAXLITLENS 288
807
#else
808
#  define MAXLITLENS 286
809
#endif
810
#if (defined(USE_DEFLATE64) || defined(PKZIP_BUG_WORKAROUND))
811
#  define MAXDISTS 32
812
#else
813
#  define MAXDISTS 30
814
#endif
815
 
816
 
817
/* moved to consts.h (included in unzip.c), resp. funzip.c */
818
#if 0
819
/* And'ing with mask_bits[n] masks the lower n bits */
820
ZCONST unsigned near mask_bits[17] = {
821
    0x0000,
822
    0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
823
    0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
824
};
825
#endif /* 0 */
826
 
827
 
828
/* Macros for inflate() bit peeking and grabbing.
829
   The usage is:
830
 
831
        NEEDBITS(j)
832
        x = b & mask_bits[j];
833
        DUMPBITS(j)
834
 
835
   where NEEDBITS makes sure that b has at least j bits in it, and
836
   DUMPBITS removes the bits from b.  The macros use the variable k
837
   for the number of bits in b.  Normally, b and k are register
838
   variables for speed and are initialized at the beginning of a
839
   routine that uses these macros from a global bit buffer and count.
840
 
841
   In order to not ask for more bits than there are in the compressed
842
   stream, the Huffman tables are constructed to only ask for just
843
   enough bits to make up the end-of-block code (value 256).  Then no
844
   bytes need to be "returned" to the buffer at the end of the last
845
   block.  See the huft_build() routine.
846
 
847
   Actually, the precautions mentioned above are not sufficient to
848
   prevent fetches of bits beyound the end of the last block in every
849
   case. When the last code fetched before the end-of-block code was
850
   a very short distance code (shorter than "distance-prefetch-bits" -
851
   "end-of-block code bits"), this last distance code fetch already
852
   exausts the available data.  To prevent failure of extraction in this
853
   case, the "read beyond EOF" check delays the raise of the "invalid
854
   data" error until an actual overflow of "used data" is detected.
855
   This error condition is only fulfilled when the "number of available
856
   bits" counter k is found to be negative in the NEEDBITS() macro.
857
 
858
   An alternate fix for that problem adjusts the size of the distance code
859
   base table so that it does not exceed the length of the end-of-block code
860
   plus the minimum length of a distance code. This alternate fix can be
861
   enabled by defining the preprocessor symbol FIX_PAST_EOB_BY_TABLEADJUST.
862
 */
863
 
864
/* These have been moved to globals.h */
865
#if 0
866
ulg bb;                         /* bit buffer */
867
unsigned bk;                    /* bits in bit buffer */
868
#endif
869
 
870
#ifndef CHECK_EOF
#  define CHECK_EOF   /* default as of 5.13/5.2 */
#endif

/* NEEDBITS(n): ensure at least n bits are in bit buffer b (bit count k),
   pulling bytes via NEXTBYTE.  The CHECK_EOF variants detect input
   exhaustion; the non-table-adjust variant tolerates one short read past
   EOB (see the "read beyond EOF" discussion above) and only errors out
   when k actually goes negative.
   NOTE(review): the scraped copy of this file had the `<<k;k+=8;}}` macro
   tails eaten by HTML escaping; restored here from the canonical
   Info-ZIP inflate.c. */
#ifndef CHECK_EOF
#  define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE)<<k;k+=8;}}
#else
# ifdef FIX_PAST_EOB_BY_TABLEADJUST
#  define NEEDBITS(n) {while(k<(n)){int c=NEXTBYTE;\
    if(c==EOF){retval=1;goto cleanup_and_exit;}\
    b|=((ulg)c)<<k;k+=8;}}
# else
#  define NEEDBITS(n) {while((int)k<(int)(n)){int c=NEXTBYTE;\
    if(c==EOF){if((int)k>=0)break;retval=1;goto cleanup_and_exit;}\
    b|=((ulg)c)<<k;k+=8;}}
# endif
#endif

/* DUMPBITS(n): drop the low n (consumed) bits from the bit buffer. */
#define DUMPBITS(n) {b>>=(n);k-=(n);}
889
 
890
 
891
/*
892
   Huffman code decoding is performed using a multi-level table lookup.
893
   The fastest way to decode is to simply build a lookup table whose
894
   size is determined by the longest code.  However, the time it takes
895
   to build this table can also be a factor if the data being decoded
896
   are not very long.  The most common codes are necessarily the
897
   shortest codes, so those codes dominate the decoding time, and hence
898
   the speed.  The idea is you can have a shorter table that decodes the
899
   shorter, more probable codes, and then point to subsidiary tables for
900
   the longer codes.  The time it costs to decode the longer codes is
901
   then traded against the time it takes to make longer tables.
902
 
903
   This results of this trade are in the variables lbits and dbits
904
   below.  lbits is the number of bits the first level table for literal/
905
   length codes can decode in one step, and dbits is the same thing for
906
   the distance codes.  Subsequent tables are also less than or equal to
907
   those sizes.  These values may be adjusted either when all of the
908
   codes are shorter than that, in which case the longest code length in
909
   bits is used, or when the shortest code is *longer* than the requested
910
   table size, in which case the length of the shortest code in bits is
911
   used.
912
 
913
   There are two different values for the two tables, since they code a
914
   different number of possibilities each.  The literal/length table
915
   codes 286 possible values, or in a flat code, a little over eight
916
   bits.  The distance table codes 30 possible values, or a little less
917
   than five bits, flat.  The optimum values for speed end up being
918
   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
919
   The optimum values may differ though from machine to machine, and
920
   possibly even between compilers.  Your mileage may vary.
921
 */
922
 
923
 
924
/* bits in base literal/length lookup table */
925
static ZCONST unsigned lbits = 9;
926
/* bits in base distance lookup table */
927
static ZCONST unsigned dbits = 6;
928
 
929
 
930
#ifndef ASM_INFLATECODES
931
 
932
int inflate_codes(__G__ tl, td, bl, bd)
933
     __GDEF
934
struct huft *tl, *td;   /* literal/length and distance decoder tables */
935
unsigned bl, bd;        /* number of bits decoded by tl[] and td[] */
936
/* inflate (decompress) the codes in a deflated (compressed) block.
937
   Return an error code or zero if it all goes ok. */
938
{
939
  register unsigned e;  /* table entry flag/number of extra bits */
940
  unsigned d;           /* index for copy */
941
  UINT_D64 n;           /* length for copy (deflate64: might be 64k+2) */
942
  UINT_D64 w;           /* current window position (deflate64: up to 64k) */
943
  struct huft *t;       /* pointer to table entry */
944
  unsigned ml, md;      /* masks for bl and bd bits */
945
  register ulg b;       /* bit buffer */
946
  register unsigned k;  /* number of bits in bit buffer */
947
  int retval = 0;       /* error code returned: initialized to "no error" */
948
 
949
 
950
  /* make local copies of globals */
951
  b = G.bb;                       /* initialize bit buffer */
952
  k = G.bk;
953
  w = G.wp;                       /* initialize window position */
954
 
955
 
956
  /* inflate the coded data */
957
  ml = mask_bits[bl];           /* precompute masks for speed */
958
  md = mask_bits[bd];
959
  while (1)                     /* do until end of block */
960
  {
961
    NEEDBITS(bl)
962
    t = tl + ((unsigned)b & ml);
963
    while (1) {
964
      DUMPBITS(t->b)
965
 
966
      if ((e = t->e) == 32)     /* then it's a literal */
967
      {
968
        redirSlide[w++] = (uch)t->v.n;
969
        if (w == wsize)
970
        {
971
          if ((retval = FLUSH(w)) != 0) goto cleanup_and_exit;
972
          w = 0;
973
        }
974
        break;
975
      }
976
 
977
      if (e < 31)               /* then it's a length */
978
      {
979
        /* get length of block to copy */
980
        NEEDBITS(e)
981
        n = t->v.n + ((unsigned)b & mask_bits[e]);
982
        DUMPBITS(e)
983
 
984
        /* decode distance of block to copy */
985
        NEEDBITS(bd)
986
        t = td + ((unsigned)b & md);
987
        while (1) {
988
          DUMPBITS(t->b)
989
          if ((e = t->e) < 32)
990
            break;
991
          if (IS_INVALID_CODE(e))
992
            return 1;
993
          e &= 31;
994
          NEEDBITS(e)
995
          t = t->v.t + ((unsigned)b & mask_bits[e]);
996
        }
997
        NEEDBITS(e)
998
        d = (unsigned)w - t->v.n - ((unsigned)b & mask_bits[e]);
999
        DUMPBITS(e)
1000
 
1001
        /* do the copy */
1002
        do {
1003
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
1004
          if (G.redirect_slide) {
1005
            /* &= w/ wsize unnecessary & wrong if redirect */
1006
            if ((UINT_D64)d >= wsize)
1007
              return 1;         /* invalid compressed data */
1008
            e = (unsigned)(wsize - (d > (unsigned)w ? (UINT_D64)d : w));
1009
          }
1010
          else
1011
#endif
1012
            e = (unsigned)(wsize -
1013
                           ((d &= (unsigned)(wsize-1)) > (unsigned)w ?
1014
                            (UINT_D64)d : w));
1015
          if ((UINT_D64)e > n) e = (unsigned)n;
1016
          n -= e;
1017
#ifndef NOMEMCPY
1018
          if ((unsigned)w - d >= e)
1019
          /* (this test assumes unsigned comparison) */
1020
          {
1021
            memcpy(redirSlide + (unsigned)w, redirSlide + d, e);
1022
            w += e;
1023
            d += e;
1024
          }
1025
          else                  /* do it slowly to avoid memcpy() overlap */
1026
#endif /* !NOMEMCPY */
1027
            do {
1028
              redirSlide[w++] = redirSlide[d++];
1029
            } while (--e);
1030
          if (w == wsize)
1031
          {
1032
            if ((retval = FLUSH(w)) != 0) goto cleanup_and_exit;
1033
            w = 0;
1034
          }
1035
        } while (n);
1036
        break;
1037
      }
1038
 
1039
      if (e == 31)              /* it's the EOB signal */
1040
      {
1041
        /* sorry for this goto, but we have to exit two loops at once */
1042
        goto cleanup_decode;
1043
      }
1044
 
1045
      if (IS_INVALID_CODE(e))
1046
        return 1;
1047
 
1048
      e &= 31;
1049
      NEEDBITS(e)
1050
      t = t->v.t + ((unsigned)b & mask_bits[e]);
1051
    }
1052
  }
1053
cleanup_decode:
1054
 
1055
  /* restore the globals from the locals */
1056
  G.wp = (unsigned)w;             /* restore global window pointer */
1057
  G.bb = b;                       /* restore global bit buffer */
1058
  G.bk = k;
1059
 
1060
 
1061
cleanup_and_exit:
1062
  /* done */
1063
  return retval;
1064
}
1065
 
1066
#endif /* ASM_INFLATECODES */
1067
 
1068
 
1069
 
1070
static int inflate_stored(__G)
1071
     __GDEF
1072
/* "decompress" an inflated type 0 (stored) block. */
1073
{
1074
  UINT_D64 w;           /* current window position (deflate64: up to 64k!) */
1075
  unsigned n;           /* number of bytes in block */
1076
  register ulg b;       /* bit buffer */
1077
  register unsigned k;  /* number of bits in bit buffer */
1078
  int retval = 0;       /* error code returned: initialized to "no error" */
1079
 
1080
 
1081
  /* make local copies of globals */
1082
  Trace((stderr, "\nstored block"));
1083
  b = G.bb;                       /* initialize bit buffer */
1084
  k = G.bk;
1085
  w = G.wp;                       /* initialize window position */
1086
 
1087
 
1088
  /* go to byte boundary */
1089
  n = k & 7;
1090
  DUMPBITS(n);
1091
 
1092
 
1093
  /* get the length and its complement */
1094
  NEEDBITS(16)
1095
  n = ((unsigned)b & 0xffff);
1096
  DUMPBITS(16)
1097
  NEEDBITS(16)
1098
  if (n != (unsigned)((~b) & 0xffff))
1099
    return 1;                   /* error in compressed data */
1100
  DUMPBITS(16)
1101
 
1102
 
1103
  /* read and output the compressed data */
1104
  while (n--)
1105
  {
1106
    NEEDBITS(8)
1107
    redirSlide[w++] = (uch)b;
1108
    if (w == wsize)
1109
    {
1110
      if ((retval = FLUSH(w)) != 0) goto cleanup_and_exit;
1111
      w = 0;
1112
    }
1113
    DUMPBITS(8)
1114
  }
1115
 
1116
 
1117
  /* restore the globals from the locals */
1118
  G.wp = (unsigned)w;             /* restore global window pointer */
1119
  G.bb = b;                       /* restore global bit buffer */
1120
  G.bk = k;
1121
 
1122
cleanup_and_exit:
1123
  return retval;
1124
}
1125
 
1126
 
1127
/* Globals for literal tables (built once) */
1128
/* Moved to globals.h                      */
1129
#if 0
1130
struct huft *fixed_tl = (struct huft *)NULL;
1131
struct huft *fixed_td;
1132
int fixed_bl, fixed_bd;
1133
#endif
1134
 
1135
static int inflate_fixed(__G)
1136
     __GDEF
1137
/* decompress an inflated type 1 (fixed Huffman codes) block.  We should
1138
   either replace this with a custom decoder, or at least precompute the
1139
   Huffman tables. */
1140
{
1141
  /* if first time, set up tables for fixed blocks */
1142
  Trace((stderr, "\nliteral block"));
1143
  if (G.fixed_tl == (struct huft *)NULL)
1144
  {
1145
    int i;                /* temporary variable */
1146
    unsigned l[288];      /* length list for huft_build */
1147
 
1148
    /* literal table */
1149
    for (i = 0; i < 144; i++)
1150
      l[i] = 8;
1151
    for (; i < 256; i++)
1152
      l[i] = 9;
1153
    for (; i < 280; i++)
1154
      l[i] = 7;
1155
    for (; i < 288; i++)          /* make a complete, but wrong code set */
1156
      l[i] = 8;
1157
    G.fixed_bl = 7;
1158
#ifdef USE_DEFLATE64
1159
    if ((i = huft_build(__G__ l, 288, 257, G.cplens, G.cplext,
1160
                        &G.fixed_tl, &G.fixed_bl)) != 0)
1161
#else
1162
    if ((i = huft_build(__G__ l, 288, 257, cplens, cplext,
1163
                        &G.fixed_tl, &G.fixed_bl)) != 0)
1164
#endif
1165
    {
1166
      G.fixed_tl = (struct huft *)NULL;
1167
      return i;
1168
    }
1169
 
1170
    /* distance table */
1171
    for (i = 0; i < MAXDISTS; i++)      /* make an incomplete code set */
1172
      l[i] = 5;
1173
    G.fixed_bd = 5;
1174
#ifdef USE_DEFLATE64
1175
    if ((i = huft_build(__G__ l, MAXDISTS, 0, cpdist, G.cpdext,
1176
                        &G.fixed_td, &G.fixed_bd)) > 1)
1177
#else
1178
    if ((i = huft_build(__G__ l, MAXDISTS, 0, cpdist, cpdext,
1179
                        &G.fixed_td, &G.fixed_bd)) > 1)
1180
#endif
1181
    {
1182
      huft_free(G.fixed_tl);
1183
      G.fixed_td = G.fixed_tl = (struct huft *)NULL;
1184
      return i;
1185
    }
1186
  }
1187
 
1188
  /* decompress until an end-of-block code */
1189
  return inflate_codes(__G__ G.fixed_tl, G.fixed_td,
1190
                             G.fixed_bl, G.fixed_bd);
1191
}
1192
 
1193
 
1194
 
1195
static int inflate_dynamic(__G)
1196
  __GDEF
1197
/* decompress an inflated type 2 (dynamic Huffman codes) block. */
1198
{
1199
  unsigned i;           /* temporary variables */
1200
  unsigned j;
1201
  unsigned l;           /* last length */
1202
  unsigned m;           /* mask for bit lengths table */
1203
  unsigned n;           /* number of lengths to get */
1204
  struct huft *tl = (struct huft *)NULL; /* literal/length code table */
1205
  struct huft *td = (struct huft *)NULL; /* distance code table */
1206
  struct huft *th;      /* temp huft table pointer used in tables decoding */
1207
  unsigned bl;          /* lookup bits for tl */
1208
  unsigned bd;          /* lookup bits for td */
1209
  unsigned nb;          /* number of bit length codes */
1210
  unsigned nl;          /* number of literal/length codes */
1211
  unsigned nd;          /* number of distance codes */
1212
  unsigned ll[MAXLITLENS+MAXDISTS]; /* lit./length and distance code lengths */
1213
  register ulg b;       /* bit buffer */
1214
  register unsigned k;  /* number of bits in bit buffer */
1215
  int retval = 0;       /* error code returned: initialized to "no error" */
1216
 
1217
 
1218
  /* make local bit buffer */
1219
  Trace((stderr, "\ndynamic block"));
1220
  b = G.bb;
1221
  k = G.bk;
1222
 
1223
 
1224
  /* read in table lengths */
1225
  NEEDBITS(5)
1226
  nl = 257 + ((unsigned)b & 0x1f);      /* number of literal/length codes */
1227
  DUMPBITS(5)
1228
  NEEDBITS(5)
1229
  nd = 1 + ((unsigned)b & 0x1f);        /* number of distance codes */
1230
  DUMPBITS(5)
1231
  NEEDBITS(4)
1232
  nb = 4 + ((unsigned)b & 0xf);         /* number of bit length codes */
1233
  DUMPBITS(4)
1234
  if (nl > MAXLITLENS || nd > MAXDISTS)
1235
    return 1;                   /* bad lengths */
1236
 
1237
 
1238
  /* read in bit-length-code lengths */
1239
  for (j = 0; j < nb; j++)
1240
  {
1241
    NEEDBITS(3)
1242
    ll[border[j]] = (unsigned)b & 7;
1243
    DUMPBITS(3)
1244
  }
1245
  for (; j < 19; j++)
1246
    ll[border[j]] = 0;
1247
 
1248
 
1249
  /* build decoding table for trees--single level, 7 bit lookup */
1250
  bl = 7;
1251
  retval = huft_build(__G__ ll, 19, 19, NULL, NULL, &tl, &bl);
1252
  if (bl == 0)                  /* no bit lengths */
1253
    retval = 1;
1254
  if (retval)
1255
  {
1256
    if (retval == 1)
1257
      huft_free(tl);
1258
    return retval;              /* incomplete code set */
1259
  }
1260
 
1261
 
1262
  /* read in literal and distance code lengths */
1263
  n = nl + nd;
1264
  m = mask_bits[bl];
1265
  i = l = 0;
1266
  while (i < n)
1267
  {
1268
    NEEDBITS(bl)
1269
    j = (th = tl + ((unsigned)b & m))->b;
1270
    DUMPBITS(j)
1271
    j = th->v.n;
1272
    if (j < 16)                 /* length of code in bits (0..15) */
1273
      ll[i++] = l = j;          /* save last length in l */
1274
    else if (j == 16)           /* repeat last length 3 to 6 times */
1275
    {
1276
      NEEDBITS(2)
1277
      j = 3 + ((unsigned)b & 3);
1278
      DUMPBITS(2)
1279
      if ((unsigned)i + j > n) {
1280
        huft_free(tl);
1281
        return 1;
1282
      }
1283
      while (j--)
1284
        ll[i++] = l;
1285
    }
1286
    else if (j == 17)           /* 3 to 10 zero length codes */
1287
    {
1288
      NEEDBITS(3)
1289
      j = 3 + ((unsigned)b & 7);
1290
      DUMPBITS(3)
1291
      if ((unsigned)i + j > n) {
1292
        huft_free(tl);
1293
        return 1;
1294
      }
1295
      while (j--)
1296
        ll[i++] = 0;
1297
      l = 0;
1298
    }
1299
    else                        /* j == 18: 11 to 138 zero length codes */
1300
    {
1301
      NEEDBITS(7)
1302
      j = 11 + ((unsigned)b & 0x7f);
1303
      DUMPBITS(7)
1304
      if ((unsigned)i + j > n) {
1305
        huft_free(tl);
1306
        return 1;
1307
      }
1308
      while (j--)
1309
        ll[i++] = 0;
1310
      l = 0;
1311
    }
1312
  }
1313
 
1314
 
1315
  /* free decoding table for trees */
1316
  huft_free(tl);
1317
 
1318
 
1319
  /* restore the global bit buffer */
1320
  G.bb = b;
1321
  G.bk = k;
1322
 
1323
 
1324
  /* build the decoding tables for literal/length and distance codes */
1325
  bl = lbits;
1326
#ifdef USE_DEFLATE64
1327
  retval = huft_build(__G__ ll, nl, 257, G.cplens, G.cplext, &tl, &bl);
1328
#else
1329
  retval = huft_build(__G__ ll, nl, 257, cplens, cplext, &tl, &bl);
1330
#endif
1331
  if (bl == 0)                  /* no literals or lengths */
1332
    retval = 1;
1333
  if (retval)
1334
  {
1335
    if (retval == 1) {
1336
      if (!uO.qflag)
1337
        MESSAGE((uch *)"(incomplete l-tree)  ", 21L, 1);
1338
      huft_free(tl);
1339
    }
1340
    return retval;              /* incomplete code set */
1341
  }
1342
#ifdef FIX_PAST_EOB_BY_TABLEADJUST
1343
  /* Adjust the requested distance base table size so that a distance code
1344
     fetch never tries to get bits behind an immediatly following end-of-block
1345
     code. */
1346
  bd = (dbits <= bl+1 ? dbits : bl+1);
1347
#else
1348
  bd = dbits;
1349
#endif
1350
#ifdef USE_DEFLATE64
1351
  retval = huft_build(__G__ ll + nl, nd, 0, cpdist, G.cpdext, &td, &bd);
1352
#else
1353
  retval = huft_build(__G__ ll + nl, nd, 0, cpdist, cpdext, &td, &bd);
1354
#endif
1355
#ifdef PKZIP_BUG_WORKAROUND
1356
  if (retval == 1)
1357
    retval = 0;
1358
#endif
1359
  if (bd == 0 && nl > 257)    /* lengths but no distances */
1360
    retval = 1;
1361
  if (retval)
1362
  {
1363
    if (retval == 1) {
1364
      if (!uO.qflag)
1365
        MESSAGE((uch *)"(incomplete d-tree)  ", 21L, 1);
1366
      huft_free(td);
1367
    }
1368
    huft_free(tl);
1369
    return retval;
1370
  }
1371
 
1372
  /* decompress until an end-of-block code */
1373
  retval = inflate_codes(__G__ tl, td, bl, bd);
1374
 
1375
cleanup_and_exit:
1376
  /* free the decoding tables, return */
1377
  if (tl != (struct huft *)NULL)
1378
    huft_free(tl);
1379
  if (td != (struct huft *)NULL)
1380
    huft_free(td);
1381
  return retval;
1382
}
1383
 
1384
 
1385
 
1386
static int inflate_block(__G__ e)
1387
  __GDEF
1388
  int *e;               /* last block flag */
1389
/* decompress an inflated block */
1390
{
1391
  unsigned t;           /* block type */
1392
  register ulg b;       /* bit buffer */
1393
  register unsigned k;  /* number of bits in bit buffer */
1394
  int retval = 0;       /* error code returned: initialized to "no error" */
1395
 
1396
 
1397
  /* make local bit buffer */
1398
  b = G.bb;
1399
  k = G.bk;
1400
 
1401
 
1402
  /* read in last block bit */
1403
  NEEDBITS(1)
1404
  *e = (int)b & 1;
1405
  DUMPBITS(1)
1406
 
1407
 
1408
  /* read in block type */
1409
  NEEDBITS(2)
1410
  t = (unsigned)b & 3;
1411
  DUMPBITS(2)
1412
 
1413
 
1414
  /* restore the global bit buffer */
1415
  G.bb = b;
1416
  G.bk = k;
1417
 
1418
 
1419
  /* inflate that block type */
1420
  if (t == 2)
1421
    return inflate_dynamic(__G);
1422
  if (t == 0)
1423
    return inflate_stored(__G);
1424
  if (t == 1)
1425
    return inflate_fixed(__G);
1426
 
1427
 
1428
  /* bad block type */
1429
  retval = 2;
1430
 
1431
cleanup_and_exit:
1432
  return retval;
1433
}
1434
 
1435
 
1436
 
1437
int inflate(__G__ is_defl64)
1438
    __GDEF
1439
    int is_defl64;
1440
/* decompress an inflated entry */
1441
{
1442
  int e;                /* last block flag */
1443
  int r;                /* result code */
1444
#ifdef DEBUG
1445
  unsigned h = 0;       /* maximum struct huft's malloc'ed */
1446
#endif
1447
 
1448
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
1449
  if (G.redirect_slide)
1450
    wsize = G.redirect_size, redirSlide = G.redirect_buffer;
1451
  else
1452
    wsize = WSIZE, redirSlide = slide;   /* how they're #defined if !DLL */
1453
#endif
1454
 
1455
  /* initialize window, bit buffer */
1456
  G.wp = 0;
1457
  G.bk = 0;
1458
  G.bb = 0;
1459
 
1460
#ifdef USE_DEFLATE64
1461
  if (is_defl64) {
1462
    G.cplens = cplens64;
1463
    G.cplext = cplext64;
1464
    G.cpdext = cpdext64;
1465
    G.fixed_tl = G.fixed_tl64;
1466
    G.fixed_bl = G.fixed_bl64;
1467
    G.fixed_td = G.fixed_td64;
1468
    G.fixed_bd = G.fixed_bd64;
1469
  } else {
1470
    G.cplens = cplens32;
1471
    G.cplext = cplext32;
1472
    G.cpdext = cpdext32;
1473
    G.fixed_tl = G.fixed_tl32;
1474
    G.fixed_bl = G.fixed_bl32;
1475
    G.fixed_td = G.fixed_td32;
1476
    G.fixed_bd = G.fixed_bd32;
1477
  }
1478
#else /* !USE_DEFLATE64 */
1479
  if (is_defl64) {
1480
    /* This should not happen unless UnZip is built from object files
1481
     * compiled with inconsistent option setting.  Handle this by
1482
     * returning with "bad input" error code.
1483
     */
1484
    Trace((stderr, "\nThis inflate() cannot handle Deflate64!\n"));
1485
    return 2;
1486
  }
1487
#endif /* ?USE_DEFLATE64 */
1488
 
1489
  /* decompress until the last block */
1490
  do {
1491
#ifdef DEBUG
1492
    G.hufts = 0;
1493
#endif
1494
    if ((r = inflate_block(__G__ &e)) != 0)
1495
      return r;
1496
#ifdef DEBUG
1497
    if (G.hufts > h)
1498
      h = G.hufts;
1499
#endif
1500
  } while (!e);
1501
 
1502
  Trace((stderr, "\n%u bytes in Huffman tables (%u/entry)\n",
1503
         h * (unsigned)sizeof(struct huft), (unsigned)sizeof(struct huft)));
1504
 
1505
#ifdef USE_DEFLATE64
1506
  if (is_defl64) {
1507
    G.fixed_tl64 = G.fixed_tl;
1508
    G.fixed_bl64 = G.fixed_bl;
1509
    G.fixed_td64 = G.fixed_td;
1510
    G.fixed_bd64 = G.fixed_bd;
1511
  } else {
1512
    G.fixed_tl32 = G.fixed_tl;
1513
    G.fixed_bl32 = G.fixed_bl;
1514
    G.fixed_td32 = G.fixed_td;
1515
    G.fixed_bd32 = G.fixed_bd;
1516
  }
1517
#endif
1518
 
1519
  /* flush out redirSlide and return (success, unless final FLUSH failed) */
1520
  return (FLUSH(G.wp));
1521
}
1522
 
1523
 
1524
 
1525
int inflate_free(__G)
1526
    __GDEF
1527
{
1528
  if (G.fixed_tl != (struct huft *)NULL)
1529
  {
1530
    huft_free(G.fixed_td);
1531
    huft_free(G.fixed_tl);
1532
    G.fixed_td = G.fixed_tl = (struct huft *)NULL;
1533
  }
1534
  return 0;
1535
}
1536
 
1537
#endif /* ?USE_ZLIB */
1538
 
1539
 
1540
/*
1541
 * GRR:  moved huft_build() and huft_free() down here; used by explode()
1542
 *       and fUnZip regardless of whether USE_ZLIB defined or not
1543
 */
1544
 
1545
 
1546
/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
1547
#define BMAX 16         /* maximum bit length of any code (16 for explode) */
1548
#define N_MAX 288       /* maximum number of codes in any set */
1549
 
1550
 
1551
int huft_build(__G__ b, n, s, d, e, t, m)
  __GDEF
  ZCONST unsigned *b;   /* code lengths in bits (all assumed <= BMAX) */
  unsigned n;           /* number of codes (assumed <= N_MAX) */
  unsigned s;           /* number of simple-valued codes (0..s-1) */
  ZCONST ush *d;        /* list of base values for non-simple codes */
  ZCONST uch *e;        /* list of extra bits for non-simple codes */
  struct huft **t;      /* result: starting table */
  unsigned *m;          /* maximum lookup bits, returns actual */
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return zero on success, one if
   the given code set is incomplete (the tables are still built in this
   case), two if the input is invalid (all zero length codes or an
   oversubscribed set of lengths), and three if not enough memory.
   The code with value 256 is special, and the tables are constructed
   so that no bits beyond that code are fetched when that code is
   decoded. */
{
  unsigned a;                   /* counter for codes of length k */
  unsigned c[BMAX+1];           /* bit length count table */
  unsigned el;                  /* length of EOB code (value 256) */
  unsigned f;                   /* i repeats in table every f entries */
  int g;                        /* maximum code length */
  int h;                        /* table level */
  register unsigned i;          /* counter, current code */
  register unsigned j;          /* counter */
  register int k;               /* number of bits in current code */
  int lx[BMAX+1];               /* memory for l[-1..BMAX-1] */
  int *l = lx+1;                /* stack of bits per table; offset by 1 so
                                   l[-1] is a valid slot for level -1 */
  register unsigned *p;         /* pointer into c[], b[], or v[] */
  register struct huft *q;      /* points to current table */
  struct huft r;                /* table entry for structure assignment */
  struct huft *u[BMAX];         /* table stack */
  unsigned v[N_MAX];            /* values in order of bit length */
  register int w;               /* bits before this table == (l * h) */
  unsigned x[BMAX+1];           /* bit offsets, then code stack */
  unsigned *xp;                 /* pointer into x */
  int y;                        /* number of dummy codes added */
  unsigned z;                   /* number of entries in current table */


  /* Generate counts for each bit length */
  el = n > 256 ? b[256] : BMAX; /* set length of EOB code, if any */
  memzero((char *)c, sizeof(c));
  p = (unsigned *)b;  i = n;
  do {
    c[*p]++; p++;               /* assume all entries <= BMAX */
  } while (--i);
  if (c[0] == n)                /* null input--all zero length codes */
  {
    *t = (struct huft *)NULL;
    *m = 0;
    return 0;
  }


  /* Find minimum and maximum length, bound *m by those */
  for (j = 1; j <= BMAX; j++)
    if (c[j])
      break;
  k = j;                        /* minimum code length */
  if (*m < j)
    *m = j;
  for (i = BMAX; i; i--)
    if (c[i])
      break;
  g = i;                        /* maximum code length */
  if (*m > i)
    *m = i;


  /* Adjust last length count to fill out codes, if needed */
  for (y = 1 << j; j < i; j++, y <<= 1)
    if ((y -= c[j]) < 0)
      return 2;                 /* bad input: more codes than bits */
  if ((y -= c[i]) < 0)
    return 2;
  c[i] += y;                    /* y > 0 here means the code is incomplete */


  /* Generate starting offsets into the value table for each length */
  x[1] = j = 0;
  p = c + 1;  xp = x + 2;
  while (--i) {                 /* note that i == g from above */
    *xp++ = (j += *p++);
  }


  /* Make a table of values in order of bit lengths */
  memzero((char *)v, sizeof(v));
  p = (unsigned *)b;  i = 0;
  do {
    if ((j = *p++) != 0)
      v[x[j]++] = i;
  } while (++i < n);
  n = x[g];                     /* set n to length of v */


  /* Generate the Huffman codes and for each, make the table entries */
  x[0] = i = 0;                 /* first Huffman code is zero */
  p = v;                        /* grab values in bit order */
  h = -1;                       /* no tables yet--level -1 */
  w = l[-1] = 0;                /* no bits decoded yet */
  u[0] = (struct huft *)NULL;   /* just to keep compilers happy */
  q = (struct huft *)NULL;      /* ditto */
  z = 0;                        /* ditto */

  /* go through the bit lengths (k already is bits in shortest code) */
  for (; k <= g; k++)
  {
    a = c[k];
    while (a--)
    {
      /* here i is the Huffman code of length k bits for value *p */
      /* make tables up to required level */
      while (k > w + l[h])
      {
        w += l[h++];            /* add bits already decoded */

        /* compute minimum size table less than or equal to *m bits */
        z = (z = g - w) > *m ? *m : z;                  /* upper limit */
        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
        {                       /* too few codes for k-w bit table */
          f -= a + 1;           /* deduct codes from patterns left */
          xp = c + k;
          while (++j < z)       /* try smaller tables up to z bits */
          {
            if ((f <<= 1) <= *++xp)
              break;            /* enough codes to use up j bits */
            f -= *xp;           /* else deduct codes from patterns */
          }
        }
        if ((unsigned)w + j > el && (unsigned)w < el)
          j = el - w;           /* make EOB code end at table */
        z = 1 << j;             /* table entries for j-bit table */
        l[h] = j;               /* set table size in stack */

        /* allocate and link in new table; one extra leading entry holds
           the chain link that huft_free() walks */
        if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
            (struct huft *)NULL)
        {
          if (h)
            huft_free(u[0]);
          return 3;             /* not enough memory */
        }
#ifdef DEBUG
        G.hufts += z + 1;         /* track memory usage */
#endif
        *t = q + 1;             /* link to list for huft_free() */
        *(t = &(q->v.t)) = (struct huft *)NULL;
        u[h] = ++q;             /* table starts after link */

        /* connect to last table, if there is one */
        if (h)
        {
          x[h] = i;             /* save pattern for backing up */
          r.b = (uch)l[h-1];    /* bits to dump before this table */
          r.e = (uch)(32 + j);  /* bits in this table */
          r.v.t = q;            /* pointer to this table */
          j = (i & ((1 << w) - 1)) >> (w - l[h-1]);
          u[h-1][j] = r;        /* connect to last table */
        }
      }

      /* set up table entry in r */
      r.b = (uch)(k - w);
      if (p >= v + n)
        r.e = INVALID_CODE;     /* out of values--invalid code */
      else if (*p < s)
      {
        r.e = (uch)(*p < 256 ? 32 : 31);  /* 256 is end-of-block code */
        r.v.n = (ush)*p++;                /* simple code is just the value */
      }
      else
      {
        r.e = e[*p - s];        /* non-simple--look up in lists */
        r.v.n = d[*p++ - s];
      }

      /* fill code-like entries with r */
      f = 1 << (k - w);
      for (j = i >> w; j < z; j += f)
        q[j] = r;

      /* backwards increment the k-bit code i */
      for (j = 1 << (k - 1); i & j; j >>= 1)
        i ^= j;
      i ^= j;

      /* backup over finished tables */
      while ((i & ((1 << w) - 1)) != x[h])
        w -= l[--h];            /* don't need to update q */
    }
  }


  /* return actual size of base table */
  *m = l[0];


  /* Return true (1) if we were given an incomplete table */
  return y != 0 && g != 1;
}
1754
 
1755
 
1756
 
1757
int huft_free(t)
1758
struct huft *t;         /* table to free */
1759
/* Free the malloc'ed tables built by huft_build(), which makes a linked
1760
   list of the tables it made, with the links in a dummy first entry of
1761
   each table. */
1762
{
1763
  register struct huft *p, *q;
1764
 
1765
 
1766
  /* Go through linked list, freeing from the malloced (t[-1]) address. */
1767
  p = t;
1768
  while (p != (struct huft *)NULL)
1769
  {
1770
    q = (--p)->v.t;
1771
    free((zvoid *)p);
1772
    p = q;
1773
  }
1774
  return 0;
1775
}