debuggers.hg

annotate xen/common/unlzma.c @ 22848:6341fe0f4e5a

Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author Keir Fraser <keir@xen.org>
date Tue Jan 25 14:06:55 2011 +0000 (2011-01-25)
parents 0d7fb1ab92f4
children
rev   line source
keir@20447 1 /* Lzma decompressor for Linux kernel. Shamelessly snarfed
keir@20447 2 * from busybox 1.1.1
keir@20447 3 *
keir@20447 4 * Linux kernel adaptation
keir@20447 5 * Copyright (C) 2006 Alain < alain@knaff.lu >
keir@20447 6 *
keir@20447 7 * Based on small lzma deflate implementation/Small range coder
keir@20447 8 * implementation for lzma.
keir@20447 9 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
keir@20447 10 *
keir@20447 11 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
keir@20447 12 * Copyright (C) 1999-2005 Igor Pavlov
keir@20447 13 *
keir@20447 14 * Copyrights of the parts, see headers below.
keir@20447 15 *
keir@20447 16 *
keir@20447 17 * This program is free software; you can redistribute it and/or
keir@20447 18 * modify it under the terms of the GNU Lesser General Public
keir@20447 19 * License as published by the Free Software Foundation; either
keir@20447 20 * version 2.1 of the License, or (at your option) any later version.
keir@20447 21 *
keir@20447 22 * This program is distributed in the hope that it will be useful,
keir@20447 23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
keir@20447 24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
keir@20447 25 * Lesser General Public License for more details.
keir@20447 26 *
keir@20447 27 * You should have received a copy of the GNU Lesser General Public
keir@20447 28 * License along with this library; if not, write to the Free Software
keir@20447 29 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
keir@20447 30 */
keir@20447 31
keir@20447 32 #include "decompress.h"
keir@20447 33
keir@20447 34 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
keir@20447 35
keir@20447 36 static long long INIT read_int(unsigned char *ptr, int size)
keir@20447 37 {
keir@20447 38 int i;
keir@20447 39 long long ret = 0;
keir@20447 40
keir@20447 41 for (i = 0; i < size; i++)
keir@20447 42 ret = (ret << 8) | ptr[size-i-1];
keir@20447 43 return ret;
keir@20447 44 }
keir@20447 45
keir@20447 46 #define ENDIAN_CONVERT(x) \
keir@20447 47 x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
keir@20447 48
keir@20447 49
keir@20447 50 /* Small range coder implementation for lzma.
keir@20447 51 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
keir@20447 52 *
keir@20447 53 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
keir@20447 54 * Copyright (c) 1999-2005 Igor Pavlov
keir@20447 55 */
keir@20447 56
keir@20447 57 #include <xen/compiler.h>
keir@20447 58
keir@20447 59 #define LZMA_IOBUF_SIZE 0x10000
keir@20447 60
/* Range decoder state together with its buffered input stream. */
struct rc {
	int (*fill)(void*, unsigned int); /* input refill callback (nofill if none) */
	uint8_t *ptr;        /* next unread byte in the input window */
	uint8_t *buffer;     /* start of the input window */
	uint8_t *buffer_end; /* one past the last valid input byte */
	int buffer_size;     /* bytes currently held in buffer */
	uint32_t code;       /* current range-coder code value */
	uint32_t range;      /* current range width */
	uint32_t bound;      /* threshold computed by rc_is_bit_0_helper() */
};
keir@20447 71
keir@20447 72
keir@20447 73 #define RC_TOP_BITS 24
keir@20447 74 #define RC_MOVE_BITS 5
keir@20447 75 #define RC_MODEL_TOTAL_BITS 11
keir@20447 76
keir@20447 77
keir@20447 78 static int nofill(void *buffer, unsigned int len)
keir@20447 79 {
keir@20447 80 return -1;
keir@20447 81 }
keir@20447 82
keir@20447 83 /* Called twice: once at startup and once in rc_normalize() */
keir@20447 84 static void INIT rc_read(struct rc *rc)
keir@20447 85 {
keir@20447 86 rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
keir@20447 87 if (rc->buffer_size <= 0)
keir@20447 88 error("unexpected EOF");
keir@20447 89 rc->ptr = rc->buffer;
keir@20447 90 rc->buffer_end = rc->buffer + rc->buffer_size;
keir@20447 91 }
keir@20447 92
keir@20447 93 /* Called once */
keir@20447 94 static inline void INIT rc_init(struct rc *rc,
keir@20447 95 int (*fill)(void*, unsigned int),
keir@20447 96 unsigned char *buffer, int buffer_size)
keir@20447 97 {
keir@20447 98 if (fill)
keir@20447 99 rc->fill = fill;
keir@20447 100 else
keir@20447 101 rc->fill = nofill;
keir@20447 102 rc->buffer = (uint8_t *)buffer;
keir@20447 103 rc->buffer_size = buffer_size;
keir@20447 104 rc->buffer_end = rc->buffer + rc->buffer_size;
keir@20447 105 rc->ptr = rc->buffer;
keir@20447 106
keir@20447 107 rc->code = 0;
keir@20447 108 rc->range = 0xFFFFFFFF;
keir@20447 109 }
keir@20447 110
keir@20447 111 static inline void INIT rc_init_code(struct rc *rc)
keir@20447 112 {
keir@20447 113 int i;
keir@20447 114
keir@20447 115 for (i = 0; i < 5; i++) {
keir@20447 116 if (rc->ptr >= rc->buffer_end)
keir@20447 117 rc_read(rc);
keir@20447 118 rc->code = (rc->code << 8) | *rc->ptr++;
keir@20447 119 }
keir@20447 120 }
keir@20447 121
keir@20447 122
keir@20447 123 /* Called once. TODO: bb_maybe_free() */
/*
 * Release the range coder's input buffer.  Called once at teardown.
 * NOTE(review): only valid when the buffer was allocated by us rather
 * than handed in by the caller.  TODO: bb_maybe_free()
 */
static inline void INIT rc_free(struct rc *rc)
{
	free(rc->buffer);
}
keir@20447 128
keir@20447 129 /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
/*
 * Range normalisation: when the range shrinks below 2^RC_TOP_BITS,
 * shift in another input byte to restore the coder's precision.
 * rc_do_normalize() is deliberately out of line; the cheap range test
 * in rc_normalize() is inlined at both call sites.
 */
static void INIT rc_do_normalize(struct rc *rc)
{
	/* Refill must happen before the byte is consumed below. */
	if (rc->ptr >= rc->buffer_end)
		rc_read(rc);
	rc->range <<= 8;
	rc->code = (rc->code << 8) | *rc->ptr++;
}

/* Normalize only when needed — called before every bit decode. */
static inline void INIT rc_normalize(struct rc *rc)
{
	if (rc->range < (1 << RC_TOP_BITS))
		rc_do_normalize(rc);
}
keir@20447 142
keir@20447 143 /* Called 9 times */
keir@20447 144 /* Why rc_is_bit_0_helper exists?
keir@20447 145 *Because we want to always expose (rc->code < rc->bound) to optimizer
keir@20447 146 */
keir@20447 147 static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
keir@20447 148 {
keir@20447 149 rc_normalize(rc);
keir@20447 150 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
keir@20447 151 return rc->bound;
keir@20447 152 }
keir@20447 153 static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
keir@20447 154 {
keir@20447 155 uint32_t t = rc_is_bit_0_helper(rc, p);
keir@20447 156 return rc->code < t;
keir@20447 157 }
keir@20447 158
keir@20447 159 /* Called ~10 times, but very small, thus inlined */
keir@20447 160 static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
keir@20447 161 {
keir@20447 162 rc->range = rc->bound;
keir@20447 163 *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
keir@20447 164 }
keir@20447 165 static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
keir@20447 166 {
keir@20447 167 rc->range -= rc->bound;
keir@20447 168 rc->code -= rc->bound;
keir@20447 169 *p -= *p >> RC_MOVE_BITS;
keir@20447 170 }
keir@20447 171
keir@20447 172 /* Called 4 times in unlzma loop */
keir@20447 173 static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
keir@20447 174 {
keir@20447 175 if (rc_is_bit_0(rc, p)) {
keir@20447 176 rc_update_bit_0(rc, p);
keir@20447 177 *symbol *= 2;
keir@20447 178 return 0;
keir@20447 179 } else {
keir@20447 180 rc_update_bit_1(rc, p);
keir@20447 181 *symbol = *symbol * 2 + 1;
keir@20447 182 return 1;
keir@20447 183 }
keir@20447 184 }
keir@20447 185
keir@20447 186 /* Called once */
keir@20447 187 static inline int INIT rc_direct_bit(struct rc *rc)
keir@20447 188 {
keir@20447 189 rc_normalize(rc);
keir@20447 190 rc->range >>= 1;
keir@20447 191 if (rc->code >= rc->range) {
keir@20447 192 rc->code -= rc->range;
keir@20447 193 return 1;
keir@20447 194 }
keir@20447 195 return 0;
keir@20447 196 }
keir@20447 197
keir@20447 198 /* Called twice */
keir@20447 199 static inline void INIT
keir@20447 200 rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
keir@20447 201 {
keir@20447 202 int i = num_levels;
keir@20447 203
keir@20447 204 *symbol = 1;
keir@20447 205 while (i--)
keir@20447 206 rc_get_bit(rc, p + *symbol, symbol);
keir@20447 207 *symbol -= 1 << num_levels;
keir@20447 208 }
keir@20447 209
keir@20447 210
keir@20447 211 /*
keir@20447 212 * Small lzma deflate implementation.
keir@20447 213 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
keir@20447 214 *
keir@20447 215 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
keir@20447 216 * Copyright (C) 1999-2005 Igor Pavlov
keir@20447 217 */
keir@20447 218
keir@20447 219
/* Raw 13-byte LZMA file header, read verbatim off the stream. */
struct lzma_header {
	uint8_t pos;        /* packed lc/lp/pb properties byte */
	uint32_t dict_size; /* dictionary size (little-endian on disk) */
	uint64_t dst_size;  /* uncompressed size (little-endian on disk) */
} __attribute__ ((packed)) ;
keir@20447 225
keir@20447 226
keir@20447 227 #define LZMA_BASE_SIZE 1846
keir@20447 228 #define LZMA_LIT_SIZE 768
keir@20447 229
keir@20447 230 #define LZMA_NUM_POS_BITS_MAX 4
keir@20447 231
keir@20447 232 #define LZMA_LEN_NUM_LOW_BITS 3
keir@20447 233 #define LZMA_LEN_NUM_MID_BITS 3
keir@20447 234 #define LZMA_LEN_NUM_HIGH_BITS 8
keir@20447 235
keir@20447 236 #define LZMA_LEN_CHOICE 0
keir@20447 237 #define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
keir@20447 238 #define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
keir@20447 239 #define LZMA_LEN_MID (LZMA_LEN_LOW \
keir@20447 240 + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
keir@20447 241 #define LZMA_LEN_HIGH (LZMA_LEN_MID \
keir@20447 242 +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
keir@20447 243 #define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
keir@20447 244
keir@20447 245 #define LZMA_NUM_STATES 12
keir@20447 246 #define LZMA_NUM_LIT_STATES 7
keir@20447 247
keir@20447 248 #define LZMA_START_POS_MODEL_INDEX 4
keir@20447 249 #define LZMA_END_POS_MODEL_INDEX 14
keir@20447 250 #define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
keir@20447 251
keir@20447 252 #define LZMA_NUM_POS_SLOT_BITS 6
keir@20447 253 #define LZMA_NUM_LEN_TO_POS_STATES 4
keir@20447 254
keir@20447 255 #define LZMA_NUM_ALIGN_BITS 4
keir@20447 256
keir@20447 257 #define LZMA_MATCH_MIN_LEN 2
keir@20447 258
keir@20447 259 #define LZMA_IS_MATCH 0
keir@20447 260 #define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
keir@20447 261 #define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
keir@20447 262 #define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
keir@20447 263 #define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
keir@20447 264 #define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
keir@20447 265 #define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
keir@20447 266 + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
keir@20447 267 #define LZMA_SPEC_POS (LZMA_POS_SLOT \
keir@20447 268 +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
keir@20447 269 #define LZMA_ALIGN (LZMA_SPEC_POS \
keir@20447 270 + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
keir@20447 271 #define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
keir@20447 272 #define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
keir@20447 273 #define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
keir@20447 274
keir@20447 275
/* Output-side state: dictionary window plus optional flush callback. */
struct writer {
	uint8_t *buffer;       /* output window; circular when flush is set */
	uint8_t previous_byte; /* last byte written (literal-coder context) */
	size_t buffer_pos;     /* current write position within buffer */
	int bufsize;           /* size of a locally-allocated buffer */
	size_t global_pos;     /* bytes already flushed out of buffer */
	int(*flush)(void*, unsigned int); /* NULL => one flat output buffer */
	struct lzma_header *header; /* stream header (dict_size, dst_size) */
};
keir@20447 285
/* LZMA decoder state: state-machine index plus the four most recent
 * match distances (rep0 is the active one). */
struct cstate {
	int state;
	uint32_t rep0, rep1, rep2, rep3;
};
keir@20447 290
keir@20447 291 static inline size_t INIT get_pos(struct writer *wr)
keir@20447 292 {
keir@20447 293 return
keir@20447 294 wr->global_pos + wr->buffer_pos;
keir@20447 295 }
keir@20447 296
/*
 * Fetch the byte "offs" positions behind the current write position.
 *
 * With no flush callback the whole output lives in one flat buffer, so
 * offs is first folded into the dictionary range and then subtracted.
 * With a flush callback the buffer is a circular dictionary: the
 * subtraction may wrap below zero (well-defined unsigned wraparound),
 * and adding dict_size back wraps pos into [0, dict_size) again — the
 * "+=" is intentional, not a typo.
 */
static inline uint8_t INIT peek_old_byte(struct writer *wr,
		uint32_t offs)
{
	if (!wr->flush) {
		int32_t pos;
		while (offs > wr->header->dict_size)
			offs -= wr->header->dict_size;
		pos = wr->buffer_pos - offs;
		return wr->buffer[pos];
	} else {
		uint32_t pos = wr->buffer_pos - offs;
		while (pos >= wr->header->dict_size)
			pos += wr->header->dict_size;
		return wr->buffer[pos];
	}

}
keir@20447 314
keir@20447 315 static inline void INIT write_byte(struct writer *wr, uint8_t byte)
keir@20447 316 {
keir@20447 317 wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
keir@20447 318 if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
keir@20447 319 wr->buffer_pos = 0;
keir@20447 320 wr->global_pos += wr->header->dict_size;
keir@20447 321 wr->flush((char *)wr->buffer, wr->header->dict_size);
keir@20447 322 }
keir@20447 323 }
keir@20447 324
keir@20447 325
/* Re-emit the byte found "offs" positions back in the output window. */
static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
{
	write_byte(wr, peek_old_byte(wr, offs));
}
keir@20447 330
/*
 * Copy "len" bytes from "rep0" positions back in the window (a match),
 * one byte at a time so overlapping copies self-reference correctly.
 * NOTE(review): the guard compares buffer_pos (which wraps when a flush
 * callback is used) against dst_size, so it only truly bounds output in
 * the flat-buffer case — matches the upstream code; confirm before
 * relying on it as an overflow check.
 */
static inline void INIT copy_bytes(struct writer *wr,
		uint32_t rep0, int len)
{
	do {
		copy_byte(wr, rep0);
		len--;
	} while (len != 0 && wr->buffer_pos < wr->header->dst_size);
}
keir@20447 339
/*
 * Decode a single literal byte (main-loop "is_match" bit was 0) and
 * update the state machine.  "prob" points at the already-tested
 * is_match slot; it is re-aimed at the literal coder table selected by
 * the position (literal_pos_mask) and the high bits of the previous
 * byte (lc).
 */
static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
		struct cstate *cst, uint16_t *p,
		int pos_state, uint16_t *prob,
		int lc, uint32_t literal_pos_mask) {
	int mi = 1;
	rc_update_bit_0(rc, prob);
	/* Select the 0x300-entry literal table for this context. */
	prob = (p + LZMA_LITERAL +
		(LZMA_LIT_SIZE
		* (((get_pos(wr) & literal_pos_mask) << lc)
		+ (wr->previous_byte >> (8 - lc))))
		);

	if (cst->state >= LZMA_NUM_LIT_STATES) {
		/* After a match: decode in "matched literal" mode, using
		 * the byte at distance rep0 as a bit-by-bit predictor
		 * until the decoded bit diverges from it. */
		int match_byte = peek_old_byte(wr, cst->rep0);
		do {
			int bit;
			uint16_t *prob_lit;

			match_byte <<= 1;
			bit = match_byte & 0x100;
			prob_lit = prob + 0x100 + bit + mi;
			if (rc_get_bit(rc, prob_lit, &mi)) {
				if (!bit)
					break;
			} else {
				if (bit)
					break;
			}
		} while (mi < 0x100);
	}
	/* Decode the remaining bits in plain literal mode; mi accumulates
	 * the byte with an implicit leading 1 (hence the 0x100 bound). */
	while (mi < 0x100) {
		uint16_t *prob_lit = prob + mi;
		rc_get_bit(rc, prob_lit, &mi);
	}
	/* Low 8 bits of mi are the literal byte. */
	write_byte(wr, mi);
	/* Literal state transition (towards the "literal" states). */
	if (cst->state < 4)
		cst->state = 0;
	else if (cst->state < 10)
		cst->state -= 3;
	else
		cst->state -= 6;
}
keir@20447 382
/*
 * Decode a match (main-loop "is_match" bit was 1): resolve whether it
 * is a new-distance match or a repeat of one of rep0..rep3, decode its
 * length and (for new matches) its distance, then copy the bytes.
 * A decoded distance of 0xFFFFFFFF (rep0 wraps to 0 after ++) is the
 * end-of-stream marker and makes the caller stop.
 */
static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
		struct cstate *cst, uint16_t *p,
		int pos_state, uint16_t *prob) {
	int offset;
	uint16_t *prob_len;
	int num_bits;
	int len;

	rc_update_bit_1(rc, prob);
	prob = p + LZMA_IS_REP + cst->state;
	if (rc_is_bit_0(rc, prob)) {
		/* New match: shift the distance history, distance is
		 * decoded further below into rep0. */
		rc_update_bit_0(rc, prob);
		cst->rep3 = cst->rep2;
		cst->rep2 = cst->rep1;
		cst->rep1 = cst->rep0;
		cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
		prob = p + LZMA_LEN_CODER;
	} else {
		/* Repeated match: pick one of the remembered distances. */
		rc_update_bit_1(rc, prob);
		prob = p + LZMA_IS_REP_G0 + cst->state;
		if (rc_is_bit_0(rc, prob)) {
			rc_update_bit_0(rc, prob);
			prob = (p + LZMA_IS_REP_0_LONG
				+ (cst->state <<
				LZMA_NUM_POS_BITS_MAX) +
				pos_state);
			if (rc_is_bit_0(rc, prob)) {
				/* "Short rep": a single byte at rep0. */
				rc_update_bit_0(rc, prob);

				cst->state = cst->state < LZMA_NUM_LIT_STATES ?
					9 : 11;
				copy_byte(wr, cst->rep0);
				return;
			} else {
				rc_update_bit_1(rc, prob);
			}
		} else {
			/* rep1/rep2/rep3: promote the chosen distance to
			 * rep0 and shuffle the rest down. */
			uint32_t distance;

			rc_update_bit_1(rc, prob);
			prob = p + LZMA_IS_REP_G1 + cst->state;
			if (rc_is_bit_0(rc, prob)) {
				rc_update_bit_0(rc, prob);
				distance = cst->rep1;
			} else {
				rc_update_bit_1(rc, prob);
				prob = p + LZMA_IS_REP_G2 + cst->state;
				if (rc_is_bit_0(rc, prob)) {
					rc_update_bit_0(rc, prob);
					distance = cst->rep2;
				} else {
					rc_update_bit_1(rc, prob);
					distance = cst->rep3;
					cst->rep3 = cst->rep2;
				}
				cst->rep2 = cst->rep1;
			}
			cst->rep1 = cst->rep0;
			cst->rep0 = distance;
		}
		cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
		prob = p + LZMA_REP_LEN_CODER;
	}

	/* Length decoding: choice bits select the low/mid/high length
	 * bit-tree; "offset" rebases the decoded value accordingly. */
	prob_len = prob + LZMA_LEN_CHOICE;
	if (rc_is_bit_0(rc, prob_len)) {
		rc_update_bit_0(rc, prob_len);
		prob_len = (prob + LZMA_LEN_LOW
			+ (pos_state <<
			LZMA_LEN_NUM_LOW_BITS));
		offset = 0;
		num_bits = LZMA_LEN_NUM_LOW_BITS;
	} else {
		rc_update_bit_1(rc, prob_len);
		prob_len = prob + LZMA_LEN_CHOICE_2;
		if (rc_is_bit_0(rc, prob_len)) {
			rc_update_bit_0(rc, prob_len);
			prob_len = (prob + LZMA_LEN_MID
				+ (pos_state <<
				LZMA_LEN_NUM_MID_BITS));
			offset = 1 << LZMA_LEN_NUM_LOW_BITS;
			num_bits = LZMA_LEN_NUM_MID_BITS;
		} else {
			rc_update_bit_1(rc, prob_len);
			prob_len = prob + LZMA_LEN_HIGH;
			offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
				+ (1 << LZMA_LEN_NUM_MID_BITS));
			num_bits = LZMA_LEN_NUM_HIGH_BITS;
		}
	}

	rc_bit_tree_decode(rc, prob_len, num_bits, &len);
	len += offset;

	if (cst->state < 4) {
		/* New match (state was reset above): decode the distance.
		 * First a 6-bit "pos slot", then, for larger slots, the
		 * remaining distance bits — model-coded below
		 * LZMA_END_POS_MODEL_INDEX, raw direct bits plus 4
		 * model-coded align bits above it. */
		int pos_slot;

		cst->state += LZMA_NUM_LIT_STATES;
		prob =
			p + LZMA_POS_SLOT +
			((len <
			LZMA_NUM_LEN_TO_POS_STATES ? len :
			LZMA_NUM_LEN_TO_POS_STATES - 1)
			<< LZMA_NUM_POS_SLOT_BITS);
		rc_bit_tree_decode(rc, prob,
				LZMA_NUM_POS_SLOT_BITS,
				&pos_slot);
		if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
			int i, mi;
			num_bits = (pos_slot >> 1) - 1;
			cst->rep0 = 2 | (pos_slot & 1);
			if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
				cst->rep0 <<= num_bits;
				prob = p + LZMA_SPEC_POS +
					cst->rep0 - pos_slot - 1;
			} else {
				num_bits -= LZMA_NUM_ALIGN_BITS;
				while (num_bits--)
					cst->rep0 = (cst->rep0 << 1) |
						rc_direct_bit(rc);
				prob = p + LZMA_ALIGN;
				cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
				num_bits = LZMA_NUM_ALIGN_BITS;
			}
			/* Reverse bit-tree: decoded bits fill rep0 from
			 * the bottom up via the walking mask i. */
			i = 1;
			mi = 1;
			while (num_bits--) {
				if (rc_get_bit(rc, prob + mi, &mi))
					cst->rep0 |= i;
				i <<= 1;
			}
		} else
			cst->rep0 = pos_slot;
		/* Distance 0xFFFFFFFF encodes end-of-stream. */
		if (++(cst->rep0) == 0)
			return;
	}

	len += LZMA_MATCH_MIN_LEN;

	copy_bytes(wr, cst->rep0, len);
}
keir@20447 524
keir@20447 525
keir@20447 526
keir@20466 527 STATIC int INIT unlzma(unsigned char *buf, unsigned int in_len,
keir@20466 528 int(*fill)(void*, unsigned int),
keir@20466 529 int(*flush)(void*, unsigned int),
keir@20466 530 unsigned char *output,
keir@20466 531 unsigned int *posp,
keir@20466 532 void(*error_fn)(const char *x)
keir@20447 533 )
keir@20447 534 {
keir@20447 535 struct lzma_header header;
keir@20447 536 int lc, pb, lp;
keir@20447 537 uint32_t pos_state_mask;
keir@20447 538 uint32_t literal_pos_mask;
keir@20447 539 uint16_t *p;
keir@20447 540 int num_probs;
keir@20447 541 struct rc rc;
keir@20447 542 int i, mi;
keir@20447 543 struct writer wr;
keir@20447 544 struct cstate cst;
keir@20447 545 unsigned char *inbuf;
keir@20447 546 int ret = -1;
keir@20447 547
keir@20447 548 set_error_fn(error_fn);
keir@20447 549
keir@20447 550 if (buf)
keir@20447 551 inbuf = buf;
keir@20447 552 else
keir@20447 553 inbuf = malloc(LZMA_IOBUF_SIZE);
keir@20447 554 if (!inbuf) {
keir@20447 555 error("Could not allocate input bufer");
keir@20447 556 goto exit_0;
keir@20447 557 }
keir@20447 558
keir@20447 559 cst.state = 0;
keir@20447 560 cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
keir@20447 561
keir@20447 562 wr.header = &header;
keir@20447 563 wr.flush = flush;
keir@20447 564 wr.global_pos = 0;
keir@20447 565 wr.previous_byte = 0;
keir@20447 566 wr.buffer_pos = 0;
keir@20447 567
keir@20447 568 rc_init(&rc, fill, inbuf, in_len);
keir@20447 569
keir@20447 570 for (i = 0; i < sizeof(header); i++) {
keir@20447 571 if (rc.ptr >= rc.buffer_end)
keir@20447 572 rc_read(&rc);
keir@20447 573 ((unsigned char *)&header)[i] = *rc.ptr++;
keir@20447 574 }
keir@20447 575
keir@20447 576 if (header.pos >= (9 * 5 * 5))
keir@20447 577 error("bad header");
keir@20447 578
keir@20447 579 mi = 0;
keir@20447 580 lc = header.pos;
keir@20447 581 while (lc >= 9) {
keir@20447 582 mi++;
keir@20447 583 lc -= 9;
keir@20447 584 }
keir@20447 585 pb = 0;
keir@20447 586 lp = mi;
keir@20447 587 while (lp >= 5) {
keir@20447 588 pb++;
keir@20447 589 lp -= 5;
keir@20447 590 }
keir@20447 591 pos_state_mask = (1 << pb) - 1;
keir@20447 592 literal_pos_mask = (1 << lp) - 1;
keir@20447 593
keir@20447 594 ENDIAN_CONVERT(header.dict_size);
keir@20447 595 ENDIAN_CONVERT(header.dst_size);
keir@20447 596
keir@20447 597 if (header.dict_size == 0)
keir@20447 598 header.dict_size = 1;
keir@20447 599
keir@20447 600 if (output)
keir@20447 601 wr.buffer = output;
keir@20447 602 else {
keir@20447 603 wr.bufsize = MIN(header.dst_size, header.dict_size);
keir@20447 604 wr.buffer = large_malloc(wr.bufsize);
keir@20447 605 }
keir@20447 606 if (wr.buffer == NULL)
keir@20447 607 goto exit_1;
keir@20447 608
keir@20447 609 num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
keir@20447 610 p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
keir@20447 611 if (p == 0)
keir@20447 612 goto exit_2;
keir@20447 613 num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
keir@20447 614 for (i = 0; i < num_probs; i++)
keir@20447 615 p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
keir@20447 616
keir@20447 617 rc_init_code(&rc);
keir@20447 618
keir@20447 619 while (get_pos(&wr) < header.dst_size) {
keir@20447 620 int pos_state = get_pos(&wr) & pos_state_mask;
keir@20447 621 uint16_t *prob = p + LZMA_IS_MATCH +
keir@20447 622 (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
keir@20447 623 if (rc_is_bit_0(&rc, prob))
keir@20447 624 process_bit0(&wr, &rc, &cst, p, pos_state, prob,
keir@20447 625 lc, literal_pos_mask);
keir@20447 626 else {
keir@20447 627 process_bit1(&wr, &rc, &cst, p, pos_state, prob);
keir@20447 628 if (cst.rep0 == 0)
keir@20447 629 break;
keir@20447 630 }
keir@20447 631 }
keir@20447 632
keir@20447 633 if (posp)
keir@20447 634 *posp = rc.ptr-rc.buffer;
keir@20447 635 if (wr.flush)
keir@20447 636 wr.flush(wr.buffer, wr.buffer_pos);
keir@20447 637 ret = 0;
keir@20447 638 large_free(p);
keir@20447 639 exit_2:
keir@20447 640 if (!output)
keir@20447 641 large_free(wr.buffer);
keir@20447 642 exit_1:
keir@20447 643 if (!buf)
keir@20447 644 free(inbuf);
keir@20447 645 exit_0:
keir@20447 646 return ret;
keir@20447 647 }