tweaks to patch from reedbeta

pull/175/head
Sean Barrett 2015-09-13 04:27:24 -07:00
parent 492e6e303d
commit fee80f3d83
1 changed file with 12 additions and 10 deletions

@@ -143,6 +143,7 @@
 Latest revision history:
+2.07 (2015-09-13) fix compiler warnings
 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value
 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning
 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit
@@ -203,6 +204,7 @@
 Martins Mozeiko
 Joseph Thomson
 Phil Jordan
+Nathan Reed
 LICENSE
@@ -3502,10 +3504,10 @@ static int stbi__zbuild_huffman(stbi__zhuffman *z, stbi_uc *sizelist, int num)
       z->size [c] = (stbi_uc ) s;
       z->value[c] = (stbi__uint16) i;
       if (s <= STBI__ZFAST_BITS) {
-         k = stbi__bit_reverse(next_code[s],s);
-         while (k < (1 << STBI__ZFAST_BITS)) {
-            z->fast[k] = fastv;
-            k += (1 << s);
+         int j = stbi__bit_reverse(next_code[s],s);
+         while (j < (1 << STBI__ZFAST_BITS)) {
+            z->fast[j] = fastv;
+            j += (1 << s);
          }
       }
       ++next_code[s];
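
For context on this hunk: stbi__zbuild_huffman builds a direct-lookup table for short Huffman codes. DEFLATE packs code bits LSB-first, so the canonical code is bit-reversed and then copied into every table slot whose low s bits match it, stepping by 1 << s. The tweak only gives the loop its own block-local j instead of reusing the k declared earlier in the function, presumably the compiler warning the new 2.07 entry refers to. Below is a minimal standalone sketch of that fill pattern; FAST_BITS, bit_reverse() and the example values are stand-ins for illustration, not stb_image's identifiers.

/* Standalone sketch of the fast-table fill pattern this hunk touches. */
#include <stdint.h>
#include <stdio.h>

#define FAST_BITS 9          /* assumed to mirror STBI__ZFAST_BITS */

/* reverse the low 'bits' bits of v (DEFLATE codes are packed LSB-first) */
static uint32_t bit_reverse(uint32_t v, int bits)
{
   uint32_t r = 0;
   int i;
   for (i = 0; i < bits; ++i)
      r |= ((v >> i) & 1u) << (bits - 1 - i);
   return r;
}

int main(void)
{
   static uint16_t fast[1 << FAST_BITS];  /* zero-initialized lookup table */
   int s = 3;                   /* example code length, <= FAST_BITS */
   uint32_t next_code = 0x5;    /* example canonical code of length s */
   uint16_t fastv = 0x1234;     /* example packed (length, symbol) entry */

   /* same stride pattern as the patched loop: every slot whose low s bits
      equal the bit-reversed code gets the same entry */
   uint32_t j = bit_reverse(next_code, s);
   while (j < (1u << FAST_BITS)) {
      fast[j] = fastv;
      j += 1u << s;
   }
   printf("filled %d of %d slots\n", 1 << (FAST_BITS - s), 1 << FAST_BITS);
   return 0;
}
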
@@ -5556,7 +5558,7 @@ static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code)
 static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g)
 {
    stbi_uc lzw_cs;
-   stbi__int32 len, code;
+   stbi__int32 len, init_code;
    stbi__uint32 first;
    stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear;
    stbi__gif_lzw *p;
@@ -5569,10 +5571,10 @@ static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g)
    codemask = (1 << codesize) - 1;
    bits = 0;
    valid_bits = 0;
-   for (code = 0; code < clear; code++) {
-      g->codes[code].prefix = -1;
-      g->codes[code].first = (stbi_uc) code;
-      g->codes[code].suffix = (stbi_uc) code;
+   for (init_code = 0; init_code < clear; init_code++) {
+      g->codes[init_code].prefix = -1;
+      g->codes[init_code].first = (stbi_uc) init_code;
+      g->codes[init_code].suffix = (stbi_uc) init_code;
    }
    // support no starting clear code
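
The GIF hunks are the same kind of cleanup: the index used to seed the LZW code table is renamed to init_code, and the per-code read in the decode loop below gets its own block-scoped declaration, so the two uses no longer share one variable. The seeding itself just makes every literal value 0..clear-1 a one-byte chain that expands to itself. A hedged standalone sketch, where the struct and function names are stand-ins modeled on the fields visible in the diff rather than stb_image's API:

#include <stdint.h>

typedef struct {
   int16_t prefix;   /* code this entry extends, or -1 for a root code */
   uint8_t first;    /* first byte of the expanded string */
   uint8_t suffix;   /* last byte appended by this code */
} lzw_entry;

/* Seed the table for an image with 'lzw_cs' bits per pixel: literal codes
   occupy 0..clear-1 and each expands to its own byte value. */
static void seed_lzw(lzw_entry *codes, int lzw_cs)
{
   int clear = 1 << lzw_cs;
   int init_code;
   for (init_code = 0; init_code < clear; init_code++) {
      codes[init_code].prefix = -1;
      codes[init_code].first  = (uint8_t) init_code;
      codes[init_code].suffix = (uint8_t) init_code;
   }
}

int main(void)
{
   static lzw_entry codes[4096];   /* GIF LZW codes never exceed 12 bits */
   seed_lzw(codes, 8);             /* e.g. 8 bits per pixel: roots 0..255 */
   return 0;
}
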
@@ -5591,7 +5593,7 @@ static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g)
          bits |= (stbi__int32) stbi__get8(s) << valid_bits;
          valid_bits += 8;
       } else {
-         code = bits & codemask;
+         stbi__int32 code = bits & codemask;
          bits >>= codesize;
          valid_bits -= codesize;
          // @OPTIMIZE: is there some way we can accelerate the non-clear path?
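
Finally, the decode loop pulls one code at a time out of an LSB-first bit accumulator: mask off codesize bits, then shift them out. After the rename above, nothing outside this branch needs the value, so the declaration moves into the else block. A small self-contained sketch of that extraction pattern; the byte values and code size are example data, not taken from stb_image:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
   const uint8_t stream[] = { 0x8C, 0x2D, 0x99 };  /* example LZW data bytes */
   int codesize     = 5;                     /* example: 5-bit codes */
   int32_t codemask = (1 << codesize) - 1;
   int32_t bits = 0;                         /* bit accumulator, LSB first */
   int32_t valid_bits = 0;
   size_t  pos = 0;

   while (pos < sizeof(stream) || valid_bits >= codesize) {
      if (valid_bits < codesize) {
         /* refill: new bytes land above the bits already buffered */
         bits |= (int32_t) stream[pos++] << valid_bits;
         valid_bits += 8;
      } else {
         /* block-scoped, like the patched line: 'code' only lives here */
         int32_t code = bits & codemask;
         bits >>= codesize;
         valid_bits -= codesize;
         printf("code = %d\n", (int) code);
      }
   }
   return 0;
}
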