/* ========================== *
 * Aseprite (.ase) file parser
 *
 * DEFLATE decoder based on Handmade Hero's png parser
 * ========================== */

#include "ase.h"
|
|
#include "arena.h"
|
|
#include "scratch.h"
|
|
#include "byteio.h"
|
|
#include "string.h"
|
|
#include "log.h"
|
|
|
|
/* ========================== *
 * Bitbuf
 * ========================== */

/* LSB-first bit reader over a raw byte stream (the bit order DEFLATE uses). */
struct bitbuf {
	u8 *data;    /* backing byte buffer (not owned) */
	u64 cur_bit; /* absolute bit offset of the next unread bit */
};
/* Returns the next nbits of the stream (LSB-first) without advancing the
 * cursor. nbits must be <= 32.
 * NOTE(review): reads up to 5 bytes starting at the current byte; near the
 * end of the buffer this can read past the encoded data — callers rely on
 * the stream being well-formed. TODO confirm slack bytes exist. */
INTERNAL u32 peek_bits(struct bitbuf *bb, u32 nbits)
{
	ASSERT(nbits <= 32);

	/* nbits == 0 would shift U32_MAX by 32 below, which is undefined
	 * behavior in C; answer directly instead. */
	if (nbits == 0) {
		return 0;
	}

	u64 cur_byte = bb->cur_bit >> 3;
	u8 bit_index = bb->cur_bit % 8;
	/* Number of whole bytes that contain the requested bit span. */
	u64 nbytes = (nbits + bit_index + 7) >> 3;

	u64 val64 = 0;
	MEMCPY(&val64, &bb->data[cur_byte], nbytes);
	/* Drop the already-consumed low bits, then mask to nbits. */
	u32 val32 = (u32)(val64 >> bit_index);
	val32 &= U32_MAX >> (32 - nbits);

	return val32;
}
/* Reads nbits from the stream and advances the cursor past them. */
INTERNAL u32 consume_bits(struct bitbuf *bb, u32 nbits)
{
	u32 result = peek_bits(bb, nbits);
	bb->cur_bit = bb->cur_bit + nbits;
	return result;
}
/* Advances the cursor by nbits without reading them. */
INTERNAL void skip_bits(struct bitbuf *bb, u32 nbits)
{
	bb->cur_bit = bb->cur_bit + nbits;
}
/* ========================== *
 * Inflate
 * ========================== */

/* DEFLATE's maximum code length is 15 bits; length histograms below are
 * sized 16 so they can be indexed by code length 0..15. */
#define HUFFMAN_BIT_COUNT 16

/* DEFLATE block types — the 2-bit BTYPE field (RFC 1951 §3.2.3). */
enum block_type {
	BLOCK_TYPE_UNCOMPRESSED = 0,
	BLOCK_TYPE_COMPRESSED_FIXED = 1,
	BLOCK_TYPE_COMPRESSED_DYNAMIC = 2,
	BLOCK_TYPE_RESERVED = 3
};
/* One slot of the flat decode table: the decoded symbol plus how many input
 * bits its code occupies. Also reused by g_length_table / g_dist_table as
 * a generic { base value, extra bits } pair. */
struct huffman_entry {
	u16 symbol;
	u16 bits_used; /* 0 marks a slot no code maps to */
};

/* Table-driven Huffman decoder: `entries` is a direct-index table of size
 * (1 << max_code_bits); every bit pattern that starts with a valid code
 * maps to that code's entry (see huffman_init). */
struct huffman {
	u32 max_code_bits;
	u32 entries_count;
	struct huffman_entry *entries;
};
/* Transmission order of the code-length alphabet's code lengths in a
 * dynamic block header (RFC 1951 §3.2.7). */
GLOBAL READONLY u32 g_hclen_order[] = {
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
};
/* Back-reference length codes 257..285 (RFC 1951 §3.2.5), indexed by
 * (symbol - 257): base length in `symbol`, extra bits to read in
 * `bits_used`. */
GLOBAL READONLY struct huffman_entry g_length_table[] = {
	{3, 0}, /* 257 */
	{4, 0}, /* 258 */
	{5, 0}, /* 259 */
	{6, 0}, /* 260 */
	{7, 0}, /* 261 */
	{8, 0}, /* 262 */
	{9, 0}, /* 263 */
	{10, 0}, /* 264 */
	{11, 1}, /* 265 */
	{13, 1}, /* 266 */
	{15, 1}, /* 267 */
	{17, 1}, /* 268 */
	{19, 2}, /* 269 */
	{23, 2}, /* 270 */
	{27, 2}, /* 271 */
	{31, 2}, /* 272 */
	{35, 3}, /* 273 */
	{43, 3}, /* 274 */
	{51, 3}, /* 275 */
	{59, 3}, /* 276 */
	{67, 4}, /* 277 */
	{83, 4}, /* 278 */
	{99, 4}, /* 279 */
	{115, 4}, /* 280 */
	{131, 5}, /* 281 */
	{163, 5}, /* 282 */
	{195, 5}, /* 283 */
	{227, 5}, /* 284 */
	{258, 0}, /* 285 */
};
/* Back-reference distance codes 0..29 (RFC 1951 §3.2.5): base distance in
 * `symbol`, extra bits to read in `bits_used`. */
GLOBAL READONLY struct huffman_entry g_dist_table[] = {
	{1, 0}, /* 0 */
	{2, 0}, /* 1 */
	{3, 0}, /* 2 */
	{4, 0}, /* 3 */
	{5, 1}, /* 4 */
	{7, 1}, /* 5 */
	{9, 2}, /* 6 */
	{13, 2}, /* 7 */
	{17, 3}, /* 8 */
	{25, 3}, /* 9 */
	{33, 4}, /* 10 */
	{49, 4}, /* 11 */
	{65, 5}, /* 12 */
	{97, 5}, /* 13 */
	{129, 6}, /* 14 */
	{193, 6}, /* 15 */
	{257, 7}, /* 16 */
	{385, 7}, /* 17 */
	{513, 8}, /* 18 */
	{769, 8}, /* 19 */
	{1025, 9}, /* 20 */
	{1537, 9}, /* 21 */
	{2049, 10}, /* 22 */
	{3073, 10}, /* 23 */
	{4097, 11}, /* 24 */
	{6145, 11}, /* 25 */
	{8193, 12}, /* 26 */
	{12289, 12}, /* 27 */
	{16385, 13}, /* 28 */
	{24577, 13}, /* 29 */
};
/* Fixed Huffman code lengths (RFC 1951 §3.2.6), run-length encoded as
 * { last symbol of run, bit length }: literals 0-143 use 8 bits, 144-255
 * use 9, 256-279 use 7, 280-287 use 8; the distance codes (stored here as
 * 288-319) all use 5. */
GLOBAL READONLY u32 g_fixed_bl_counts[][2] = {
	{143, 8},
	{255, 9},
	{279, 7},
	{287, 8},
	{319, 5},
};
/* Reverses the low bit_count bits of v (DEFLATE codes are packed MSB-first
 * while the bit reader is LSB-first, so table indices must be mirrored). */
INTERNAL u32 reverse_bits(u32 v, u32 bit_count)
{
	/* 7 & 15 seem to be the most common bit_counts, so a
	 * more optimal path is laid out for them. */
	if (bit_count == 15) {
		/* Reverse each byte with the classic swap-halves trick, then
		 * recombine: the reversed low byte becomes the top 8 of 15 bits. */
		u32 b1 = v & 0xFF;
		b1 = (b1 & 0xF0) >> 4 | (b1 & 0x0F) << 4;
		b1 = (b1 & 0xCC) >> 2 | (b1 & 0x33) << 2;
		b1 = (b1 & 0xAA) >> 1 | (b1 & 0x55) << 1;

		u32 b2 = (v & 0xFF00) >> 8;
		b2 = (b2 & 0xF0) >> 4 | (b2 & 0x0F) << 4;
		b2 = (b2 & 0xCC) >> 2 | (b2 & 0x33) << 2;
		b2 = (b2 & 0xAA) >> 1 | (b2 & 0x55) << 1;
		/* Only 7 of the high byte's bits are in play (15-bit reversal). */
		b2 >>= 1;

		return (b1 << 7) | b2;
	} else if (bit_count == 7) {
		/* Reverse 8 bits then drop the extra low bit. */
		v = (v & 0xF0) >> 4 | (v & 0x0F) << 4;
		v = (v & 0xCC) >> 2 | (v & 0x33) << 2;
		v = (v & 0xAA) >> 1 | (v & 0x55) << 1;
		return v >> 1;

	} else {
		/* Generic path: swap bit i with its mirror from both ends.
		 * (The middle bit of an odd width maps onto itself.) */
		u32 res = 0;
		for (u32 i = 0; i <= (bit_count / 2); ++i) {
			u32 inv = (bit_count - (i + 1));
			res |= ((v >> i) & 0x1) << inv;
			res |= ((v >> inv) & 0x1) << i;
		}
		return res;
	}
}
/* Builds a direct-index Huffman decode table from per-symbol code lengths
 * (canonical code construction, RFC 1951 §3.2.2).
 *
 * arena           - allocation target for the (1 << max_code_bits) entries
 * max_code_bits   - longest code length the table must resolve (7 or 15 here)
 * bl_counts       - code length of each symbol (0 = symbol unused)
 * bl_counts_count - number of symbols
 *
 * Every bit pattern whose prefix matches a code is filled with that code's
 * entry, so a decode is a single peek + lookup. */
INTERNAL struct huffman huffman_init(struct arena *arena, u32 max_code_bits, u32 *bl_counts, u32 bl_counts_count)
{
	__prof;

	struct huffman res = { 0 };
	res.max_code_bits = max_code_bits;
	res.entries_count = (1 << max_code_bits);
	res.entries = arena_push_array(arena, struct huffman_entry, res.entries_count);

	/* Step 1: histogram of code lengths. */
	u32 code_length_hist[HUFFMAN_BIT_COUNT] = { 0 };
	for (u32 i = 0; i < bl_counts_count; ++i) {
		u32 count = bl_counts[i];
		/* `count` indexes code_length_hist, so it must be strictly less
		 * than the array size (the previous `<=` assert would have let an
		 * out-of-bounds write at index HUFFMAN_BIT_COUNT slip through). */
		ASSERT(count < ARRAY_COUNT(code_length_hist));
		++code_length_hist[count];
	}

	/* Step 2: first canonical code value for each length. */
	u32 next_code[HUFFMAN_BIT_COUNT] = { 0 };
	next_code[0] = 0;
	code_length_hist[0] = 0; /* length 0 means "unused", not a real code */
	for (u32 i = 1; i < ARRAY_COUNT(next_code); ++i) {
		next_code[i] = ((next_code[i - 1] + code_length_hist[i - 1]) << 1);
	}

	/* Step 3: assign codes and fill every table slot the code prefixes. */
	for (u32 i = 0; i < bl_counts_count; ++i) {
		u32 code_bits = bl_counts[i];
		if (code_bits) {
			ASSERT(code_bits < ARRAY_COUNT(next_code));
			u32 code = next_code[code_bits]++;
			/* Bits beyond the code are "don't care": replicate the entry
			 * over every possible suffix. */
			u32 arbitrary_bits = res.max_code_bits - code_bits;
			u32 entry_count = (1 << arbitrary_bits);
			for (u32 entry_index = 0; entry_index < entry_count; ++entry_index) {
				/* TODO: Optimize this. It's bloating up the loading times. */
				u32 base_index = (code << arbitrary_bits) | entry_index;
				/* Mirror the index: codes are MSB-first, the reader is
				 * LSB-first. */
				u32 index = reverse_bits(base_index, res.max_code_bits);
				struct huffman_entry *entry = &res.entries[index];
				entry->symbol = (u16)i;
				entry->bits_used = (u16)code_bits;
			}
		}
	}

	return res;
}
/* Decodes one symbol from the bit stream via direct table lookup and
 * advances the stream by the code's length. */
INTERNAL u16 huffman_decode(struct huffman *huffman, struct bitbuf *bb)
{
	u32 index = peek_bits(bb, huffman->max_code_bits);
	ASSERT(index < huffman->entries_count);

	struct huffman_entry *entry = &huffman->entries[index];
	/* bits_used == 0 means no code maps to this bit pattern (slot never
	 * filled by huffman_init). Check BEFORE advancing the stream — the old
	 * code skipped first and asserted after, so a corrupt stream left the
	 * cursor untouched before the assert fired. */
	ASSERT(entry->bits_used > 0);

	u16 res = entry->symbol;
	skip_bits(bb, entry->bits_used);

	return res;
}
/* Decompresses a zlib-wrapped DEFLATE stream (RFC 1950/1951) from `encoded`
 * into `dest`. The caller must size `dest` for the full decompressed output;
 * no buffer lengths are checked here.
 * NOTE(review): malformed input trips ASSERTs / panics rather than returning
 * an error — assumes aseprite always writes well-formed streams. */
INTERNAL void inflate(u8 *dest, u8 *encoded)
{
	struct temp_arena scratch = scratch_begin_no_conflict();

	__prof;
	struct bitbuf bb = { encoded };

	/* ZLIB header */
	u32 cm = consume_bits(&bb, 4);
	u32 cinfo = consume_bits(&bb, 4);
	ASSERT(cm == 8);    /* 8 = deflate */
	ASSERT(cinfo == 7); /* 32K LZ77 window */

	u32 fcheck = consume_bits(&bb, 5);
	u32 fdict = consume_bits(&bb, 1);
	u32 flevl = consume_bits(&bb, 2);
	ASSERT(fdict == 0); /* preset dictionaries unsupported */

	/* RFC 1950 header check: CMF*256 + FLG must be divisible by 31. */
	u8 cmf = (u8)(cm | (cinfo << 4));
	u8 flg = fcheck | (fdict << 5) | (flevl << 6);
	(UNUSED)cmf;
	(UNUSED)flg;
	ASSERT(((cmf * 256) + flg) % 31 == 0);

	u8 bfinal = 0;
	while (!bfinal) {
		bfinal = consume_bits(&bb, 1);
		u8 btype = consume_bits(&bb, 2);
		switch (btype) {
		case BLOCK_TYPE_UNCOMPRESSED: {
			sys_panic(STR("Unsupported block type while inflating ase: BLOCK_TYPE_UNCOMPRESSED"));
		} break;

		case BLOCK_TYPE_COMPRESSED_FIXED:
		case BLOCK_TYPE_COMPRESSED_DYNAMIC: {
			struct temp_arena temp = arena_temp_begin(scratch.arena);
			/* Code lengths of the literal/length codes, immediately
			 * followed by those of the distance codes (max 288 + 32;
			 * the table is oversized). */
			u32 lit_len_dist_table[512] = { 0 };
			u32 hlit;
			u32 hdist;

			if (btype == BLOCK_TYPE_COMPRESSED_DYNAMIC) {
				/* Dynamic table */

				/* Read huffman table */
				hlit = consume_bits(&bb, 5) + 257;
				hdist = consume_bits(&bb, 5) + 1;
				u32 hclen = consume_bits(&bb, 4) + 4;

				/* Init dict huffman (hclen): code lengths of the
				 * code-length alphabet, sent in g_hclen_order. */
				u32 hclen_bl_counts[19] = { 0 };
				for (u32 i = 0; i < hclen; ++i) {
					u32 code = g_hclen_order[i];
					hclen_bl_counts[code] = consume_bits(&bb, 3);
				}
				struct huffman dict_huffman = huffman_init(temp.arena, 7, hclen_bl_counts, ARRAY_COUNT(hclen_bl_counts));

				/* Decode dict huffman: the run-length-encoded lengths of
				 * the literal/length and distance codes (RFC 1951 §3.2.7). */
				u32 lit_len_count = 0;
				u32 len_count = hlit + hdist;
				ASSERT(len_count <= ARRAY_COUNT(lit_len_dist_table));
				while (lit_len_count < len_count) {
					u32 rep_count = 1;
					u32 rep_val = 0;
					u32 encoded_len = huffman_decode(&dict_huffman, &bb);
					if (encoded_len <= 15) {
						/* Literal code length */
						rep_val = encoded_len;
					} else if (encoded_len == 16) {
						/* Repeat previous length 3..6 times */
						rep_count = 3 + consume_bits(&bb, 2);
						ASSERT(lit_len_count > 0);
						rep_val = lit_len_dist_table[lit_len_count - 1];
					} else if (encoded_len == 17) {
						/* Repeat zero 3..10 times */
						rep_count = 3 + consume_bits(&bb, 3);
					} else if (encoded_len == 18) {
						/* Repeat zero 11..138 times */
						rep_count = 11 + consume_bits(&bb, 7);
					} else {
						/* Invalid len */
						ASSERT(false);
					}

					while (rep_count--) {
						lit_len_dist_table[lit_len_count++] = rep_val;
					}
				}
				ASSERT(lit_len_count == len_count);
			} else {
				/* Fixed table (RFC 1951 §3.2.6), expanded from the
				 * run-length pairs in g_fixed_bl_counts. */
				hlit = 288;
				hdist = 32;
				u32 index = 0;
				for (u32 i = 0; i < ARRAY_COUNT(g_fixed_bl_counts); ++i) {
					u32 bit_count = g_fixed_bl_counts[i][1];
					u32 last_valuie = g_fixed_bl_counts[i][0];
					while (index <= last_valuie) {
						lit_len_dist_table[index++] = bit_count;
					}
				}
			}

			/* Decode */
			struct huffman lit_len_huffman = huffman_init(temp.arena, 15, lit_len_dist_table, hlit);
			struct huffman dist_huffman = huffman_init(temp.arena, 15, lit_len_dist_table + hlit, hdist);

			while (true) {
				u32 lit_len = huffman_decode(&lit_len_huffman, &bb);
				if (lit_len <= 255) {
					/* Literal byte */
					*dest++ = lit_len & 0xFF;
				} else if (lit_len >= 257) {
					/* Length/distance back-reference: base value plus
					 * optional extra bits, for both components. */
					u32 length_index = (lit_len - 257);
					struct huffman_entry length_entry = g_length_table[length_index];
					u32 length = length_entry.symbol;
					if (length_entry.bits_used > 0) {
						u32 extra_bits = consume_bits(&bb, length_entry.bits_used);
						length += extra_bits;
					}

					u32 dist_index = huffman_decode(&dist_huffman, &bb);
					struct huffman_entry dist_entry = g_dist_table[dist_index];
					u32 distance = dist_entry.symbol;
					if (dist_entry.bits_used > 0) {
						u32 extra_bits = consume_bits(&bb, dist_entry.bits_used);
						distance += extra_bits;
					}
					/* Byte-by-byte copy: the regions may overlap when
					 * length > distance (RLE-style references). */
					u8 *source = dest - distance;
					while (length--) {
						*dest++ = *source++;
					}
				} else {
					/* 256 = end-of-block symbol */
					break;
				}
			}

			arena_temp_end(temp);
		} break;

		case BLOCK_TYPE_RESERVED: {
			/* TODO */
			ASSERT(false);
		} break;
		}
	}

	scratch_end(scratch);
}
/* ========================== *
 * Decoder structs
 * ========================== */

/* Chunk type IDs from the .ase spec. Only LAYER, CEL, TAGS and SLICE are
 * consumed below; the rest are skipped. */
enum chunk_type {
	CHUNK_TYPE_OLD_PALETTE1 = 0x0004,
	CHUNK_TYPE_OLD_PALETTE2 = 0x0011,
	CHUNK_TYPE_LAYER = 0x2004,
	CHUNK_TYPE_CEL = 0x2005,
	CHUNK_TYPE_CEL_EXTRA = 0x2006,
	CHUNK_TYPE_COLOR_PROFILE = 0x2007,
	CHUNK_TYPE_EXTERNAL_FILES = 0x2008,
	CHUNK_TYPE_MASK = 0x2016,
	CHUNK_TYPE_PATH = 0x2017,
	CHUNK_TYPE_TAGS = 0x2018,
	CHUNK_TYPE_PALETTE = 0x2019,
	CHUNK_TYPE_USER_DATA = 0x2020,
	CHUNK_TYPE_SLICE = 0x2022,
	CHUNK_TYPE_TILESET = 0x2023
};
/* Cel storage types. Only LINKED and COMPRESSED_IMAGE are fully supported;
 * RAW_IMAGE is skipped and COMPRESSED_TILEMAP is an error (see decoder). */
enum cel_type {
	CEL_TYPE_RAW_IMAGE = 0,
	CEL_TYPE_LINKED = 1,
	CEL_TYPE_COMPRESSED_IMAGE = 2,
	CEL_TYPE_COMPRESSED_TILEMAP = 3
};
/* File header, read straight off the byte stream (PACK removes padding). */
PACK(struct ase_header {
	u32 file_size;    /* whole file size in bytes */
	u16 magic;        /* 0xA5E0 */
	u16 frames;
	u16 width;        /* frame width in pixels */
	u16 height;       /* frame height in pixels */
	u16 color_depth;  /* bits per pixel; only 32 (RGBA) is supported */
	u32 flags;        /* bit 0: layer opacity field is valid (see decoder) */
	u16 speed;        /* deprecated global frame duration */
	u32 _1;           /* reserved */
	u32 _2;           /* reserved */
	u8 palette_entry; /* transparent color index (indexed mode only) */
	u8 _3[3];         /* reserved */
	u16 num_colors;
	u8 pixel_width;   /* pixel aspect ratio; 0 means 1:1 */
	u8 pixel_height;
	i16 grid_x;
	i16 grid_y;
	u16 grid_width;
	u16 grid_height;
	u8 _4[84];        /* reserved, pads the header to 128 bytes */
});
/* Per-frame header; one precedes each frame's chunk list. */
PACK(struct frame_header {
	u32 bytes;             /* frame size in bytes, including this header */
	u16 magic;             /* 0xF1FA */
	u16 chunks_old;        /* legacy chunk count (0xFFFF = overflowed) */
	u16 frame_duration_ms;
	u8 _[2];               /* reserved */
	u32 chunks_new;        /* chunk count; 0 means fall back to chunks_old */
});
INTERNAL void push_error_copy_msg(struct arena *arena, struct ase_error_list *list, struct string msg_src)
|
|
{
|
|
logf_error("Error while decoding .ase: \"%F\"", FMT_STR(msg_src));
|
|
struct ase_error *e = arena_push(arena, struct ase_error);
|
|
*e = (struct ase_error) {
|
|
.msg = string_copy(arena, msg_src)
|
|
};
|
|
if (!list->first) {
|
|
list->first = e;
|
|
} else {
|
|
list->last->next = e;
|
|
}
|
|
list->last = e;
|
|
++list->count;
|
|
}
|
|
|
|
/* ========================== *
 * Image decoder
 * ========================== */

/* Parsed CHUNK_TYPE_LAYER data plus decoder bookkeeping. */
struct layer {
	u16 flags;         /* bit 0: visible (checked during assembly) */
	u16 type;          /* 2 = tilemap layer (has tileset_index) */
	u16 child_level;
	u16 blend_mode;    /* only 0 ('Normal') is supported */
	u8 opacity;
	struct string name;
	u32 tileset_index; /* valid only when type == 2 */

	u32 index;         /* order of appearance in the file */
	struct layer *next;
};
/* Parsed CHUNK_TYPE_CEL data plus decoder bookkeeping. Which fields are
 * meaningful depends on `type` (see the sections below). */
struct cel {
	u16 layer_index;
	i16 x_pos;
	i16 y_pos;
	u8 opacity;
	enum cel_type type;
	i16 z_index;

	/* Linked cel */
	u16 frame_pos; /* frame this cel borrows its pixels from */

	/* Compressed image */
	u32 width;
	u32 height;
	u32 *pixels; /* inflated RGBA; shared with the referenced cel if linked */

	u16 frame_index; /* frame this cel belongs to */
	struct cel *next;
};
/* Taken from
|
|
* https://github.com/RandyGaul/cute_headers/blob/master/cute_aseprite.h#L870 */
|
|
INTERNAL u32 mul_u8(u32 a, u32 b)
|
|
{
|
|
u32 t = (a * b) + 0x80;
|
|
return ((t >> 8) + t) >> 8;
|
|
}
|
|
|
|
/* Composites src over dest (normal alpha blending), scaling src's alpha by
 * `opacity` first. Pixels are packed 8-bit channels: R in the low byte, then
 * G, B, A. Returns the blended packed pixel. */
INTERNAL u32 blend(u32 src, u32 dest, u8 opacity)
{
	u32 dest_r = (dest & 0xff);
	u32 dest_g = (dest >> 8) & 0xff;
	u32 dest_b = (dest >> 16) & 0xff;
	u32 dest_a = (dest >> 24) & 0xff;

	u32 src_r = (src & 0xff);
	u32 src_g = (src >> 8) & 0xff;
	u32 src_b = (src >> 16) & 0xff;
	u32 src_a = (src >> 24) & 0xff;

	src_a = (u8)mul_u8(src_a, opacity);
	/* Combined alpha: a = sa + da - sa*da/255 */
	u32 a = src_a + dest_a - mul_u8(src_a, dest_a);
	u32 r, g, b;
	if (a == 0) {
		r = g = b = 0;
	} else {
		/* BUGFIX: interpolate in SIGNED arithmetic. (src - dest) can be
		 * negative; with unsigned u32 math the wrapped difference does not
		 * survive the division, yielding huge values that bleed into the
		 * other channels when packed below. (cute_aseprite, which this is
		 * based on, does this math with int.) */
		r = (u32)((i32)dest_r + ((i32)src_r - (i32)dest_r) * (i32)src_a / (i32)a);
		g = (u32)((i32)dest_g + ((i32)src_g - (i32)dest_g) * (i32)src_a / (i32)a);
		b = (u32)((i32)dest_b + ((i32)src_b - (i32)dest_b) * (i32)src_a / (i32)a);
	}

	return r | (g << 8) | (b << 16) | (a << 24);
}
/* Redistributes frames across a grid of frames_x * frames_y tiles so the
 * resulting sheet is as close to square as possible. All four out-params are
 * updated in place; stops at the last layout whose width >= height. */
INTERNAL void make_image_dimensions_squareish(struct ase_header *header, u32 *frames_x, u32 *frames_y, u64 *image_width, u64 *image_height)
{
	/* Greedily shrink the column count while the sheet stays at least as
	 * wide as it is tall. */
	for (;;) {
		if (*frames_x <= 1) {
			break;
		}

		u64 candidate_x = *frames_x - 1;
		u64 candidate_y = ((header->frames - 1) / candidate_x) + 1; /* ceil div */
		u64 candidate_w = header->width * candidate_x;
		u64 candidate_h = header->height * candidate_y;

		if (candidate_w < candidate_h) {
			break; /* would become taller than wide — keep the previous layout */
		}

		*frames_x = candidate_x;
		*frames_y = candidate_y;
		*image_width = candidate_w;
		*image_height = candidate_h;
	}
}
/* Decodes a whole .ase file into a single RGBA sprite-sheet image (frames
 * laid out left-to-right, wrapped into rows by
 * make_image_dimensions_squareish). On failure, res.errors is populated and
 * the partially-initialized result is returned. The image pixels are
 * allocated on `arena`; all intermediate layer/cel data lives on scratch. */
struct ase_decode_image_result ase_decode_image(struct arena *arena, struct buffer encoded)
{
	__prof;

	struct temp_arena scratch = scratch_begin(arena);
	struct ase_decode_image_result res = { 0 };

	struct byte_reader br = br_create_from_buffer(encoded);
	struct ase_header ase_header;
	br_read_to_struct(&br, &ase_header);

	if (ase_header.magic != 0xA5E0) {
		push_error_copy_msg(arena, &res.errors, STR("Not a valid aseprite file"));
		goto abort;
	}

	if (ase_header.color_depth != 32) {
		struct string msg = string_format(scratch.arena,
			STR("Only 32 bit rgba color mode is supported (got %F)"),
			FMT_UINT(ase_header.color_depth));
		push_error_copy_msg(arena, &res.errors, msg);
		goto abort;
	}

	u64 frame_width = ase_header.width;
	u64 frame_height = ase_header.height;

	/* Start with all frames on one row, then square up the layout. */
	u32 frames_x = ase_header.frames;
	u32 frames_y = 1;
	u64 image_width = frame_width * frames_x;
	u64 image_height = frame_height * frames_y;
	make_image_dimensions_squareish(&ase_header, &frames_x, &frames_y, &image_width, &image_height);

	res.image.width = image_width;
	res.image.height = image_height;
	/* TODO: Optimize this. Naive memset(0) is bloating the decode time for large images. */
	res.image.pixels = arena_push_array_zero(arena, u32, image_width * image_height);

	/* Layers are collected in a reversed singly-linked list, ordered back
	 * into an array by index afterwards. */
	u32 num_layers = 0;
	struct layer *layer_head = NULL;

	/* Cels are collected in file order (head/tail append). */
	struct cel *cel_head = NULL;
	struct cel *cel_tail = NULL;

	/* Iterate frames */
	u32 num_frames = 0;
	for (u16 i = 0; i < ase_header.frames; ++i) {
		struct frame_header frame_header;
		br_read_to_struct(&br, &frame_header);

		/* chunks_new == 0 means the legacy 16-bit count is authoritative. */
		u32 num_chunks = frame_header.chunks_new;
		if (num_chunks == 0) {
			ASSERT(frame_header.chunks_old != 0xFFFF);
			num_chunks = frame_header.chunks_old;
		}

		/* Iterate chunks in frame */
		for (u32 j = 0; j < num_chunks; ++j) {
			u32 chunk_size = br_read_u32(&br);
			enum chunk_type chunk_type = br_read_u16(&br);

			/* Chunk size includes size & type */
			ASSERT(chunk_size >= 6);
			chunk_size -= 6;

			u64 chunk_end_pos = br_pos(&br) + chunk_size;

			switch (chunk_type) {
			case CHUNK_TYPE_LAYER: {
				struct layer *layer = arena_push_zero(scratch.arena, struct layer);
				layer->next = layer_head;
				layer_head = layer;

				layer->flags = br_read_u16(&br);
				layer->type = br_read_u16(&br);
				layer->child_level = br_read_u16(&br);

				/* Ignoring layer default width & height */
				br_seek(&br, sizeof(u16) * 2);

				layer->blend_mode = br_read_u16(&br);
				if (layer->blend_mode != 0) {
					push_error_copy_msg(arena,
						&res.errors,
						STR("Layer has unsupported blend mode (only 'Normal' mode is supported). Tip: Try using 'merge down' to create a normal layer as a workaround"));
					goto abort;
				}

				layer->opacity = br_read_u8(&br);
				/* Header flag bit 0 says whether layer opacity is valid;
				 * if not, treat the layer as fully opaque. */
				if (!(ase_header.flags & 1)) {
					layer->opacity = 255;
				}

				br_seek(&br, sizeof(u8) * 3);

				/* Layer name: u16 length + raw bytes, copied to scratch. */
				u16 str_len = br_read_u16(&br);
				u8 *str_bytes = br_read_raw(&br, str_len);
				layer->name = (struct string) {
					str_len,
					arena_push_array(scratch.arena, u8, str_len)
				};
				MEMCPY(layer->name.text, str_bytes, str_len);

				/* Tilemap layers carry an extra tileset index. */
				if (layer->type == 2) {
					layer->tileset_index = br_read_u32(&br);
				}

				layer->index = num_layers++;
			} break;

			case CHUNK_TYPE_CEL: {
				struct cel *cel = arena_push_zero(scratch.arena, struct cel);
				if (cel_tail) {
					cel_tail->next = cel;
				} else {
					cel_head = cel;
				}
				cel_tail = cel;

				cel->layer_index = br_read_u16(&br);
				cel->x_pos = br_read_i16(&br);
				cel->y_pos = br_read_i16(&br);
				cel->opacity = br_read_u8(&br);
				cel->type = br_read_u16(&br);
				cel->z_index = br_read_i16(&br);
				br_seek(&br, sizeof(u8) * 5);

				cel->frame_index = num_frames;

				switch (cel->type) {
				case CEL_TYPE_RAW_IMAGE: {
					/* Unsupported */
					br_seek_to(&br, chunk_end_pos);
				} break;

				case CEL_TYPE_LINKED: {
					cel->frame_pos = br_read_u16(&br);
					/* Actual linking happens later after iteration */
				} break;

				case CEL_TYPE_COMPRESSED_IMAGE: {
					cel->width = br_read_u16(&br);
					cel->height = br_read_u16(&br);

					/* Inflate the zlib-compressed RGBA pixel data in place
					 * from the reader's current position. */
					cel->pixels = arena_push_array(scratch.arena, u32, cel->width * cel->height);
					inflate((u8 *)cel->pixels, br.at);

					br_seek_to(&br, chunk_end_pos);
				} break;

				case CEL_TYPE_COMPRESSED_TILEMAP: {
					/* Unsupported */
					push_error_copy_msg(arena, &res.errors, STR("Tilemaps are not supported"));
					goto abort;
				} break;
				}
			} break;

			default: {
				br_seek(&br, chunk_size);
			} break;
			}
		}
		++num_frames;
	}

	/* Create ordered layers array */
	struct layer **layers_ordered = arena_push_array(scratch.arena, struct layer *, num_layers);
	for (struct layer *layer = layer_head; layer; layer = layer->next) {
		layers_ordered[layer->index] = layer;
	}

	/* Link cels: resolve LINKED cels to the pixels of the cel at the same
	 * layer in the referenced (earlier) frame. */
	struct cel **cels_ordered = arena_push_array(scratch.arena, struct cel *, num_frames * num_layers);
	for (struct cel *cel = cel_head; cel; cel = cel->next) {
		cels_ordered[(cel->frame_index * num_layers) + cel->layer_index] = cel;
		if (cel->type == CEL_TYPE_LINKED) {
			struct cel *ref_cel = cels_ordered[(cel->frame_pos * num_layers) + cel->layer_index];
			cel->width = ref_cel->width;
			cel->height = ref_cel->height;
			cel->pixels = ref_cel->pixels;
		}
	}

	{
		__profscope(assemble_image);

		/* Assemble image from cels */
		for (struct cel *cel = cel_head; cel; cel = cel->next) {
			struct layer *layer = layers_ordered[cel->layer_index];
			/* Only draw visible layers */
			if (layer->flags & 1) {
				/* Effective opacity = cel opacity * layer opacity. */
				u8 opacity = (cel->opacity / 255.0f) * (layer->opacity / 255.0f) * 255.0f;

				i32 cel_width = cel->width;
				i32 cel_height = cel->height;

				i32 cel_left = 0;
				i32 cel_right = cel_width;
				i32 cel_top = 0;
				i32 cel_bottom = cel_height;

				i32 frame_left = cel->x_pos;
				i32 frame_top = cel->y_pos;

				/* Top-left of this cel in sheet coordinates. */
				i32 image_left = frame_left + ((cel->frame_index % frames_x) * frame_width);
				i32 image_top = frame_top + ((cel->frame_index / frames_x) * frame_height);

				/* Adjust bounds to ensure pixels outside of frame boundaries
				 * aren't (aseprite keeps chunks outside of frame around in
				 * project file). */
				{
					i32 frame_right = cel_width + frame_left;
					i32 frame_bottom = frame_top + cel_height;
					if (frame_left < 0) {
						cel_left += -frame_left;
						frame_left = 0;
					}
					if (frame_top < 0) {
						cel_top += -frame_top;
						frame_top = 0;
					}
					if (frame_right > (i32)frame_width) {
						cel_right -= (frame_right - frame_width);
						frame_right = frame_width;
					}
					if (frame_bottom > (i32)frame_height) {
						cel_bottom -= (frame_bottom - frame_height);
						frame_bottom = frame_height;
					}
				}

				/* Blend the clipped cel rect onto the sheet. */
				for (i32 cel_y = cel_top; cel_y < cel_bottom; ++cel_y) {
					i32 image_y = image_top + cel_y;
					i32 cel_stride = cel_y * cel_width;
					i32 image_stride = image_y * image_width;
					for (i32 cel_x = cel_left; cel_x < cel_right; ++cel_x) {
						i32 image_x = image_left + cel_x;
						u32 cel_pixel = cel->pixels[cel_x + cel_stride];
						u32 *image_pixel = &res.image.pixels[image_x + image_stride];
						*image_pixel = blend(cel_pixel, *image_pixel, opacity);
					}
				}
			}
		}
	}

	/* ASSERT all data was read */
	ASSERT(br_bytes_left(&br) == 0);

abort:

	scratch_end(scratch);
	return res;
}
/* ========================== *
 * Decode sheet
 * ========================== */

/* Decodes only the sheet METADATA of a .ase file — frame rectangles and
 * durations, tag spans, and slices — without touching pixel data. All
 * returned lists are allocated on `arena` and are in reverse file order.
 * NOTE(review): unlike ase_decode_image, this does not validate
 * ase_header.magic — confirm callers only pass files already vetted. */
struct ase_decode_sheet_result ase_decode_sheet(struct arena *arena, struct buffer encoded)
{
	__prof;

	struct ase_decode_sheet_result res = { 0 };

	struct byte_reader br = br_create_from_buffer(encoded);
	struct ase_header ase_header;
	br_read_to_struct(&br, &ase_header);

	u64 frame_width = ase_header.width;
	u64 frame_height = ase_header.height;

	/* Must mirror the layout chosen by ase_decode_image so frame rects line
	 * up with the generated sheet image. */
	u32 frames_x = ase_header.frames;
	u32 frames_y = 1;
	u64 image_width = frame_width * frames_x;
	u64 image_height = frame_height * frames_y;
	make_image_dimensions_squareish(&ase_header, &frames_x, &frames_y, &image_width, &image_height);

	u32 num_frames = 0;
	struct ase_frame *frame_head = NULL;

	u32 num_spans = 0;
	struct ase_span *span_head = NULL;

	u32 num_slice_keys = 0;
	struct ase_slice_key *slice_key_head = NULL;

	/* Iterate frames */
	for (u16 i = 0; i < ase_header.frames; ++i) {
		struct frame_header frame_header;
		br_read_to_struct(&br, &frame_header);

		/* chunks_new == 0 means the legacy 16-bit count is authoritative. */
		u32 num_chunks = frame_header.chunks_new;
		if (num_chunks == 0) {
			ASSERT(frame_header.chunks_old != 0xFFFF);
			num_chunks = frame_header.chunks_old;
		}

		struct ase_frame *frame = arena_push_zero(arena, struct ase_frame);
		frame->next = frame_head;
		frame_head = frame;

		frame->index = i;
		frame->duration = frame_header.frame_duration_ms / 1000.0;

		/* Pixel rectangle of this frame within the assembled sheet. */
		u32 frame_tile_x = i % frames_x;
		u32 frame_tile_y = i / frames_x;

		u32 frame_x1 = frame_tile_x * frame_width;
		u32 frame_y1 = frame_tile_y * frame_height;
		u32 frame_x2 = frame_x1 + frame_width;
		u32 frame_y2 = frame_y1 + frame_height;
		frame->x1 = frame_x1;
		frame->y1 = frame_y1;
		frame->x2 = frame_x2;
		frame->y2 = frame_y2;

		/* NOTE(review): index/duration were already assigned above — this
		 * repeat is redundant (harmless, same values). */
		frame->index = i;
		frame->duration = frame_header.frame_duration_ms / 1000.0;

		/* Iterate chunks in frame */
		for (u32 j = 0; j < num_chunks; ++j) {
			u32 chunk_size = br_read_u32(&br);
			enum chunk_type chunk_type = br_read_u16(&br);

			/* Chunk size includes size & type */
			ASSERT(chunk_size >= 6);
			chunk_size -= 6;

			u64 chunk_end_pos = br_pos(&br) + chunk_size;

			switch (chunk_type) {
			case CHUNK_TYPE_TAGS: {
				u16 frame_span_count = br_read_u16(&br);
				/* Skip 8 reserved bytes after the tag count. */
				br_seek(&br, 8);

				for (u16 k = 0; k < frame_span_count; ++k) {
					struct ase_span *span = arena_push_zero(arena, struct ase_span);
					span->next = span_head;
					span_head = span;

					span->start = br_read_u16(&br);
					span->end = br_read_u16(&br);
					/* Skip loop direction, repeat count, reserved and
					 * tag color fields. */
					br_seek(&br, 13);

					/* Tag name: u16 length + raw bytes, copied to arena. */
					u16 str_len = br_read_u16(&br);
					u8 *str_bytes = br_read_raw(&br, str_len);
					span->name = (struct string) {
						str_len,
						arena_push_array(arena, u8, str_len)
					};
					MEMCPY(span->name.text, str_bytes, str_len);
					++num_spans;
				}

			} break;

			case CHUNK_TYPE_SLICE: {
				struct ase_slice_key *slice_key = arena_push_zero(arena, struct ase_slice_key);
				slice_key->next = slice_key_head;
				slice_key_head = slice_key;

				u32 num_slices = br_read_u32(&br);
				slice_key->num_slices = num_slices;

				/* flags bit 0: has 9-patch data; bit 1: has pivot data. */
				u32 flags = br_read_u32(&br);
				br_seek(&br, 4);

				/* Slice name: u16 length + raw bytes, copied to arena. */
				struct string name;
				{
					u16 str_len = br_read_u16(&br);
					u8 *str_bytes = br_read_raw(&br, str_len);
					name = (struct string) {
						str_len,
						arena_push_array(arena, u8, str_len)
					};
					MEMCPY(name.text, str_bytes, str_len);
				}
				slice_key->name = name;

				for (u32 k = 0; k < num_slices; ++k) {
					struct ase_slice *slice = arena_push_zero(arena, struct ase_slice);
					slice->next = slice_key->slice_head;
					slice_key->slice_head = slice;

					u32 start = br_read_u32(&br);
					i32 x = br_read_i32(&br);
					i32 y = br_read_i32(&br);
					u32 width = br_read_u32(&br);
					u32 height = br_read_u32(&br);
					if (flags & 0x01) {
						/* Skip 9-patches info */
						/* NOTE(review): the spec lists 16 bytes of 9-patch
						 * data (4 LONGs); 128 looks like a bit count —
						 * verify br_seek's unit, other calls pass bytes. */
						br_seek(&br, 128);
					}
					if (flags & 0x02) {
						/* Skip pivot info */
						/* NOTE(review): spec lists 8 bytes (2 LONGs); 64
						 * looks like a bit count — verify as above. */
						br_seek(&br, 64);
					}

					slice->start = start;
					slice->x1 = x;
					slice->y1 = y;
					slice->x2 = x + width;
					slice->y2 = y + height;
				}

				++num_slice_keys;
			} break;

			/* TODO */
			//case CHUNK_TYPE_USER_DATA

			default: {
				br_seek_to(&br, chunk_end_pos);
			} break;
			}
		}
		++num_frames;
	}

	/* ASSERT all data was read */
	ASSERT(br_bytes_left(&br) == 0);

	res.image_size = V2(image_width, image_height);
	res.frame_size = V2(frame_width, frame_height);
	res.num_frames = num_frames;
	res.num_spans = num_spans;
	res.num_slice_keys = num_slice_keys;
	res.frame_head = frame_head;
	res.span_head = span_head;
	res.slice_key_head = slice_key_head;

	return res;
}