power_play/src/ase/ase_core.c
2025-07-30 15:58:38 -05:00

977 lines
31 KiB
C

/* ========================== *
* Aseprite (.ase) file parser
*
* DEFLATE decoder based on Handmade Hero's png parser
* ========================== */
/* Bit-level reader over a byte buffer; bits are consumed LSB-first,
 * as DEFLATE requires. */
struct huff_bb {
    u8 *data;    /* Underlying compressed byte stream. */
    u64 cur_bit; /* Absolute bit cursor from the start of data. */
};
/* Peek at the next nbits (LSB-first) of the stream without consuming them.
 * nbits must be in [0, 32]. Up to 8 bytes may be read from the buffer at
 * the cursor, so the caller must guarantee readable bytes past stream end. */
internal u32 peek_bits(struct huff_bb *bb, u32 nbits)
{
    Assert(nbits <= 32);
    u64 cur_byte = bb->cur_bit >> 3;
    u8 bit_index = bb->cur_bit % 8;
    u64 nbytes = (nbits + bit_index + 7) >> 3;
    u64 val64 = 0;
    CopyBytes(&val64, &bb->data[cur_byte], nbytes);
    /* Build the mask in 64-bit: the old `U32Max >> (32 - nbits)` was an
     * undefined 32-bit shift when nbits == 0. */
    u32 mask = (u32)((1ull << nbits) - 1);
    return (u32)(val64 >> bit_index) & mask;
}
/* Read nbits from the stream (LSB-first) and advance the cursor. */
internal u32 consume_bits(struct huff_bb *bb, u32 nbits)
{
    u32 bits = peek_bits(bb, nbits);
    bb->cur_bit += nbits;
    return bits;
}
/* Advance the read cursor by nbits without decoding anything. */
internal void skip_bits(struct huff_bb *bb, u32 nbits)
{
    bb->cur_bit += (u64)nbits;
}
/* ========================== *
* Inflate
* ========================== */
/* Max DEFLATE code length is 15 bits; length histograms are sized one past. */
#define HUFFMAN_BIT_COUNT 16
/* DEFLATE block types (RFC 1951 BTYPE field). */
enum block_type {
    BLOCK_TYPE_UNCOMPRESSED = 0,
    BLOCK_TYPE_COMPRESSED_FIXED = 1,   /* Fixed Huffman tables. */
    BLOCK_TYPE_COMPRESSED_DYNAMIC = 2, /* Tables encoded in the stream. */
    BLOCK_TYPE_RESERVED = 3            /* Invalid. */
};
/* One slot of the flat decode table (also reused for the length/distance
 * base tables below, where symbol = base value, bits_used = extra bits). */
struct huffman_entry {
    u16 symbol;    /* Decoded symbol. */
    u16 bits_used; /* Actual code length; 0 marks an unused slot. */
};
/* Flat single-lookup Huffman decode table: 2^max_code_bits entries indexed
 * directly by the next max_code_bits of input (short codes are replicated). */
struct huffman {
    u32 max_code_bits;
    u32 entries_count; /* Always 1 << max_code_bits. */
    struct huffman_entry *entries;
};
/* Order in which the code-length alphabet's code lengths are stored in a
 * dynamic block (RFC 1951 section 3.2.7). */
Global Readonly u32 g_hclen_order[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
};
/* Base match length and extra-bit count for length codes 257-285
 * (RFC 1951 section 3.2.5). Indexed by (code - 257); symbol = base length,
 * bits_used = number of extra bits to read and add. */
Global Readonly struct huffman_entry g_length_table[] = {
    {3, 0}, /* 257 */
    {4, 0}, /* 258 */
    {5, 0}, /* 259 */
    {6, 0}, /* 260 */
    {7, 0}, /* 261 */
    {8, 0}, /* 262 */
    {9, 0}, /* 263 */
    {10, 0}, /* 264 */
    {11, 1}, /* 265 */
    {13, 1}, /* 266 */
    {15, 1}, /* 267 */
    {17, 1}, /* 268 */
    {19, 2}, /* 269 */
    {23, 2}, /* 270 */
    {27, 2}, /* 271 */
    {31, 2}, /* 272 */
    {35, 3}, /* 273 */
    {43, 3}, /* 274 */
    {51, 3}, /* 275 */
    {59, 3}, /* 276 */
    {67, 4}, /* 277 */
    {83, 4}, /* 278 */
    {99, 4}, /* 279 */
    {115, 4}, /* 280 */
    {131, 5}, /* 281 */
    {163, 5}, /* 282 */
    {195, 5}, /* 283 */
    {227, 5}, /* 284 */
    {258, 0}, /* 285 */
};
/* Base distance and extra-bit count for distance codes 0-29
 * (RFC 1951 section 3.2.5). symbol = base distance, bits_used = extra bits. */
Global Readonly struct huffman_entry g_dist_table[] = {
    {1, 0}, /* 0 */
    {2, 0}, /* 1 */
    {3, 0}, /* 2 */
    {4, 0}, /* 3 */
    {5, 1}, /* 4 */
    {7, 1}, /* 5 */
    {9, 2}, /* 6 */
    {13, 2}, /* 7 */
    {17, 3}, /* 8 */
    {25, 3}, /* 9 */
    {33, 4}, /* 10 */
    {49, 4}, /* 11 */
    {65, 5}, /* 12 */
    {97, 5}, /* 13 */
    {129, 6}, /* 14 */
    {193, 6}, /* 15 */
    {257, 7}, /* 16 */
    {385, 7}, /* 17 */
    {513, 8}, /* 18 */
    {769, 8}, /* 19 */
    {1025, 9}, /* 20 */
    {1537, 9}, /* 21 */
    {2049, 10}, /* 22 */
    {3073, 10}, /* 23 */
    {4097, 11}, /* 24 */
    {6145, 11}, /* 25 */
    {8193, 12}, /* 26 */
    {12289, 12}, /* 27 */
    {16385, 13}, /* 28 */
    {24577, 13}, /* 29 */
};
/* Fixed-table code lengths (RFC 1951 section 3.2.6) as runs of
 * {last symbol index, bit count}: lit/len symbols 0-143 use 8 bits,
 * 144-255 use 9, 256-279 use 7, 280-287 use 8; entries 288-319 are the
 * 32 distance codes, all 5 bits. */
Global Readonly u32 g_fixed_bl_counts[][2] = {
    {143, 8},
    {255, 9},
    {279, 7},
    {287, 8},
    {319, 5},
};
/* Reverse the low bit_count bits of v (Huffman codes are defined MSB-first
 * but the stream is read LSB-first, so table indices must be mirrored).
 * 7 and 15 are by far the most common bit counts here, so both get a
 * branch-free swap-network path; everything else uses a generic loop. */
internal u32 reverse_bits(u32 v, u32 bit_count)
{
    if (bit_count == 15) {
        /* Reverse each byte with a swap network, then stitch the two
         * reversed halves back together as a 15-bit value. */
        u32 lo = v & 0xFF;
        u32 hi = (v >> 8) & 0xFF;
        lo = ((lo & 0xF0) >> 4) | ((lo & 0x0F) << 4);
        lo = ((lo & 0xCC) >> 2) | ((lo & 0x33) << 2);
        lo = ((lo & 0xAA) >> 1) | ((lo & 0x55) << 1);
        hi = ((hi & 0xF0) >> 4) | ((hi & 0x0F) << 4);
        hi = ((hi & 0xCC) >> 2) | ((hi & 0x33) << 2);
        hi = ((hi & 0xAA) >> 1) | ((hi & 0x55) << 1);
        return (lo << 7) | (hi >> 1);
    } else if (bit_count == 7) {
        /* 8-bit swap network, then drop the extra low bit. */
        v = ((v & 0xF0) >> 4) | ((v & 0x0F) << 4);
        v = ((v & 0xCC) >> 2) | ((v & 0x33) << 2);
        v = ((v & 0xAA) >> 1) | ((v & 0x55) << 1);
        return v >> 1;
    } else {
        /* Generic path: mirror bit i and bit (bit_count-1-i) pairwise. */
        u32 out = 0;
        for (u32 i = 0; i <= (bit_count / 2); ++i) {
            u32 j = (bit_count - (i + 1));
            out |= ((v >> i) & 1u) << j;
            out |= ((v >> j) & 1u) << i;
        }
        return out;
    }
}
/* Build a flat lookup table for canonical Huffman decoding (RFC 1951
 * section 3.2.2). bl_counts[i] is the code length in bits of symbol i
 * (0 = symbol unused). The table has 2^max_code_bits entries so decoding
 * is a single peek_bits + lookup; each code shorter than max_code_bits is
 * replicated across all entries sharing its prefix. */
internal struct huffman huffman_init(Arena *arena, u32 max_code_bits, u32 *bl_counts, u32 bl_counts_count)
{
    __prof;
    struct huffman result = ZI;
    result.max_code_bits = max_code_bits;
    result.entries_count = (1 << max_code_bits);
    result.entries = PushStructsNoZero(arena, struct huffman_entry, result.entries_count);
    /* Histogram of code lengths. */
    u32 code_length_hist[HUFFMAN_BIT_COUNT] = ZI;
    for (u32 i = 0; i < bl_counts_count; ++i) {
        u32 count = bl_counts[i];
        /* Must be a strict `<`: the old `<=` let count == HUFFMAN_BIT_COUNT
         * pass and index one element past the end of the histogram. */
        Assert(count < countof(code_length_hist));
        ++code_length_hist[count];
    }
    /* Compute the first canonical code of each length (RFC 1951 algorithm). */
    u32 next_code[HUFFMAN_BIT_COUNT] = ZI;
    next_code[0] = 0;
    code_length_hist[0] = 0;
    for (u32 i = 1; i < countof(next_code); ++i) {
        next_code[i] = ((next_code[i - 1] + code_length_hist[i - 1]) << 1);
    }
    /* Assign codes in symbol order and fill every table slot whose
     * (bit-reversed) low bits match the code. */
    for (u32 i = 0; i < bl_counts_count; ++i) {
        u32 code_bits = bl_counts[i];
        if (code_bits) {
            Assert(code_bits < countof(next_code));
            u32 code = next_code[code_bits]++;
            u32 arbitrary_bits = result.max_code_bits - code_bits;
            u32 entry_count = (1 << arbitrary_bits);
            for (u32 entry_index = 0; entry_index < entry_count; ++entry_index) {
                /* TODO: Optimize this. It's bloating up the loading times. */
                u32 base_index = (code << arbitrary_bits) | entry_index;
                u32 index = reverse_bits(base_index, result.max_code_bits);
                struct huffman_entry *entry = &result.entries[index];
                entry->symbol = (u16)i;
                entry->bits_used = (u16)code_bits;
            }
        }
    }
    return result;
}
/* Decode one symbol: peek max_code_bits, look up the flat table entry,
 * then consume only the bits the matched code actually used. */
internal u16 huffman_decode(struct huffman *huffman, struct huff_bb *bb)
{
    u32 index = peek_bits(bb, huffman->max_code_bits);
    Assert(index < huffman->entries_count);
    struct huffman_entry *entry = &huffman->entries[index];
    /* bits_used == 0 means this bit pattern maps to no code (corrupt
     * stream). Check before advancing so the assert fires with the bit
     * cursor still pointing at the offending code. */
    Assert(entry->bits_used > 0);
    u16 result = entry->symbol;
    skip_bits(bb, entry->bits_used);
    return result;
}
/* Decompress a zlib (RFC 1950) stream containing DEFLATE (RFC 1951) data.
 * `encoded` points at the zlib header; decompressed bytes are written
 * sequentially to `dst`, which the caller must have sized for the full
 * decompressed output — no bounds are checked here.
 * Based on Handmade Hero's PNG parser. */
internal void inflate(u8 *dst, u8 *encoded)
{
    TempArena scratch = BeginScratchNoConflict();
    __prof;
    struct huff_bb bb = { .data = encoded };
    /* ZLIB header: CM must be 8 (deflate), CINFO 7 (32K window). */
    u32 cm = consume_bits(&bb, 4);
    u32 cinfo = consume_bits(&bb, 4);
    Assert(cm == 8);
    Assert(cinfo == 7);
    u32 fcheck = consume_bits(&bb, 5);
    u32 fdict = consume_bits(&bb, 1);
    u32 flevl = consume_bits(&bb, 2);
    Assert(fdict == 0); /* Preset dictionaries are not supported. */
    u8 cmf = (u8)(cm | (cinfo << 4));
    u8 flg = fcheck | (fdict << 5) | (flevl << 6);
    (UNUSED)cmf;
    (UNUSED)flg;
    Assert(((cmf * 256) + flg) % 31 == 0); /* Header check value. */
    u8 bfinal = 0;
    while (!bfinal) {
        bfinal = consume_bits(&bb, 1);
        u8 btype = consume_bits(&bb, 2);
        switch (btype) {
        case BLOCK_TYPE_UNCOMPRESSED: {
            /* Stored block: align to the next byte boundary, then
             * LEN/NLEN followed by LEN raw bytes. LEN is an unsigned
             * 16-bit count — the previous i16 truncated blocks larger
             * than 32767 bytes and the copy loop never ran. */
            skip_bits(&bb, (8 - (bb.cur_bit % 8)) % 8);
            u32 len = consume_bits(&bb, 16);
            u32 nlen = consume_bits(&bb, 16);
            Assert((len ^ nlen) == 0xFFFF); /* NLEN is LEN's one's complement. */
            (UNUSED)nlen;
            while (len--) {
                *dst++ = consume_bits(&bb, 8);
            }
        } break;
        case BLOCK_TYPE_COMPRESSED_FIXED:
        case BLOCK_TYPE_COMPRESSED_DYNAMIC: {
            TempArena temp = BeginTempArena(scratch.arena);
            /* Code lengths for the literal/length alphabet followed by the
             * distance alphabet (at most 288 + 32 entries). */
            u32 lit_len_dist_table[512] = ZI;
            u32 hlit;
            u32 hdist;
            if (btype == BLOCK_TYPE_COMPRESSED_DYNAMIC) {
                /* Dynamic tables: the code lengths are themselves Huffman
                 * coded (RFC 1951 section 3.2.7). */
                hlit = consume_bits(&bb, 5) + 257;
                hdist = consume_bits(&bb, 5) + 1;
                u32 hclen = consume_bits(&bb, 4) + 4;
                /* Init dict huffman (hclen) */
                u32 hclen_bl_counts[19] = ZI;
                for (u32 i = 0; i < hclen; ++i) {
                    u32 code = g_hclen_order[i];
                    hclen_bl_counts[code] = consume_bits(&bb, 3);
                }
                struct huffman dict_huffman = huffman_init(temp.arena, 7, hclen_bl_counts, countof(hclen_bl_counts));
                /* Decode the run-length-encoded code lengths. */
                u32 lit_len_count = 0;
                u32 len_count = hlit + hdist;
                Assert(len_count <= countof(lit_len_dist_table));
                while (lit_len_count < len_count) {
                    u32 rep_count = 1;
                    u32 rep_val = 0;
                    u32 encoded_len = huffman_decode(&dict_huffman, &bb);
                    if (encoded_len <= 15) {
                        rep_val = encoded_len; /* Literal code length. */
                    } else if (encoded_len == 16) {
                        /* Repeat the previous length 3-6 times. */
                        rep_count = 3 + consume_bits(&bb, 2);
                        Assert(lit_len_count > 0);
                        rep_val = lit_len_dist_table[lit_len_count - 1];
                    } else if (encoded_len == 17) {
                        /* 3-10 zero lengths. */
                        rep_count = 3 + consume_bits(&bb, 3);
                    } else if (encoded_len == 18) {
                        /* 11-138 zero lengths. */
                        rep_count = 11 + consume_bits(&bb, 7);
                    } else {
                        /* Invalid len */
                        Assert(0);
                    }
                    while (rep_count--) {
                        lit_len_dist_table[lit_len_count++] = rep_val;
                    }
                }
                Assert(lit_len_count == len_count);
            } else {
                /* Fixed tables (RFC 1951 section 3.2.6): expand the
                 * run-encoded g_fixed_bl_counts into per-symbol lengths. */
                hlit = 288;
                hdist = 32;
                u32 index = 0;
                for (u32 i = 0; i < countof(g_fixed_bl_counts); ++i) {
                    u32 bit_count = g_fixed_bl_counts[i][1];
                    u32 last_value = g_fixed_bl_counts[i][0];
                    while (index <= last_value) {
                        lit_len_dist_table[index++] = bit_count;
                    }
                }
            }
            /* Decode the block's symbol stream. */
            struct huffman lit_len_huffman = huffman_init(temp.arena, 15, lit_len_dist_table, hlit);
            struct huffman dist_huffman = huffman_init(temp.arena, 15, lit_len_dist_table + hlit, hdist);
            for (;;) {
                u32 lit_len = huffman_decode(&lit_len_huffman, &bb);
                if (lit_len <= 255) {
                    /* Literal byte. */
                    *dst++ = lit_len & 0xFF;
                } else if (lit_len >= 257) {
                    /* Length/distance back-reference. */
                    u32 length_index = (lit_len - 257);
                    struct huffman_entry length_entry = g_length_table[length_index];
                    u32 length = length_entry.symbol;
                    if (length_entry.bits_used > 0) {
                        u32 extra_bits = consume_bits(&bb, length_entry.bits_used);
                        length += extra_bits;
                    }
                    u32 dist_index = huffman_decode(&dist_huffman, &bb);
                    struct huffman_entry dist_entry = g_dist_table[dist_index];
                    u32 distance = dist_entry.symbol;
                    if (dist_entry.bits_used > 0) {
                        u32 extra_bits = consume_bits(&bb, dist_entry.bits_used);
                        distance += extra_bits;
                    }
                    /* Byte-by-byte copy is required: the source range may
                     * overlap dst (distance < length repeats the tail). */
                    u8 *src = dst - distance;
                    while (length--) {
                        *dst++ = *src++;
                    }
                } else {
                    /* 256 = end of block. */
                    break;
                }
            }
            EndTempArena(temp);
        } break;
        case BLOCK_TYPE_RESERVED: {
            /* TODO */
            Assert(0);
        } break;
        }
    }
    EndScratch(scratch);
}
/* ========================== *
* Decoder structs
* ========================== */
/* Chunk identifiers from the Aseprite file spec. Only LAYER, CEL, TAGS
 * and SLICE are consumed by the decoders below; everything else is
 * skipped via the chunk size. */
enum chunk_type {
    CHUNK_TYPE_OLD_PALETTE1 = 0x0004,
    CHUNK_TYPE_OLD_PALETTE2 = 0x0011,
    CHUNK_TYPE_LAYER = 0x2004,
    CHUNK_TYPE_CEL = 0x2005,
    CHUNK_TYPE_CEL_EXTRA = 0x2006,
    CHUNK_TYPE_COLOR_PROFILE = 0x2007,
    CHUNK_TYPE_EXTERNAL_FILES = 0x2008,
    CHUNK_TYPE_MASK = 0x2016,
    CHUNK_TYPE_PATH = 0x2017,
    CHUNK_TYPE_TAGS = 0x2018,
    CHUNK_TYPE_PALETTE = 0x2019,
    CHUNK_TYPE_USER_DATA = 0x2020,
    CHUNK_TYPE_SLICE = 0x2022,
    CHUNK_TYPE_TILESET = 0x2023
};
/* Cel storage types (cel chunk 0x2005). */
enum cel_type {
    CEL_TYPE_RAW_IMAGE = 0,         /* Uncompressed pixels (unsupported here). */
    CEL_TYPE_LINKED = 1,            /* Reuses the cel of an earlier frame. */
    CEL_TYPE_COMPRESSED_IMAGE = 2,  /* zlib-deflated RGBA pixels. */
    CEL_TYPE_COMPRESSED_TILEMAP = 3 /* Unsupported here. */
};
/* 128-byte .ase file header, little-endian, mapped 1:1 onto the file. */
Packed(struct ase_header {
    u32 file_size;
    u16 magic;        /* Must be 0xA5E0. */
    u16 frames;
    u16 width;        /* Frame size in pixels. */
    u16 height;
    u16 color_depth;  /* Bits per pixel; only 32 (RGBA) is decoded here. */
    u32 flags;        /* Bit 0: layer opacity field is valid. */
    u16 speed;        /* Deprecated frame speed. */
    u32 _1;
    u32 _2;
    u8 palette_entry; /* Transparent color index (indexed mode only). */
    u8 _3[3];
    u16 num_colors;
    u8 pixel_width;   /* Pixel aspect ratio numerator/denominator. */
    u8 pixel_height;
    i16 grid_x;
    i16 grid_y;
    u16 grid_width;
    u16 grid_height;
    u8 _4[84];        /* Reserved. */
});
/* 16-byte header preceding each frame's chunk list. */
Packed(struct frame_header {
    u32 bytes;             /* Frame size in bytes, header included. */
    u16 magic;             /* 0xF1FA per spec — not validated by the decoders. */
    u16 chunks_old;        /* Legacy chunk count; 0xFFFF when it overflowed. */
    u16 frame_duration_ms;
    u8 _[2];
    u32 chunks_new;        /* Chunk count; 0 means fall back to chunks_old. */
});
/* Append an error to the list; msg_src is duplicated into `arena`, so the
 * caller may pass a transient string. */
internal void push_error_copy_msg(Arena *arena, Ase_ErrorList *list, String msg_src)
{
    Ase_Error *error = PushStruct(arena, Ase_Error);
    error->msg = CopyString(arena, msg_src);
    if (list->last) {
        list->last->next = error;
    } else {
        list->first = error;
    }
    list->last = error;
    ++list->count;
}
/* ========================== *
* Image decoder
* ========================== */
/* Parsed layer chunk (0x2004). Layers are pushed onto a reversed linked
 * list during decode; `index` records file order for re-sorting. */
struct layer {
    u16 flags;         /* Bit 0 = visible; checked when compositing. */
    u16 type;          /* 2 = tilemap layer (then tileset_index is read). */
    u16 child_level;
    u16 blend_mode;    /* Only 0 ("Normal") is supported by the decoder. */
    u8 opacity;
    String name;
    u32 tileset_index;
    u32 index;         /* Order of appearance in the file. */
    struct layer *next;
};
/* Parsed cel chunk (0x2005): one layer's pixels within one frame.
 * The fields under each sub-heading are only valid for that cel_type
 * (linked cels get width/height/pixels filled in after all frames are
 * read, by copying from the referenced cel). */
struct cel {
    u16 layer_index;
    i16 x_pos;       /* Cel offset within its frame; may be negative. */
    i16 y_pos;
    u8 opacity;
    enum cel_type type;
    i16 z_index;     /* NOTE(review): parsed but not used when compositing. */
    /* Linked cel */
    u16 frame_pos;   /* Earlier frame whose cel this one reuses. */
    /* Compressed image */
    u32 width;
    u32 height;
    u32 *pixels;     /* RGBA, width*height entries, filled by inflate(). */
    u16 frame_index; /* Frame this cel belongs to. */
    struct cel *next;
};
/* Approximate (a * b) / 255 with correct rounding using only shifts.
 * Taken from
 * https://github.com/RandyGaul/cute_headers/blob/master/cute_aseprite.h#L870 */
internal u32 mul_u8(u32 a, u32 b)
{
    u32 t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;
}
/* Alpha-composite src over dst; both are packed as R,G,B,A from low byte
 * to high. `opacity` additionally scales the source alpha (cel * layer
 * opacity). Returns the packed blended pixel.
 * Ported from cute_aseprite, which does this math in signed ints. */
internal u32 blend(u32 src, u32 dst, u8 opacity)
{
    i32 dst_r = (i32)(dst & 0xff);
    i32 dst_g = (i32)((dst >> 8) & 0xff);
    i32 dst_b = (i32)((dst >> 16) & 0xff);
    i32 dst_a = (i32)((dst >> 24) & 0xff);
    i32 src_r = (i32)(src & 0xff);
    i32 src_g = (i32)((src >> 8) & 0xff);
    i32 src_b = (i32)((src >> 16) & 0xff);
    i32 src_a = (i32)((src >> 24) & 0xff);
    src_a = (i32)(u8)mul_u8((u32)src_a, opacity);
    i32 a = src_a + dst_a - (i32)mul_u8((u32)src_a, (u32)dst_a);
    i32 r, g, b;
    if (a == 0) {
        r = g = b = 0;
    } else {
        /* The channel deltas can be negative. Doing this in u32 made
         * (src_x - dst_x) wrap, and the following *unsigned* division
         * produced huge quotients that overflowed the 8-bit channel and
         * bled into the other channels of the packed result. Signed
         * arithmetic keeps each channel in [0, 255]. */
        r = dst_r + (src_r - dst_r) * src_a / a;
        g = dst_g + (src_g - dst_g) * src_a / a;
        b = dst_b + (src_b - dst_b) * src_a / a;
    }
    return (u32)r | ((u32)g << 8) | ((u32)b << 16) | ((u32)a << 24);
}
/* Distribute the animation's frames across multiple rows so the packed
 * sheet is as close to square as possible. Greedily removes columns while
 * the sheet remains at least as wide as it is tall; stops just before it
 * would become taller than wide. */
internal void make_image_dimensions_squareish(struct ase_header *header, u32 *frames_x, u32 *frames_y, u64 *image_width, u64 *image_height)
{
    while (*frames_x > 1) {
        u64 cols = *frames_x - 1;
        u64 rows = ((header->frames - 1) / cols) + 1; /* ceil(frames / cols) */
        u64 width = header->width * cols;
        u64 height = header->height * rows;
        if (width < height) {
            /* One column fewer would tip it past square — keep current. */
            break;
        }
        *frames_x = cols;
        *frames_y = rows;
        *image_width = width;
        *image_height = height;
    }
}
/* Decode a .ase file into a single composited RGBA sprite sheet.
 * Frames are packed left-to-right, top-to-bottom into a near-square grid
 * (see make_image_dimensions_squareish); all visible layers of each frame
 * are alpha-blended together. Only 32-bit RGBA files whose layers all use
 * the "Normal" blend mode are supported; anything else records an error
 * in result.errors and returns with result.success == 0. */
Ase_DecodedImage ase_decode_image(Arena *arena, String encoded)
{
    __prof;
    TempArena scratch = BeginScratch(arena);
    Ase_DecodedImage result = ZI;
    BB_Buff bb = BitbuffFromString(encoded);
    BB_Reader br = BB_ReaderFromBuffNoDebug(&bb);
    struct ase_header ase_header;
    BB_ReadBytes(&br, StringFromStruct(&ase_header));
    if (ase_header.magic != 0xA5E0) {
        push_error_copy_msg(arena, &result.errors, Lit("Not a valid aseprite file"));
        goto abort;
    }
    if (ase_header.color_depth != 32) {
        String msg = StringFormat(scratch.arena,
            Lit("Only 32 bit rgba color mode is supported (got %F)"),
            FmtUint(ase_header.color_depth));
        push_error_copy_msg(arena, &result.errors, msg);
        goto abort;
    }
    /* Start with all frames in a single row, then fold into rows. */
    u64 frame_width = ase_header.width;
    u64 frame_height = ase_header.height;
    u32 frames_x = ase_header.frames;
    u32 frames_y = 1;
    u64 image_width = frame_width * frames_x;
    u64 image_height = frame_height * frames_y;
    make_image_dimensions_squareish(&ase_header, &frames_x, &frames_y, &image_width, &image_height);
    result.width = image_width;
    result.height = image_height;
    /* TODO: Optimize this. Naive memset(0) is bloating the decode time for large images. */
    result.pixels = PushStructs(arena, u32, image_width * image_height);
    u32 num_layers = 0;
    struct layer *layer_head = 0; /* Reversed list; order restored via ->index. */
    struct cel *cel_head = 0;     /* Kept in file order (tail-appended). */
    struct cel *cel_tail = 0;
    /* Iterate frames */
    u32 num_frames = 0;
    for (u16 i = 0; i < ase_header.frames; ++i) {
        struct frame_header frame_header;
        BB_ReadBytes(&br, StringFromStruct(&frame_header));
        u32 num_chunks = frame_header.chunks_new;
        if (num_chunks == 0) {
            /* Fall back to the legacy 16-bit chunk count. */
            Assert(frame_header.chunks_old != 0xFFFF);
            num_chunks = frame_header.chunks_old;
        }
        /* Iterate chunks in frame */
        for (u32 j = 0; j < num_chunks; ++j) {
            u32 chunk_size = BB_ReadUBits(&br, 32);
            enum chunk_type chunk_type = BB_ReadUBits(&br, 16);
            /* Chunk size includes size & type */
            Assert(chunk_size >= 6);
            chunk_size -= 6;
            u64 chunk_end_pos = BB_GetCurrentReaderByte(&br) + chunk_size;
            switch (chunk_type) {
            case CHUNK_TYPE_LAYER: {
                struct layer *layer = PushStruct(scratch.arena, struct layer);
                layer->next = layer_head;
                layer_head = layer;
                layer->flags = BB_ReadUBits(&br, 16);
                layer->type = BB_ReadUBits(&br, 16);
                layer->child_level = BB_ReadUBits(&br, 16);
                /* Ignoring layer default width & height */
                BB_SeekBytes(&br, sizeof(u16) * 2);
                layer->blend_mode = BB_ReadUBits(&br, 16);
                if (layer->blend_mode != 0) {
                    push_error_copy_msg(arena,
                        &result.errors,
                        Lit("Layer has unsupported blend mode (only 'Normal' mode is supported). Tip: Try using 'merge down' to create a normal layer as a workaround"));
                    goto abort;
                }
                layer->opacity = BB_ReadUBits(&br, 8);
                /* Header flag bit 0 clear => opacity field invalid; treat
                 * the layer as fully opaque. */
                if (!(ase_header.flags & 1)) {
                    layer->opacity = 255;
                }
                BB_SeekBytes(&br, sizeof(u8) * 3); /* Reserved bytes. */
                u16 str_len = BB_ReadUBits(&br, 16);
                layer->name = (String) { str_len, PushStructsNoZero(scratch.arena, u8, str_len) };
                BB_ReadBytes(&br, layer->name);
                if (layer->type == 2) {
                    /* Tilemap layer carries a tileset reference. */
                    layer->tileset_index = BB_ReadUBits(&br, 32);
                }
                layer->index = num_layers++;
            } break;
            case CHUNK_TYPE_CEL: {
                struct cel *cel = PushStruct(scratch.arena, struct cel);
                if (cel_tail) {
                    cel_tail->next = cel;
                } else {
                    cel_head = cel;
                }
                cel_tail = cel;
                cel->layer_index = BB_ReadUBits(&br, 16);
                cel->x_pos = BB_ReadIBits(&br, 16);
                cel->y_pos = BB_ReadIBits(&br, 16);
                cel->opacity = BB_ReadUBits(&br, 8);
                cel->type = BB_ReadUBits(&br, 16);
                cel->z_index = BB_ReadIBits(&br, 16);
                BB_SeekBytes(&br, sizeof(u8) * 5); /* Reserved bytes. */
                cel->frame_index = num_frames;
                switch (cel->type) {
                case CEL_TYPE_RAW_IMAGE: {
                    /* Unsupported */
                    BB_SeekToByte(&br, chunk_end_pos);
                } break;
                case CEL_TYPE_LINKED: {
                    cel->frame_pos = BB_ReadUBits(&br, 16);
                    /* Actual linking happens later after iteration */
                } break;
                case CEL_TYPE_COMPRESSED_IMAGE: {
                    cel->width = BB_ReadUBits(&br, 16);
                    cel->height = BB_ReadUBits(&br, 16);
                    /* inflate() writes exactly width*height RGBA pixels. */
                    cel->pixels = PushStructsNoZero(scratch.arena, u32, cel->width * cel->height);
                    u8 *huffman_encoded = BB_ReadBytesRaw(&br, chunk_end_pos - BB_GetCurrentReaderByte(&br));
                    if (huffman_encoded) {
                        inflate((u8 *)cel->pixels, huffman_encoded);
                    }
                } break;
                case CEL_TYPE_COMPRESSED_TILEMAP: {
                    /* Unsupported */
                    push_error_copy_msg(arena, &result.errors, Lit("Tilemaps are not supported"));
                    goto abort;
                } break;
                }
            } break;
            default: {
                /* Skip chunks this decoder doesn't consume. */
                BB_SeekToByte(&br, chunk_end_pos);
            } break;
            }
        }
        ++num_frames;
    }
    /* Create ordered layers array */
    struct layer **layers_ordered = PushStructsNoZero(scratch.arena, struct layer *, num_layers);
    for (struct layer *layer = layer_head; layer; layer = layer->next) {
        layers_ordered[layer->index] = layer;
    }
    /* Link cels. Linked cels reference earlier frames and cels_ordered is
     * filled in file order, so the target is resolved by the time the
     * link is visited. */
    struct cel **cels_ordered = PushStructsNoZero(scratch.arena, struct cel *, num_frames * num_layers);
    for (struct cel *cel = cel_head; cel; cel = cel->next) {
        cels_ordered[(cel->frame_index * num_layers) + cel->layer_index] = cel;
        if (cel->type == CEL_TYPE_LINKED) {
            struct cel *ref_cel = cels_ordered[(cel->frame_pos * num_layers) + cel->layer_index];
            cel->width = ref_cel->width;
            cel->height = ref_cel->height;
            cel->pixels = ref_cel->pixels;
        }
    }
    {
        __profn("Build image from cels");
        /* Assemble image from cels */
        for (struct cel *cel = cel_head; cel; cel = cel->next) {
            struct layer *layer = layers_ordered[cel->layer_index];
            /* Only draw visible layers */
            if (layer->flags & 1) {
                /* Combined cel * layer opacity, rescaled back to 0-255. */
                u8 opacity = (cel->opacity / 255.0f) * (layer->opacity / 255.0f) * 255.0f;
                i32 cel_width = cel->width;
                i32 cel_height = cel->height;
                i32 cel_left = 0;
                i32 cel_right = cel_width;
                i32 cel_top = 0;
                i32 cel_bottom = cel_height;
                i32 frame_left = cel->x_pos;
                i32 frame_top = cel->y_pos;
                /* Sheet-space origin of this cel (frame tile + cel offset). */
                i32 image_left = frame_left + ((cel->frame_index % frames_x) * frame_width);
                i32 image_top = frame_top + ((cel->frame_index / frames_x) * frame_height);
                /* Adjust bounds to ensure pixels outside of frame boundaries
                 * aren't (aseprite keeps chunks outside of frame around in
                 * project file). */
                {
                    i32 frame_right = cel_width + frame_left;
                    i32 frame_bottom = frame_top + cel_height;
                    if (frame_left < 0) {
                        cel_left += -frame_left;
                        frame_left = 0;
                    }
                    if (frame_top < 0) {
                        cel_top += -frame_top;
                        frame_top = 0;
                    }
                    if (frame_right > (i32)frame_width) {
                        cel_right -= (frame_right - frame_width);
                        frame_right = frame_width;
                    }
                    if (frame_bottom > (i32)frame_height) {
                        cel_bottom -= (frame_bottom - frame_height);
                        frame_bottom = frame_height;
                    }
                }
                /* Blend the clipped cel rect into the sheet. The negative
                 * frame_left/top offsets cancel against cel_left/top, so
                 * image_left + cel_x stays inside the sheet. */
                for (i32 cel_y = cel_top; cel_y < cel_bottom; ++cel_y) {
                    i32 image_y = image_top + cel_y;
                    i32 cel_stride = cel_y * cel_width;
                    /* NOTE(review): i32 stride caps sheets at ~2G pixels. */
                    i32 image_stride = image_y * image_width;
                    for (i32 cel_x = cel_left; cel_x < cel_right; ++cel_x) {
                        i32 image_x = image_left + cel_x;
                        u32 cel_pixel = cel->pixels[cel_x + cel_stride];
                        u32 *image_pixel = &result.pixels[image_x + image_stride];
                        *image_pixel = blend(cel_pixel, *image_pixel, opacity);
                    }
                }
            }
        }
    }
    /* Assert all data was read */
    Assert(BB_NumBytesRemaining(&br) == 0);
abort:
    if (result.errors.count <= 0) {
        result.success = 1;
    }
    EndScratch(scratch);
    return result;
}
/* ========================== *
* Decode sheet
* ========================== */
/* Decode only the sheet metadata of a .ase file: per-frame durations and
 * rects, tag spans, and slices — no pixel data is decompressed. The frame
 * grid layout mirrors ase_decode_image, so the returned rects index into
 * the image that function produces for the same file. */
Ase_DecodedSheet ase_decode_sheet(Arena *arena, String encoded)
{
    __prof;
    Ase_DecodedSheet result = ZI;
    BB_Buff bb = BitbuffFromString(encoded);
    BB_Reader br = BB_ReaderFromBuffNoDebug(&bb);
    struct ase_header ase_header;
    BB_ReadBytes(&br, StringFromStruct(&ase_header));
    u64 frame_width = ase_header.width;
    u64 frame_height = ase_header.height;
    u32 frames_x = ase_header.frames;
    u32 frames_y = 1;
    u64 image_width = frame_width * frames_x;
    u64 image_height = frame_height * frames_y;
    make_image_dimensions_squareish(&ase_header, &frames_x, &frames_y, &image_width, &image_height);
    u32 num_frames = 0;
    Ase_Frame *frame_head = 0;
    u32 num_spans = 0;
    Ase_Span *span_head = 0;
    u32 num_slice_keys = 0;
    Ase_SliceKey *slice_key_head = 0;
    /* Iterate frames */
    for (u16 i = 0; i < ase_header.frames; ++i) {
        struct frame_header frame_header;
        BB_ReadBytes(&br, StringFromStruct(&frame_header));
        u32 num_chunks = frame_header.chunks_new;
        if (num_chunks == 0) {
            /* Fall back to the legacy 16-bit chunk count. */
            Assert(frame_header.chunks_old != 0xFFFF);
            num_chunks = frame_header.chunks_old;
        }
        Ase_Frame *frame = PushStruct(arena, Ase_Frame);
        frame->next = frame_head;
        frame_head = frame;
        /* Frame rect within the squareish sheet layout. (index/duration
         * were previously assigned twice; once is enough.) */
        u32 frame_tile_x = i % frames_x;
        u32 frame_tile_y = i / frames_x;
        u32 frame_x1 = frame_tile_x * frame_width;
        u32 frame_y1 = frame_tile_y * frame_height;
        u32 frame_x2 = frame_x1 + frame_width;
        u32 frame_y2 = frame_y1 + frame_height;
        frame->x1 = frame_x1;
        frame->y1 = frame_y1;
        frame->x2 = frame_x2;
        frame->y2 = frame_y2;
        frame->index = i;
        frame->duration = frame_header.frame_duration_ms / 1000.0;
        /* Iterate chunks in frame */
        for (u32 j = 0; j < num_chunks; ++j) {
            u32 chunk_size = BB_ReadUBits(&br, 32);
            enum chunk_type chunk_type = BB_ReadUBits(&br, 16);
            /* Chunk size includes size & type */
            Assert(chunk_size >= 6);
            chunk_size -= 6;
            u64 chunk_end_pos = BB_GetCurrentReaderByte(&br) + chunk_size;
            switch (chunk_type) {
            case CHUNK_TYPE_TAGS: {
                u16 frame_span_count = BB_ReadUBits(&br, 16);
                BB_SeekBytes(&br, 8); /* Reserved. */
                for (u16 k = 0; k < frame_span_count; ++k) {
                    Ase_Span *span = PushStruct(arena, Ase_Span);
                    span->next = span_head;
                    span_head = span;
                    span->start = BB_ReadUBits(&br, 16);
                    span->end = BB_ReadUBits(&br, 16);
                    /* Skip loop direction, repeat count, reserved, tag
                     * color and extra byte (13 bytes total). */
                    BB_SeekBytes(&br, 13);
                    u16 str_len = BB_ReadUBits(&br, 16);
                    span->name = (String) { str_len, PushStructsNoZero(arena, u8, str_len) };
                    BB_ReadBytes(&br, span->name);
                    ++num_spans;
                }
            } break;
            case CHUNK_TYPE_SLICE: {
                Ase_SliceKey *slice_key = PushStruct(arena, Ase_SliceKey);
                slice_key->next = slice_key_head;
                slice_key_head = slice_key;
                u32 num_slices = BB_ReadUBits(&br, 32);
                slice_key->num_slices = num_slices;
                u32 flags = BB_ReadUBits(&br, 32);
                BB_SeekBytes(&br, 4); /* Reserved DWORD. */
                u16 str_len = BB_ReadUBits(&br, 16);
                slice_key->name = (String) { str_len, PushStructsNoZero(arena, u8, str_len) };
                BB_ReadBytes(&br, slice_key->name);
                for (u32 k = 0; k < num_slices; ++k) {
                    Ase_Slice *slice = PushStruct(arena, Ase_Slice);
                    slice->next = slice_key->slice_head;
                    slice_key->slice_head = slice;
                    u32 start = BB_ReadUBits(&br, 32);
                    i32 x = BB_ReadIBits(&br, 32);
                    i32 y = BB_ReadIBits(&br, 32);
                    u32 width = BB_ReadUBits(&br, 32);
                    u32 height = BB_ReadUBits(&br, 32);
                    if (flags & 0x01) {
                        /* Skip 9-patch info: center x/y (LONG) + center
                         * width/height (DWORD) = 16 bytes. The previous
                         * skip of 128 counted bits, not bytes, and pushed
                         * the reader past the slice data. */
                        BB_SeekBytes(&br, 16);
                    }
                    if (flags & 0x02) {
                        /* Skip pivot info: x/y (LONG) = 8 bytes (was 64). */
                        BB_SeekBytes(&br, 8);
                    }
                    slice->start = start;
                    slice->x1 = x;
                    slice->y1 = y;
                    slice->x2 = x + width;
                    slice->y2 = y + height;
                }
                ++num_slice_keys;
            } break;
            /* TODO */
            //case CHUNK_TYPE_USER_DATA
            default: {
                BB_SeekToByte(&br, chunk_end_pos);
            } break;
            }
        }
        ++num_frames;
    }
    /* Assert all data was read */
    Assert(BB_NumBytesRemaining(&br) == 0);
    result.image_size = VEC2(image_width, image_height);
    result.frame_size = VEC2(frame_width, frame_height);
    result.num_frames = num_frames;
    result.num_spans = num_spans;
    result.num_slice_keys = num_slice_keys;
    result.frame_head = frame_head;
    result.span_head = span_head;
    result.slice_key_head = slice_key_head;
    if (result.errors.count <= 0) {
        result.success = 1;
    }
    return result;
}