Diffstat (limited to 'src/internal')
-rw-r--r--  src/internal/audio/kitaudio.c                     316
-rw-r--r--  src/internal/kitdecoder.c                         235
-rw-r--r--  src/internal/kitlibstate.c                          8
-rw-r--r--  src/internal/libass.c                              24
-rw-r--r--  src/internal/subtitle/kitsubtitle.c               254
-rw-r--r--  src/internal/subtitle/kitsubtitlepacket.c          19
-rw-r--r--  src/internal/subtitle/renderers/kitsubass.c       217
-rw-r--r--  src/internal/subtitle/renderers/kitsubimage.c      93
-rw-r--r--  src/internal/subtitle/renderers/kitsubrenderer.c   33
-rw-r--r--  src/internal/utils/kitbuffer.c                    100
-rw-r--r--  src/internal/utils/kithelpers.c                    30
-rw-r--r--  src/internal/utils/kitringbuffer.c                172
-rw-r--r--  src/internal/video/kitvideo.c                     269
13 files changed, 1770 insertions, 0 deletions
diff --git a/src/internal/audio/kitaudio.c b/src/internal/audio/kitaudio.c
new file mode 100644
index 0000000..447a74d
--- /dev/null
+++ b/src/internal/audio/kitaudio.c
@@ -0,0 +1,316 @@
+#include <assert.h>
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+#include <libavformat/avformat.h>
+#include <libavutil/samplefmt.h>
+#include <libswresample/swresample.h>
+#include <SDL2/SDL.h>
+
+#include "kitchensink/kiterror.h"
+#include "kitchensink/internal/utils/kithelpers.h"
+#include "kitchensink/internal/utils/kitbuffer.h"
+#include "kitchensink/internal/audio/kitaudio.h"
+#include "kitchensink/internal/utils/kitringbuffer.h"
+#include "kitchensink/internal/utils/kitlog.h"
+
+#define KIT_AUDIO_OUT_SIZE 64
+#define AUDIO_SYNC_THRESHOLD 0.05
+
+typedef struct Kit_AudioDecoder {
+ Kit_AudioFormat *format;
+ SwrContext *swr;
+ AVFrame *scratch_frame;
+} Kit_AudioDecoder;
+
+typedef struct Kit_AudioPacket {
+ double pts;
+ size_t original_size;
+ Kit_RingBuffer *rb;
+} Kit_AudioPacket;
+
+
+Kit_AudioPacket* _CreateAudioPacket(const char* data, size_t len, double pts) {
+ Kit_AudioPacket *p = calloc(1, sizeof(Kit_AudioPacket));
+ p->rb = Kit_CreateRingBuffer(len);
+ Kit_WriteRingBuffer(p->rb, data, len);
+ p->pts = pts;
+ return p;
+}
+
+enum AVSampleFormat _FindAVSampleFormat(int format) {
+ switch(format) {
+ case AUDIO_U8: return AV_SAMPLE_FMT_U8;
+ case AUDIO_S16SYS: return AV_SAMPLE_FMT_S16;
+ case AUDIO_S32SYS: return AV_SAMPLE_FMT_S32;
+ default:
+ return AV_SAMPLE_FMT_NONE;
+ }
+}
+
+int64_t _FindAVChannelLayout(int channels) {
+ switch(channels) {
+ case 1: return AV_CH_LAYOUT_MONO;
+ case 2: return AV_CH_LAYOUT_STEREO;
+ case 4: return AV_CH_LAYOUT_QUAD;
+ case 6: return AV_CH_LAYOUT_5POINT1;
+ default: return AV_CH_LAYOUT_STEREO_DOWNMIX;
+ }
+}
+
+void _FindChannelLayout(uint64_t channel_layout, int *channels) {
+ switch(channel_layout) {
+ case AV_CH_LAYOUT_MONO:
+ *channels = 1;
+ break;
+ case AV_CH_LAYOUT_STEREO:
+ *channels = 2;
+ break;
+ case AV_CH_LAYOUT_QUAD:
+ *channels = 4;
+ break;
+ case AV_CH_LAYOUT_5POINT1:
+ *channels = 6;
+ break;
+ default:
+ *channels = 2;
+ }
+}
+
+void _FindAudioFormat(enum AVSampleFormat fmt, int *bytes, bool *is_signed, unsigned int *format) {
+ switch(fmt) {
+ case AV_SAMPLE_FMT_U8:
+ *bytes = 1;
+ *is_signed = false;
+ *format = AUDIO_U8;
+ break;
+ case AV_SAMPLE_FMT_S16:
+ *bytes = 2;
+ *is_signed = true;
+ *format = AUDIO_S16SYS;
+ break;
+ case AV_SAMPLE_FMT_S32:
+ *bytes = 4;
+ *is_signed = true;
+ *format = AUDIO_S32SYS;
+ break;
+ default:
+ *bytes = 2;
+ *is_signed = true;
+ *format = AUDIO_S16SYS;
+ break;
+ }
+}
+
+static void free_out_audio_packet_cb(void *packet) {
+ Kit_AudioPacket *p = packet;
+ Kit_DestroyRingBuffer(p->rb);
+ free(p);
+}
+
+static int dec_decode_audio_cb(Kit_Decoder *dec, AVPacket *in_packet) {
+ assert(dec != NULL);
+ assert(in_packet != NULL);
+
+ Kit_AudioDecoder *audio_dec = dec->userdata;
+ int frame_finished;
+ int len, len2;
+ int dst_linesize;
+ int dst_nb_samples, dst_bufsize;
+ unsigned char **dst_data;
+
+ // Decode as long as there is data
+ while(in_packet->size > 0) {
+ len = avcodec_decode_audio4(dec->codec_ctx, audio_dec->scratch_frame, &frame_finished, in_packet);
+ if(len < 0) {
+ return 1;
+ }
+
+ if(frame_finished) {
+ dst_nb_samples = av_rescale_rnd(
+ audio_dec->scratch_frame->nb_samples,
+ audio_dec->format->samplerate, // Target samplerate
+ dec->codec_ctx->sample_rate, // Source samplerate
+ AV_ROUND_UP);
+
+ av_samples_alloc_array_and_samples(
+ &dst_data,
+ &dst_linesize,
+ audio_dec->format->channels,
+ dst_nb_samples,
+ _FindAVSampleFormat(audio_dec->format->format),
+ 0);
+
+ len2 = swr_convert(
+ audio_dec->swr,
+ dst_data,
+ audio_dec->scratch_frame->nb_samples,
+ (const unsigned char **)audio_dec->scratch_frame->extended_data,
+ audio_dec->scratch_frame->nb_samples);
+
+ dst_bufsize = av_samples_get_buffer_size(
+ &dst_linesize,
+ audio_dec->format->channels,
+ len2,
+ _FindAVSampleFormat(audio_dec->format->format), 1);
+
+ // Get presentation timestamp
+ double pts = av_frame_get_best_effort_timestamp(audio_dec->scratch_frame);
+ pts *= av_q2d(dec->format_ctx->streams[dec->stream_index]->time_base);
+
+ // Lock, write to audio buffer, unlock
+ Kit_AudioPacket *out_packet = _CreateAudioPacket(
+ (char*)dst_data[0], (size_t)dst_bufsize, pts);
+ Kit_WriteDecoderOutput(dec, out_packet);
+
+ // Free temps
+ av_freep(&dst_data[0]);
+ av_freep(&dst_data);
+ }
+
+ in_packet->size -= len;
+ in_packet->data += len;
+ }
+
+
+ return 1;
+}
+
+static void dec_close_audio_cb(Kit_Decoder *dec) {
+ if(dec == NULL) return;
+
+ Kit_AudioDecoder *audio_dec = dec->userdata;
+ if(audio_dec->scratch_frame != NULL) {
+ av_frame_free(&audio_dec->scratch_frame);
+ }
+ if(audio_dec->swr != NULL) {
+ swr_free(&audio_dec->swr);
+ }
+ free(audio_dec);
+}
+
+Kit_Decoder* Kit_CreateAudioDecoder(const Kit_Source *src, Kit_AudioFormat *format) {
+ assert(src != NULL);
+ assert(format != NULL);
+ if(src->audio_stream_index < 0) {
+ return NULL;
+ }
+
+ // First the generic decoder component ...
+ Kit_Decoder *dec = Kit_CreateDecoder(
+ src, src->audio_stream_index,
+ KIT_AUDIO_OUT_SIZE,
+ free_out_audio_packet_cb);
+ if(dec == NULL) {
+ goto exit_0;
+ }
+
+ // Find formats
+ format->samplerate = dec->codec_ctx->sample_rate;
+ format->is_enabled = true;
+ format->stream_index = src->audio_stream_index;
+ _FindChannelLayout(dec->codec_ctx->channel_layout, &format->channels);
+ _FindAudioFormat(dec->codec_ctx->sample_fmt, &format->bytes, &format->is_signed, &format->format);
+
+ // ... then allocate the audio decoder
+ Kit_AudioDecoder *audio_dec = calloc(1, sizeof(Kit_AudioDecoder));
+ if(audio_dec == NULL) {
+ goto exit_1;
+ }
+
+ // Create temporary audio frame
+ audio_dec->scratch_frame = av_frame_alloc();
+ if(audio_dec->scratch_frame == NULL) {
+ Kit_SetError("Unable to initialize temporary audio frame");
+ goto exit_2;
+ }
+
+ // Create resampler
+ audio_dec->swr = swr_alloc_set_opts(
+ NULL,
+ _FindAVChannelLayout(format->channels), // Target channel layout
+ _FindAVSampleFormat(format->format), // Target fmt
+ format->samplerate, // Target samplerate
+ dec->codec_ctx->channel_layout, // Source channel layout
+ dec->codec_ctx->sample_fmt, // Source fmt
+ dec->codec_ctx->sample_rate, // Source samplerate
+ 0, NULL);
+
+ if(swr_init(audio_dec->swr) != 0) {
+ Kit_SetError("Unable to initialize audio resampler context");
+ goto exit_3;
+ }
+
+ // Set callbacks and userdata, and we're go
+ audio_dec->format = format;
+ dec->dec_decode = dec_decode_audio_cb;
+ dec->dec_close = dec_close_audio_cb;
+ dec->userdata = audio_dec;
+ return dec;
+
+exit_3:
+ av_frame_free(&audio_dec->scratch_frame);
+exit_2:
+ free(audio_dec);
+exit_1:
+ Kit_CloseDecoder(dec);
+exit_0:
+ return NULL;
+}
+
+int Kit_GetAudioDecoderData(Kit_Decoder *dec, unsigned char *buf, int len) {
+ assert(dec != NULL);
+
+ Kit_AudioPacket *packet = Kit_PeekDecoderOutput(dec);
+ if(packet == NULL) {
+ return 0;
+ }
+
+ int ret = 0;
+ Kit_AudioDecoder *audio_dec = dec->userdata;
+ int bytes_per_sample = audio_dec->format->bytes * audio_dec->format->channels;
+ double bytes_per_second = bytes_per_sample * audio_dec->format->samplerate;
+ double sync_ts = _GetSystemTime() - dec->clock_sync;
+
+ if(packet->pts > sync_ts + AUDIO_SYNC_THRESHOLD) {
+ return 0;
+ } else if(packet->pts < sync_ts - AUDIO_SYNC_THRESHOLD) {
+ // Audio is lagging, skip until good pts is found
+ while(1) {
+ Kit_AdvanceDecoderOutput(dec);
+ free_out_audio_packet_cb(packet);
+ packet = Kit_PeekDecoderOutput(dec);
+ if(packet == NULL) {
+ break;
+ } else {
+ dec->clock_pos = packet->pts;
+ }
+ if(packet->pts > sync_ts - AUDIO_SYNC_THRESHOLD) {
+ break;
+ }
+ }
+ }
+
+ // If we have no viable packet, just skip
+ if(packet == NULL) {
+ return 0;
+ }
+
+ // Read data from packet ringbuffer
+ if(len > 0) {
+ ret = Kit_ReadRingBuffer(packet->rb, (char*)buf, len);
+ }
+
+ // If ringbuffer is cleared, kill packet and advance buffer.
+ // Otherwise forward the pts value for the current packet.
+ if(Kit_GetRingBufferLength(packet->rb) == 0) {
+ Kit_AdvanceDecoderOutput(dec);
+ dec->clock_pos = packet->pts;
+ free_out_audio_packet_cb(packet);
+ } else {
+ packet->pts += ((double)ret) / bytes_per_second;
+ dec->clock_pos = packet->pts;
+ }
+
+ return ret;
+}
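
Illustration (not part of this commit): a minimal consumer sketch for the audio API above. It assumes a Kit_Decoder obtained from Kit_CreateAudioDecoder(), a decode loop running elsewhere (see kitdecoder.c below), and an ordinary SDL2 queueing device; the helper name queue_audio, the running flag and the 32 KB chunk size are placeholders.

#include <SDL2/SDL.h>
#include "kitchensink/internal/audio/kitaudio.h"

// Sketch only: pull decoded audio out of the decoder and queue it to SDL2.
// `dec` comes from Kit_CreateAudioDecoder(src, &fmt); something else must keep
// calling Kit_RunDecoder(dec) so the output buffer has data to hand out.
static void queue_audio(Kit_Decoder *dec, const Kit_AudioFormat *fmt, volatile int *running) {
    SDL_AudioSpec want, have;
    SDL_zero(want);
    want.freq = fmt->samplerate;
    want.format = fmt->format;      // AUDIO_U8 / AUDIO_S16SYS / AUDIO_S32SYS
    want.channels = fmt->channels;
    want.samples = 2048;
    SDL_AudioDeviceID dev = SDL_OpenAudioDevice(NULL, 0, &want, &have, 0);
    SDL_PauseAudioDevice(dev, 0);

    unsigned char buf[32768];
    while(*running) {
        int got = Kit_GetAudioDecoderData(dec, buf, (int)sizeof(buf));
        if(got > 0) {
            SDL_QueueAudio(dev, buf, got);
        }
        SDL_Delay(1);               // output is paced by pts, so don't busy-spin
    }
    SDL_CloseAudioDevice(dev);
}
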
diff --git a/src/internal/kitdecoder.c b/src/internal/kitdecoder.c
new file mode 100644
index 0000000..555c54f
--- /dev/null
+++ b/src/internal/kitdecoder.c
@@ -0,0 +1,235 @@
+#include <stdlib.h>
+#include <assert.h>
+
+#include <libavformat/avformat.h>
+
+#include "kitchensink/internal/kitdecoder.h"
+#include "kitchensink/kiterror.h"
+
+#define BUFFER_IN_SIZE 256
+
+static void free_in_video_packet_cb(void *packet) {
+ av_packet_free((AVPacket**)&packet);
+}
+
+Kit_Decoder* Kit_CreateDecoder(const Kit_Source *src, int stream_index,
+ int out_b_size, dec_free_packet_cb free_out_cb) {
+ assert(src != NULL);
+ assert(out_b_size > 0);
+
+ AVCodecContext *codec_ctx = NULL;
+ AVCodec *codec = NULL;
+ AVFormatContext *format_ctx = src->format_ctx;
+ int bsizes[2] = {BUFFER_IN_SIZE, out_b_size};
+ dec_free_packet_cb free_hooks[2] = {free_in_video_packet_cb, free_out_cb};
+
+ // Make sure index seems correct
+ if(stream_index >= (int)format_ctx->nb_streams || stream_index < 0) {
+ Kit_SetError("Invalid stream %d", stream_index);
+ goto exit_0;
+ }
+
+ // Allocate decoder and make sure allocation was a success
+ Kit_Decoder *dec = calloc(1, sizeof(Kit_Decoder));
+ if(dec == NULL) {
+ Kit_SetError("Unable to allocate kit decoder for stream %d", stream_index);
+ goto exit_0;
+ }
+
+    // Find decoder for the stream
+ codec = avcodec_find_decoder(format_ctx->streams[stream_index]->codec->codec_id);
+ if(!codec) {
+ Kit_SetError("No suitable decoder found for stream %d", stream_index);
+ goto exit_1;
+ }
+
+ // Allocate a context for the codec
+ codec_ctx = avcodec_alloc_context3(codec);
+ if(avcodec_copy_context(codec_ctx, format_ctx->streams[stream_index]->codec) != 0) {
+ Kit_SetError("Unable to copy audio codec context for stream %d", stream_index);
+ goto exit_1;
+ }
+
+ // Open the stream
+ if(avcodec_open2(codec_ctx, codec, NULL) < 0) {
+ Kit_SetError("Unable to open codec for stream %d", stream_index);
+ goto exit_2;
+ }
+
+ // Set index and codec
+ dec->stream_index = stream_index;
+ dec->codec_ctx = codec_ctx;
+ dec->format_ctx = format_ctx;
+
+ // Allocate input/output ringbuffers and locks
+ for(int i = 0; i < 2; i++) {
+ dec->buffer[i] = Kit_CreateBuffer(bsizes[i], free_hooks[i]);
+ if(dec->buffer[i] == NULL) {
+ Kit_SetError("Unable to allocate ringbuffer type %d for stream %d", i, stream_index);
+ goto exit_3;
+ }
+
+ dec->lock[i] = SDL_CreateMutex();
+ if(dec->lock[i] == NULL) {
+ Kit_SetError("Unable to allocate mutex type %d for stream %d", i, stream_index);
+ goto exit_3;
+ }
+ }
+
+ // That's that
+ return dec;
+
+exit_3:
+ for(int i = 0; i < 2; i++) {
+ SDL_DestroyMutex(dec->lock[i]);
+ Kit_DestroyBuffer(dec->buffer[i]);
+ }
+ avcodec_close(codec_ctx);
+exit_2:
+ avcodec_free_context(&codec_ctx);
+exit_1:
+ free(dec);
+exit_0:
+ return NULL;
+}
+
+void Kit_SetDecoderClockSync(Kit_Decoder *dec, double sync) {
+ if(dec == NULL) return;
+ dec->clock_sync = sync;
+}
+
+void Kit_ChangeDecoderClockSync(Kit_Decoder *dec, double sync) {
+ if(dec == NULL) return;
+ dec->clock_sync += sync;
+}
+
+int Kit_WriteDecoderInput(Kit_Decoder *dec, AVPacket *packet) {
+ assert(dec != NULL);
+ int ret = 1;
+ if(SDL_LockMutex(dec->lock[KIT_DEC_IN]) == 0) {
+ ret = Kit_WriteBuffer(dec->buffer[KIT_DEC_IN], packet);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_IN]);
+ }
+ return ret;
+}
+
+bool Kit_CanWriteDecoderInput(Kit_Decoder *dec) {
+ assert(dec != NULL);
+ bool ret = false;
+ if(SDL_LockMutex(dec->lock[KIT_DEC_IN]) == 0) {
+ ret = !Kit_IsBufferFull(dec->buffer[KIT_DEC_IN]);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_IN]);
+ }
+ return ret;
+}
+
+AVPacket* Kit_ReadDecoderInput(Kit_Decoder *dec) {
+ assert(dec != NULL);
+ AVPacket *ret = NULL;
+ if(SDL_LockMutex(dec->lock[KIT_DEC_IN]) == 0) {
+ ret = Kit_ReadBuffer(dec->buffer[KIT_DEC_IN]);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_IN]);
+ }
+ return ret;
+}
+
+int Kit_WriteDecoderOutput(Kit_Decoder *dec, void *packet) {
+ assert(dec != NULL);
+ int ret = 1;
+ if(SDL_LockMutex(dec->lock[KIT_DEC_OUT]) == 0) {
+ ret = Kit_WriteBuffer(dec->buffer[KIT_DEC_OUT], packet);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_OUT]);
+ }
+ return ret;
+}
+
+void Kit_ClearDecoderOutput(Kit_Decoder *dec) {
+ if(SDL_LockMutex(dec->lock[KIT_DEC_OUT]) == 0) {
+ Kit_ClearBuffer(dec->buffer[KIT_DEC_OUT]);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_OUT]);
+ }
+}
+
+void Kit_ClearDecoderInput(Kit_Decoder *dec) {
+ if(SDL_LockMutex(dec->lock[KIT_DEC_IN]) == 0) {
+ Kit_ClearBuffer(dec->buffer[KIT_DEC_IN]);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_IN]);
+ }
+}
+
+void* Kit_PeekDecoderOutput(Kit_Decoder *dec) {
+ assert(dec != NULL);
+ void *ret = NULL;
+ if(SDL_LockMutex(dec->lock[KIT_DEC_OUT]) == 0) {
+ ret = Kit_PeekBuffer(dec->buffer[KIT_DEC_OUT]);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_OUT]);
+ }
+ return ret;
+}
+
+void Kit_ForEachDecoderOutput(Kit_Decoder *dec, Kit_ForEachItemCallback cb, void *userdata) {
+ assert(dec != NULL);
+ if(SDL_LockMutex(dec->lock[KIT_DEC_OUT]) == 0) {
+ Kit_ForEachItemInBuffer(dec->buffer[KIT_DEC_OUT], cb, userdata);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_OUT]);
+ }
+}
+
+void Kit_AdvanceDecoderOutput(Kit_Decoder *dec) {
+ assert(dec != NULL);
+ if(SDL_LockMutex(dec->lock[KIT_DEC_OUT]) == 0) {
+ Kit_AdvanceBuffer(dec->buffer[KIT_DEC_OUT]);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_OUT]);
+ }
+}
+
+void Kit_ClearDecoderBuffers(Kit_Decoder *dec) {
+ if(dec == NULL) return;
+ Kit_ClearDecoderInput(dec);
+ Kit_ClearDecoderOutput(dec);
+ avcodec_flush_buffers(dec->codec_ctx);
+}
+
+int Kit_RunDecoder(Kit_Decoder *dec) {
+ if(dec == NULL) return 0;
+
+ AVPacket *in_packet;
+ int is_output_full = 1;
+ int ret;
+
+ // First, check if there is room in output buffer
+ if(SDL_LockMutex(dec->lock[KIT_DEC_OUT]) == 0) {
+ is_output_full = Kit_IsBufferFull(dec->buffer[KIT_DEC_OUT]);
+ SDL_UnlockMutex(dec->lock[KIT_DEC_OUT]);
+ }
+ if(is_output_full) {
+ return 0;
+ }
+
+ // Then, see if we have incoming data
+ in_packet = Kit_ReadDecoderInput(dec);
+ if(in_packet == NULL) {
+ return 0;
+ }
+
+ // Run decoder with incoming packet
+ ret = dec->dec_decode(dec, in_packet);
+
+ // Free raw packet before returning
+ av_packet_free(&in_packet);
+ return ret;
+}
+
+void Kit_CloseDecoder(Kit_Decoder *dec) {
+ if(dec == NULL) return;
+ if(dec->dec_close) {
+ dec->dec_close(dec);
+ }
+ for(int i = 0; i < 2; i++) {
+ SDL_DestroyMutex(dec->lock[i]);
+ Kit_DestroyBuffer(dec->buffer[i]);
+ }
+ avcodec_close(dec->codec_ctx);
+ avcodec_free_context(&dec->codec_ctx);
+ free(dec);
+}
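
Illustration (not from the patch): a sketch of the demux/decode pump a player thread could run around the generic decoder API above. The helper name pump_decoder and the fmt_ctx/dec arguments are placeholders created elsewhere; the input buffer takes ownership of every packet written into it.

#include <libavformat/avformat.h>
#include "kitchensink/internal/kitdecoder.h"

// Sketch only: the demux/decode pump around the API above. `fmt_ctx` is the
// source AVFormatContext and `dec` a decoder created for one of its streams.
static void pump_decoder(AVFormatContext *fmt_ctx, Kit_Decoder *dec) {
    AVPacket *packet = av_packet_alloc();
    while(av_read_frame(fmt_ctx, packet) >= 0) {
        if(packet->stream_index == dec->stream_index && Kit_CanWriteDecoderInput(dec)) {
            Kit_WriteDecoderInput(dec, packet);  // input buffer takes ownership
            packet = av_packet_alloc();          // next av_read_frame() needs a fresh packet
        } else {
            av_packet_unref(packet);             // wrong stream or no room; drop the payload
        }
        while(Kit_RunDecoder(dec) == 1);         // decode until output is full or input is empty
    }
    av_packet_free(&packet);
}
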
diff --git a/src/internal/kitlibstate.c b/src/internal/kitlibstate.c
new file mode 100644
index 0000000..d2d98ca
--- /dev/null
+++ b/src/internal/kitlibstate.c
@@ -0,0 +1,8 @@
+#include <stdlib.h>
+#include "kitchensink/internal/kitlibstate.h"
+
+static Kit_LibraryState _librarystate = {0, NULL, NULL};
+
+Kit_LibraryState* Kit_GetLibraryState() {
+ return &_librarystate;
+}
diff --git a/src/internal/libass.c b/src/internal/libass.c
new file mode 100644
index 0000000..a13e32d
--- /dev/null
+++ b/src/internal/libass.c
@@ -0,0 +1,24 @@
+#ifdef USE_DYNAMIC_LIBASS
+
+#include <SDL2/SDL_loadso.h>
+#include "kitchensink/internal/libass.h"
+
+int load_libass(void *handle) {
+ ass_library_init = SDL_LoadFunction(handle, "ass_library_init");
+ ass_library_done = SDL_LoadFunction(handle, "ass_library_done");
+ ass_set_message_cb = SDL_LoadFunction(handle, "ass_set_message_cb");
+ ass_renderer_init = SDL_LoadFunction(handle, "ass_renderer_init");
+ ass_renderer_done = SDL_LoadFunction(handle, "ass_renderer_done");
+ ass_set_frame_size = SDL_LoadFunction(handle, "ass_set_frame_size");
+ ass_set_hinting = SDL_LoadFunction(handle, "ass_set_hinting");
+ ass_set_fonts = SDL_LoadFunction(handle, "ass_set_fonts");
+ ass_render_frame = SDL_LoadFunction(handle, "ass_render_frame");
+ ass_new_track = SDL_LoadFunction(handle, "ass_new_track");
+ ass_free_track = SDL_LoadFunction(handle, "ass_free_track");
+ ass_process_data = SDL_LoadFunction(handle, "ass_process_data");
+ ass_add_font = SDL_LoadFunction(handle, "ass_add_font");
+ ass_process_codec_private = SDL_LoadFunction(handle, "ass_process_codec_private");
+ return 0;
+}
+
+#endif // USE_DYNAMIC_LIBASS
diff --git a/src/internal/subtitle/kitsubtitle.c b/src/internal/subtitle/kitsubtitle.c
new file mode 100644
index 0000000..38d4768
--- /dev/null
+++ b/src/internal/subtitle/kitsubtitle.c
@@ -0,0 +1,254 @@
+#include <assert.h>
+
+#include <SDL2/SDL.h>
+#include <libavformat/avformat.h>
+
+#include "kitchensink/internal/utils/kitlog.h"
+
+#include "kitchensink/kiterror.h"
+#include "kitchensink/kitlib.h"
+#include "kitchensink/internal/utils/kitlog.h"
+#include "kitchensink/internal/kitlibstate.h"
+#include "kitchensink/internal/subtitle/kitsubtitlepacket.h"
+#include "kitchensink/internal/subtitle/kitsubtitle.h"
+#include "kitchensink/internal/subtitle/renderers/kitsubimage.h"
+#include "kitchensink/internal/subtitle/renderers/kitsubass.h"
+#include "kitchensink/internal/subtitle/renderers/kitsubrenderer.h"
+#include "kitchensink/internal/utils/kithelpers.h"
+
+
+#define KIT_SUBTITLE_OUT_SIZE 1024
+
+typedef struct Kit_SubtitleDecoder {
+ Kit_SubtitleFormat *format;
+ Kit_SubtitleRenderer *renderer;
+ AVSubtitle scratch_frame;
+ int w;
+ int h;
+ int output_state;
+ SDL_Surface *tmp_buffer;
+} Kit_SubtitleDecoder;
+
+
+static void free_out_subtitle_packet_cb(void *packet) {
+ Kit_FreeSubtitlePacket((Kit_SubtitlePacket*)packet);
+}
+
+static int dec_decode_subtitle_cb(Kit_Decoder *dec, AVPacket *in_packet) {
+ assert(dec != NULL);
+ assert(in_packet != NULL);
+
+ Kit_SubtitleDecoder *subtitle_dec = dec->userdata;
+ int frame_finished;
+ int len;
+
+ if(in_packet->size > 0) {
+ len = avcodec_decode_subtitle2(dec->codec_ctx, &subtitle_dec->scratch_frame, &frame_finished, in_packet);
+ if(len < 0) {
+ return 1;
+ }
+
+ if(frame_finished) {
+ // Start and end presentation timestamps for subtitle frame
+ double pts = 0;
+ if(in_packet->pts != AV_NOPTS_VALUE) {
+ pts = in_packet->pts;
+ pts *= av_q2d(dec->format_ctx->streams[dec->stream_index]->time_base);
+ }
+ double start = pts + (subtitle_dec->scratch_frame.start_display_time / 1000.0f);
+ double end = -1;
+ if(subtitle_dec->scratch_frame.end_display_time < UINT_MAX) {
+ end = pts + (subtitle_dec->scratch_frame.end_display_time / 1000.0f);
+ }
+
+ // Create a packet. This should be filled by renderer.
+ Kit_SubtitlePacket *out_packet = Kit_RunSubtitleRenderer(
+ subtitle_dec->renderer, &subtitle_dec->scratch_frame, start, end);
+ if(end < 0) {
+ Kit_ClearDecoderOutput(dec);
+ }
+ if(out_packet != NULL) {
+ Kit_WriteDecoderOutput(dec, out_packet);
+ }
+
+ // Free subtitle since it has now been handled
+ avsubtitle_free(&subtitle_dec->scratch_frame);
+ }
+ }
+
+ return 1;
+}
+
+static void dec_close_subtitle_cb(Kit_Decoder *dec) {
+ if(dec == NULL) return;
+ Kit_SubtitleDecoder *subtitle_dec = dec->userdata;
+ SDL_FreeSurface(subtitle_dec->tmp_buffer);
+ Kit_CloseSubtitleRenderer(subtitle_dec->renderer);
+ free(subtitle_dec);
+}
+
+Kit_Decoder* Kit_CreateSubtitleDecoder(const Kit_Source *src, Kit_SubtitleFormat *format, int w, int h) {
+ assert(src != NULL);
+ assert(format != NULL);
+ if(src->subtitle_stream_index < 0) {
+ return NULL;
+ }
+
+ Kit_LibraryState *library_state = Kit_GetLibraryState();
+
+ // First the generic decoder component
+ Kit_Decoder *dec = Kit_CreateDecoder(
+ src, src->subtitle_stream_index,
+ KIT_SUBTITLE_OUT_SIZE,
+ free_out_subtitle_packet_cb);
+ if(dec == NULL) {
+ goto exit_0;
+ }
+
+ // Set format. Note that is_enabled may be changed below ...
+ format->is_enabled = true;
+ format->stream_index = src->subtitle_stream_index;
+ format->format = SDL_PIXELFORMAT_RGBA32; // Always this
+
+ // ... then allocate the subtitle decoder
+ Kit_SubtitleDecoder *subtitle_dec = calloc(1, sizeof(Kit_SubtitleDecoder));
+ if(subtitle_dec == NULL) {
+ goto exit_1;
+ }
+
+ // For subtitles, we need a renderer for the stream. Pick one based on codec ID.
+ Kit_SubtitleRenderer *ren = NULL;
+ switch(dec->codec_ctx->codec_id) {
+ case AV_CODEC_ID_TEXT:
+ case AV_CODEC_ID_HDMV_TEXT_SUBTITLE:
+ case AV_CODEC_ID_SRT:
+ case AV_CODEC_ID_SUBRIP:
+ case AV_CODEC_ID_SSA:
+ case AV_CODEC_ID_ASS:
+ if(library_state->init_flags & KIT_INIT_ASS) {
+ ren = Kit_CreateASSSubtitleRenderer(dec, w, h);
+ } else {
+ format->is_enabled = false;
+ }
+ break;
+ case AV_CODEC_ID_DVD_SUBTITLE:
+ case AV_CODEC_ID_DVB_SUBTITLE:
+ case AV_CODEC_ID_HDMV_PGS_SUBTITLE:
+ case AV_CODEC_ID_XSUB:
+ ren = Kit_CreateImageSubtitleRenderer(dec, w, h);
+ break;
+ default:
+ format->is_enabled = false;
+ break;
+ }
+ if(ren == NULL) {
+ Kit_SetError("Unrecognized subtitle format");
+ goto exit_2;
+ }
+
+ // Allocate temporary screen-sized subtitle buffer
+ SDL_Surface *tmp_buffer = SDL_CreateRGBSurfaceWithFormat(0, w, h, 32, SDL_PIXELFORMAT_RGBA32);
+ if(tmp_buffer == NULL) {
+ Kit_SetError("Unable to allocate subtitle buffer");
+ goto exit_3;
+ }
+ SDL_FillRect(tmp_buffer, NULL, 0);
+
+ // Set callbacks and userdata, and we're go
+ subtitle_dec->format = format;
+ subtitle_dec->renderer = ren;
+ subtitle_dec->w = w;
+ subtitle_dec->h = h;
+ subtitle_dec->tmp_buffer = tmp_buffer;
+ subtitle_dec->output_state = 0;
+ dec->dec_decode = dec_decode_subtitle_cb;
+ dec->dec_close = dec_close_subtitle_cb;
+ dec->userdata = subtitle_dec;
+ return dec;
+
+exit_3:
+ Kit_CloseSubtitleRenderer(ren);
+exit_2:
+ free(subtitle_dec);
+exit_1:
+ Kit_CloseDecoder(dec);
+exit_0:
+ return NULL;
+}
+
+
+typedef struct {
+ double sync_ts;
+ SDL_Surface *surface;
+ int rendered;
+} tmp_sub_image;
+
+
+static void _merge_subtitle_texture(void *ptr, void *userdata) {
+ tmp_sub_image *img = userdata;
+ Kit_SubtitlePacket *packet = ptr;
+
+ // Make sure current time is within presentation range
+ if(packet->pts_start >= img->sync_ts)
+ return;
+ if(packet->pts_end <= img->sync_ts && packet->pts_end >= 0)
+ return;
+
+ // Tell the renderer function that we did something here
+ img->rendered = 1;
+
+    // Blit the whole source surface to the target surface at the requested coords
+ SDL_Rect dst_rect;
+ dst_rect.x = packet->x;
+ dst_rect.y = packet->y;
+ SDL_BlitSurface(packet->surface, NULL, img->surface, &dst_rect);
+}
+
+
+int Kit_GetSubtitleDecoderDataTexture(Kit_Decoder *dec, SDL_Texture *texture) {
+ assert(dec != NULL);
+ assert(texture != NULL);
+
+ Kit_SubtitleDecoder *subtitle_dec = dec->userdata;
+
+ double sync_ts = _GetSystemTime() - dec->clock_sync;
+
+ // If we rendered on last frame, clear the buffer
+ if(subtitle_dec->output_state == 1) {
+ SDL_FillRect(subtitle_dec->tmp_buffer, NULL, 0);
+ }
+
+ // Clear out old packets
+ Kit_SubtitlePacket *packet = NULL;
+ while((packet = Kit_PeekDecoderOutput(dec)) != NULL) {
+ if(packet->pts_end >= sync_ts)
+ break;
+ if(packet->pts_end < 0)
+ break;
+ Kit_AdvanceDecoderOutput(dec);
+ free_out_subtitle_packet_cb(packet);
+ }
+
+ // Blit all subtitle image rectangles to master buffer
+ tmp_sub_image img;
+ img.sync_ts = sync_ts;
+ img.surface = subtitle_dec->tmp_buffer;
+ img.rendered = 0;
+ Kit_ForEachDecoderOutput(dec, _merge_subtitle_texture, (void*)&img);
+
+ // If nothing was rendered now or last frame, just return. No need to update texture.
+ dec->clock_pos = sync_ts;
+ if(img.rendered == 0 && subtitle_dec->output_state == 0) {
+ return 1;
+ }
+ subtitle_dec->output_state = img.rendered;
+
+ // Update output texture with current buffered image
+ SDL_UpdateTexture(
+ texture, NULL,
+ subtitle_dec->tmp_buffer->pixels,
+ subtitle_dec->tmp_buffer->pitch);
+
+ // all done!
+ return 0;
+}
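
Illustration only: a sketch of compositing the subtitle overlay in a render loop. The helper draw_subtitles and its arguments are placeholders; the texture is assumed to match the w/h passed to Kit_CreateSubtitleDecoder() and to use the RGBA32 format the decoder always reports.

#include <SDL2/SDL.h>
#include "kitchensink/internal/subtitle/kitsubtitle.h"

// Sketch only: composite the subtitle overlay after the video frame has been
// drawn. `sub_tex` is assumed to be an RGBA32, SDL_TEXTUREACCESS_STATIC texture
// of the same w/h given to Kit_CreateSubtitleDecoder(), with blending enabled
// so empty regions stay transparent.
static void draw_subtitles(SDL_Renderer *renderer, Kit_Decoder *dec, SDL_Texture *sub_tex) {
    Kit_GetSubtitleDecoderDataTexture(dec, sub_tex);  // returns 0 only when it refreshed the texture
    SDL_RenderCopy(renderer, sub_tex, NULL, NULL);    // overlay on top of the current frame
}
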
diff --git a/src/internal/subtitle/kitsubtitlepacket.c b/src/internal/subtitle/kitsubtitlepacket.c
new file mode 100644
index 0000000..ff52d39
--- /dev/null
+++ b/src/internal/subtitle/kitsubtitlepacket.c
@@ -0,0 +1,19 @@
+#include "kitchensink/internal/subtitle/kitsubtitlepacket.h"
+
+
+Kit_SubtitlePacket* Kit_CreateSubtitlePacket(
+ double pts_start, double pts_end, int pos_x, int pos_y, SDL_Surface *surface)
+{
+ Kit_SubtitlePacket *p = calloc(1, sizeof(Kit_SubtitlePacket));
+ p->pts_start = pts_start;
+ p->pts_end = pts_end;
+ p->x = pos_x;
+ p->y = pos_y;
+ p->surface = surface;
+ return p;
+}
+
+void Kit_FreeSubtitlePacket(Kit_SubtitlePacket *p) {
+ SDL_FreeSurface(p->surface);
+ free(p);
+}
diff --git a/src/internal/subtitle/renderers/kitsubass.c b/src/internal/subtitle/renderers/kitsubass.c
new file mode 100644
index 0000000..2a9eedf
--- /dev/null
+++ b/src/internal/subtitle/renderers/kitsubass.c
@@ -0,0 +1,217 @@
+#include <assert.h>
+#include <stdlib.h>
+
+#include <SDL2/SDL_surface.h>
+
+#include "kitchensink/kiterror.h"
+#include "kitchensink/internal/utils/kitlog.h"
+#include "kitchensink/internal/kitlibstate.h"
+#include "kitchensink/internal/subtitle/kitsubtitlepacket.h"
+#include "kitchensink/internal/utils/kithelpers.h"
+#include "kitchensink/internal/subtitle/renderers/kitsubass.h"
+
+// For compatibility
+#ifndef ASS_FONTPROVIDER_AUTODETECT
+#define ASS_FONTPROVIDER_AUTODETECT 1
+#endif
+
+typedef struct Kit_ASSSubtitleRenderer {
+ ASS_Renderer *renderer;
+ ASS_Track *track;
+} Kit_ASSSubtitleRenderer;
+
+static void _ProcessAssImage(SDL_Surface *surface, const ASS_Image *img, int min_x, int min_y) {
+ unsigned char r = ((img->color) >> 24) & 0xFF;
+ unsigned char g = ((img->color) >> 16) & 0xFF;
+ unsigned char b = ((img->color) >> 8) & 0xFF;
+ unsigned char a = (img->color) & 0xFF;
+ unsigned char *src = img->bitmap;
+ unsigned char *dst = surface->pixels;
+ unsigned int pos_x = img->dst_x - min_x;
+ unsigned int pos_y = img->dst_y - min_y;
+ unsigned int an, ao, x, y, x_off;
+ dst += pos_y * surface->pitch;
+
+ for(y = 0; y < img->h; y++) {
+ for(x = 0; x < img->w; x++) {
+ x_off = (pos_x + x) * 4;
+ an = ((255 - a) * src[x]) >> 8; // New alpha
+ ao = dst[x_off + 3]; // Original alpha
+ if(ao == 0) {
+ dst[x_off + 0] = r;
+ dst[x_off + 1] = g;
+ dst[x_off + 2] = b;
+ dst[x_off + 3] = an;
+ } else {
+ dst[x_off + 3] = 255 - (255 - dst[x_off + 3]) * (255 - an) / 255;
+ if(dst[x_off + 3] != 0) {
+ dst[x_off + 0] = (dst[x_off + 0] * ao * (255-an) / 255 + r * an ) / dst[x_off + 3];
+ dst[x_off + 1] = (dst[x_off + 1] * ao * (255-an) / 255 + g * an ) / dst[x_off + 3];
+ dst[x_off + 2] = (dst[x_off + 2] * ao * (255-an) / 255 + b * an ) / dst[x_off + 3];
+ }
+ }
+ }
+ src += img->stride;
+ dst += surface->pitch;
+ }
+}
+
+static Kit_SubtitlePacket* ren_render_ass_cb(Kit_SubtitleRenderer *ren, void *src, double start_pts, double end_pts) {
+ assert(ren != NULL);
+ assert(src != NULL);
+
+ Kit_ASSSubtitleRenderer *ass_ren = ren->userdata;
+ AVSubtitle *sub = src;
+ SDL_Surface *surface = NULL;
+ ASS_Image *image = NULL;
+ ASS_Image *wt_image = NULL;
+ unsigned int now = start_pts * 1000;
+ int change = 0;
+ int x0 = INT_MAX, y0 = INT_MAX;
+ int x1 = 0, y1 = 0;
+ int w, h;
+
+ // Read incoming subtitle packets to libASS
+ for(int r = 0; r < sub->num_rects; r++) {
+ if(sub->rects[r]->ass == NULL)
+ continue;
+ ass_process_data(ass_ren->track, sub->rects[r]->ass, strlen(sub->rects[r]->ass));
+ }
+
+ // Ask libass to render frames. If there are no changes since last render, stop here.
+ wt_image = image = ass_render_frame(ass_ren->renderer, ass_ren->track, now, &change);
+ if(change == 0) {
+ return NULL;
+ }
+
+ // Find dimensions
+ for(image = wt_image; image; image = image->next) {
+ if(image->dst_x < x0)
+ x0 = image->dst_x;
+ if(image->dst_y < y0)
+ y0 = image->dst_y;
+ if(image->dst_x + image->w > x1)
+ x1 = image->dst_x + image->w;
+ if(image->dst_y + image->h > y1)
+ y1 = image->dst_y + image->h;
+ }
+ w = x1 - x0;
+ h = y1 - y0;
+
+ // Surface to render on
+ surface = SDL_CreateRGBSurfaceWithFormat(0, w, h, 32, SDL_PIXELFORMAT_RGBA32);
+ SDL_SetSurfaceBlendMode(surface, SDL_BLENDMODE_BLEND);
+ SDL_FillRect(surface, NULL, 0);
+
+ // Render subimages to the target surface.
+ for(image = wt_image; image; image = image->next) {
+ if(image->w == 0 || image->h == 0)
+ continue;
+ _ProcessAssImage(surface, image, x0, y0);
+ }
+
+ // We tell subtitle handler to clear output before adding this frame.
+ return Kit_CreateSubtitlePacket(start_pts, end_pts, x0, y0, surface);
+}
+
+static void ren_close_ass_cb(Kit_SubtitleRenderer *ren) {
+ if(ren == NULL) return;
+
+ Kit_ASSSubtitleRenderer *ass_ren = ren->userdata;
+ ass_renderer_done(ass_ren->renderer);
+ free(ass_ren);
+}
+
+Kit_SubtitleRenderer* Kit_CreateASSSubtitleRenderer(const Kit_Decoder *dec, int w, int h) {
+ assert(dec != NULL);
+ assert(w >= 0);
+ assert(h >= 0);
+
+ // Make sure that libass library has been initialized + get handle
+ Kit_LibraryState *state = Kit_GetLibraryState();
+ if(state->libass_handle == NULL) {
+ Kit_SetError("Libass library has not been initialized");
+ return NULL;
+ }
+
+ // First allocate the generic decoder component
+ Kit_SubtitleRenderer *ren = Kit_CreateSubtitleRenderer();
+ if(ren == NULL) {
+ goto exit_0;
+ }
+
+ // Next, allocate ASS subtitle renderer context.
+ Kit_ASSSubtitleRenderer *ass_ren = calloc(1, sizeof(Kit_ASSSubtitleRenderer));
+ if(ass_ren == NULL) {
+ goto exit_1;
+ }
+
+ // Initialize libass renderer
+ ASS_Renderer *ass_renderer = ass_renderer_init(state->libass_handle);
+ if(ass_renderer == NULL) {
+ Kit_SetError("Unable to initialize libass renderer");
+ goto exit_2;
+ }
+
+ // Read fonts from attachment streams and give them to libass
+ for(int j = 0; j < dec->format_ctx->nb_streams; j++) {
+ AVStream *st = dec->format_ctx->streams[j];
+ if(st->codec->codec_type == AVMEDIA_TYPE_ATTACHMENT && attachment_is_font(st)) {
+ const AVDictionaryEntry *tag = av_dict_get(
+ st->metadata,
+ "filename",
+ NULL,
+ AV_DICT_MATCH_CASE);
+ if(tag) {
+ ass_add_font(
+ state->libass_handle,
+ tag->value,
+ (char*)st->codec->extradata,
+ st->codec->extradata_size);
+ }
+ }
+ }
+
+ // Init libass fonts and window frame size
+ ass_set_fonts(
+ ass_renderer,
+ NULL, "sans-serif",
+ ASS_FONTPROVIDER_AUTODETECT,
+ NULL, 1);
+ ass_set_frame_size(ass_renderer, w, h);
+ ass_set_hinting(ass_renderer, ASS_HINTING_NONE);
+
+ // Initialize libass track
+ ASS_Track *ass_track = ass_new_track(state->libass_handle);
+ if(ass_track == NULL) {
+ Kit_SetError("Unable to initialize libass track");
+ goto exit_3;
+ }
+
+ // Set up libass track headers (ffmpeg provides these)
+ if(dec->codec_ctx->subtitle_header) {
+ ass_process_codec_private(
+ ass_track,
+ (char*)dec->codec_ctx->subtitle_header,
+ dec->codec_ctx->subtitle_header_size);
+ }
+
+ LOG("kekekekee\n");
+
+ // Set callbacks and userdata, and we're go
+ ass_ren->renderer = ass_renderer;
+ ass_ren->track = ass_track;
+ ren->ren_render = ren_render_ass_cb;
+ ren->ren_close = ren_close_ass_cb;
+ ren->userdata = ass_ren;
+ return ren;
+
+exit_3:
+ ass_renderer_done(ass_renderer);
+exit_2:
+ free(ass_ren);
+exit_1:
+ Kit_CloseSubtitleRenderer(ren);
+exit_0:
+ return NULL;
+}
diff --git a/src/internal/subtitle/renderers/kitsubimage.c b/src/internal/subtitle/renderers/kitsubimage.c
new file mode 100644
index 0000000..4cd5b52
--- /dev/null
+++ b/src/internal/subtitle/renderers/kitsubimage.c
@@ -0,0 +1,93 @@
+#include <assert.h>
+#include <stdlib.h>
+
+#include <SDL2/SDL_surface.h>
+
+#include "kitchensink/kiterror.h"
+#include "kitchensink/internal/utils/kitlog.h"
+#include "kitchensink/internal/subtitle/kitsubtitlepacket.h"
+#include "kitchensink/internal/subtitle/renderers/kitsubimage.h"
+
+
+static void _ProcessSubImage(SDL_Surface *surface, const AVSubtitleRect *rect, int min_x, int min_y) {
+ SDL_Surface *src = SDL_CreateRGBSurfaceWithFormatFrom(
+ rect->data[0], rect->w, rect->h, 8, rect->linesize[0], SDL_PIXELFORMAT_INDEX8);
+ SDL_SetPaletteColors(src->format->palette, (SDL_Color*)rect->data[1], 0, 256);
+
+ SDL_Rect dst_rect;
+ dst_rect.x = rect->x - min_x;
+ dst_rect.y = rect->y - min_y;
+
+ SDL_BlitSurface(src, NULL, surface, &dst_rect);
+}
+
+static Kit_SubtitlePacket* ren_render_image_cb(Kit_SubtitleRenderer *ren, void *src, double start_pts, double end_pts) {
+ assert(ren != NULL);
+ assert(src != NULL);
+
+ AVSubtitle *sub = src;
+ SDL_Surface *surface = NULL;
+ int x0 = INT_MAX, y0 = INT_MAX;
+ int x1 = 0, y1 = 0;
+ int w, h;
+ int has_content = 0;
+
+ // Find sizes of incoming subtitle bitmaps
+ for(int n = 0; n < sub->num_rects; n++) {
+ AVSubtitleRect *r = sub->rects[n];
+ if(r->type != SUBTITLE_BITMAP)
+ continue;
+ has_content = 1;
+ if(r->x < x0)
+ x0 = r->x;
+ if(r->y < y0)
+ y0 = r->y;
+ if(r->x + r->w > x1)
+ x1 = r->x + r->w;
+ if(r->y + r->h > y1)
+ y1 = r->y + r->h;
+ }
+
+ if(has_content == 0) {
+ return NULL;
+ }
+
+ w = x1 - x0;
+ h = y1 - y0;
+ LOG("x, y = %d, %d w, h = %d, %d\n", x0, y0, w, h);
+
+ // Surface to render on
+ surface = SDL_CreateRGBSurfaceWithFormat(0, w, h, 32, SDL_PIXELFORMAT_RGBA32);
+ SDL_SetSurfaceBlendMode(surface, SDL_BLENDMODE_BLEND);
+ SDL_FillRect(surface, NULL, 0);
+
+ // Render subimages to the target surface.
+ for(int n = 0; n < sub->num_rects; n++) {
+ AVSubtitleRect *r = sub->rects[n];
+ if(r->type != SUBTITLE_BITMAP)
+ continue;
+ _ProcessSubImage(surface, r, x0, y0);
+ }
+
+ LOG("Setting %f, %f\n", start_pts, end_pts);
+ return Kit_CreateSubtitlePacket(start_pts, end_pts, x0, y0, surface);
+}
+
+
+Kit_SubtitleRenderer* Kit_CreateImageSubtitleRenderer(const Kit_Decoder *dec, int w, int h) {
+ assert(dec != NULL);
+ assert(w >= 0);
+ assert(h >= 0);
+
+ // Allocate a new renderer
+ Kit_SubtitleRenderer *ren = Kit_CreateSubtitleRenderer();
+ if(ren == NULL) {
+ return NULL;
+ }
+
+ // Only renderer required, no other data.
+ ren->ren_render = ren_render_image_cb;
+ ren->ren_close = NULL;
+ ren->userdata = NULL;
+ return ren;
+}
diff --git a/src/internal/subtitle/renderers/kitsubrenderer.c b/src/internal/subtitle/renderers/kitsubrenderer.c
new file mode 100644
index 0000000..ce3e408
--- /dev/null
+++ b/src/internal/subtitle/renderers/kitsubrenderer.c
@@ -0,0 +1,33 @@
+#include <stdlib.h>
+#include <assert.h>
+
+#include "kitchensink/kiterror.h"
+#include "kitchensink/internal/subtitle/kitsubtitlepacket.h"
+#include "kitchensink/internal/subtitle/renderers/kitsubrenderer.h"
+
+
+Kit_SubtitleRenderer* Kit_CreateSubtitleRenderer() {
+ // Allocate renderer and make sure allocation was a success
+ Kit_SubtitleRenderer *ren = calloc(1, sizeof(Kit_SubtitleRenderer));
+ if(ren == NULL) {
+ Kit_SetError("Unable to allocate kit subtitle renderer");
+ return NULL;
+ }
+ return ren;
+}
+
+Kit_SubtitlePacket* Kit_RunSubtitleRenderer(Kit_SubtitleRenderer *ren, void *src, double start_pts, double end_pts) {
+ if(ren == NULL)
+ return NULL;
+ if(ren->ren_render != NULL)
+ return ren->ren_render(ren, src, start_pts, end_pts);
+ return NULL;
+}
+
+void Kit_CloseSubtitleRenderer(Kit_SubtitleRenderer *ren) {
+ if(ren == NULL)
+ return;
+ if(ren->ren_close != NULL)
+ ren->ren_close(ren);
+ free(ren);
+}
diff --git a/src/internal/utils/kitbuffer.c b/src/internal/utils/kitbuffer.c
new file mode 100644
index 0000000..0133154
--- /dev/null
+++ b/src/internal/utils/kitbuffer.c
@@ -0,0 +1,100 @@
+#include <stdlib.h>
+#include <assert.h>
+
+#include "kitchensink/internal/utils/kitbuffer.h"
+
+Kit_Buffer* Kit_CreateBuffer(unsigned int size, Kit_BufferFreeCallback free_cb) {
+ Kit_Buffer *b = calloc(1, sizeof(Kit_Buffer));
+ if(b == NULL) {
+ return NULL;
+ }
+ b->size = size;
+ b->free_cb = free_cb;
+ b->data = calloc(size, sizeof(void*));
+ if(b->data == NULL) {
+ free(b);
+ return NULL;
+ }
+ return b;
+}
+
+void Kit_DestroyBuffer(Kit_Buffer *buffer) {
+ if(buffer == NULL) return;
+ Kit_ClearBuffer(buffer);
+ free(buffer->data);
+ free(buffer);
+}
+
+void Kit_ClearBuffer(Kit_Buffer *buffer) {
+ void *data;
+ if(buffer->free_cb == NULL)
+ return;
+ while((data = Kit_ReadBuffer(buffer)) != NULL) {
+ buffer->free_cb(data);
+ }
+}
+
+void* Kit_ReadBuffer(Kit_Buffer *buffer) {
+ assert(buffer != NULL);
+ if(buffer->read_p < buffer->write_p) {
+ void *out = buffer->data[buffer->read_p % buffer->size];
+ buffer->data[buffer->read_p % buffer->size] = NULL;
+ buffer->read_p++;
+ if(buffer->read_p >= buffer->size) {
+ buffer->read_p = buffer->read_p % buffer->size;
+ buffer->write_p = buffer->write_p % buffer->size;
+ }
+ return out;
+ }
+ return NULL;
+}
+
+void* Kit_PeekBuffer(const Kit_Buffer *buffer) {
+ assert(buffer != NULL);
+ if(buffer->read_p < buffer->write_p) {
+ return buffer->data[buffer->read_p % buffer->size];
+ }
+ return NULL;
+}
+
+void Kit_AdvanceBuffer(Kit_Buffer *buffer) {
+ assert(buffer != NULL);
+ if(buffer->read_p < buffer->write_p) {
+ buffer->data[buffer->read_p % buffer->size] = NULL;
+ buffer->read_p++;
+ if(buffer->read_p >= buffer->size) {
+ buffer->read_p = buffer->read_p % buffer->size;
+ buffer->write_p = buffer->write_p % buffer->size;
+ }
+ }
+}
+
+void Kit_ForEachItemInBuffer(const Kit_Buffer *buffer, Kit_ForEachItemCallback cb, void *userdata) {
+ unsigned int read_p = buffer->read_p;
+ unsigned int write_p = buffer->write_p;
+ while(read_p < write_p) {
+ cb(buffer->data[read_p++ % buffer->size], userdata);
+ if(read_p >= buffer->size) {
+ read_p = read_p % buffer->size;
+ write_p = write_p % buffer->size;
+ }
+ }
+}
+
+int Kit_WriteBuffer(Kit_Buffer *buffer, void *ptr) {
+ assert(buffer != NULL);
+ assert(ptr != NULL);
+
+ if(!Kit_IsBufferFull(buffer)) {
+ buffer->data[buffer->write_p % buffer->size] = ptr;
+ buffer->write_p++;
+ return 0;
+ }
+ return 1;
+}
+
+int Kit_IsBufferFull(const Kit_Buffer *buffer) {
+ int len = buffer->write_p - buffer->read_p;
+ int k = (len >= buffer->size);
+ return k;
+}
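
Illustration only: a minimal FIFO round-trip through Kit_Buffer as defined above, with free() as the destructor hook; the helper name buffer_example is a placeholder and error handling is omitted.

#include <stdlib.h>
#include <string.h>
#include "kitchensink/internal/utils/kitbuffer.h"

// Sketch only: write one item, read it back, let the buffer clean up the rest.
static void buffer_example(void) {
    Kit_Buffer *buf = Kit_CreateBuffer(4, free);
    char *item = strdup("hello");
    if(Kit_WriteBuffer(buf, item) != 0) {
        free(item);                       // write failed (buffer full), pointer is still ours
    }
    char *oldest = Kit_ReadBuffer(buf);   // consumes the oldest item, NULL when empty
    free(oldest);
    Kit_DestroyBuffer(buf);               // anything still queued is released via free_cb
}
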
diff --git a/src/internal/utils/kithelpers.c b/src/internal/utils/kithelpers.c
new file mode 100644
index 0000000..c68f1c7
--- /dev/null
+++ b/src/internal/utils/kithelpers.c
@@ -0,0 +1,30 @@
+#include <libavutil/time.h>
+#include <libavutil/avstring.h>
+
+#include "kitchensink/internal/utils/kithelpers.h"
+
+static const char * const font_mime[] = {
+ "application/x-font-ttf",
+ "application/x-font-truetype",
+ "application/x-truetype-font",
+ "application/x-font-opentype",
+ "application/vnd.ms-opentype",
+ "application/font-sfnt",
+ NULL
+};
+
+double _GetSystemTime() {
+ return (double)av_gettime() / 1000000.0;
+}
+
+bool attachment_is_font(AVStream *stream) {
+ AVDictionaryEntry *tag = av_dict_get(stream->metadata, "mimetype", NULL, AV_DICT_MATCH_CASE);
+ if(tag) {
+ for(int n = 0; font_mime[n]; n++) {
+ if(av_strcasecmp(font_mime[n], tag->value) == 0) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
diff --git a/src/internal/utils/kitringbuffer.c b/src/internal/utils/kitringbuffer.c
new file mode 100644
index 0000000..70fd24f
--- /dev/null
+++ b/src/internal/utils/kitringbuffer.c
@@ -0,0 +1,172 @@
+/*
+ * Ringbuffer
+ *
+ * Copyright (c) 2017, Tuomas Virtanen
+ * license: MIT; see LICENSE for details.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "kitchensink/internal/utils/kitringbuffer.h"
+
+/**
+ * Creates a new ringbuffer with the given size.
+ * @param size Size for the new ringbuffer
+ * @return Ringbuffer handle
+ */
+Kit_RingBuffer* Kit_CreateRingBuffer(unsigned int size) {
+ Kit_RingBuffer *rb = calloc(1, sizeof(Kit_RingBuffer));
+ if(rb == NULL) return NULL;
+ rb->size = size;
+ rb->data = malloc(size);
+ if(rb->data == NULL) {
+ free(rb);
+ return NULL;
+ }
+ return rb;
+}
+
+/**
+ * Deletes the given ringbuffer.
+ * @param rb Ringbuffer to be deleted.
+ */
+void Kit_DestroyRingBuffer(Kit_RingBuffer* rb) {
+ if(rb == NULL) return;
+ free(rb->data);
+ free(rb);
+}
+
+/**
+ * Writes to the given ringbuffer. If the given length is larger than the free
+ * space left in the ringbuffer, only the data that fits will be written.
+ * @param rb Ringbuffer to write to.
+ * @param data Data to write
+ * @param len Data length
+ * @return Amount of data that was actually written.
+ */
+int Kit_WriteRingBuffer(Kit_RingBuffer *rb, const char* data, int len) {
+ int k;
+ len = (len > (rb->size - rb->len)) ? (rb->size - rb->len) : len;
+ if(rb->len < rb->size) {
+ if(len + rb->wpos > rb->size) {
+ k = (len + rb->wpos) % rb->size;
+ memcpy((rb->data + rb->wpos), data, len - k);
+ memcpy(rb->data, data+(len-k), k);
+ } else {
+ memcpy((rb->data + rb->wpos), data, len);
+ }
+ rb->len += len;
+ rb->wpos += len;
+ if(rb->wpos >= rb->size) {
+ rb->wpos = rb->wpos % rb->size;
+ }
+ return len;
+ }
+ return 0;
+}
+
+/**
+ * Reads data from ringbuffer. If ringbuffer has less data than was requested,
+ * only the available data will be read.
+ * @param rb Ringbuffer to read from.
+ * @param data Buffer to read into.
+ * @param len How much data do we want
+ * @return Amount of data that was actually read.
+ */
+int Kit_ReadRingBuffer(Kit_RingBuffer *rb, char* data, int len) {
+ int k;
+ len = (len > rb->len) ? rb->len : len;
+ if(rb->len > 0) {
+ if(len + rb->rpos > rb->size) {
+ k = (len + rb->rpos) % rb->size;
+ memcpy(data, (rb->data + rb->rpos), len-k);
+ memcpy(data+(len-k), (rb->data), k);
+ } else {
+ memcpy(data, (rb->data + rb->rpos), len);
+ }
+ rb->len -= len;
+ rb->rpos += len;
+ if(rb->rpos >= rb->size) {
+ rb->rpos = rb->rpos % rb->size;
+ }
+ return len;
+ }
+ return 0;
+}
+
+/**
+ * Peeks into the given ringbuffer. Technically the same as Kit_ReadRingBuffer, but does not advance
+ * the internal position pointer. In other words, you may peek as many times as you wish,
+ * and will always get the same data.
+ * @param rb Ringbuffer to peek into.
+ * @param data Buffer to read into
+ * @param len How much data do we need
+ * @return Amount of data actually read
+ */
+int Kit_PeekRingBuffer(const Kit_RingBuffer *rb, char* data, int len) {
+ int k;
+ len = (len > rb->len) ? rb->len : len;
+ if(rb->len > 0) {
+ if(len + rb->rpos > rb->size) {
+ k = (len + rb->rpos) % rb->size;
+ memcpy(data, (rb->data + rb->rpos), len-k);
+ memcpy(data+(len-k), (rb->data), k);
+ } else {
+ memcpy(data, (rb->data + rb->rpos), len);
+ }
+ return len;
+ }
+ return 0;
+}
+
+/**
+ * Advances the internal position counter by the given amount. Note that the counter can only be
+ * advanced by the amount of unread data in the ringbuffer.
+ * @param rb Ringbuffer to handle
+ * @param len How much should the position counter be increased
+ * @return How much the position counter was actually increased.
+ */
+int Kit_AdvanceRingBuffer(Kit_RingBuffer *rb, int len) {
+ len = (len > rb->len) ? rb->len : len;
+ if(rb->len > 0) {
+ rb->len -= len;
+ rb->rpos += len;
+ if(rb->rpos >= rb->size) {
+ rb->rpos = rb->rpos % rb->size;
+ }
+ return len;
+ }
+ return 0;
+}
+
+/**
+ * Returns the current length of the Ringbuffer. In other words, how much data
+ * the ringbuffer contains
+ * @param rb Ringbuffer to handle
+ * @return Data in ringbuffer (in bytes).
+ */
+int Kit_GetRingBufferLength(const Kit_RingBuffer *rb) {
+ return rb->len;
+}
+
+/**
+ * Returns the size of the ringbuffer. In other words, the maximum amount of data
+ * the ringbuffer can hold.
+ * @param rb Ringbuffer to handle
+ * @return Size of the ringbuffer
+ */
+int Kit_GetRingBufferSize(const Kit_RingBuffer *rb) {
+ return rb->size;
+}
+
+/**
+ * Returns the free size of the ringbuffer.
+ * @param rb Ringbuffer to handle
+ * @return Free size in the ringbuffer
+ */
+int Kit_GetRingBufferFree(const Kit_RingBuffer *rb) {
+ return rb->size - rb->len;
+}
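
Illustration only: a minimal byte-level round-trip through the ring buffer API above. Partial writes and reads are expected behaviour; both calls report how many bytes they actually handled. The helper name ringbuffer_example is a placeholder.

#include <string.h>
#include "kitchensink/internal/utils/kitringbuffer.h"

// Sketch only: write a message that does not fully fit, then read it back.
static void ringbuffer_example(void) {
    Kit_RingBuffer *rb = Kit_CreateRingBuffer(16);
    const char *msg = "hello, ringbuffer";                         // 17 bytes, one too many
    int written = Kit_WriteRingBuffer(rb, msg, (int)strlen(msg));  // truncated to 16
    char out[32];
    int read = Kit_ReadRingBuffer(rb, out, (int)sizeof(out));      // reads back `written` bytes
    (void)written; (void)read;
    Kit_DestroyRingBuffer(rb);
}
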
diff --git a/src/internal/video/kitvideo.c b/src/internal/video/kitvideo.c
new file mode 100644
index 0000000..631ff35
--- /dev/null
+++ b/src/internal/video/kitvideo.c
@@ -0,0 +1,269 @@
+#include <assert.h>
+
+#include <libavformat/avformat.h>
+#include <libavutil/imgutils.h>
+#include <libswscale/swscale.h>
+
+#include "kitchensink/kiterror.h"
+#include "kitchensink/internal/kitdecoder.h"
+#include "kitchensink/internal/utils/kithelpers.h"
+#include "kitchensink/internal/utils/kitbuffer.h"
+#include "kitchensink/internal/video/kitvideo.h"
+#include "kitchensink/internal/utils/kitlog.h"
+
+#define KIT_VIDEO_OUT_SIZE 2
+#define KIT_VIDEO_SYNC_THRESHOLD 0.01
+
+typedef struct Kit_VideoDecoder {
+ Kit_VideoFormat *format;
+ struct SwsContext *sws;
+ AVFrame *scratch_frame;
+} Kit_VideoDecoder;
+
+typedef struct Kit_VideoPacket {
+ double pts;
+ AVFrame *frame;
+} Kit_VideoPacket;
+
+
+static Kit_VideoPacket* _CreateVideoPacket(AVFrame *frame, double pts) {
+ Kit_VideoPacket *p = calloc(1, sizeof(Kit_VideoPacket));
+ p->frame = frame;
+ p->pts = pts;
+ return p;
+}
+
+static unsigned int _FindPixelFormat(enum AVPixelFormat fmt) {
+ switch(fmt) {
+ case AV_PIX_FMT_YUV420P9:
+ case AV_PIX_FMT_YUV420P10:
+ case AV_PIX_FMT_YUV420P12:
+ case AV_PIX_FMT_YUV420P14:
+ case AV_PIX_FMT_YUV420P16:
+ case AV_PIX_FMT_YUV420P:
+ return SDL_PIXELFORMAT_YV12;
+ case AV_PIX_FMT_YUYV422:
+ return SDL_PIXELFORMAT_YUY2;
+ case AV_PIX_FMT_UYVY422:
+ return SDL_PIXELFORMAT_UYVY;
+ default:
+ return SDL_PIXELFORMAT_RGBA32;
+ }
+}
+
+static enum AVPixelFormat _FindAVPixelFormat(unsigned int fmt) {
+ switch(fmt) {
+ case SDL_PIXELFORMAT_IYUV: return AV_PIX_FMT_YUV420P;
+ case SDL_PIXELFORMAT_YV12: return AV_PIX_FMT_YUV420P;
+ case SDL_PIXELFORMAT_YUY2: return AV_PIX_FMT_YUYV422;
+ case SDL_PIXELFORMAT_UYVY: return AV_PIX_FMT_UYVY422;
+ case SDL_PIXELFORMAT_ARGB32: return AV_PIX_FMT_BGRA;
+ case SDL_PIXELFORMAT_RGBA32: return AV_PIX_FMT_RGBA;
+ default:
+ return AV_PIX_FMT_NONE;
+ }
+}
+
+static void free_out_video_packet_cb(void *packet) {
+ Kit_VideoPacket *p = packet;
+ av_freep(&p->frame->data[0]);
+ av_frame_free(&p->frame);
+ free(p);
+}
+
+static int dec_decode_video_cb(Kit_Decoder *dec, AVPacket *in_packet) {
+ assert(dec != NULL);
+ assert(in_packet != NULL);
+
+ Kit_VideoDecoder *video_dec = dec->userdata;
+ int frame_finished;
+
+
+ while(in_packet->size > 0) {
+ int len = avcodec_decode_video2(dec->codec_ctx, video_dec->scratch_frame, &frame_finished, in_packet);
+ if(len < 0) {
+ return 1;
+ }
+
+ if(frame_finished) {
+ // Target frame
+ AVFrame *out_frame = av_frame_alloc();
+ av_image_alloc(
+ out_frame->data,
+ out_frame->linesize,
+ dec->codec_ctx->width,
+ dec->codec_ctx->height,
+ _FindAVPixelFormat(video_dec->format->format),
+ 1);
+
+ // Scale from source format to target format, don't touch the size
+ sws_scale(
+ video_dec->sws,
+ (const unsigned char * const *)video_dec->scratch_frame->data,
+ video_dec->scratch_frame->linesize,
+ 0,
+ dec->codec_ctx->height,
+ out_frame->data,
+ out_frame->linesize);
+
+ // Get presentation timestamp
+ double pts = av_frame_get_best_effort_timestamp(video_dec->scratch_frame);
+ pts *= av_q2d(dec->format_ctx->streams[dec->stream_index]->time_base);
+
+            // Lock, write to video buffer, unlock
+ Kit_VideoPacket *out_packet = _CreateVideoPacket(out_frame, pts);
+ Kit_WriteDecoderOutput(dec, out_packet);
+ }
+ in_packet->size -= len;
+ in_packet->data += len;
+ }
+
+
+ return 1;
+}
+
+static void dec_close_video_cb(Kit_Decoder *dec) {
+ if(dec == NULL) return;
+
+ Kit_VideoDecoder *video_dec = dec->userdata;
+ if(video_dec->scratch_frame != NULL) {
+ av_frame_free(&video_dec->scratch_frame);
+ }
+ if(video_dec->sws != NULL) {
+ sws_freeContext(video_dec->sws);
+ }
+ free(video_dec);
+}
+
+Kit_Decoder* Kit_CreateVideoDecoder(const Kit_Source *src, Kit_VideoFormat *format) {
+ assert(src != NULL);
+ assert(format != NULL);
+ if(src->video_stream_index < 0) {
+ return NULL;
+ }
+
+ // First the generic decoder component ...
+ Kit_Decoder *dec = Kit_CreateDecoder(
+ src, src->video_stream_index,
+ KIT_VIDEO_OUT_SIZE,
+ free_out_video_packet_cb);
+ if(dec == NULL) {
+ goto exit_0;
+ }
+
+ // Find formats
+ format->is_enabled = true;
+ format->width = dec->codec_ctx->width;
+ format->height = dec->codec_ctx->height;
+ format->stream_index = src->video_stream_index;
+ format->format = _FindPixelFormat(dec->codec_ctx->pix_fmt);
+
+ // ... then allocate the video decoder
+ Kit_VideoDecoder *video_dec = calloc(1, sizeof(Kit_VideoDecoder));
+ if(video_dec == NULL) {
+ goto exit_1;
+ }
+
+ // Create temporary video frame
+ video_dec->scratch_frame = av_frame_alloc();
+ if(video_dec->scratch_frame == NULL) {
+ Kit_SetError("Unable to initialize temporary video frame");
+ goto exit_2;
+ }
+
+ // Create scaler for handling format changes
+ video_dec->sws = sws_getContext(
+ dec->codec_ctx->width, // Source w
+ dec->codec_ctx->height, // Source h
+ dec->codec_ctx->pix_fmt, // Source fmt
+ dec->codec_ctx->width, // Target w
+ dec->codec_ctx->height, // Target h
+ _FindAVPixelFormat(format->format), // Target fmt
+ SWS_BILINEAR,
+ NULL, NULL, NULL);
+ if(video_dec->sws == NULL) {
+ Kit_SetError("Unable to initialize video converter context");
+ goto exit_3;
+ }
+
+ // Set callbacks and userdata, and we're go
+ video_dec->format = format;
+ dec->dec_decode = dec_decode_video_cb;
+ dec->dec_close = dec_close_video_cb;
+ dec->userdata = video_dec;
+ return dec;
+
+exit_3:
+ av_frame_free(&video_dec->scratch_frame);
+exit_2:
+ free(video_dec);
+exit_1:
+ Kit_CloseDecoder(dec);
+exit_0:
+ return NULL;
+}
+
+int Kit_GetVideoDecoderDataTexture(Kit_Decoder *dec, SDL_Texture *texture) {
+ assert(dec != NULL);
+ assert(texture != NULL);
+
+ Kit_VideoPacket *packet = Kit_PeekDecoderOutput(dec);
+ if(packet == NULL) {
+ return 0;
+ }
+
+ Kit_VideoDecoder *video_dec = dec->userdata;
+ double sync_ts = _GetSystemTime() - dec->clock_sync;
+
+ // Check if we want the packet
+ if(packet->pts > sync_ts + KIT_VIDEO_SYNC_THRESHOLD) {
+ // Video is ahead, don't show yet.
+ return 0;
+ } else if(packet->pts < sync_ts - KIT_VIDEO_SYNC_THRESHOLD) {
+ // Video is lagging, skip until we find a good PTS to continue from.
+ while(packet != NULL) {
+ Kit_AdvanceDecoderOutput(dec);
+ free_out_video_packet_cb(packet);
+ packet = Kit_PeekDecoderOutput(dec);
+ if(packet == NULL) {
+ break;
+ } else {
+ dec->clock_pos = packet->pts;
+ }
+ if(packet->pts > sync_ts - KIT_VIDEO_SYNC_THRESHOLD) {
+ break;
+ }
+ }
+ }
+
+ // If we have no viable packet, just skip
+ if(packet == NULL) {
+ return 0;
+ }
+
+ // Update output texture with current video data.
+ // Take formats into account.
+ switch(video_dec->format->format) {
+ case SDL_PIXELFORMAT_YV12:
+ case SDL_PIXELFORMAT_IYUV:
+ SDL_UpdateYUVTexture(
+ texture, NULL,
+ packet->frame->data[0], packet->frame->linesize[0],
+ packet->frame->data[1], packet->frame->linesize[1],
+ packet->frame->data[2], packet->frame->linesize[2]);
+ break;
+ default:
+ SDL_UpdateTexture(
+ texture, NULL,
+ packet->frame->data[0],
+ packet->frame->linesize[0]);
+ break;
+ }
+
+ // Advance buffer, and free the decoded frame.
+ Kit_AdvanceDecoderOutput(dec);
+ dec->clock_pos = packet->pts;
+ free_out_video_packet_cb(packet);
+
+ return 0;
+}
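
Illustration (not from the patch): a sketch of a video render loop around Kit_GetVideoDecoderDataTexture(). The texture format and size are assumed to match what Kit_CreateVideoDecoder() reported in Kit_VideoFormat; render_video, renderer and the running flag are placeholders.

#include <SDL2/SDL.h>
#include "kitchensink/internal/video/kitvideo.h"

// Sketch only: a render loop around the texture getter above. The texture
// format and size must match what Kit_CreateVideoDecoder() reported.
static void render_video(SDL_Renderer *renderer, Kit_Decoder *dec,
                         const Kit_VideoFormat *fmt, volatile int *running) {
    SDL_Texture *video_tex = SDL_CreateTexture(
        renderer, fmt->format, SDL_TEXTUREACCESS_STATIC, fmt->width, fmt->height);
    while(*running) {
        Kit_GetVideoDecoderDataTexture(dec, video_tex);  // leaves the texture untouched until a frame is due by pts
        SDL_RenderClear(renderer);
        SDL_RenderCopy(renderer, video_tex, NULL, NULL);
        SDL_RenderPresent(renderer);
    }
    SDL_DestroyTexture(video_tex);
}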