summaryrefslogtreecommitdiffstats
path: root/tinyDAV/src
diff options
context:
space:
mode:
Diffstat (limited to 'tinyDAV/src')
-rw-r--r--tinyDAV/src/audio/alsa/tdav_common_alsa.c275
-rw-r--r--tinyDAV/src/audio/alsa/tdav_consumer_alsa.c288
-rw-r--r--tinyDAV/src/audio/alsa/tdav_producer_alsa.c261
-rw-r--r--tinyDAV/src/audio/coreaudio/tdav_audiounit.c425
-rw-r--r--tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c268
-rw-r--r--tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c447
-rw-r--r--tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c253
-rw-r--r--tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c422
-rw-r--r--tinyDAV/src/audio/directsound/tdav_consumer_dsound.c458
-rw-r--r--tinyDAV/src/audio/directsound/tdav_producer_dsound.c402
-rw-r--r--tinyDAV/src/audio/oss/tdav_consumer_oss.c397
-rw-r--r--tinyDAV/src/audio/oss/tdav_producer_oss.c369
-rw-r--r--tinyDAV/src/audio/tdav_consumer_audio.c272
-rw-r--r--tinyDAV/src/audio/tdav_jitterbuffer.c1036
-rw-r--r--tinyDAV/src/audio/tdav_producer_audio.c133
-rw-r--r--tinyDAV/src/audio/tdav_session_audio.c991
-rw-r--r--tinyDAV/src/audio/tdav_speakup_jitterbuffer.c281
-rw-r--r--tinyDAV/src/audio/tdav_speex_denoise.c312
-rw-r--r--tinyDAV/src/audio/tdav_speex_jitterbuffer.c319
-rw-r--r--tinyDAV/src/audio/tdav_speex_resampler.c254
-rw-r--r--tinyDAV/src/audio/tdav_webrtc_denoise.c627
-rw-r--r--tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx676
-rw-r--r--tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx681
-rw-r--r--tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c402
-rw-r--r--tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c393
-rw-r--r--tinyDAV/src/bfcp/tdav_session_bfcp.c741
-rw-r--r--tinyDAV/src/codecs/amr/tdav_codec_amr.c816
-rw-r--r--tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c104
-rw-r--r--tinyDAV/src/codecs/bv/tdav_codec_bv16.c250
-rw-r--r--tinyDAV/src/codecs/bv/tdav_codec_bv32.c0
-rw-r--r--tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c126
-rw-r--r--tinyDAV/src/codecs/fec/tdav_codec_red.c263
-rw-r--r--tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c424
-rw-r--r--tinyDAV/src/codecs/g711/g711.c295
-rw-r--r--tinyDAV/src/codecs/g711/tdav_codec_g711.c326
-rw-r--r--tinyDAV/src/codecs/g722/g722_decode.c400
-rw-r--r--tinyDAV/src/codecs/g722/g722_encode.c426
-rw-r--r--tinyDAV/src/codecs/g722/tdav_codec_g722.c219
-rw-r--r--tinyDAV/src/codecs/g729/tdav_codec_g729.c466
-rw-r--r--tinyDAV/src/codecs/gsm/tdav_codec_gsm.c209
-rw-r--r--tinyDAV/src/codecs/h261/tdav_codec_h261.c536
-rw-r--r--tinyDAV/src/codecs/h263/tdav_codec_h263.c1373
-rw-r--r--tinyDAV/src/codecs/h264/tdav_codec_h264.c993
-rw-r--r--tinyDAV/src/codecs/h264/tdav_codec_h264_cisco.cxx882
-rw-r--r--tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx1130
-rw-r--r--tinyDAV/src/codecs/h264/tdav_codec_h264_intel.cxx2221
-rw-r--r--tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c411
-rw-r--r--tinyDAV/src/codecs/ilbc/tdav_codec_ilbc.c265
-rw-r--r--tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c818
-rw-r--r--tinyDAV/src/codecs/msrp/tdav_codec_msrp.c106
-rw-r--r--tinyDAV/src/codecs/opus/tdav_codec_opus.c363
-rw-r--r--tinyDAV/src/codecs/speex/tdav_codec_speex.c286
-rw-r--r--tinyDAV/src/codecs/t140/tdav_codec_t140.c175
-rw-r--r--tinyDAV/src/codecs/theora/tdav_codec_theora.c862
-rw-r--r--tinyDAV/src/codecs/vpx/tdav_codec_vp8.c1059
-rw-r--r--tinyDAV/src/msrp/tdav_consumer_msrp.c0
-rw-r--r--tinyDAV/src/msrp/tdav_producer_msrp.c0
-rw-r--r--tinyDAV/src/msrp/tdav_session_msrp.c984
-rw-r--r--tinyDAV/src/t140/tdav_consumer_t140.c137
-rw-r--r--tinyDAV/src/t140/tdav_producer_t140.c139
-rw-r--r--tinyDAV/src/t140/tdav_session_t140.c1165
-rw-r--r--tinyDAV/src/tdav.c758
-rw-r--r--tinyDAV/src/tdav_apple.mm159
-rw-r--r--tinyDAV/src/tdav_session_av.c2474
-rw-r--r--tinyDAV/src/tdav_win32.c234
-rw-r--r--tinyDAV/src/video/directx/tdav_producer_screencast_d3d9.cxx185
-rw-r--r--tinyDAV/src/video/directx/tdav_producer_screencast_ddraw.cxx1542
-rw-r--r--tinyDAV/src/video/gdi/tdav_consumer_video_gdi.c544
-rw-r--r--tinyDAV/src/video/gdi/tdav_producer_screencast_gdi.c534
-rw-r--r--tinyDAV/src/video/jb/tdav_video_frame.c243
-rw-r--r--tinyDAV/src/video/jb/tdav_video_jb.c573
-rw-r--r--tinyDAV/src/video/mf/tdav_consumer_video_mf.cxx185
-rw-r--r--tinyDAV/src/video/mf/tdav_producer_video_mf.cxx855
-rw-r--r--tinyDAV/src/video/tdav_consumer_video.c207
-rw-r--r--tinyDAV/src/video/tdav_converter_video.cxx832
-rw-r--r--tinyDAV/src/video/tdav_runnable_video.c95
-rw-r--r--tinyDAV/src/video/tdav_session_video.c1649
-rw-r--r--tinyDAV/src/video/v4linux/tdav_producer_video_v4l2.c1164
-rw-r--r--tinyDAV/src/video/winm/tdav_consumer_winm.cxx219
-rw-r--r--tinyDAV/src/video/winm/tdav_producer_winm.cxx737
80 files changed, 42801 insertions, 0 deletions
diff --git a/tinyDAV/src/audio/alsa/tdav_common_alsa.c b/tinyDAV/src/audio/alsa/tdav_common_alsa.c
new file mode 100644
index 0000000..d1deec8
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_common_alsa.c
@@ -0,0 +1,275 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Common] " FMT, ##__VA_ARGS__)
+
+#define ALSA_PLAYBACK_PERIODS 6
+
+// Initializes the shared ALSA context: creates the mutex and marks the
+// structure ready. Calling it on an already-initialized context is a no-op.
+int tdav_common_alsa_init(tdav_common_alsa_t* p_self)
+{
+    if (!p_self) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+    if (p_self->b_initialized) {
+        ALSA_DEBUG_WARN("Already initialized");
+    }
+    else {
+        tsk_safeobj_init(p_self);
+        p_self->b_initialized = tsk_true;
+    }
+    return 0;
+}
+
+// Acquires the context mutex; returns tsk_safeobj_lock()'s result, or -1 on NULL.
+int tdav_common_alsa_lock(tdav_common_alsa_t* p_self)
+{
+    if (p_self) {
+        return tsk_safeobj_lock(p_self);
+    }
+    ALSA_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+// Releases the context mutex; returns tsk_safeobj_unlock()'s result, or -1 on NULL.
+int tdav_common_alsa_unlock(tdav_common_alsa_t* p_self)
+{
+    if (p_self) {
+        return tsk_safeobj_unlock(p_self);
+    }
+    ALSA_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+/**
+ * Opens and configures the ALSA PCM device: interleaved 16-bit little-endian
+ * samples, nearest supported rate and channel count. For playback the period
+ * count and total buffer size are also constrained to bound latency. A
+ * ptime-sized staging buffer is (re)allocated for both directions.
+ * On any failure the device is automatically unprepared before returning.
+ * FIX: the rate/channel negotiation error logs printed p_self->sample_rate /
+ * p_self->channels, which are not assigned until negotiation succeeds; they
+ * now log the requested values instead.
+ */
+int tdav_common_alsa_prepare(tdav_common_alsa_t* p_self, tsk_bool_t is_capture, int ptime, int channels, int sample_rate)
+{
+    int err = 0, val;
+    if (!p_self) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(p_self);
+
+    if (p_self->b_prepared) {
+        ALSA_DEBUG_WARN("Already prepared");
+        goto bail;
+    }
+    if (!p_self->p_device_name) {
+        p_self->p_device_name = strdup("default");
+    }
+    p_self->b_capture = is_capture;
+
+    if ((err = snd_pcm_open(&p_self->p_handle, p_self->p_device_name, is_capture ? SND_PCM_STREAM_CAPTURE : SND_PCM_STREAM_PLAYBACK, /*SND_PCM_NONBLOCK | SND_PCM_ASYNC*/0)) != 0) {
+        ALSA_DEBUG_ERROR("Failed to open audio device %s (%s)", p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+    ALSA_DEBUG_INFO("device('%s') opened", p_self->p_device_name);
+
+    if ((err = snd_pcm_hw_params_malloc(&p_self->p_params)) != 0) {
+        ALSA_DEBUG_ERROR("Failed to allocate hardware parameter structure(%s)", snd_strerror(err));
+        goto bail;
+    }
+
+    if ((err = snd_pcm_hw_params_any(p_self->p_handle, p_self->p_params)) < 0) {
+        ALSA_DEBUG_ERROR("Failed to initialize hardware parameter structure (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+
+    if ((err = snd_pcm_hw_params_set_access(p_self->p_handle, p_self->p_params, SND_PCM_ACCESS_RW_INTERLEAVED)) != 0) {
+        ALSA_DEBUG_ERROR("Failed to set access type (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+
+    if ((err = snd_pcm_hw_params_set_format(p_self->p_handle, p_self->p_params, SND_PCM_FORMAT_S16_LE)) != 0) {
+        ALSA_DEBUG_ERROR("Failed to set sample format (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+
+    // Negotiate the nearest supported sample rate.
+    val = sample_rate;
+    if ((err = snd_pcm_hw_params_set_rate_near(p_self->p_handle, p_self->p_params, &val, 0)) != 0) {
+        ALSA_DEBUG_ERROR("Failed to set sample rate (rate=%d, device=%s, err=%s)", sample_rate, p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+    ALSA_DEBUG_INFO("sample_rate: req=%d, resp=%d", sample_rate, val);
+    p_self->sample_rate = val;
+
+    // Negotiate the nearest supported channel count.
+    val = channels;
+    if ((err = snd_pcm_hw_params_set_channels_near(p_self->p_handle, p_self->p_params, &val)) != 0) {
+        ALSA_DEBUG_ERROR("Failed to set channels (channels=%d, device=%s, err=%s)", channels, p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+    ALSA_DEBUG_INFO("channels: req=%d, resp=%d", channels, val);
+    p_self->channels = val;
+
+    if (!is_capture) {
+        // Playback only: fix the period count and derive the ring-buffer size
+        // from it so playback latency stays bounded.
+        unsigned int periods = ALSA_PLAYBACK_PERIODS;
+        snd_pcm_uframes_t periodSize = (ptime * p_self->sample_rate * p_self->channels) / 1000;
+        if ((err = snd_pcm_hw_params_set_periods_near(p_self->p_handle, p_self->p_params, &periods, 0)) != 0) {
+            ALSA_DEBUG_ERROR ("Failed to set periods (val=%u, device=%s, err=%s)", periods, p_self->p_device_name, snd_strerror(err));
+            goto bail;
+        }
+
+        snd_pcm_uframes_t bufferSize = (periodSize * periods);
+        if ((err = snd_pcm_hw_params_set_buffer_size(p_self->p_handle, p_self->p_params, bufferSize)) != 0) {
+            ALSA_DEBUG_ERROR ("Failed to set buffer size (val=%lu, device=%s, err=%s)", bufferSize, p_self->p_device_name, snd_strerror(err));
+            goto bail;
+        }
+        ALSA_DEBUG_INFO("periods=%u, buffersize=%lu", periods, bufferSize);
+    }
+
+    if ((err = snd_pcm_hw_params (p_self->p_handle, p_self->p_params)) != 0) {
+        ALSA_DEBUG_ERROR ("Failed to set parameters (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+    if ((err = snd_pcm_prepare(p_self->p_handle)) != 0) {
+        ALSA_DEBUG_ERROR ("Failed to prepare device (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+        goto bail;
+    }
+
+    /*if (is_capture)*/ {
+        // One ptime worth of audio: bytes = ms * rate * (2 bytes/sample * channels) / 1000.
+        p_self->n_buff_size_in_bytes = (ptime * p_self->sample_rate * (2/*SND_PCM_FORMAT_S16_LE*/ * p_self->channels)) / 1000;
+        if (!(p_self->p_buff_ptr = tsk_realloc(p_self->p_buff_ptr, p_self->n_buff_size_in_bytes))) {
+            ALSA_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_self->n_buff_size_in_bytes);
+            err = -4;
+            goto bail;
+        }
+        p_self->n_buff_size_in_samples = (p_self->n_buff_size_in_bytes >> 1/*SND_PCM_FORMAT_S16_LE*/);
+        ALSA_DEBUG_INFO("n_buff_size_in_bytes=%u", p_self->n_buff_size_in_bytes);
+    }
+
+    ALSA_DEBUG_INFO("device('%s') prepared", p_self->p_device_name);
+
+    // everything is OK
+    p_self->b_prepared = tsk_true;
+bail:
+    if (err) {
+        tdav_common_alsa_unprepare(p_self);
+    }
+    tdav_common_alsa_unlock(p_self);
+    return err;
+}
+
+// Releases the hw-params structure and closes the PCM handle. The device
+// must be stopped first (-2 otherwise). The staging buffer is kept for
+// reuse and is only freed by tdav_common_alsa_deinit().
+int tdav_common_alsa_unprepare(tdav_common_alsa_t* p_self)
+{
+    int ret = 0;
+
+    if (!p_self) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(p_self);
+
+    if (p_self->b_started) {
+        ALSA_DEBUG_ERROR("Must stop the capture device before unpreparing");
+        ret = -2;
+    }
+    else {
+        if (p_self->p_params) {
+            snd_pcm_hw_params_free(p_self->p_params);
+            p_self->p_params = tsk_null;
+        }
+        if (p_self->p_handle) {
+            snd_pcm_close(p_self->p_handle);
+            p_self->p_handle = tsk_null;
+        }
+        p_self->b_prepared = tsk_false;
+
+        ALSA_DEBUG_INFO("device('%s') unprepared", p_self->p_device_name);
+    }
+
+    tdav_common_alsa_unlock(p_self);
+    return ret;
+}
+
+// Starts the prepared PCM device. Returns -3 when already started, -2 when
+// not prepared, otherwise snd_pcm_start()'s result.
+int tdav_common_alsa_start(tdav_common_alsa_t* p_self)
+{
+    int ret = 0;
+
+    if (!p_self) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(p_self);
+
+    if (p_self->b_started) {
+        ALSA_DEBUG_WARN("Already started");
+        ret = -3;
+        goto done;
+    }
+    if (!p_self->b_prepared) {
+        ALSA_DEBUG_ERROR("Not prepared");
+        ret = -2;
+        goto done;
+    }
+
+    ret = snd_pcm_start(p_self->p_handle);
+    if (ret != 0) {
+        ALSA_DEBUG_ERROR ("Failed to start device (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(ret));
+        goto done;
+    }
+
+    p_self->b_started = tsk_true;
+    ALSA_DEBUG_INFO("device('%s') started", p_self->p_device_name);
+done:
+    tdav_common_alsa_unlock(p_self);
+    return ret;
+}
+
+// Stops the device (clears the started flag) and then unprepares it.
+// FIX: removed the "bail:" label that no goto referenced (-Wunused-label).
+int tdav_common_alsa_stop(tdav_common_alsa_t* p_self)
+{
+    int err = 0;
+    if (!p_self) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(p_self);
+
+    if (p_self->b_started) {
+        p_self->b_started = tsk_false;
+        // snd_pcm_drain() intentionally skipped: avoid blocking during teardown.
+        //err = snd_pcm_drain(p_self->p_handle);
+        ALSA_DEBUG_INFO("device('%s') stopped", p_self->p_device_name);
+    }
+    if (p_self->b_prepared) {
+        tdav_common_alsa_unprepare(p_self);
+    }
+
+    tdav_common_alsa_unlock(p_self);
+    return err;
+}
+
+// Full teardown: stop and unprepare the device, free the device name and
+// staging buffer, destroy the mutex. No-op when never initialized.
+int tdav_common_alsa_deinit(tdav_common_alsa_t* p_self)
+{
+    if (!p_self || !p_self->b_initialized) {
+        return 0;
+    }
+    tdav_common_alsa_stop(p_self);
+    tdav_common_alsa_unprepare(p_self);
+    TSK_FREE(p_self->p_device_name);
+    TSK_FREE(p_self->p_buff_ptr);
+    tsk_safeobj_deinit(p_self);
+    p_self->b_initialized = tsk_false;
+    return 0;
+}
+
+#endif /* HAVE_ALSA_ASOUNDLIB_H */
+
diff --git a/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c b/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c
new file mode 100644
index 0000000..65bfcd8
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c
@@ -0,0 +1,288 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_consumer_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+
+/** ALSA playback (consumer) plugin context. */
+typedef struct tdav_consumer_alsa_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ tsk_bool_t b_muted; // set via base "set"; not consulted by the playback loop in this file
+ tsk_bool_t b_started; // keep-running flag for the playback thread
+ tsk_bool_t b_paused;
+
+ tsk_thread_handle_t* tid[1]; // playback thread handle
+
+ struct tdav_common_alsa_s alsa_common; // shared ALSA device state (handle, params, buffer)
+}
+tdav_consumer_alsa_t;
+
+// Playback thread: pulls decoded audio from the jitter buffer and writes it
+// to the ALSA device, padding with silence on underrun; recovers from -EPIPE
+// (broken pipe / xrun) and exits on any other write error or when b_started
+// is cleared.
+// FIX: a negative return from tdav_consumer_audio_get() was previously used
+// unchecked as a byte offset and size in memset(), which could compute an
+// out-of-bounds pointer; it is now clamped to 0 (full frame of silence).
+static void* TSK_STDCALL _tdav_producer_alsa_playback_thread(void *param)
+{
+    tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)param;
+    int err;
+
+    ALSA_DEBUG_INFO("__playback_thread -- START");
+
+    tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+    while (p_alsa->b_started) {
+        tdav_common_alsa_lock(&p_alsa->alsa_common);
+        // Pull up to one ptime worth of 16-bit samples (thread-safe call).
+        err = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(p_alsa), p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_bytes);
+        if (err < 0) {
+            err = 0; // treat errors as "no data": play a full frame of silence
+        }
+        if (err < p_alsa->alsa_common.n_buff_size_in_bytes) {
+            // Underrun: zero-fill the tail so a complete period is always written.
+            memset(((uint8_t*)p_alsa->alsa_common.p_buff_ptr) + err, 0, (p_alsa->alsa_common.n_buff_size_in_bytes - err));
+        }
+        if ((err = snd_pcm_writei(p_alsa->alsa_common.p_handle, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_samples)) != p_alsa->alsa_common.n_buff_size_in_samples) {
+            if (err == -EPIPE) { // pipe broken (xrun)
+                err = snd_pcm_recover(p_alsa->alsa_common.p_handle, err, 0);
+                if (err == 0) {
+                    ALSA_DEBUG_INFO ("recovered");
+                    goto next;
+                }
+            }
+            ALSA_DEBUG_ERROR ("Failed to read data from audio interface failed (%d->%s)", err, snd_strerror(err));
+            tdav_common_alsa_unlock(&p_alsa->alsa_common);
+            goto bail;
+        }
+        tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(p_alsa));
+next:
+        tdav_common_alsa_unlock(&p_alsa->alsa_common);
+    }
+bail:
+    ALSA_DEBUG_INFO("__playback_thread -- STOP");
+    return tsk_null;
+}
+
+
+/* ============ Media Consumer Interface ================= */
+// Delegates all parameter updates to the base audio-consumer implementation.
+// FIX: removed the unused local cast of "self" (-Wunused-variable) and the
+// pass-through temporary.
+static int tdav_consumer_alsa_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+    return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+// Configures the ALSA device for playback using the negotiated codec's
+// ptime/channels/rate, then reports the actually-supported channels/rate in
+// audio.out so the resampler can convert if the device differs.
+// FIX: the parameter check "(! p_alsa || !codec && codec->plugin)" was wrong:
+// by precedence it dereferences a NULL codec and never rejects a codec with
+// a NULL plugin; each pointer is now checked explicitly.
+static int tdav_consumer_alsa_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+    tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+    int err = 0;
+    ALSA_DEBUG_INFO("******* tdav_consumer_alsa_prepare ******");
+
+    if (!p_alsa || !codec || !codec->plugin) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+    // Set using requested
+    TMEDIA_CONSUMER(p_alsa)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(p_alsa)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(p_alsa)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+    // Prepare
+    err = tdav_common_alsa_prepare(&p_alsa->alsa_common, tsk_false/*is_record*/, TMEDIA_CONSUMER(p_alsa)->audio.ptime, TMEDIA_CONSUMER(p_alsa)->audio.in.channels, TMEDIA_CONSUMER(p_alsa)->audio.in.rate);
+    if (err) {
+        goto bail;
+    }
+
+    ALSA_DEBUG_INFO("prepared: req_channels=%d; req_rate=%d, resp_channels=%d; resp_rate=%d",
+        TMEDIA_CONSUMER(p_alsa)->audio.in.channels, TMEDIA_CONSUMER(p_alsa)->audio.in.rate,
+        p_alsa->alsa_common.channels, p_alsa->alsa_common.sample_rate);
+
+    // Set using supported (up to the resampler to convert to requested)
+    TMEDIA_CONSUMER(p_alsa)->audio.out.channels = p_alsa->alsa_common.channels;
+    TMEDIA_CONSUMER(p_alsa)->audio.out.rate = p_alsa->alsa_common.sample_rate;
+
+bail:
+    tdav_common_alsa_unlock(&p_alsa->alsa_common);
+    return err;
+}
+
+// Starts the ALSA device and, on success, spawns the playback thread.
+static int tdav_consumer_alsa_start(tmedia_consumer_t* self)
+{
+    int ret = 0;
+    tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+
+    ALSA_DEBUG_INFO("******* tdav_consumer_alsa_start ******");
+
+    if (!p_alsa) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+    if (p_alsa->b_started) {
+        ALSA_DEBUG_WARN("Already started");
+    }
+    else if ((ret = tdav_common_alsa_start(&p_alsa->alsa_common)) == 0) {
+        /* device running: raise the flag before spawning the playback thread */
+        p_alsa->b_started = tsk_true;
+        tsk_thread_create(&p_alsa->tid[0], _tdav_producer_alsa_playback_thread, p_alsa);
+        ALSA_DEBUG_INFO("started");
+    }
+
+    tdav_common_alsa_unlock(&p_alsa->alsa_common);
+    return ret;
+}
+
+// Pushes decoded audio into the jitter buffer (thread-safe); the playback
+// thread drains it asynchronously.
+// FIX: corrected the "Invalid paramter" typo in the error log and removed
+// the dead commented-out lock/unlock code.
+static int tdav_consumer_alsa_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+    int err = 0;
+    tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+
+    if (!p_alsa || !buffer || !size) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if (!p_alsa->b_started) {
+        ALSA_DEBUG_WARN("Not started");
+        return -2;
+    }
+
+    if ((err = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(p_alsa), buffer, size, proto_hdr))) { // thread-safe
+        ALSA_DEBUG_WARN("Failed to put audio data to the jitter buffer");
+    }
+
+    return err;
+}
+
+// Pause is not implemented for this consumer: playback keeps running and the
+// jitter buffer simply underruns (silence) while no data is consumed.
+static int tdav_consumer_alsa_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+// Stops playback: clears the running flag first (so the playback thread's
+// loop condition fails), stops/unprepares the device, then joins the thread.
+// FIX: the result of tdav_common_alsa_stop() was computed but discarded;
+// it is now propagated to the caller.
+static int tdav_consumer_alsa_stop(tmedia_consumer_t* self)
+{
+    tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+    int err;
+
+    if (!p_alsa) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* must be cleared before stopping the device so the thread exits */
+    p_alsa->b_started = tsk_false;
+
+    err = tdav_common_alsa_stop(&p_alsa->alsa_common);
+
+    /* join playback thread */
+    if (p_alsa->tid[0]) {
+        tsk_thread_join(&(p_alsa->tid[0]));
+    }
+
+    ALSA_DEBUG_INFO("stopped");
+
+    return err;
+}
+
+
+//
+// ALSA consumer object definition
+//
+/* constructor: initializes the audio-consumer base then the shared ALSA state */
+static tsk_object_t* tdav_consumer_alsa_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_consumer_alsa_t *p_consumer = (tdav_consumer_alsa_t *)self;
+    if (p_consumer) {
+        ALSA_DEBUG_INFO("create");
+        tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(p_consumer)); /* base */
+        tdav_common_alsa_init(&p_consumer->alsa_common);           /* self */
+    }
+    return self;
+}
+/* destructor: makes sure playback is stopped, then tears down self and base */
+static tsk_object_t* tdav_consumer_alsa_dtor(tsk_object_t * self)
+{
+    tdav_consumer_alsa_t *p_consumer = (tdav_consumer_alsa_t *)self;
+    if (p_consumer) {
+        if (p_consumer->b_started) {
+            tdav_consumer_alsa_stop((tmedia_consumer_t*)p_consumer); /* joins the thread */
+        }
+        tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(p_consumer)); /* base */
+        tdav_common_alsa_deinit(&p_consumer->alsa_common);           /* self */
+        ALSA_DEBUG_INFO("*** destroyed ***");
+    }
+    return self;
+}
+/* object definition (tsk object vtable for the consumer) */
+static const tsk_object_def_t tdav_consumer_alsa_def_s =
+{
+ sizeof(tdav_consumer_alsa_t),
+ tdav_consumer_alsa_ctor, // constructor
+ tdav_consumer_alsa_dtor, // destructor
+ tdav_consumer_audio_cmp, // comparator inherited from the audio-consumer base
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_alsa_plugin_def_s =
+{
+ &tdav_consumer_alsa_def_s,
+
+ tmedia_audio,
+ "Linux ALSA consumer",
+
+ tdav_consumer_alsa_set,
+ tdav_consumer_alsa_prepare,
+ tdav_consumer_alsa_start,
+ tdav_consumer_alsa_consume,
+ tdav_consumer_alsa_pause,
+ tdav_consumer_alsa_stop
+};
+// Exported plugin entry used when registering media plugins.
+const tmedia_consumer_plugin_def_t *tdav_consumer_alsa_plugin_def_t = &tdav_consumer_alsa_plugin_def_s;
+
+#endif /* #if HAVE_ALSA_ASOUNDLIB_H */
diff --git a/tinyDAV/src/audio/alsa/tdav_producer_alsa.c b/tinyDAV/src/audio/alsa/tdav_producer_alsa.c
new file mode 100644
index 0000000..d5c4021
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_producer_alsa.c
@@ -0,0 +1,261 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_producer_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Producer] " FMT, ##__VA_ARGS__)
+
+/** ALSA capture (producer) plugin context. */
+typedef struct tdav_producer_alsa_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ tsk_bool_t b_muted; // when set, captured frames are dropped instead of encoded
+ tsk_bool_t b_started; // keep-running flag for the record thread
+ tsk_bool_t b_paused;
+
+ tsk_thread_handle_t* tid[1]; // record thread handle
+
+ struct tdav_common_alsa_s alsa_common; // shared ALSA device state (handle, params, buffer)
+}
+tdav_producer_alsa_t;
+
+// Capture thread: blocks in snd_pcm_readi() for one ptime worth of frames and
+// hands the samples to the registered encode callback unless muted.
+// NOTE(review): unlike the playback path, there is no -EPIPE/xrun recovery
+// here; any short read or error terminates the thread -- confirm intended.
+static void* TSK_STDCALL _tdav_producer_alsa_record_thread(void *param)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)param;
+ int err;
+
+ ALSA_DEBUG_INFO("__record_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_alsa->b_started) {
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+ // Blocking read of n_buff_size_in_samples frames into the staging buffer.
+ if ((err = snd_pcm_readi(p_alsa->alsa_common.p_handle, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_samples)) != p_alsa->alsa_common.n_buff_size_in_samples) {
+ ALSA_DEBUG_ERROR ("Failed to read data from audio interface failed (%d->%s)", err, snd_strerror(err));
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ goto bail;
+ }
+ // Deliver the raw PCM to the encoder callback (skipped while muted).
+ if (!p_alsa->b_muted && TMEDIA_PRODUCER(p_alsa)->enc_cb.callback) {
+ TMEDIA_PRODUCER(p_alsa)->enc_cb.callback(TMEDIA_PRODUCER(p_alsa)->enc_cb.callback_data, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_bytes);
+ }
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ }
+bail:
+ ALSA_DEBUG_INFO("__record_thread -- STOP");
+ return tsk_null;
+}
+
+
+/* ============ Media Producer Interface ================= */
+// Handles producer-specific int32 parameters: "volume" is accepted but
+// ignored, "mute" toggles b_muted; everything else is delegated to the base
+// audio producer.
+// FIX: "self" and "param" were dereferenced without a NULL check; guard added.
+static int tdav_producer_alsa_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+    tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+    if (!p_alsa || !param) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+    if (param->plugin_type == tmedia_ppt_producer) {
+        if (param->value_type == tmedia_pvt_int32) {
+            if (tsk_striequals(param->key, "volume")) {
+                return 0;
+            }
+            else if (tsk_striequals(param->key, "mute")) {
+                p_alsa->b_muted = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+                return 0;
+            }
+        }
+    }
+    return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+// Configures the ALSA device for capture using the negotiated codec's
+// channels/rate/ptime, then writes the actually-supported values back so the
+// resampler can convert if the device differs.
+// FIX: the parameter check "(! p_alsa || !codec && codec->plugin)" was wrong:
+// by precedence it dereferences a NULL codec and never rejects a codec with
+// a NULL plugin; each pointer is now checked explicitly.
+static int tdav_producer_alsa_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+    tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+    int err = 0;
+    ALSA_DEBUG_INFO("******* tdav_producer_alsa_prepare ******");
+
+    if (!p_alsa || !codec || !codec->plugin) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+    // Set using requested
+    TMEDIA_PRODUCER(p_alsa)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+    TMEDIA_PRODUCER(p_alsa)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+    TMEDIA_PRODUCER(p_alsa)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+    // Prepare
+    err = tdav_common_alsa_prepare(&p_alsa->alsa_common, tsk_true/*is_capture*/, TMEDIA_PRODUCER(p_alsa)->audio.ptime, TMEDIA_PRODUCER(p_alsa)->audio.channels, TMEDIA_PRODUCER(p_alsa)->audio.rate);
+    if (err) {
+        goto bail;
+    }
+
+    ALSA_DEBUG_INFO("prepared: req_channels=%d; req_rate=%d, resp_channels=%d; resp_rate=%d",
+        TMEDIA_PRODUCER(p_alsa)->audio.channels, TMEDIA_PRODUCER(p_alsa)->audio.rate,
+        p_alsa->alsa_common.channels, p_alsa->alsa_common.sample_rate);
+
+    // Set using supported (up to the resampler to convert to requested)
+    TMEDIA_PRODUCER(p_alsa)->audio.channels = p_alsa->alsa_common.channels;
+    TMEDIA_PRODUCER(p_alsa)->audio.rate = p_alsa->alsa_common.sample_rate;
+
+bail:
+    tdav_common_alsa_unlock(&p_alsa->alsa_common);
+    return err;
+}
+
+// Starts the ALSA capture device and, on success, spawns the record thread.
+static int tdav_producer_alsa_start(tmedia_producer_t* self)
+{
+    int ret = 0;
+    tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+
+    ALSA_DEBUG_INFO("******* tdav_producer_alsa_start ******");
+
+    if (!p_alsa) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+    if (p_alsa->b_started) {
+        ALSA_DEBUG_WARN("Already started");
+    }
+    else if ((ret = tdav_common_alsa_start(&p_alsa->alsa_common)) == 0) {
+        /* device running: raise the flag before spawning the record thread */
+        p_alsa->b_started = tsk_true;
+        tsk_thread_create(&p_alsa->tid[0], _tdav_producer_alsa_record_thread, p_alsa);
+        ALSA_DEBUG_INFO("started");
+    }
+
+    tdav_common_alsa_unlock(&p_alsa->alsa_common);
+    return ret;
+}
+
+// Pause is a no-op beyond logging: capture keeps running; only the message
+// "paused" is emitted.
+static int tdav_producer_alsa_pause(tmedia_producer_t* self)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ALSA_DEBUG_INFO("paused");
+
+ return 0;
+}
+
+// Stops capture: clears the running flag first (so the record thread's loop
+// condition fails), stops/unprepares the device, then joins the thread.
+// FIX: the result of tdav_common_alsa_stop() was computed but discarded;
+// it is now propagated to the caller.
+static int tdav_producer_alsa_stop(tmedia_producer_t* self)
+{
+    tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+    int err;
+
+    if (!p_alsa) {
+        ALSA_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* must be cleared before stopping the device so the thread exits */
+    p_alsa->b_started = tsk_false;
+
+    err = tdav_common_alsa_stop(&p_alsa->alsa_common);
+
+    /* join record thread */
+    if (p_alsa->tid[0]) {
+        tsk_thread_join(&(p_alsa->tid[0]));
+    }
+
+    ALSA_DEBUG_INFO("stopped");
+
+    return err;
+}
+
+
+//
+// ALSA producer object definition
+//
+/* constructor: initializes the audio-producer base then the shared ALSA state */
+static tsk_object_t* tdav_producer_alsa_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_producer_alsa_t *p_producer = (tdav_producer_alsa_t*)self;
+    if (p_producer) {
+        ALSA_DEBUG_INFO("create");
+        tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(p_producer)); /* base */
+        tdav_common_alsa_init(&p_producer->alsa_common);           /* self */
+    }
+    return self;
+}
+/* destructor: makes sure capture is stopped, then tears down self and base */
+static tsk_object_t* tdav_producer_alsa_dtor(tsk_object_t * self)
+{
+    tdav_producer_alsa_t *p_producer = (tdav_producer_alsa_t *)self;
+    if (p_producer) {
+        if (p_producer->b_started) {
+            tdav_producer_alsa_stop((tmedia_producer_t*)p_producer); /* joins the thread */
+        }
+        tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(p_producer)); /* base */
+        tdav_common_alsa_deinit(&p_producer->alsa_common);           /* self */
+        ALSA_DEBUG_INFO("*** destroyed ***");
+    }
+    return self;
+}
+/* object definition (tsk object vtable for the producer) */
+static const tsk_object_def_t tdav_producer_alsa_def_s =
+{
+ sizeof(tdav_producer_alsa_t),
+ tdav_producer_alsa_ctor, // constructor
+ tdav_producer_alsa_dtor, // destructor
+ tdav_producer_audio_cmp, // comparator inherited from the audio-producer base
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_alsa_plugin_def_s =
+{
+ &tdav_producer_alsa_def_s,
+
+ tmedia_audio,
+ "Linux ALSA producer",
+
+ tdav_producer_alsa_set,
+ tdav_producer_alsa_prepare,
+ tdav_producer_alsa_start,
+ tdav_producer_alsa_pause,
+ tdav_producer_alsa_stop
+};
+// Exported plugin entry used when registering media plugins.
+const tmedia_producer_plugin_def_t *tdav_producer_alsa_plugin_def_t = &tdav_producer_alsa_plugin_def_s;
+
+#endif /* #if HAVE_ALSA_ASOUNDLIB_H */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_audiounit.c
new file mode 100644
index 0000000..dc11f10
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_audiounit.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_audiounit.h"
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#include "tinydav/tdav_apple.h"
+
+#include "tsk_string.h"
+#include "tsk_list.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#if TARGET_OS_IPHONE
+static UInt32 kOne = 1;
+static UInt32 kZero = 0;
+#endif /* TARGET_OS_IPHONE */
+
+#if TARGET_OS_IPHONE
+ #if TARGET_IPHONE_SIMULATOR // VoiceProcessingIO will give unexpected result on the simulator when using iOS 5
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_RemoteIO
+ #else // Echo cancellation, AGC, ...
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_VoiceProcessingIO
+ #endif
+#elif TARGET_OS_MAC
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_HALOutput
+#else
+ #error "Unknown target"
+#endif
+
+#undef kInputBus
+#define kInputBus 1
+#undef kOutputBus
+#define kOutputBus 0
+
+// Shared AudioUnit wrapper. On iOS a single full-duplex instance is shared by
+// the consumer and the producer of the same session (keyed by 'session_id').
+typedef struct tdav_audiounit_instance_s
+{
+ TSK_DECLARE_OBJECT;
+ uint64_t session_id; // media session owning this instance (lookup key on iOS)
+ uint32_t frame_duration; // effective hardware I/O buffer duration, in milliseconds
+ AudioComponentInstance audioUnit; // the underlying CoreAudio unit
+ struct{
+ unsigned consumer:1; // playback side has been prepared
+ unsigned producer:1; // capture side has been prepared
+ } prepared;
+ unsigned started:1; // AudioOutputUnitStart() succeeded
+ unsigned interrupted:1; // paused because of an audio-session interruption
+
+ TSK_DECLARE_SAFEOBJ;
+
+}
+tdav_audiounit_instance_t;
+TINYDAV_GEXTERN const tsk_object_def_t *tdav_audiounit_instance_def_t;
+typedef tsk_list_t tdav_audiounit_instances_L_t;
+
+
+// process-wide state: the resolved output component and the list of live instances
+static AudioComponent __audioSystem = tsk_null;
+static tdav_audiounit_instances_L_t* __audioUnitInstances = tsk_null;
+
+// Marks one side (consumer=playback, producer=capture) of the unit as prepared
+// and, once the required side(s) are ready, calls AudioUnitInitialize().
+// On iOS both sides share one full-duplex unit, so initialization waits for both.
+// Returns 0 on success, negative on error.
+static int _tdav_audiounit_handle_signal_xxx_prepared(tdav_audiounit_handle_t* self, tsk_bool_t consumer)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+
+ if(consumer){
+ inst->prepared.consumer = tsk_true;
+ }
+ else {
+ inst->prepared.producer = tsk_true;
+ }
+
+ OSStatus status;
+
+ // For iOS we are using full-duplex AudioUnit and we wait for both consumer and producer to be prepared
+#if TARGET_OS_IPHONE
+ if(inst->prepared.consumer && inst->prepared.producer)
+#endif
+ {
+ status = AudioUnitInitialize(inst->audioUnit);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitInitialize failed with status =%ld", (signed long)status);
+ tsk_safeobj_unlock(inst);
+ return -2;
+ }
+ }
+
+ tsk_safeobj_unlock(inst);
+ return 0;
+}
+
+// Returns the AudioUnit instance for 'session_id', creating it on first use.
+// On iOS the same instance is shared (ref-counted) by the consumer and the
+// producer of one session. Returns tsk_null on failure.
+tdav_audiounit_handle_t* tdav_audiounit_handle_create(uint64_t session_id)
+{
+ tdav_audiounit_instance_t* inst = tsk_null;
+
+ // resolve the audio component (once per process)
+ if(!__audioSystem){
+ AudioComponentDescription audioDescription;
+ audioDescription.componentType = kAudioUnitType_Output;
+ audioDescription.componentSubType = kDoubangoAudioUnitSubType;
+ audioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
+ audioDescription.componentFlags = 0;
+ audioDescription.componentFlagsMask = 0;
+ if(!(__audioSystem = AudioComponentFindNext(NULL, &audioDescription))){
+ TSK_DEBUG_ERROR("Failed to find new audio component");
+ // FIX: return directly instead of 'goto done', which would have
+ // unlocked a list that was never locked (and may not even exist yet)
+ return tsk_null;
+ }
+ }
+ // create list used to hold instances
+ if(!__audioUnitInstances && !(__audioUnitInstances = tsk_list_create())){
+ TSK_DEBUG_ERROR("Failed to create new list");
+ return tsk_null; // FIX: same as above — the list is not locked at this point
+ }
+
+ //= lock the list
+ tsk_list_lock(__audioUnitInstances);
+
+ // For iOS we are using full-duplex AudioUnit and to keep it unique for both
+ // the consumer and producer we use the session id.
+#if TARGET_OS_IPHONE
+ // find the instance from the list
+ const tsk_list_item_t* item;
+ tsk_list_foreach(item,__audioUnitInstances){
+ if(((tdav_audiounit_instance_t*)item->data)->session_id == session_id){
+ inst = tsk_object_ref(item->data);
+ goto done;
+ }
+ }
+#endif
+
+ // create instance object and put it into the list
+ if((inst = tsk_object_new(tdav_audiounit_instance_def_t))){
+ OSStatus status = noErr;
+ tdav_audiounit_instance_t* _inst;
+
+ // create new instance
+ if((status= AudioComponentInstanceNew(__audioSystem, &inst->audioUnit)) != noErr){
+ TSK_DEBUG_ERROR("AudioComponentInstanceNew() failed with status=%ld", (signed long)status);
+ TSK_OBJECT_SAFE_FREE(inst);
+ goto done;
+ }
+ _inst = inst, _inst->session_id = session_id;
+ // NOTE(review): the list steals '_inst' (takes the reference); 'inst' is
+ // handed back and later released through tdav_audiounit_handle_destroy()
+ tsk_list_push_back_data(__audioUnitInstances, (void**)&_inst);
+ }
+
+done:
+ //= unlock the list
+ tsk_list_unlock(__audioUnitInstances);
+ return (tdav_audiounit_handle_t*)inst;
+}
+
+AudioComponentInstance tdav_audiounit_handle_get_instance(tdav_audiounit_handle_t* self)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(inst){
+ return inst->audioUnit;
+ }
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+}
+
+// Flags the playback (consumer) side as prepared; may trigger AudioUnitInitialize.
+int tdav_audiounit_handle_signal_consumer_prepared(tdav_audiounit_handle_t* self)
+{
+ return _tdav_audiounit_handle_signal_xxx_prepared(self, tsk_true);
+}
+
+// Flags the capture (producer) side as prepared; may trigger AudioUnitInitialize.
+int tdav_audiounit_handle_signal_producer_prepared(tdav_audiounit_handle_t* self)
+{
+ return _tdav_audiounit_handle_signal_xxx_prepared(self, tsk_false);
+}
+
+// Starts the AudioUnit I/O. Idempotent: the unit is only (re)started when it is
+// not already running or when resuming after an interruption.
+// Returns 0 on success, negative on error.
+int tdav_audiounit_handle_start(tdav_audiounit_handle_t* self)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ OSStatus status = noErr;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+ // (re)activate the audio session before starting the unit — presumably a
+ // no-op outside iOS; TODO confirm against tdav_apple.h
+ status = (OSStatus)tdav_apple_enable_audio();
+ if (status == noErr) {
+ if ((!inst->started || inst->interrupted) && (status = AudioOutputUnitStart(inst->audioUnit))) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStart failed with status=%ld", (signed long)status);
+ }
+ }
+ else {
+ TSK_DEBUG_ERROR("tdav_apple_enable_audio() failed with status=%ld", (signed long)status);
+ }
+ // 'started' reflects the final outcome; a successful start clears 'interrupted'
+ inst->started = (status == noErr) ? tsk_true : tsk_false;
+ if (inst->started) inst->interrupted = 0;
+ tsk_safeobj_unlock(inst);
+ return status ? -2 : 0;
+}
+
+uint32_t tdav_audiounit_handle_get_frame_duration(tdav_audiounit_handle_t* self)
+{
+ const tdav_audiounit_instance_t* inst = (const tdav_audiounit_instance_t*)self;
+ /* duration (ms) measured during configure(); 0 when the handle is invalid */
+ return inst ? inst->frame_duration : 0;
+}
+
+// Applies the preferred I/O buffer duration derived from 'ptime' and records the
+// effective frame duration (ms) in the instance. Returns 0 on success, negative
+// on error. Ownership note: the handle is NEVER destroyed here — the caller
+// keeps it and must release it via tdav_audiounit_handle_destroy().
+int tdav_audiounit_handle_configure(tdav_audiounit_handle_t* self, tsk_bool_t consumer, uint32_t ptime, AudioStreamBasicDescription* audioFormat)
+{
+ OSStatus status = noErr;
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+
+ if(!inst || !audioFormat){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+#if TARGET_OS_IPHONE
+ // set preferred buffer size
+ Float32 preferredBufferSize = ((Float32)ptime / 1000.f); // in seconds
+ UInt32 size = sizeof(preferredBufferSize);
+ status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration) failed with status=%d", (int)status);
+ // FIX: removed TSK_OBJECT_SAFE_FREE(inst) — freeing the caller-owned
+ // handle here left the caller with a dangling pointer
+ goto done;
+ }
+ // read back what the hardware actually granted (may differ from the request)
+ status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &preferredBufferSize);
+ if(status == noErr){
+ inst->frame_duration = (preferredBufferSize * 1000); // seconds -> milliseconds
+ TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
+ }
+ else {
+ TSK_DEBUG_ERROR("AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, %f) failed", preferredBufferSize);
+ }
+
+
+ UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
+ status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_AudioCategory) failed with status code=%d", (int)status);
+ goto done;
+ }
+
+#elif TARGET_OS_MAC
+#if 1
+ // set preferred buffer size
+ UInt32 preferredBufferSize = ((ptime * audioFormat->mSampleRate)/1000); // in bytes
+ UInt32 size = sizeof(preferredBufferSize);
+ status = AudioUnitSetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, size);
+ if(status != noErr){
+ // FIX: log named the wrong property (kAudioOutputUnitProperty_SetInputCallback)
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed with status=%ld", (signed long)status);
+ }
+ status = AudioUnitGetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, &size);
+ if(status == noErr){
+ inst->frame_duration = ((preferredBufferSize * 1000)/audioFormat->mSampleRate);
+ TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
+ }
+ else {
+ TSK_DEBUG_ERROR("AudioUnitGetProperty(kAudioDevicePropertyBufferFrameSize, %lu) failed", (unsigned long)preferredBufferSize);
+ }
+#endif
+
+#endif
+
+done:
+ return (status == noErr) ? 0 : -2;
+}
+
+// Mutes/unmutes the output path. Only effective on iOS, where the unit exposes
+// kAUVoiceIOProperty_MuteOutput; a successful no-op elsewhere.
+int tdav_audiounit_handle_mute(tdav_audiounit_handle_t* self, tsk_bool_t mute)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+#if TARGET_OS_IPHONE
+ OSStatus status = noErr;
+ // kOne/kZero are file-scope statics, so their addresses stay valid for the call
+ status = AudioUnitSetProperty(inst->audioUnit, kAUVoiceIOProperty_MuteOutput,
+ kAudioUnitScope_Output, kOutputBus, mute ? &kOne : &kZero, mute ? sizeof(kOne) : sizeof(kZero));
+
+ return (status == noErr) ? 0 : -2;
+#else
+ return 0;
+#endif
+}
+
+// Suspends (interrupt=tsk_true) or resumes (tsk_false) a started unit, e.g.
+// around an iOS audio-session interruption. Only acts when the unit is started
+// and the requested state differs from the current one.
+int tdav_audiounit_handle_interrupt(tdav_audiounit_handle_t* self, tsk_bool_t interrupt)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if (!inst){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ OSStatus status = noErr;
+ if (inst->interrupted != interrupt && inst->started) {
+ if (interrupt) {
+ status = AudioOutputUnitStop(inst->audioUnit);
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStop failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+ }
+ else {
+#if TARGET_OS_IPHONE
+ // the audio session must be reactivated before the unit can restart
+ status = (OSStatus)tdav_apple_enable_audio();
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioSessionSetActive failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+#endif
+ status = AudioOutputUnitStart(inst->audioUnit);
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStart failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+ }
+ }
+ // record the new state only when the transition (if any) succeeded
+ inst->interrupted = interrupt ? 1: 0;
+bail:
+ return (status != noErr) ? -2 : 0;
+}
+
+// Stops the AudioUnit I/O if it is running. Returns 0 on success, negative on error.
+int tdav_audiounit_handle_stop(tdav_audiounit_handle_t* self)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ OSStatus status = noErr;
+ if(!inst || (inst->started && !inst->audioUnit)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+ if(inst->started && (status = AudioOutputUnitStop(inst->audioUnit))){
+ TSK_DEBUG_ERROR("AudioOutputUnitStop failed with status=%ld", (signed long)status);
+ }
+ // 'started' stays set when AudioOutputUnitStop failed
+ inst->started = (status == noErr ? tsk_false : tsk_true);
+ tsk_safeobj_unlock(inst);
+ return (status != noErr) ? -2 : 0;
+}
+
+// Releases the caller's handle and clears the pointer. When this is the last
+// reference (refcount==1) the instance is removed from the global list, which
+// drops that final reference; otherwise only the refcount is decremented (on
+// iOS the shared instance may still be in use by the other media direction).
+int tdav_audiounit_handle_destroy(tdav_audiounit_handle_t** self){
+ if(!self || !*self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ tsk_list_lock(__audioUnitInstances);
+ if(tsk_object_get_refcount(*self)==1){
+ tsk_list_remove_item_by_data(__audioUnitInstances, *self);
+ }
+ else {
+ tsk_object_unref(*self);
+ }
+ tsk_list_unlock(__audioUnitInstances);
+ *self = tsk_null;
+ return 0;
+}
+
+//
+// Object definition for an AudioUnit instance
+//
+static tsk_object_t* tdav_audiounit_instance_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(!inst){
+ return self;
+ }
+ /* only the lock needs explicit setup; everything else starts zeroed */
+ tsk_safeobj_init(inst);
+ return self;
+}
+// Destructor: uninitializes and disposes the AudioUnit under the instance lock,
+// then tears the lock down.
+static tsk_object_t* tdav_audiounit_instance_dtor(tsk_object_t * self)
+{
+ tdav_audiounit_instance_t* inst = self;
+ if(inst){
+ tsk_safeobj_lock(inst);
+ if(inst->audioUnit){
+ AudioUnitUninitialize(inst->audioUnit);
+ AudioComponentInstanceDispose(inst->audioUnit);
+ inst->audioUnit = tsk_null;
+ }
+ tsk_safeobj_unlock(inst);
+
+ tsk_safeobj_deinit(inst);
+ TSK_DEBUG_INFO("*** AudioUnit Instance destroyed ***");
+ }
+ return self;
+}
+// Identity comparison used by the instance list.
+// FIX: the previous '(int)(_ai1 - _ai2)' subtracted unrelated pointers
+// (undefined behavior) and truncated the difference to 'int', which could
+// collapse two distinct 64-bit addresses to 0, i.e. report them as equal.
+static int tdav_audiounit_instance_cmp(const tsk_object_t *_ai1, const tsk_object_t *_ai2)
+{
+ return (_ai1 == _ai2) ? 0 : ((_ai1 < _ai2) ? -1 : +1);
+}
+static const tsk_object_def_t tdav_audiounit_instance_def_s =
+{
+ sizeof(tdav_audiounit_instance_t), /* object size */
+ tdav_audiounit_instance_ctor, /* constructor */
+ tdav_audiounit_instance_dtor, /* destructor */
+ tdav_audiounit_instance_cmp, /* comparator (identity) */
+};
+const tsk_object_def_t *tdav_audiounit_instance_def_t = &tdav_audiounit_instance_def_s;
+
+
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c b/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c
new file mode 100644
index 0000000..2f5fd90
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_consumer_audioqueue.c
+ * @brief Audio Consumer for MacOSX and iOS platforms.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_consumer_audioqueue.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+#if HAVE_COREAUDIO_AUDIO_QUEUE
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+// AudioQueue output callback: invoked by CoreAudio when 'buffer' has been
+// played and must be refilled. Pulls decoded audio from the jitter buffer,
+// writes silence on underrun, then re-enqueues the buffer.
+static void __handle_output_buffer(void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer) {
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)userdata;
+
+ if (!consumer->started) {
+ // NOTE(review): returning without re-enqueuing removes this buffer from
+ // rotation until the queue is restarted — confirm this is intended
+ return;
+ }
+
+ if(!tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer), buffer->mAudioData, consumer->buffer_size)){
+ // Put silence
+ memset(buffer->mAudioData, 0, consumer->buffer_size);
+ }
+
+ // Re-enqueue the buffer
+ AudioQueueEnqueueBuffer(consumer->queue, buffer, 0, NULL);
+ // alert the jitter buffer
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(consumer));
+}
+
+/* ============ Media Consumer Interface ================= */
+#define tdav_consumer_audioqueue_set tsk_null
+
+// Configures the PCM stream description from the negotiated codec, creates the
+// playback AudioQueue and pre-enqueues CoreAudioPlayBuffers zeroed buffers.
+// Returns 0 (noErr) on success, -1 on bad arguments, or the CoreAudio status.
+int tdav_consumer_audioqueue_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ // FIX: was '!consumer || !codec && codec->plugin' which, by precedence and
+ // short-circuiting, dereferenced 'codec' exactly when it was NULL and never
+ // rejected a codec with a NULL plugin
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+ /* codec should have ptime */
+
+ // Set audio category
+#if TARGET_OS_IPHONE
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+#endif
+
+ // Create the audio stream description (packed signed-integer linear PCM)
+ AudioStreamBasicDescription *description = &(consumer->description);
+ description->mSampleRate = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
+ // one buffer holds exactly one ptime worth of audio
+ int packetperbuffer = 1000 / TMEDIA_CONSUMER(consumer)->audio.ptime;
+ consumer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
+
+ // Create the playback audio queue
+ ret = AudioQueueNewOutput(&(consumer->description),
+ __handle_output_buffer,
+ consumer,
+ NULL,
+ NULL,
+ 0,
+ &(consumer->queue));
+ // FIX: bail out when the queue could not be created instead of allocating
+ // buffers against an invalid queue handle
+ if (ret) {
+ TSK_DEBUG_ERROR("AudioQueueNewOutput failed with status=%ld", (signed long)ret);
+ return ret;
+ }
+
+ for(i = 0; i < CoreAudioPlayBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(consumer->queue, consumer->buffer_size, &(consumer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(consumer->buffers[i]->mAudioData, 0, consumer->buffer_size);
+ consumer->buffers[i]->mAudioDataByteSize = consumer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(consumer->queue, consumer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+// Starts playback. Returns 0 on success (or when already started), the
+// CoreAudio status on failure.
+int tdav_consumer_audioqueue_start(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->started){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ // 'started' must be set before AudioQueueStart() so that the output callback
+ // (which checks it) keeps re-enqueuing buffers as soon as the queue runs
+ consumer->started = tsk_true;
+ ret = AudioQueueStart(consumer->queue, NULL);
+ if (ret) {
+ // FIX: do not leave the consumer flagged as started when the queue failed
+ consumer->started = tsk_false;
+ TSK_DEBUG_ERROR("AudioQueueStart failed with status=%ld", (signed long)ret);
+ }
+
+ return ret;
+}
+
+int tdav_consumer_audioqueue_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(consumer && buffer && size){
+ // the payload is already decoded: hand it straight to the jitter buffer
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+ }
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+}
+
+int tdav_consumer_audioqueue_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* the queue and its buffers stay allocated; playback resumes on start() */
+ return AudioQueuePause(consumer->queue);
+}
+
+int tdav_consumer_audioqueue_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!consumer->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+ /* clear the flag first so the output callback stops feeding the queue,
+ then stop asynchronously (false => do not drain queued buffers) */
+ consumer->started = tsk_false;
+ return AudioQueueStop(consumer->queue, false);
+}
+
+//
+// coreaudio consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_audioqueue_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_audioqueue_t *consumer = (tdav_consumer_audioqueue_t*)self;
+ if(!consumer){
+ return self;
+ }
+ /* initialize the generic audio-consumer base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ return self;
+}
+/* destructor */
+// Destructor: stops playback if needed, frees the queue buffers, disposes the
+// queue, then deinitializes the base audio consumer.
+static tsk_object_t* tdav_consumer_audioqueue_dtor(tsk_object_t * self)
+{
+ tdav_consumer_audioqueue_t *consumer = self;
+ if(consumer){
+ // Stop the consumer if not done
+ if(consumer->started){
+ tdav_consumer_audioqueue_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (consumer->queue) {
+ tsk_size_t i;
+
+ for(i=0; i<CoreAudioPlayBuffers; i++){
+ // NOTE(review): entries may be NULL if prepare() failed mid-allocation
+ // — confirm AudioQueueFreeBuffer tolerates that
+ AudioQueueFreeBuffer(consumer->queue, consumer->buffers[i]);
+ }
+
+ AudioQueueDispose(consumer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ }
+
+ return self;
+}
+
+/* object definition */
+static const tsk_object_def_t tdav_consumer_audioqueue_def_s =
+{
+ sizeof(tdav_consumer_audioqueue_t), /* object size */
+ tdav_consumer_audioqueue_ctor, /* constructor */
+ tdav_consumer_audioqueue_dtor, /* destructor */
+ tdav_consumer_audio_cmp, /* comparator inherited from the base audio consumer */
+};
+
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_audioqueue_plugin_def_s =
+{
+ &tdav_consumer_audioqueue_def_s, /* object definition (ctor/dtor) */
+
+ tmedia_audio, /* media type */
+ "Apple CoreAudio consumer(AudioQueue)", /* human-readable description */
+
+ /* control and lifecycle callbacks ('set' is tsk_null: no runtime options) */
+ tdav_consumer_audioqueue_set,
+ tdav_consumer_audioqueue_prepare,
+ tdav_consumer_audioqueue_start,
+ tdav_consumer_audioqueue_consume,
+ tdav_consumer_audioqueue_pause,
+ tdav_consumer_audioqueue_stop
+};
+
+const tmedia_consumer_plugin_def_t *tdav_consumer_audioqueue_plugin_def_t = &tdav_consumer_audioqueue_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_QUEUE */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c
new file mode 100644
index 0000000..947d782
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_consumer_audiounit.h"
+
+// http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/Introduction/Introduction.html%23//apple_ref/doc/uid/TP40009492-CH1-SW1
+// Resampler: http://developer.apple.com/library/mac/#technotes/tn2097/_index.html
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#undef DISABLE_JITTER_BUFFER
+#define DISABLE_JITTER_BUFFER 0
+
+#include "tsk_debug.h"
+#include "tsk_memory.h"
+#include "tsk_string.h"
+
+#define kNoDataError -1
+#define kRingPacketCount +10
+
+static tsk_size_t tdav_consumer_audiounit_get(tdav_consumer_audiounit_t* self, void* data, tsk_size_t size);
+
+// AudioUnit render callback (output bus): CoreAudio asks for audio to play;
+// each ioData buffer is filled from the ring/jitter buffer under the ring
+// mutex. Returns noErr unless ioData is missing.
+static OSStatus __handle_output_buffer(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ OSStatus status = noErr;
+ // tsk_size_t out_size;
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t* )inRefCon;
+
+ if(!consumer->started || consumer->paused){
+ // not running: leave the buffers untouched and report success
+ goto done;
+ }
+
+ if(!ioData){
+ TSK_DEBUG_ERROR("Invalid argument");
+ status = kNoDataError;
+ goto done;
+ }
+ // read from jitter buffer and fill ioData buffers
+ tsk_mutex_lock(consumer->ring.mutex);
+ for(int i=0; i<ioData->mNumberBuffers; i++){
+ /* int ret = */ tdav_consumer_audiounit_get(consumer, ioData->mBuffers[i].mData, ioData->mBuffers[i].mDataByteSize);
+ }
+ tsk_mutex_unlock(consumer->ring.mutex);
+
+done:
+ return status;
+}
+
+// Copies 'size' bytes of playable audio into 'data'. Consumption is chunk
+// aligned: whole ptime-sized chunks are pulled from the jitter buffer into the
+// speex ring, then 'size' bytes are read from the ring (silence on underrun).
+// Called from the render callback with ring.mutex held.
+static tsk_size_t tdav_consumer_audiounit_get(tdav_consumer_audiounit_t* self, void* data, tsk_size_t size)
+{
+ tsk_ssize_t retSize = 0;
+
+#if DISABLE_JITTER_BUFFER
+ retSize = speex_buffer_read(self->ring.buffer, data, size);
+ if(retSize < size){
+ memset(((uint8_t*)data)+retSize, 0, (size - retSize));
+ }
+#else
+ // 'leftBytes' carries the debt across calls so requests smaller than one
+ // chunk still eventually trigger a jitter-buffer read
+ self->ring.leftBytes += size;
+ while (self->ring.leftBytes >= self->ring.chunck.size) {
+ self->ring.leftBytes -= self->ring.chunck.size;
+ retSize = (tsk_ssize_t)tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(self), self->ring.chunck.buffer, self->ring.chunck.size);
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(self));
+ speex_buffer_write(self->ring.buffer, self->ring.chunck.buffer, retSize);
+ }
+ // IMPORTANT: looks like there is a bug in speex: continuously trying to read more than avail
+ // many times can corrupt the buffer. At least on OS X 1.5
+ if(speex_buffer_get_available(self->ring.buffer) >= size){
+ retSize = (tsk_ssize_t)speex_buffer_read(self->ring.buffer, data, (int)size);
+ }
+ else{
+ memset(data, 0, size);
+ }
+#endif
+
+ return retSize;
+}
+
+/* ============ Media Consumer Interface ================= */
+// Runtime option handler. Intercepts the consumer-level int32 "interrupt"
+// option and forwards everything else to the base audio consumer.
+int tdav_consumer_audiounit_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if (param->plugin_type == tmedia_ppt_consumer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "interrupt")) {
+ // FIX: the value is a 32-bit integer; reading it through a uint8_t*
+ // only inspected the first byte (wrong on big-endian hosts)
+ int32_t interrupt = *((int32_t*)param->value) ? 1 : 0;
+ return tdav_audiounit_handle_interrupt(consumer->audioUnitHandle, interrupt);
+ }
+ }
+ }
+ return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+// Prepares the playback side of the shared AudioUnit: enables the output bus,
+// (on OS X) binds the default output device, sets the PCM stream format,
+// installs the render callback, sizes the speex ring to kRingPacketCount
+// chunks, and finally signals the unit that the consumer is ready.
+// Returns 0 on success, a negative code on failure.
+static int tdav_consumer_audiounit_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ static UInt32 flagOne = 1;
+ AudioStreamBasicDescription audioFormat;
+#define kOutputBus 0
+
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ OSStatus status = noErr;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!consumer->audioUnitHandle){
+ // shared with the producer of the same session on iOS
+ if(!(consumer->audioUnitHandle = tdav_audiounit_handle_create(TMEDIA_CONSUMER(consumer)->session_id))){
+ TSK_DEBUG_ERROR("Failed to get audio unit instance for session with id=%lld", TMEDIA_CONSUMER(consumer)->session_id);
+ return -3;
+ }
+ }
+
+ // enable
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ kOutputBus,
+ &flagOne,
+ sizeof(flagOne));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%d", (int32_t)status);
+ return -4;
+ }
+ else {
+
+#if !TARGET_OS_IPHONE // strange: TARGET_OS_MAC is equal to '1' on Simulator
+ UInt32 param;
+
+ // disable input
+ param = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle), kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &param, sizeof(UInt32));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set default audio device
+ param = sizeof(AudioDeviceID);
+ AudioDeviceID outputDeviceID;
+ status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &param, &outputDeviceID);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set the current device to the default input unit
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &outputDeviceID,
+ sizeof(AudioDeviceID));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_CurrentDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+#endif
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ TSK_DEBUG_INFO("AudioUnit consumer: in.channels=%d, out.channles=%d, in.rate=%d, out.rate=%d, ptime=%d",
+ TMEDIA_CONSUMER(consumer)->audio.in.channels,
+ TMEDIA_CONSUMER(consumer)->audio.out.channels,
+ TMEDIA_CONSUMER(consumer)->audio.in.rate,
+ TMEDIA_CONSUMER(consumer)->audio.out.rate,
+ TMEDIA_CONSUMER(consumer)->audio.ptime);
+
+ // packed signed-integer linear PCM; out.rate wins over in.rate when set
+ audioFormat.mSampleRate = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ audioFormat.mFormatID = kAudioFormatLinearPCM;
+ audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ audioFormat.mChannelsPerFrame = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+ audioFormat.mFramesPerPacket = 1;
+ audioFormat.mBitsPerChannel = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ audioFormat.mBytesPerPacket = audioFormat.mBitsPerChannel / 8 * audioFormat.mChannelsPerFrame;
+ audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket;
+ audioFormat.mReserved = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ kOutputBus,
+ &audioFormat,
+ sizeof(audioFormat));
+
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed with status=%ld", (signed long)status);
+ return -5;
+ }
+ else {
+ // configure
+ if(tdav_audiounit_handle_configure(consumer->audioUnitHandle, tsk_true, TMEDIA_CONSUMER(consumer)->audio.ptime, &audioFormat)){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_set_rate(%d) failed", TMEDIA_CONSUMER(consumer)->audio.out.rate);
+ return -4;
+ }
+
+ // set callback function
+ AURenderCallbackStruct callback;
+ callback.inputProc = __handle_output_buffer;
+ callback.inputProcRefCon = consumer;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ kOutputBus,
+ &callback,
+ sizeof(callback));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ return -6;
+ }
+ }
+ }
+
+ // allocate the chunck buffer and create the ring
+ consumer->ring.chunck.size = (TMEDIA_CONSUMER(consumer)->audio.ptime * audioFormat.mSampleRate * audioFormat.mBytesPerFrame) / 1000;
+ consumer->ring.size = kRingPacketCount * consumer->ring.chunck.size;
+ if(!(consumer->ring.chunck.buffer = tsk_realloc(consumer->ring.chunck.buffer, consumer->ring.chunck.size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return -7;
+ }
+ if(!consumer->ring.buffer){
+ consumer->ring.buffer = speex_buffer_init((int)consumer->ring.size);
+ }
+ else {
+ int ret;
+ if((ret = (int)speex_buffer_resize(consumer->ring.buffer, (int)consumer->ring.size)) < 0){
+ TSK_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", (int)consumer->ring.size, ret);
+ return ret;
+ }
+ }
+ if(!consumer->ring.buffer){
+ TSK_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", (int)consumer->ring.size);
+ return -8;
+ }
+ if(!consumer->ring.mutex && !(consumer->ring.mutex = tsk_mutex_create_2(tsk_false))){
+ TSK_DEBUG_ERROR("Failed to create mutex");
+ return -9;
+ }
+
+ // set maximum frames per slice as buffer size
+ //UInt32 numFrames = (UInt32)consumer->ring.chunck.size;
+ //status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ // kAudioUnitProperty_MaximumFramesPerSlice,
+ // kAudioUnitScope_Global,
+ // 0,
+ // &numFrames,
+ // sizeof(numFrames));
+ //if(status){
+ // TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_MaximumFramesPerSlice, %u) failed with status=%d", (unsigned)numFrames, (int32_t)status);
+ // return -6;
+ //}
+
+ TSK_DEBUG_INFO("AudioUnit consumer prepared");
+ return tdav_audiounit_handle_signal_consumer_prepared(consumer->audioUnitHandle);
+}
+
+static int tdav_consumer_audiounit_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* resuming from pause never touches the AudioUnit itself */
+ consumer->paused = tsk_false;
+ if(consumer->started){
+ TSK_DEBUG_WARN("Already started");
+ return 0;
+ }
+ {
+ int ret = tdav_audiounit_handle_start(consumer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_start failed with error code=%d", ret);
+ return ret;
+ }
+ }
+ consumer->started = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit consumer started");
+ return 0;
+}
+
+// Deliver decoded audio samples to the consumer.
+// With DISABLE_JITTER_BUFFER the samples go straight into the speex ring
+// buffer (under the ring mutex) for the render callback to drain; otherwise
+// they are handed to the base class jitter buffer.
+// Returns 0 on success, -1 on invalid parameter, -2 if the ring buffer does
+// not exist (prepare() not called or failed).
+static int tdav_consumer_audiounit_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if(!consumer || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+#if DISABLE_JITTER_BUFFER
+ {
+ if(consumer->ring.buffer){
+ tsk_mutex_lock(consumer->ring.mutex);
+ speex_buffer_write(consumer->ring.buffer, (void*)buffer, size);
+ tsk_mutex_unlock(consumer->ring.mutex);
+ return 0;
+ }
+ return -2; // ring buffer not created yet
+ }
+#else
+ {
+ // enqueue into the shared audio jitter buffer managed by the base class
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+ }
+#endif
+}
+
+/* Pause playback. Lightweight: only the flag is set, the audio unit keeps
+ * running; start() clears the flag to resume. Returns 0, or -1 on bad input. */
+static int tdav_consumer_audiounit_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* au = (tdav_consumer_audiounit_t*)self;
+ if(au == tsk_null){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ au->paused = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit consumer paused");
+ return 0;
+}
+
+// Stop playback: stops the underlying AudioUnit; on iOS the handle is also
+// destroyed (see https://devforums.apple.com/thread/118595) so it will be
+// re-created by the next prepare().
+// Returns 0 on success (or when not started), -1 on invalid parameter,
+// otherwise the error code from tdav_audiounit_handle_stop().
+static int tdav_consumer_audiounit_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!consumer->started){
+ TSK_DEBUG_INFO("Not started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_stop(consumer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_stop failed with error code=%d", ret);
+ return ret;
+ }
+ }
+#if TARGET_OS_IPHONE
+ //https://devforums.apple.com/thread/118595
+ if(consumer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&consumer->audioUnitHandle);
+ }
+#endif
+
+ consumer->started = tsk_false;
+ TSK_DEBUG_INFO("AudioUnit consumer stopped"); // fixed log typo: "stoppped"
+ return 0;
+}
+
+//
+// coreaudio consumer (AudioUnit) object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_audiounit_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_audiounit_t *au_consumer = self;
+ if(au_consumer == tsk_null){
+ return self;
+ }
+ /* initialize the base audio consumer; nothing consumer-specific to set up */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(au_consumer));
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_audiounit_dtor(tsk_object_t * self)
+{
+ tdav_consumer_audiounit_t *consumer = self;
+ if(consumer){
+ /* deinit self */
+ // Stop the consumer if not done
+ if(consumer->started){
+ tdav_consumer_audiounit_stop(self);
+ }
+ // destroy handle (on iOS stop() already destroyed it and reset the pointer)
+ if(consumer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&consumer->audioUnitHandle);
+ }
+ TSK_FREE(consumer->ring.chunck.buffer);
+ if(consumer->ring.buffer){
+ speex_buffer_destroy(consumer->ring.buffer);
+ }
+ if(consumer->ring.mutex){
+ tsk_mutex_destroy(&consumer->ring.mutex);
+ }
+
+ /* deinit base (must come after self so base state is still valid above) */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ TSK_DEBUG_INFO("*** AudioUnit Consumer destroyed ***");
+ }
+
+ return self;
+}
+
+/* object definition: size + ctor/dtor/comparator wired into the tsk object system */
+static const tsk_object_def_t tdav_consumer_audiounit_def_s =
+{
+ sizeof(tdav_consumer_audiounit_t),
+ tdav_consumer_audiounit_ctor,
+ tdav_consumer_audiounit_dtor,
+ tdav_consumer_audio_cmp,
+};
+
+/* plugin definition: binds the consumer entry points to the media framework */
+static const tmedia_consumer_plugin_def_t tdav_consumer_audiounit_plugin_def_s =
+{
+ &tdav_consumer_audiounit_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio consumer(AudioUnit)",
+
+ tdav_consumer_audiounit_set,
+ tdav_consumer_audiounit_prepare,
+ tdav_consumer_audiounit_start,
+ tdav_consumer_audiounit_consume,
+ tdav_consumer_audiounit_pause,
+ tdav_consumer_audiounit_stop
+};
+
+// exported plugin descriptor registered by the tinyDAV plugin loader
+const tmedia_consumer_plugin_def_t *tdav_consumer_audiounit_plugin_def_t = &tdav_consumer_audiounit_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c b/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c
new file mode 100644
index 0000000..d96fd67
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_producer_audioqueue.c
+ * @brief Audio Producer for MacOSX and iOS platforms using AudioQueue.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_producer_audioqueue.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+
+#if HAVE_COREAUDIO_AUDIO_QUEUE
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+// AudioQueue input callback: invoked by CoreAudio each time a record buffer
+// has been filled. Forwards the captured PCM to the session's encoder
+// callback, then re-enqueues the buffer for the next capture round.
+static void __handle_input_buffer (void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer, const AudioTimeStamp *start_time, UInt32 number_packet_descriptions, const AudioStreamPacketDescription *packet_descriptions ) {
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)userdata;
+
+ if (!producer->started) {
+ return; // once stopped, the buffer is intentionally not re-enqueued
+ }
+
+ // Alert the session that there is new data to send
+ if(TMEDIA_PRODUCER(producer)->enc_cb.callback) {
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data, buffer->mAudioData, buffer->mAudioDataByteSize);
+ }
+
+ // Re-enqueue the buffer
+ AudioQueueEnqueueBuffer(producer->queue, buffer, 0, NULL);
+}
+
+/* ============ Media Producer Interface ================= */
+#define tdav_producer_audioqueue_set tsk_null
+
+// Prepare the AudioQueue producer: builds the PCM stream description from the
+// negotiated codec, creates the input (record) queue and pre-enqueues the
+// record buffers.
+// Returns 0 on success, -1 on invalid parameter, -2 if the record queue
+// cannot be created.
+static int tdav_producer_audioqueue_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ // bugfix: was '!producer || !codec && codec->plugin', which dereferenced
+ // 'codec' when it is tsk_null and never rejected a codec without a plugin
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+ /* codec should have ptime */
+
+
+ // Set audio category
+#if TARGET_OS_IPHONE
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+#endif
+ // Create the audio stream description (interleaved signed-integer PCM)
+ AudioStreamBasicDescription *description = &(producer->description);
+ description->mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
+ // size each buffer to hold exactly one ptime worth of audio
+ int packetperbuffer = 1000 / TMEDIA_PRODUCER(producer)->audio.ptime;
+ producer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
+
+ // Create the record audio queue
+ ret = AudioQueueNewInput(&(producer->description),
+ __handle_input_buffer,
+ producer,
+ NULL,
+ kCFRunLoopCommonModes,
+ 0,
+ &(producer->queue));
+ if (ret) {
+ // bugfix: the status was previously ignored and the invalid queue used below
+ TSK_DEBUG_ERROR("AudioQueueNewInput failed with status=%ld", (signed long)ret);
+ return -2;
+ }
+
+ for(i = 0; i < CoreAudioRecordBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(producer->queue, producer->buffer_size, &(producer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(producer->buffers[i]->mAudioData, 0, producer->buffer_size);
+ producer->buffers[i]->mAudioDataByteSize = producer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(producer->queue, producer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+// Start capturing: marks the producer as started (the input callback checks
+// this flag) and starts the record queue.
+// Returns 0 on success (or when already started), -1 on invalid parameter,
+// otherwise the OSStatus from AudioQueueStart().
+static int tdav_producer_audioqueue_start(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->started){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ // set the flag before starting so the callback accepts the first buffers
+ producer->started = tsk_true;
+ ret = AudioQueueStart(producer->queue, NULL);
+ if (ret) {
+ // bugfix: do not stay flagged as started when AudioQueueStart() failed
+ TSK_DEBUG_ERROR("AudioQueueStart failed with status=%ld", (signed long)ret);
+ producer->started = tsk_false;
+ }
+
+ return ret;
+}
+
+/* Pause capturing: delegates to AudioQueuePause() and propagates its status
+ * to the caller. Returns -1 on invalid parameter. */
+static int tdav_producer_audioqueue_pause(tmedia_producer_t* self)
+{
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(producer == tsk_null){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return AudioQueuePause(producer->queue);
+}
+
+// Stop capturing: clears the started flag (the input callback then drops
+// buffers) and stops the queue. The 'false' argument requests a non-immediate
+// stop — NOTE(review): per the AudioQueue API this lets queued buffers drain
+// first; confirm this is the intended behavior here.
+// Returns 0 when not started, -1 on invalid parameter, otherwise the
+// OSStatus from AudioQueueStop().
+static int tdav_producer_audioqueue_stop(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!producer->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ producer->started = tsk_false;
+ ret = AudioQueueStop(producer->queue, false);
+
+ return ret;
+}
+
+
+//
+// CoreAudio producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_audioqueue_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_audioqueue_t *prod = self;
+ if(prod == tsk_null){
+ return self;
+ }
+ /* initialize the base audio producer */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(prod));
+ /* nothing producer-specific to initialize yet */
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_audioqueue_dtor(tsk_object_t * self)
+{
+ tdav_producer_audioqueue_t *producer = self;
+ if(producer){
+ // Stop the producer if not done
+ if(producer->started){
+ tdav_producer_audioqueue_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (producer->queue) {
+ tsk_size_t i;
+
+ // NOTE(review): if prepare() broke out of its allocation loop early,
+ // some buffers[i] may never have been allocated — presumably the
+ // object is zero-initialized so they are NULL; confirm that
+ // AudioQueueFreeBuffer tolerates a NULL buffer here
+ for(i=0; i<CoreAudioRecordBuffers; i++){
+ AudioQueueFreeBuffer(producer->queue, producer->buffers[i]);
+ }
+ AudioQueueDispose(producer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+ }
+
+ return self;
+}
+/* object definition: size + ctor/dtor/comparator wired into the tsk object system */
+static const tsk_object_def_t tdav_producer_audioqueue_def_s =
+{
+ sizeof(tdav_producer_audioqueue_t),
+ tdav_producer_audioqueue_ctor,
+ tdav_producer_audioqueue_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition: binds the producer entry points to the media framework */
+static const tmedia_producer_plugin_def_t tdav_producer_audioqueue_plugin_def_s =
+{
+ &tdav_producer_audioqueue_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio producer (AudioQueue)",
+
+ tdav_producer_audioqueue_set,
+ tdav_producer_audioqueue_prepare,
+ tdav_producer_audioqueue_start,
+ tdav_producer_audioqueue_pause,
+ tdav_producer_audioqueue_stop
+};
+// exported plugin descriptor registered by the tinyDAV plugin loader
+const tmedia_producer_plugin_def_t *tdav_producer_audioqueue_plugin_def_t = &tdav_producer_audioqueue_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_QUEUE */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c
new file mode 100644
index 0000000..a88261e
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_producer_audiounit.h"
+
+// http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/Introduction/Introduction.html%23//apple_ref/doc/uid/TP40009492-CH1-SW1
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#include <mach/mach.h>
+#import <sys/sysctl.h>
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_thread.h"
+#include "tsk_debug.h"
+
+#define kRingPacketCount 10
+
+// AudioUnit input callback: pulls captured frames from the system via
+// AudioUnitRender(), appends them to the ring buffer, then delivers complete
+// ptime-sized chunks to the encoder callback.
+static OSStatus __handle_input_buffer(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ OSStatus status = noErr;
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)inRefCon;
+
+ // holder (mData == NULL / size == 0 => let AudioUnitRender supply storage)
+ AudioBuffer buffer;
+ buffer.mData = tsk_null;
+ buffer.mDataByteSize = 0;
+ buffer.mNumberChannels = TMEDIA_PRODUCER(producer)->audio.channels;
+
+ // list of holders
+ AudioBufferList buffers;
+ buffers.mNumberBuffers = 1;
+ buffers.mBuffers[0] = buffer;
+
+ // render to get frames from the system
+ status = AudioUnitRender(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ ioActionFlags,
+ inTimeStamp,
+ inBusNumber,
+ inNumberFrames,
+ &buffers);
+ if(status == 0){
+ // must not be done on async thread: doing it gives bad audio quality when audio+video call is done with CPU consuming codec (e.g. speex or g729)
+ speex_buffer_write(producer->ring.buffer, buffers.mBuffers[0].mData, buffers.mBuffers[0].mDataByteSize);
+ int avail = speex_buffer_get_available(producer->ring.buffer);
+ while (producer->started && avail >= producer->ring.chunck.size) {
+ avail -= speex_buffer_read(producer->ring.buffer, (void*)producer->ring.chunck.buffer, (int)producer->ring.chunck.size);
+ // bugfix: guard against a NULL encoder callback (the AudioQueue producer
+ // already checks before invoking; this version previously did not)
+ if(TMEDIA_PRODUCER(producer)->enc_cb.callback){
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data,
+ producer->ring.chunck.buffer, producer->ring.chunck.size);
+ }
+ }
+ }
+
+ return status;
+}
+
+/* ============ Media Producer Interface ================= */
+// Runtime parameter setter: handles "mute" (stores the flag and applies it to
+// the audio unit) and "interrupt"; anything else is delegated to the base
+// audio producer.
+// NOTE(review): 'param' is dereferenced without a null check — presumably the
+// framework guarantees it; confirm against the callers.
+int tdav_producer_audiounit_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ if(param->plugin_type == tmedia_ppt_producer){
+ if(param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "mute")) {
+ producer->muted = TSK_TO_INT32((uint8_t*)param->value);
+ return tdav_audiounit_handle_mute(((tdav_producer_audiounit_t*)self)->audioUnitHandle, producer->muted);
+ }
+ else if (tsk_striequals(param->key, "interrupt")) {
+ int32_t interrupt = *((uint8_t*)param->value) ? 1 : 0;
+ return tdav_audiounit_handle_interrupt(producer->audioUnitHandle, interrupt);
+ }
+ }
+ }
+ // not handled here: let the base class process the parameter
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+// Prepare the AudioUnit producer (capture side):
+// - creates the audio unit handle if needed and enables capture on the input bus
+// - on OSX: disables output and binds the default input device
+// - negotiates the PCM stream format from the codec (and the device rate on OSX)
+// - configures the unit, installs the input callback and allocates the chunk
+// buffer + speex ring buffer drained by __handle_input_buffer()
+// Returns 0 on success, a negative code (or speex error) on failure.
+static int tdav_producer_audiounit_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ static UInt32 flagOne = 1;
+ UInt32 param;
+ // static UInt32 flagZero = 0;
+#define kInputBus 1
+
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ OSStatus status = noErr;
+ AudioStreamBasicDescription audioFormat;
+ AudioStreamBasicDescription deviceFormat;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!producer->audioUnitHandle){
+ if(!(producer->audioUnitHandle = tdav_audiounit_handle_create(TMEDIA_PRODUCER(producer)->session_id))){
+ TSK_DEBUG_ERROR("Failed to get audio unit instance for session with id=%lld", TMEDIA_PRODUCER(producer)->session_id);
+ return -3;
+ }
+ }
+
+ // enable capture on the input bus
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ kInputBus,
+ &flagOne,
+ sizeof(flagOne));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+ else {
+#if !TARGET_OS_IPHONE // strange: TARGET_OS_MAC is equal to '1' on Simulator
+ // disable output
+ param = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0,
+ &param,
+ sizeof(UInt32));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set default audio device
+ param = sizeof(AudioDeviceID);
+ AudioDeviceID inputDeviceID;
+ status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &param, &inputDeviceID);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set the current device to the default input unit
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Output,
+ 0,
+ &inputDeviceID,
+ sizeof(AudioDeviceID));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_CurrentDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+#endif /* TARGET_OS_MAC */
+
+ /* codec should have ptime */
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+ TSK_DEBUG_INFO("AudioUnit producer: channels=%d, rate=%d, ptime=%d",
+ TMEDIA_PRODUCER(producer)->audio.channels,
+ TMEDIA_PRODUCER(producer)->audio.rate,
+ TMEDIA_PRODUCER(producer)->audio.ptime);
+
+ // get device format
+ param = sizeof(AudioStreamBasicDescription);
+ status = AudioUnitGetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ kInputBus,
+ &deviceFormat, &param);
+ if(status == noErr && deviceFormat.mSampleRate){
+#if TARGET_OS_IPHONE
+ // iOS support 8Khz, 16kHz and 32kHz => do not override the sampleRate
+#elif TARGET_OS_MAC
+ // For example, iSight supports only 48kHz
+ TMEDIA_PRODUCER(producer)->audio.rate = deviceFormat.mSampleRate;
+#endif
+ }
+
+ // set format (signed-integer packed PCM)
+ audioFormat.mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
+ audioFormat.mFormatID = kAudioFormatLinearPCM;
+ audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
+ audioFormat.mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
+ audioFormat.mFramesPerPacket = 1;
+ audioFormat.mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ audioFormat.mBytesPerPacket = audioFormat.mBitsPerChannel / 8 * audioFormat.mChannelsPerFrame;
+ audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket;
+ audioFormat.mReserved = 0;
+ // mono does not need the non-interleaved flag
+ if(audioFormat.mFormatID == kAudioFormatLinearPCM && audioFormat.mChannelsPerFrame == 1){
+ audioFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
+ }
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ kInputBus,
+ &audioFormat,
+ sizeof(audioFormat));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed with status=%ld", (signed long)status);
+ return -5;
+ }
+ else {
+
+ // configure
+ if(tdav_audiounit_handle_configure(producer->audioUnitHandle, tsk_false, TMEDIA_PRODUCER(producer)->audio.ptime, &audioFormat)){
+ // fixed error message: it previously named 'tdav_audiounit_handle_set_rate'
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_configure(%d) failed", TMEDIA_PRODUCER(producer)->audio.rate);
+ return -4;
+ }
+
+ // set callback function
+ AURenderCallbackStruct callback;
+ callback.inputProc = __handle_input_buffer;
+ callback.inputProcRefCon = producer;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Output,
+ kInputBus,
+ &callback,
+ sizeof(callback));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ return -6;
+ }
+ else {
+ // disable buffer allocation as we will provide ours
+ //status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ // kAudioUnitProperty_ShouldAllocateBuffer,
+ // kAudioUnitScope_Output,
+ // kInputBus,
+ // &flagZero,
+ // sizeof(flagZero));
+
+ // one chunk == one ptime worth of audio
+ producer->ring.chunck.size = (TMEDIA_PRODUCER(producer)->audio.ptime * audioFormat.mSampleRate * audioFormat.mBytesPerFrame) / 1000;
+ // allocate our chunck buffer
+ if(!(producer->ring.chunck.buffer = tsk_realloc(producer->ring.chunck.buffer, producer->ring.chunck.size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return -7;
+ }
+ // create ringbuffer (or resize it when re-preparing)
+ producer->ring.size = kRingPacketCount * producer->ring.chunck.size;
+ if(!producer->ring.buffer){
+ producer->ring.buffer = speex_buffer_init((int)producer->ring.size);
+ }
+ else {
+ int ret;
+ if((ret = speex_buffer_resize(producer->ring.buffer, producer->ring.size)) < 0){
+ TSK_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", (int)producer->ring.size, ret);
+ return ret;
+ }
+ }
+ if(!producer->ring.buffer){
+ TSK_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", (int)producer->ring.size);
+ return -9;
+ }
+ }
+
+ }
+ }
+
+ TSK_DEBUG_INFO("AudioUnit producer prepared");
+ // fixed stray ';;' on the line below
+ return tdav_audiounit_handle_signal_producer_prepared(producer->audioUnitHandle);
+}
+
+// Start capturing: resume from pause or start the underlying AudioUnit, then
+// re-apply the mute state (it may be lost when the handle is re-created).
+static int tdav_producer_audiounit_start(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(producer->paused){
+ // NOTE(review): resuming only clears the flag and returns tsk_false
+ // (0 == success) without touching the unit — presumably the unit kept
+ // running while paused; confirm 'paused' is only ever set while started
+ producer->paused = tsk_false;
+ return tsk_false;
+ }
+
+ int ret;
+ if(producer->started){
+ TSK_DEBUG_WARN("Already started");
+ return 0;
+ }
+ else {
+ ret = tdav_audiounit_handle_start(producer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_start failed with error code=%d", ret);
+ return ret;
+ }
+ }
+ producer->started = tsk_true;
+
+ // apply parameters (because could be lost when the producer is restarted -handle recreated-)
+ ret = tdav_audiounit_handle_mute(producer->audioUnitHandle, producer->muted);
+
+ TSK_DEBUG_INFO("AudioUnit producer started");
+ return 0;
+}
+
+/* Pause capturing. Lightweight: only the flag is set, the audio unit keeps
+ * running; start() clears the flag to resume. Returns 0, or -1 on bad input. */
+static int tdav_producer_audiounit_pause(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* prod = (tdav_producer_audiounit_t*)self;
+ if(prod == tsk_null){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ prod->paused = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit producer paused");
+ return 0;
+}
+
+// Stop capturing: stops the audio unit; on iOS the handle is also destroyed
+// (https://devforums.apple.com/thread/118595) so it will be re-created by the
+// next prepare(). The 'started' flag is always cleared, even when the
+// underlying stop fails — we MUST stop the thread.
+static int tdav_producer_audiounit_stop(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!producer->started){
+ TSK_DEBUG_INFO("Not started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_stop(producer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_stop failed with error code=%d", ret);
+ // do not return even if failed => we MUST stop the thread!
+ }
+#if TARGET_OS_IPHONE
+ //https://devforums.apple.com/thread/118595
+ if(producer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&producer->audioUnitHandle);
+ }
+#endif
+ }
+ producer->started = tsk_false;
+ TSK_DEBUG_INFO("AudioUnit producer stopped"); // fixed log typo: "stoppped"
+ return 0;
+}
+
+
+//
+// CoreAudio producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_audiounit_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_audiounit_t *prod = self;
+ if(prod == tsk_null){
+ return self;
+ }
+ /* initialize the base audio producer; nothing producer-specific to set up */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(prod));
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_audiounit_dtor(tsk_object_t * self)
+{
+ tdav_producer_audiounit_t *producer = self;
+ if(producer){
+ // Stop the producer if not done
+ if(producer->started){
+ tdav_producer_audiounit_stop(self);
+ }
+
+ // destroy the handle (on iOS stop() already destroyed it and reset the pointer)
+ if (producer->audioUnitHandle) {
+ tdav_audiounit_handle_destroy(&producer->audioUnitHandle);
+ }
+ TSK_FREE(producer->ring.chunck.buffer);
+ if(producer->ring.buffer){
+ speex_buffer_destroy(producer->ring.buffer);
+ }
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+
+ TSK_DEBUG_INFO("*** AudioUnit Producer destroyed ***");
+ }
+
+ return self;
+}
+/* object definition: size + ctor/dtor/comparator wired into the tsk object system */
+static const tsk_object_def_t tdav_producer_audiounit_def_s =
+{
+ sizeof(tdav_producer_audiounit_t),
+ tdav_producer_audiounit_ctor,
+ tdav_producer_audiounit_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition: binds the producer entry points to the media framework */
+static const tmedia_producer_plugin_def_t tdav_producer_audiounit_plugin_def_s =
+{
+ &tdav_producer_audiounit_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio producer (AudioUnit)",
+
+ tdav_producer_audiounit_set,
+ tdav_producer_audiounit_prepare,
+ tdav_producer_audiounit_start,
+ tdav_producer_audiounit_pause,
+ tdav_producer_audiounit_stop
+};
+// exported plugin descriptor registered by the tinyDAV plugin loader
+const tmedia_producer_plugin_def_t *tdav_producer_audiounit_plugin_def_t = &tdav_producer_audiounit_plugin_def_s;
+
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
new file mode 100644
index 0000000..82e125b
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
@@ -0,0 +1,458 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_dsound.c
+ * @brief Microsoft DirectSound consumer.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ */
+#include "tinydav/audio/directsound/tdav_consumer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+// # pragma comment(lib, "dxguid.lib")
+#endif
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+#include <dsound.h>
+
+extern void tdav_win32_print_error(const char* func, HRESULT hr);
+
+#if !defined(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT)
+# define TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT 20
+#endif /* TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT */
+
+// DirectSound consumer (playback) context
+typedef struct tdav_consumer_dsound_s
+{
+    TDAV_DECLARE_CONSUMER_AUDIO;
+
+    tsk_bool_t started; // playback thread running flag
+    tsk_size_t bytes_per_notif_size; // bytes played per notification period (one 'ptime' of audio)
+    uint8_t* bytes_per_notif_ptr; // scratch buffer of 'bytes_per_notif_size' bytes
+    tsk_thread_handle_t* tid[1]; // playback thread handle
+
+    LPDIRECTSOUND device; // playback device
+    LPDIRECTSOUNDBUFFER primaryBuffer; // used only to apply the output format
+    LPDIRECTSOUNDBUFFER secondaryBuffer; // looping buffer actually rendered
+    HANDLE notifEvents[TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT]; // one event per notification position
+}
+tdav_consumer_dsound_t;
+
+// Map a volume percentage (0..100) onto the DirectSound attenuation
+// range [DSBVOLUME_MIN, DSBVOLUME_MAX] (hundredths of a decibel).
+static _inline int32_t __convert_volume(int32_t volume)
+{
+    return DSBVOLUME_MIN + (volume * ((DSBVOLUME_MAX - DSBVOLUME_MIN) / 100));
+}
+
+// Playback thread: waits on the buffer notification events, pulls one period
+// of decoded audio from the jitter buffer and copies it into the secondary
+// buffer at the write cursor. Runs until 'started' is cleared by stop().
+static void* TSK_STDCALL _tdav_consumer_dsound_playback_thread(void *param)
+{
+    tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)param;
+
+    HRESULT hr;
+    LPVOID lpvAudio1, lpvAudio2;
+    DWORD dwBytesAudio1, dwBytesAudio2, dwEvent;
+    static const DWORD dwWriteCursor = 0;
+    tsk_size_t out_size;
+
+    TSK_DEBUG_INFO("_tdav_consumer_dsound_playback_thread -- START");
+
+    SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+    while (dsound->started) {
+        // Block until any notification position fires, then re-check 'started'
+        // so stop() can terminate the loop.
+        dwEvent = WaitForMultipleObjects(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT, dsound->notifEvents, FALSE, INFINITE);
+        if (!dsound->started) {
+            break;
+        }
+
+        // lock
+        hr = IDirectSoundBuffer_Lock(
+            dsound->secondaryBuffer,
+            dwWriteCursor/* Ignored because of DSBLOCK_FROMWRITECURSOR */,
+            (DWORD)dsound->bytes_per_notif_size,
+            &lpvAudio1, &dwBytesAudio1,
+            &lpvAudio2, &dwBytesAudio2,
+            DSBLOCK_FROMWRITECURSOR);
+        if (hr != DS_OK) {
+            tdav_win32_print_error("IDirectSoundBuffer_Lock", hr);
+            goto next;
+        }
+
+        // Pull up to one notification period of audio; pad with silence on underflow.
+        out_size = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(dsound), dsound->bytes_per_notif_ptr, dsound->bytes_per_notif_size);
+        if (out_size < dsound->bytes_per_notif_size) {
+            // fill with silence
+            memset(&dsound->bytes_per_notif_ptr[out_size], 0, (dsound->bytes_per_notif_size - out_size));
+        }
+        // The locked region may wrap around the circular buffer (two segments).
+        if ((dwBytesAudio1 + dwBytesAudio2) == dsound->bytes_per_notif_size) {
+            memcpy(lpvAudio1, dsound->bytes_per_notif_ptr, dwBytesAudio1);
+            if (lpvAudio2 && dwBytesAudio2) {
+                memcpy(lpvAudio2, &dsound->bytes_per_notif_ptr[dwBytesAudio1], dwBytesAudio2);
+            }
+        }
+        else {
+            TSK_DEBUG_ERROR("Not expected: %d+%d#%d", dwBytesAudio1, dwBytesAudio2, dsound->bytes_per_notif_size);
+        }
+#if 0
+        memset(lpvAudio1, rand(), dwBytesAudio1);
+#endif
+        // unlock
+        if ((hr = IDirectSoundBuffer_Unlock(dsound->secondaryBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK) {
+            tdav_win32_print_error("IDirectSoundBuffer_UnLock", hr);
+            goto next;
+        }
+next:
+        // Advance the jitter-buffer clock once per period, even on lock errors.
+        tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(dsound));
+    }
+
+    TSK_DEBUG_INFO("_tdav_consumer_dsound_playback_thread -- STOP");
+
+
+    return tsk_null;
+}
+
+/* Release every DirectSound resource (COM objects and notification events).
+   Idempotent: each released member is reset so a second call is harmless. */
+static int _tdav_consumer_dsound_unprepare(tdav_consumer_dsound_t *dsound)
+{
+    tsk_size_t idx;
+    if(!dsound){
+        return 0;
+    }
+    if(dsound->primaryBuffer){
+        IDirectSoundBuffer_Release(dsound->primaryBuffer);
+        dsound->primaryBuffer = NULL;
+    }
+    if(dsound->secondaryBuffer){
+        IDirectSoundBuffer_Release(dsound->secondaryBuffer);
+        dsound->secondaryBuffer = NULL;
+    }
+    if(dsound->device){
+        IDirectSound_Release(dsound->device);
+        dsound->device = NULL;
+    }
+    for(idx = 0; idx < sizeof(dsound->notifEvents)/sizeof(dsound->notifEvents[0]); ++idx){
+        if(dsound->notifEvents[idx]){
+            CloseHandle(dsound->notifEvents[idx]);
+            dsound->notifEvents[idx] = NULL;
+        }
+    }
+    return 0;
+}
+
+
+
+/* ============ Media Consumer Interface ================= */
+/* Apply a runtime parameter. The audio base consumer handles it first; on
+   success, a "volume" change is additionally mirrored onto the secondary
+   buffer (when it exists). */
+static int tdav_consumer_dsound_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+    tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+    int ret = tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+    if(ret != 0){
+        return ret;
+    }
+    if(dsound->secondaryBuffer && tsk_striequals(param->key, "volume")){
+        if(IDirectSoundBuffer_SetVolume(dsound->secondaryBuffer, __convert_volume(TMEDIA_CONSUMER(self)->audio.volume)) != DS_OK){
+            TSK_DEBUG_ERROR("IDirectSoundBuffer_SetVolume() failed");
+            ret = -1;
+        }
+    }
+    return ret;
+}
+
+/* Prepare the consumer: create the DirectSound device, apply the negotiated
+ * codec format to the primary buffer and create the looping secondary buffer
+ * (TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT notification periods long).
+ * Returns 0 on success, a negative code otherwise. */
+static int tdav_consumer_dsound_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+    HRESULT hr;
+    HWND hWnd;
+
+    WAVEFORMATEX wfx = {0};
+    DSBUFFERDESC dsbd = {0};
+
+    tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+    if(!dsound){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if(dsound->device || dsound->primaryBuffer || dsound->secondaryBuffer){
+        TSK_DEBUG_ERROR("Consumer already prepared");
+        return -2;
+    }
+
+    TMEDIA_CONSUMER(dsound)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(dsound)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(dsound)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+#if 0
+    TMEDIA_CONSUMER(dsound)->audio.out.rate = 48000;
+    TMEDIA_CONSUMER(dsound)->audio.out.channels = 2;
+#endif
+
+    /* Create sound device */
+    /* FIX: '!= DS_OK' was previously inside the assignment parentheses
+       (operator precedence), so 'hr' received the boolean comparison result
+       instead of the HRESULT and the printed error code was meaningless. */
+    if((hr = DirectSoundCreate(NULL, &dsound->device, NULL)) != DS_OK){
+        tdav_win32_print_error("DirectSoundCreate", hr);
+        return -3;
+    }
+
+    /* Set CooperativeLevel (use any window handle we can obtain) */
+    if((hWnd = GetForegroundWindow()) || (hWnd = GetDesktopWindow()) || (hWnd = GetConsoleWindow())){
+        if((hr = IDirectSound_SetCooperativeLevel(dsound->device, hWnd, DSSCL_PRIORITY)) != DS_OK){
+            tdav_win32_print_error("IDirectSound_SetCooperativeLevel", hr);
+            return -2;
+        }
+    }
+
+    /* Creates the primary buffer and apply format. 'out' values (override) win over 'in' (codec). */
+    wfx.wFormatTag = WAVE_FORMAT_PCM;
+    wfx.nChannels = TMEDIA_CONSUMER(dsound)->audio.out.channels ? TMEDIA_CONSUMER(dsound)->audio.out.channels : TMEDIA_CONSUMER(dsound)->audio.in.channels;
+    wfx.nSamplesPerSec = TMEDIA_CONSUMER(dsound)->audio.out.rate ? TMEDIA_CONSUMER(dsound)->audio.out.rate : TMEDIA_CONSUMER(dsound)->audio.in.rate;
+    wfx.wBitsPerSample = TMEDIA_CONSUMER(dsound)->audio.bits_per_sample;
+    wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+    wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+    /* Average bytes (count) for each notification: one 'ptime' worth of audio */
+    dsound->bytes_per_notif_size = ((wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(dsound)->audio.ptime)/1000);
+    if(!(dsound->bytes_per_notif_ptr = tsk_realloc(dsound->bytes_per_notif_ptr, dsound->bytes_per_notif_size))){
+        /* FIX: cast to unsigned to match %u (tsk_size_t may be wider than int) */
+        TSK_DEBUG_ERROR("Failed to allocate buffer with size = %u", (unsigned)dsound->bytes_per_notif_size);
+        return -3;
+    }
+
+    dsbd.dwSize = sizeof(DSBUFFERDESC);
+    dsbd.dwFlags = DSBCAPS_PRIMARYBUFFER;
+    dsbd.dwBufferBytes = 0;
+    dsbd.lpwfxFormat = NULL;
+
+    if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->primaryBuffer, NULL)) != DS_OK){
+        tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+        return -4;
+    }
+    if((hr = IDirectSoundBuffer_SetFormat(dsound->primaryBuffer, &wfx)) != DS_OK){
+        tdav_win32_print_error("IDirectSoundBuffer_SetFormat", hr);
+        return -5;
+    }
+
+    /* Creates the secondary buffer and apply format */
+    dsbd.dwFlags = (DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GLOBALFOCUS | DSBCAPS_CTRLVOLUME);
+    dsbd.dwBufferBytes = (DWORD)(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT * dsound->bytes_per_notif_size);
+    dsbd.lpwfxFormat = &wfx;
+
+    if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->secondaryBuffer, NULL)) != DS_OK){
+        tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+        return -6;
+    }
+
+    /* Set Volume (failure is logged but not fatal) */
+    if(IDirectSoundBuffer_SetVolume(dsound->secondaryBuffer, __convert_volume(TMEDIA_CONSUMER(self)->audio.volume)) != DS_OK){
+        TSK_DEBUG_ERROR("IDirectSoundBuffer_SetVolume() failed");
+    }
+
+    return 0;
+}
+
+/* Start playback: register one auto-reset event per notification position on
+ * the secondary buffer, start looping playback and spawn the playback thread.
+ * Returns 0 on success, a negative code otherwise. */
+static int tdav_consumer_dsound_start(tmedia_consumer_t* self)
+{
+    tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+    tsk_size_t i;
+    HRESULT hr;
+    LPDIRECTSOUNDNOTIFY lpDSBNotify;
+    DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT] = {0};
+
+    static DWORD dwMajorVersion = -1; /* (DWORD)-1 acts as "not yet queried" sentinel */
+
+    // Get OS version (queried once, then cached)
+    if(dwMajorVersion == -1){
+        OSVERSIONINFO osvi;
+        ZeroMemory(&osvi, sizeof(OSVERSIONINFO));
+        osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+        GetVersionEx(&osvi);
+        dwMajorVersion = osvi.dwMajorVersion;
+    }
+
+    if(!dsound){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if(!dsound->device || !dsound->primaryBuffer || !dsound->secondaryBuffer){
+        TSK_DEBUG_ERROR("Consumer not prepared");
+        return -2;
+    }
+
+    if(dsound->started){
+        return 0;
+    }
+
+    if((hr = IDirectSoundBuffer_QueryInterface(dsound->secondaryBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK){
+        tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+        return -3;
+    }
+
+    /* Events associated to notification points */
+    // NOTE(review): CreateEvent() return values are not checked; a NULL handle
+    // would make WaitForMultipleObjects() fail in the playback thread.
+    for(i = 0; i<TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT; i++){
+        dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+        // FIX(comment): offset is the MIDDLE of each period on Vista and later
+        // (dwMajorVersion > 5), and near the START of each period on XP and
+        // before -- the previous comment stated the opposite of the code.
+        pPosNotify[i].dwOffset = (DWORD)((dsound->bytes_per_notif_size * i) + (dwMajorVersion > 5 ? (dsound->bytes_per_notif_size >> 1) : 1));
+        pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+    }
+    if((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK){
+        IDirectSoundNotify_Release(lpDSBNotify);
+        /* FIX: error label previously said "IDirectSoundBuffer_QueryInterface" */
+        tdav_win32_print_error("IDirectSoundNotify_SetNotificationPositions", hr);
+        return -4;
+    }
+
+    if((hr = IDirectSoundNotify_Release(lpDSBNotify))){
+        tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+    }
+
+    /* Start the buffer */
+    if((hr = IDirectSoundBuffer_Play(dsound->secondaryBuffer, 0, 0, DSBPLAY_LOOPING)) != DS_OK){
+        /* FIX: error label previously said "IDirectSoundNotify_Release" */
+        tdav_win32_print_error("IDirectSoundBuffer_Play", hr);
+        return -5;
+    }
+
+    /* start the reader thread */
+    dsound->started = tsk_true;
+    tsk_thread_create(&dsound->tid[0], _tdav_consumer_dsound_playback_thread, dsound);
+
+    return 0;
+}
+
+/* Queue one frame of (already decoded) audio into the jitter buffer;
+   the playback thread drains it later. */
+static int tdav_consumer_dsound_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+    tdav_consumer_dsound_t* consumer = (tdav_consumer_dsound_t*)self;
+    if (consumer && buffer && size) {
+        /* buffer is already decoded */
+        return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+    }
+    TSK_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+/* Pause is a no-op for this consumer: the secondary buffer keeps looping and
+   the playback thread pads with silence when the jitter buffer underflows. */
+static int tdav_consumer_dsound_pause(tmedia_consumer_t* self)
+{
+    return 0;
+}
+
+/* Stop playback: clear 'started', join the playback thread, stop and rewind
+   the secondary buffer, then release all resources. prepare() must be called
+   again before the next start(). */
+static int tdav_consumer_dsound_stop(tmedia_consumer_t* self)
+{
+    tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+    HRESULT hr;
+
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if(!dsound->started){
+        return 0;
+    }
+
+    /* should be done here */
+    dsound->started = tsk_false;
+
+    /* stop thread */
+    // NOTE(review): the thread may be blocked in WaitForMultipleObjects(); it is
+    // presumably woken by the next buffer notification (the buffer is stopped
+    // only after the join below) -- confirm if stop() ever appears to hang.
+    if(dsound->tid[0]){
+        tsk_thread_join(&(dsound->tid[0]));
+    }
+
+    if((hr = IDirectSoundBuffer_Stop(dsound->secondaryBuffer)) != DS_OK){
+        tdav_win32_print_error("IDirectSoundBuffer_Stop", hr);
+    }
+    if((hr = IDirectSoundBuffer_SetCurrentPosition(dsound->secondaryBuffer, 0)) != DS_OK){
+        tdav_win32_print_error("IDirectSoundBuffer_SetCurrentPosition", hr);
+    }
+
+    // unprepare
+    // will be prepared again before calling next start()
+    _tdav_consumer_dsound_unprepare(dsound);
+
+    return 0;
+}
+
+
+//
+// DirectSound consumer object definition
+//
+/* constructor: initializes the audio-consumer base; self-specific members are
+   left untouched (presumably zero-initialized by the tsk_object allocator --
+   TODO confirm) */
+static tsk_object_t* tdav_consumer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_consumer_dsound_t *consumer = self;
+    if(consumer){
+        /* init base */
+        tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+        /* init self */
+
+    }
+    return self;
+}
+/* destructor: stops playback if still running, deinits the audio base, then
+   releases DirectSound resources and the scratch buffer */
+static tsk_object_t* tdav_consumer_dsound_dtor(tsk_object_t * self)
+{
+    tdav_consumer_dsound_t *dsound = self;
+    if(dsound){
+        /* stop */
+        if(dsound->started){
+            tdav_consumer_dsound_stop(self);
+        }
+
+        /* deinit base */
+        tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(dsound));
+        /* deinit self */
+        _tdav_consumer_dsound_unprepare(dsound);
+        TSK_FREE(dsound->bytes_per_notif_ptr);
+    }
+
+    return self;
+}
+/* object definition (size, constructor, destructor, comparator) */
+static const tsk_object_def_t tdav_consumer_dsound_def_s =
+{
+    sizeof(tdav_consumer_dsound_t),
+    tdav_consumer_dsound_ctor,
+    tdav_consumer_dsound_dtor,
+    tdav_consumer_audio_cmp,
+};
+/* plugin definition: media type, description and the entry points exported to
+   the media layer (set/prepare/start/consume/pause/stop) */
+static const tmedia_consumer_plugin_def_t tdav_consumer_dsound_plugin_def_s =
+{
+    &tdav_consumer_dsound_def_s,
+
+    tmedia_audio,
+    "Microsoft DirectSound consumer",
+
+    tdav_consumer_dsound_set,
+    tdav_consumer_dsound_prepare,
+    tdav_consumer_dsound_start,
+    tdav_consumer_dsound_consume,
+    tdav_consumer_dsound_pause,
+    tdav_consumer_dsound_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_dsound_plugin_def_t = &tdav_consumer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/directsound/tdav_producer_dsound.c b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
new file mode 100644
index 0000000..c5ae167
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
@@ -0,0 +1,402 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_dsound.c
+ * @brief Microsoft DirectSound producer.
+ *
+ */
+#include "tinydav/audio/directsound/tdav_producer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+// # pragma comment(lib, "dxguid.lib")
+#endif
+
+#if !defined(SEND_SILENCE_ON_MUTE)
+# if METROPOLIS
+# define SEND_SILENCE_ON_MUTE 1
+# else
+# define SEND_SILENCE_ON_MUTE 0
+# endif
+#endif /* SEND_SILENCE_ON_MUTE */
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+#include <dsound.h>
+
+extern void tdav_win32_print_error(const char* func, HRESULT hr);
+
+#if !defined(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT)
+# define TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT 10
+#endif /* TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT */
+
+// DirectSound producer (capture) context
+typedef struct tdav_producer_dsound_s
+{
+    TDAV_DECLARE_PRODUCER_AUDIO;
+
+    tsk_bool_t started; // capture thread running flag
+    tsk_bool_t mute; // mute state; behavior depends on SEND_SILENCE_ON_MUTE
+    tsk_size_t bytes_per_notif_size; // bytes captured per notification period (one 'ptime')
+    tsk_thread_handle_t* tid[1]; // capture thread handle
+
+    LPDIRECTSOUNDCAPTURE device; // capture device
+    LPDIRECTSOUNDCAPTUREBUFFER captureBuffer; // circular capture buffer
+    HANDLE notifEvents[TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT]; // one event per notification position
+}
+tdav_producer_dsound_t;
+
+/* Capture thread: waits on buffer notification events and forwards each
+ * captured period to the registered encode callback. Runs until 'started'
+ * is cleared by stop(). */
+static void* TSK_STDCALL _tdav_producer_dsound_record_thread(void *param)
+{
+    tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)param;
+
+    HRESULT hr;
+    LPVOID lpvAudio1, lpvAudio2;
+    DWORD dwBytesAudio1, dwBytesAudio2, dwEvent, dwIndex;
+
+    TSK_DEBUG_INFO("_tdav_producer_dsound_record_thread -- START");
+
+    SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
+
+    while (dsound->started) {
+        dwEvent = WaitForMultipleObjects(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT, dsound->notifEvents, FALSE, INFINITE);
+        if (!dsound->started) {
+            break;
+        }
+        /* FIX: valid signaled indices are [WAIT_OBJECT_0, WAIT_OBJECT_0 + COUNT - 1];
+           the previous '>' test let the out-of-range value (WAIT_OBJECT_0 + COUNT)
+           through, which would index one period past the end of the buffer. */
+        if (dwEvent < WAIT_OBJECT_0 || dwEvent >= (WAIT_OBJECT_0 + TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT)) {
+            TSK_DEBUG_ERROR("Invalid dwEvent(%d)", dwEvent);
+            break;
+        }
+        dwIndex = (dwEvent - WAIT_OBJECT_0);
+
+        // lock the period that has just been filled
+        if ((hr = IDirectSoundCaptureBuffer_Lock(dsound->captureBuffer, (DWORD)(dwIndex * dsound->bytes_per_notif_size), (DWORD)dsound->bytes_per_notif_size, &lpvAudio1, &dwBytesAudio1, &lpvAudio2, &dwBytesAudio2, 0)) != DS_OK) {
+            tdav_win32_print_error("IDirectSoundCaptureBuffer_Lock", hr);
+            continue;
+        }
+
+        if (TMEDIA_PRODUCER(dsound)->enc_cb.callback) {
+#if SEND_SILENCE_ON_MUTE
+            if (dsound->mute) {
+                // muted: overwrite the captured data with silence before forwarding
+                memset(lpvAudio1, 0, dwBytesAudio1);
+                if(lpvAudio2){
+                    memset(lpvAudio2, 0, dwBytesAudio2);
+                }
+            }
+#endif
+            TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio1, dwBytesAudio1);
+            if (lpvAudio2) {
+                // the locked region wrapped around the circular buffer
+                TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio2, dwBytesAudio2);
+            }
+        }
+
+        // unlock
+        if ((hr = IDirectSoundCaptureBuffer_Unlock(dsound->captureBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK) {
+            tdav_win32_print_error("IDirectSoundCaptureBuffer_Unlock", hr);
+            continue;
+        }
+    }
+
+    TSK_DEBUG_INFO("_tdav_producer_dsound_record_thread -- STOP");
+
+
+    return tsk_null;
+}
+
+/* Release capture resources (COM objects and notification events).
+   Idempotent: each released member is reset so a second call is harmless. */
+static int _tdav_producer_dsound_unprepare(tdav_producer_dsound_t* dsound)
+{
+    tsk_size_t idx;
+    if (!dsound) {
+        return 0;
+    }
+    if (dsound->captureBuffer) {
+        IDirectSoundCaptureBuffer_Release(dsound->captureBuffer);
+        dsound->captureBuffer = NULL;
+    }
+    if (dsound->device) {
+        IDirectSoundCapture_Release(dsound->device);
+        dsound->device = NULL;
+    }
+    for (idx = 0; idx < (sizeof(dsound->notifEvents) / sizeof(dsound->notifEvents[0])); ++idx) {
+        if (dsound->notifEvents[idx]) {
+            CloseHandle(dsound->notifEvents[idx]);
+            dsound->notifEvents[idx] = NULL;
+        }
+    }
+    return 0;
+}
+
+
+
+
+/* ============ Media Producer Interface ================= */
+/* Apply a runtime parameter. Handles "volume" (accepted but ignored for
+   capture) and "mute"; anything else is delegated to the audio-producer base. */
+static int tdav_producer_dsound_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+    tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+    if (param->plugin_type == tmedia_ppt_producer) {
+        if (param->value_type == tmedia_pvt_int32) {
+            if (tsk_striequals(param->key, "volume")) {
+                return 0;
+            }
+            else if (tsk_striequals(param->key, "mute")) {
+                dsound->mute = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+#if !SEND_SILENCE_ON_MUTE
+                // mute by pausing/resuming the capture buffer instead of
+                // sending silence (see the record thread for the other mode)
+                if (dsound->started) {
+                    if (dsound->mute) {
+                        IDirectSoundCaptureBuffer_Stop(dsound->captureBuffer);
+                    }
+                    else {
+                        IDirectSoundCaptureBuffer_Start(dsound->captureBuffer, DSBPLAY_LOOPING);
+                    }
+                }
+#endif
+                return 0;
+            }
+        }
+    }
+    return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+/* Prepare the producer: create the capture device and a circular capture
+ * buffer sized to TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT notification periods
+ * of the negotiated codec format. Returns 0 on success, negative otherwise. */
+static int tdav_producer_dsound_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+    HRESULT hr;
+
+    WAVEFORMATEX wfx = { 0 };
+    DSCBUFFERDESC dsbd = { 0 };
+
+    tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+    if (!dsound || !codec) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if (dsound->device || dsound->captureBuffer) {
+        TSK_DEBUG_ERROR("Producer already prepared");
+        return -2;
+    }
+
+    TMEDIA_PRODUCER(dsound)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+    TMEDIA_PRODUCER(dsound)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+    TMEDIA_PRODUCER(dsound)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+#if 0
+    TMEDIA_PRODUCER(dsound)->audio.rate = 48000;
+    TMEDIA_PRODUCER(dsound)->audio.channels = 1;
+#endif
+
+    /* Create capture device */
+    /* FIX: '!= DS_OK' was previously inside the assignment parentheses
+       (operator precedence), so 'hr' received the boolean comparison result
+       instead of the HRESULT and the printed error code was meaningless. */
+    if ((hr = DirectSoundCaptureCreate(NULL, &dsound->device, NULL)) != DS_OK) {
+        tdav_win32_print_error("DirectSoundCaptureCreate", hr);
+        return -3;
+    }
+
+    /* Creates the capture buffer */
+    wfx.wFormatTag = WAVE_FORMAT_PCM;
+    wfx.nChannels = TMEDIA_PRODUCER(dsound)->audio.channels;
+    wfx.nSamplesPerSec = TMEDIA_PRODUCER(dsound)->audio.rate;
+    wfx.wBitsPerSample = TMEDIA_PRODUCER(dsound)->audio.bits_per_sample;
+    wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample / 8);
+    wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+    /* Average bytes (count) for each notification: one 'ptime' worth of audio */
+    dsound->bytes_per_notif_size = ((wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(dsound)->audio.ptime) / 1000);
+
+    dsbd.dwSize = sizeof(DSCBUFFERDESC);
+    dsbd.dwBufferBytes = (DWORD)(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT * dsound->bytes_per_notif_size);
+    dsbd.lpwfxFormat = &wfx;
+
+    if ((hr = IDirectSoundCapture_CreateCaptureBuffer(dsound->device, &dsbd, &dsound->captureBuffer, NULL)) != DS_OK) {
+        tdav_win32_print_error("IDirectSoundCapture_CreateCaptureBuffer", hr);
+        return -4;
+    }
+
+    return 0;
+}
+
+/* Start capturing: register one auto-reset event at the last byte of each
+ * notification period, start the looping capture buffer and spawn the record
+ * thread. Returns 0 on success, negative otherwise. */
+static int tdav_producer_dsound_start(tmedia_producer_t* self)
+{
+    tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+    tsk_size_t i;
+    DWORD dwOffset;
+    HRESULT hr;
+    LPDIRECTSOUNDNOTIFY lpDSBNotify;
+    DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT] = { 0 };
+
+    if (!dsound) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if (!dsound->device || !dsound->captureBuffer) {
+        TSK_DEBUG_ERROR("Producer not prepared");
+        return -2;
+    }
+
+    if (dsound->started) {
+        return 0;
+    }
+
+    if ((hr = IDirectSoundCaptureBuffer_QueryInterface(dsound->captureBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK) {
+        tdav_win32_print_error("IDirectSoundCaptureBuffer_QueryInterface", hr);
+        return -3;
+    }
+
+    /* Events associated to notification points (last byte of each period) */
+    // NOTE(review): CreateEvent() return values are not checked; a NULL handle
+    // would make WaitForMultipleObjects() fail in the record thread.
+    dwOffset = (DWORD)(dsound->bytes_per_notif_size - 1);
+    for (i = 0; i < (sizeof(dsound->notifEvents) / sizeof(dsound->notifEvents[0])); i++){
+        dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+        pPosNotify[i].dwOffset = dwOffset;
+        pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+        dwOffset += (DWORD)dsound->bytes_per_notif_size;
+    }
+    if ((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK) {
+        IDirectSoundNotify_Release(lpDSBNotify);
+        /* FIX: error label previously said "IDirectSoundBuffer_QueryInterface" */
+        tdav_win32_print_error("IDirectSoundNotify_SetNotificationPositions", hr);
+        return -4;
+    }
+
+    if ((hr = IDirectSoundNotify_Release(lpDSBNotify))) {
+        tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+    }
+
+    /* Start the buffer */
+    /* FIX: use DSCBSTART_LOOPING (documented capture flag) instead of
+       DSBPLAY_LOOPING (playback flag); both have the same value, so behavior
+       is unchanged, but the capture flag is the documented one. */
+    if ((hr = IDirectSoundCaptureBuffer_Start(dsound->captureBuffer, DSCBSTART_LOOPING)) != DS_OK) {
+        tdav_win32_print_error("IDirectSoundCaptureBuffer_Start", hr);
+        return -5;
+    }
+
+    /* start the reader thread */
+    dsound->started = tsk_true;
+    tsk_thread_create(&dsound->tid[0], _tdav_producer_dsound_record_thread, dsound);
+
+    return 0;
+}
+
+/* Pause is a no-op for this producer; capture continues until stop(). */
+static int tdav_producer_dsound_pause(tmedia_producer_t* self)
+{
+    return 0;
+}
+
+/* Stop capturing: clear 'started', wake/join the record thread, stop the
+   capture buffer and release all resources. prepare() must be called again
+   before the next start(). */
+static int tdav_producer_dsound_stop(tmedia_producer_t* self)
+{
+    tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+    HRESULT hr;
+
+    if (!self) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if (!dsound->started) {
+        return 0;
+    }
+
+    // should be done here
+    dsound->started = tsk_false;
+
+#if !SEND_SILENCE_ON_MUTE
+    if (dsound->mute && dsound->notifEvents[0]) {
+        // thread is paused -> raise event now that "started" is equal to false
+        // (the capture buffer was stopped by set("mute"), so no notification
+        // would otherwise wake the thread)
+        SetEvent(dsound->notifEvents[0]);
+    }
+#endif
+
+    // stop thread
+    if (dsound->tid[0]) {
+        tsk_thread_join(&(dsound->tid[0]));
+    }
+
+    if ((hr = IDirectSoundCaptureBuffer_Stop(dsound->captureBuffer)) != DS_OK) {
+        tdav_win32_print_error("IDirectSoundCaptureBuffer_Stop", hr);
+    }
+
+    // unprepare
+    // will be prepared again before next start()
+    _tdav_producer_dsound_unprepare(dsound);
+
+    return 0;
+}
+
+
+//
+// DirectSound producer object definition
+//
+/* constructor: initializes the audio-producer base; self-specific members are
+   left untouched (presumably zero-initialized by the tsk_object allocator --
+   TODO confirm) */
+static tsk_object_t* tdav_producer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_producer_dsound_t *producer = self;
+    if (producer) {
+        /* init base */
+        tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+        /* init self */
+
+    }
+    return self;
+}
+/* destructor: stops capture if still running, deinits the audio base, then
+   releases capture resources */
+static tsk_object_t* tdav_producer_dsound_dtor(tsk_object_t * self)
+{
+    tdav_producer_dsound_t *dsound = self;
+    if (dsound) {
+        /* stop */
+        if (dsound->started) {
+            tdav_producer_dsound_stop(self);
+        }
+
+        /* deinit base */
+        tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(dsound));
+        /* deinit self */
+        _tdav_producer_dsound_unprepare(dsound);
+    }
+
+    return self;
+}
+/* object definition (size, constructor, destructor, comparator) */
+static const tsk_object_def_t tdav_producer_dsound_def_s =
+{
+    sizeof(tdav_producer_dsound_t),
+    tdav_producer_dsound_ctor,
+    tdav_producer_dsound_dtor,
+    tdav_producer_audio_cmp,
+};
+/* plugin definition: media type, description and the entry points exported to
+   the media layer (set/prepare/start/pause/stop) */
+static const tmedia_producer_plugin_def_t tdav_producer_dsound_plugin_def_s =
+{
+    &tdav_producer_dsound_def_s,
+
+    tmedia_audio,
+    "Microsoft DirectSound producer",
+
+    tdav_producer_dsound_set,
+    tdav_producer_dsound_prepare,
+    tdav_producer_dsound_start,
+    tdav_producer_dsound_pause,
+    tdav_producer_dsound_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_dsound_plugin_def_t = &tdav_producer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/oss/tdav_consumer_oss.c b/tinyDAV/src/audio/oss/tdav_consumer_oss.c
new file mode 100644
index 0000000..0370210
--- /dev/null
+++ b/tinyDAV/src/audio/oss/tdav_consumer_oss.c
@@ -0,0 +1,397 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/oss/tdav_consumer_oss.h"
+
+#if HAVE_LINUX_SOUNDCARD_H
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/soundcard.h>
+
+#define OSS_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[OSS Consumer] " FMT, ##__VA_ARGS__)
+
+/* OSS playback consumer: pulls decoded audio from the base jitter buffer and
+ * writes it to /dev/dsp from a dedicated thread. */
+typedef struct tdav_consumer_oss_s
+{
+    TDAV_DECLARE_CONSUMER_AUDIO;
+
+    tsk_bool_t b_started;
+    tsk_bool_t b_prepared;
+    tsk_bool_t b_muted;
+    int n_bits_per_sample; // 8 or 16, whatever the device accepted in prepare()
+
+    int fd; // "/dev/dsp" descriptor, -1 when closed
+    tsk_thread_handle_t* tid[1]; // playback thread handle
+
+    // device-format buffer (one ptime worth of samples at device bit depth)
+    tsk_size_t n_buff_size_in_bytes;
+    tsk_size_t n_buff_size_in_samples;
+    uint8_t* p_buff_ptr;
+
+    // 16-bit staging buffer, only allocated when the device runs at 8 bits/sample
+    tsk_size_t n_buff16_size_in_bytes;
+    tsk_size_t n_buff16_size_in_samples;
+    uint16_t* p_buff16_ptr;
+
+    TSK_DECLARE_SAFEOBJ;
+}
+tdav_consumer_oss_t;
+
+/* Narrows n_samples 16-bit samples into 8-bit slots by plain assignment
+ * (keeps only the low byte of each sample).
+ * NOTE(review): for PCM audio one would expect the HIGH byte (>> 8) plus an
+ * unsigned offset for AFMT_U8 — confirm intended device sample format. */
+static int __oss_from_16bits_to_8bits(const void* p_src, void* p_dst, tsk_size_t n_samples)
+{
+    tsk_size_t i;
+    uint16_t *_p_src = (uint16_t*)p_src;
+    uint8_t *_p_dst = (uint8_t*)p_dst;
+
+    if (!p_src || !p_dst || !n_samples) {
+        OSS_DEBUG_ERROR("invalid parameter");
+        return -1;
+    }
+    for (i = 0; i < n_samples; ++i) {
+        _p_dst[i] = _p_src[i]; // low-byte truncation (see NOTE above)
+    }
+    return 0;
+}
+
+/* Playback thread: each iteration pulls one ptime of 16-bit audio from the
+ * jitter buffer, zero-pads any shortfall, optionally narrows to 8-bit for the
+ * device, and writes the frame to /dev/dsp. Exits when b_started drops or a
+ * device write fails. */
+static void* TSK_STDCALL _tdav_consumer_oss_playback_thread(void *param)
+{
+    tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)param;
+    int err;
+    // jitter buffer always yields 16-bit samples: for an 8-bit device we pull
+    // into the wide staging buffer and down-convert into p_buff_ptr below
+    void* p_buffer = ((p_oss->n_bits_per_sample == 8) ? (void*)p_oss->p_buff16_ptr: (void*)p_oss->p_buff_ptr);
+    tsk_size_t n_buffer_in_bytes = (p_oss->n_bits_per_sample == 8) ? p_oss->n_buff16_size_in_bytes : p_oss->n_buff_size_in_bytes;
+    tsk_size_t n_buffer_in_samples = p_oss->n_buff_size_in_samples;
+
+    const void* _p_buffer;
+    tsk_size_t _n_buffer_in_bytes;
+
+    OSS_DEBUG_INFO("__playback_thread -- START");
+
+    tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+    while (p_oss->b_started) {
+        tsk_safeobj_lock(p_oss);
+        err = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(p_oss), p_buffer, n_buffer_in_bytes); // requires 16bits, thread-safe
+        if (err >= 0) {
+            _p_buffer = p_buffer;
+            _n_buffer_in_bytes = n_buffer_in_bytes;
+            if (err < n_buffer_in_bytes) {
+                // underrun: fill the remainder with silence
+                memset(((uint8_t*)p_buffer) + err, 0, (n_buffer_in_bytes - err));
+            }
+            if (p_oss->n_bits_per_sample == 8) {
+                __oss_from_16bits_to_8bits(p_buffer, p_oss->p_buff_ptr, n_buffer_in_samples);
+                _p_buffer = p_oss->p_buff_ptr;
+                _n_buffer_in_bytes >>= 1; // half as many bytes at 8 bits/sample
+            }
+            if ((err = write(p_oss->fd, _p_buffer, _n_buffer_in_bytes)) != _n_buffer_in_bytes) {
+                OSS_DEBUG_ERROR ("Failed to read data from audio interface failed (%d -> %s)", err , strerror(errno));
+                tsk_safeobj_unlock(p_oss);
+                goto bail;
+            }
+        }
+        tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(p_oss)); // drives jitter-buffer timing
+
+        tsk_safeobj_unlock(p_oss);
+    }
+bail:
+    OSS_DEBUG_INFO("__playback_thread -- STOP");
+    return tsk_null;
+}
+
+/* ============ Media Consumer Interface ================= */
+/* Apply a runtime parameter; everything is delegated to the audio-consumer
+ * base (gain, volume, ...). Returns the base-class result. */
+static int tdav_consumer_oss_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+    // FIX: removed the unused local cast ("p_oss") and the pointless "ret" indirection
+    return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+/* Open /dev/dsp for playback and negotiate format (bits/channels/rate) from
+ * the decoding codec. The device may answer with different values; those are
+ * stored in audio.out.* for the resampler. Allocates the ptime-sized playback
+ * buffer (plus a 16-bit staging buffer when the device only does 8 bits).
+ * Returns 0 on success; on failure the device is closed again. */
+static int tdav_consumer_oss_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+    tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+    int err = 0, channels, sample_rate, bits_per_sample;
+
+    // FIX: was "!codec && codec->plugin" which (a) never rejects a codec with a
+    // NULL plugin and (b) dereferences codec only when it is NULL.
+    if (!p_oss || !codec || !codec->plugin) {
+        OSS_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_oss);
+
+    if (p_oss->fd == -1) {
+        if ((p_oss->fd = open("/dev/dsp", O_WRONLY)) < 0) {
+            OSS_DEBUG_ERROR("open('/dev/dsp') failed: %s", strerror(errno));
+            err = -2;
+            goto bail;
+        }
+    }
+
+    TMEDIA_CONSUMER(p_oss)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(p_oss)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(p_oss)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+    // Set using requested
+    channels = TMEDIA_CONSUMER(p_oss)->audio.in.channels;
+    sample_rate = TMEDIA_CONSUMER(p_oss)->audio.in.rate;
+    bits_per_sample = TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample; // 16
+
+    // Prepare: each ioctl may rewrite its argument with the value the driver chose
+    if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_BITS, &bits_per_sample)) != 0) {
+        OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_BITS, %d) failed: %d->%s", bits_per_sample, err, strerror(errno));
+        goto bail;
+    }
+    if (bits_per_sample != 16 && bits_per_sample != 8) {
+        OSS_DEBUG_ERROR("bits_per_sample=%d not supported", bits_per_sample);
+        err = -3;
+        goto bail;
+    }
+    if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_CHANNELS, &channels)) != 0) {
+        OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_CHANNELS, %d) failed: %d->%s", channels, err, strerror(errno));
+        goto bail;
+    }
+    if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_RATE, &sample_rate)) != 0) {
+        OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_RATE, %d) failed: %d->%s", sample_rate, err, strerror(errno));
+        goto bail;
+    }
+
+    // one ptime worth of device-format audio
+    p_oss->n_buff_size_in_bytes = (TMEDIA_CONSUMER(p_oss)->audio.ptime * sample_rate * ((bits_per_sample >> 3) * channels)) / 1000;
+    if (!(p_oss->p_buff_ptr = tsk_realloc(p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes))) {
+        OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+        err = -4;
+        goto bail;
+    }
+    p_oss->n_buff_size_in_samples = (p_oss->n_buff_size_in_bytes / (bits_per_sample >> 3));
+    if (bits_per_sample == 8) {
+        // jitter buffer always produces 16-bit samples -> need a wide staging buffer
+        p_oss->n_buff16_size_in_bytes = p_oss->n_buff_size_in_bytes << 1;
+        if (!(p_oss->p_buff16_ptr = tsk_realloc(p_oss->p_buff16_ptr, p_oss->n_buff16_size_in_bytes))) {
+            // FIX: report the size actually requested (was n_buff_size_in_bytes)
+            OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff16_size_in_bytes);
+            err = -5;
+            goto bail;
+        }
+        p_oss->n_buff16_size_in_samples = p_oss->n_buff_size_in_samples;
+    }
+
+    OSS_DEBUG_INFO("prepared: req_bits_per_sample=%d; req_channels=%d; req_rate=%d, resp_bits_per_sample=%d; resp_channels=%d; resp_rate=%d /// n_buff_size_in_samples=%u;n_buff_size_in_bytes=%u",
+        TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample, TMEDIA_CONSUMER(p_oss)->audio.in.channels, TMEDIA_CONSUMER(p_oss)->audio.in.rate,
+        bits_per_sample, channels, sample_rate,
+        p_oss->n_buff_size_in_samples, p_oss->n_buff_size_in_bytes);
+
+    // Set using supported (up to the resampler to convert to requested)
+    TMEDIA_CONSUMER(p_oss)->audio.out.channels = channels;
+    TMEDIA_CONSUMER(p_oss)->audio.out.rate = sample_rate;
+    // TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample = bits_per_sample;
+
+    p_oss->n_bits_per_sample = bits_per_sample;
+    p_oss->b_prepared = tsk_true;
+
+bail:
+    if (err) {
+        if (p_oss->fd != -1) {
+            close(p_oss->fd);
+            p_oss->fd = -1;
+        }
+    }
+    tsk_safeobj_unlock(p_oss);
+
+    return err;
+}
+
+/* Spawn the playback thread. Requires a prior successful prepare(); starting
+ * twice is a harmless no-op. Returns 0 on success, negative on error. */
+static int tdav_consumer_oss_start(tmedia_consumer_t* self)
+{
+    tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+    int err = 0;
+
+    if (!p_oss) {
+        OSS_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_oss);
+
+    if (!p_oss->b_prepared) {
+        OSS_DEBUG_WARN("Not prepared");
+        err = -2;
+        goto bail;
+    }
+
+    if (p_oss->b_started) {
+        OSS_DEBUG_WARN("Already started");
+        goto bail;
+    }
+
+    /* start thread */
+    p_oss->b_started = tsk_true; // must be set before the thread tests it
+    // FIX: check thread creation; previously a failure left b_started stuck at true
+    if ((err = tsk_thread_create(&p_oss->tid[0], _tdav_consumer_oss_playback_thread, p_oss))) {
+        OSS_DEBUG_ERROR("Failed to create playback thread (%d)", err);
+        p_oss->b_started = tsk_false;
+        goto bail;
+    }
+
+    OSS_DEBUG_INFO("started");
+
+bail:
+    tsk_safeobj_unlock(p_oss);
+    return err;
+}
+
+/* Push one decoded audio frame into the jitter buffer; the playback thread
+ * drains it. No local lock needed: tdav_consumer_audio_put() is thread-safe. */
+static int tdav_consumer_oss_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+    int err = 0;
+    tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+
+    if (!p_oss || !buffer || !size) {
+        OSS_DEBUG_ERROR("Invalid parameter"); // FIX: typo "paramter"
+        return -1;
+    }
+
+    if (!p_oss->b_started) {
+        OSS_DEBUG_WARN("Not started");
+        err = -2;
+        goto bail;
+    }
+    if ((err = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(p_oss), buffer, size, proto_hdr))/*thread-safe*/) {
+        OSS_DEBUG_WARN("Failed to put audio data to the jitter buffer");
+        goto bail;
+    }
+
+bail:
+    return err;
+}
+
+/* Pause is a no-op for the OSS consumer (playback keeps draining the jitter buffer). */
+static int tdav_consumer_oss_pause(tmedia_consumer_t* self)
+{
+    return 0;
+}
+
+/* Stop playback: clear the run flag, join the playback thread, close the
+ * device and drop the prepared state. Always returns 0 (or -1 on NULL self). */
+static int tdav_consumer_oss_stop(tmedia_consumer_t* self)
+{
+    tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+    // FIX: removed unused local "err"
+
+    if (!p_oss) {
+        OSS_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_oss);
+
+    /* must be cleared first so the playback thread's loop exits */
+    p_oss->b_started = tsk_false;
+
+    /* stop thread */
+    if (p_oss->tid[0]) {
+        tsk_thread_join(&(p_oss->tid[0]));
+    }
+    if (p_oss->fd != -1) {
+        close(p_oss->fd);
+        p_oss->fd = -1;
+    }
+    p_oss->b_prepared = tsk_false; // prepare() must run again before next start()
+
+    OSS_DEBUG_INFO("stopped");
+
+    tsk_safeobj_unlock(p_oss);
+
+    return 0;
+}
+
+
+//
+// Linux OSS consumer object definition
+//
+/* constructor */
+/* constructor: initializes the audio-consumer base, marks the device as
+ * closed (fd = -1) and creates the lock; buffers are allocated in prepare(). */
+static tsk_object_t* tdav_consumer_oss_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_consumer_oss_t *p_oss = self;
+    if (p_oss) {
+        /* init base */
+        tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(p_oss));
+        /* init self */
+
+        p_oss->fd = -1;
+        tsk_safeobj_init(p_oss);
+
+        OSS_DEBUG_INFO("created");
+    }
+    return self;
+}
+/* destructor */
+/* destructor: stops playback if running, deinits the base, closes the device
+ * and frees both sample buffers. */
+static tsk_object_t* tdav_consumer_oss_dtor(tsk_object_t * self)
+{
+    tdav_consumer_oss_t *p_oss = self;
+    if (p_oss) {
+
+        /* stop */
+        if (p_oss->b_started) {
+            tdav_consumer_oss_stop(self);
+        }
+
+        /* deinit base */
+        tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(p_oss));
+        /* deinit self */
+        // FIX: was "fd > 0" which would leak a valid descriptor 0 and was
+        // inconsistent with the "fd != -1" checks used everywhere else
+        if (p_oss->fd != -1) {
+            close(p_oss->fd);
+            p_oss->fd = -1;
+        }
+        TSK_FREE(p_oss->p_buff_ptr);
+        TSK_FREE(p_oss->p_buff16_ptr);
+        tsk_safeobj_deinit(p_oss);
+
+        OSS_DEBUG_INFO("*** destroyed ***");
+    }
+
+    return self;
+}
+/* object definition */
+/* object definition: size + ctor/dtor/cmp vtable consumed by the tsk object system */
+static const tsk_object_def_t tdav_consumer_oss_def_s =
+{
+    sizeof(tdav_consumer_oss_t),
+    tdav_consumer_oss_ctor,
+    tdav_consumer_oss_dtor,
+    tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+/* plugin definition: registers this consumer (audio type) with the media layer;
+ * the exported pointer below is what plugin managers link against */
+static const tmedia_consumer_plugin_def_t tdav_consumer_oss_plugin_def_s =
+{
+    &tdav_consumer_oss_def_s,
+
+    tmedia_audio,
+    "Linux OSS consumer",
+
+    tdav_consumer_oss_set,
+    tdav_consumer_oss_prepare,
+    tdav_consumer_oss_start,
+    tdav_consumer_oss_consume,
+    tdav_consumer_oss_pause,
+    tdav_consumer_oss_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_oss_plugin_def_t = &tdav_consumer_oss_plugin_def_s;
+
+#endif /* HAVE_LINUX_SOUNDCARD_H */
diff --git a/tinyDAV/src/audio/oss/tdav_producer_oss.c b/tinyDAV/src/audio/oss/tdav_producer_oss.c
new file mode 100644
index 0000000..d61fb96
--- /dev/null
+++ b/tinyDAV/src/audio/oss/tdav_producer_oss.c
@@ -0,0 +1,369 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/oss/tdav_producer_oss.h"
+
+#if HAVE_LINUX_SOUNDCARD_H
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/soundcard.h>
+
+#define OSS_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[OSS Producer] " FMT, ##__VA_ARGS__)
+
+/* OSS capture producer: reads raw audio from /dev/dsp in a dedicated thread
+ * and pushes frames to the registered encoder callback. */
+typedef struct tdav_producer_oss_s
+{
+    TDAV_DECLARE_PRODUCER_AUDIO;
+
+    tsk_bool_t b_started;
+    tsk_bool_t b_prepared;
+    tsk_bool_t b_muted; // when true, captured frames are dropped instead of delivered
+    int n_bits_per_sample; // 8 or 16, whatever the device accepted in prepare()
+
+    int fd; // "/dev/dsp" descriptor, -1 when closed
+    tsk_thread_handle_t* tid[1]; // record thread handle
+
+    // device-format buffer (one ptime worth of samples at device bit depth)
+    tsk_size_t n_buff_size_in_bytes;
+    tsk_size_t n_buff_size_in_samples;
+    uint8_t* p_buff_ptr;
+
+    // 16-bit staging buffer, only allocated when the device runs at 8 bits/sample
+    tsk_size_t n_buff16_size_in_bytes;
+    tsk_size_t n_buff16_size_in_samples;
+    uint16_t* p_buff16_ptr;
+
+    TSK_DECLARE_SAFEOBJ;
+}
+tdav_producer_oss_t;
+
+/* Widens n_samples 8-bit samples into 16-bit slots by plain assignment
+ * (zero-extension, no amplitude scaling).
+ * NOTE(review): for PCM one would expect a shift/offset to rescale U8 to S16 —
+ * confirm the intended device sample format. */
+static int __oss_from_8bits_to_16bits(const void* p_src, void* p_dst, tsk_size_t n_samples)
+{
+    tsk_size_t i;
+    const uint8_t *_p_src = (const uint8_t*)p_src;
+    uint16_t *_p_dst = (uint16_t*)p_dst;
+
+    if (!p_src || !p_dst || !n_samples) {
+        OSS_DEBUG_ERROR("invalid parameter");
+        return -1;
+    }
+    for (i = 0; i < n_samples; ++i) {
+        _p_dst[i] = _p_src[i]; // zero-extend (see NOTE above)
+    }
+    return 0;
+}
+
+/* Record thread: each iteration reads one ptime of audio from /dev/dsp,
+ * widens it to 16-bit when the device captures at 8 bits, and hands the frame
+ * to the encoder callback unless muted. Exits when b_started drops or a
+ * device read fails. */
+static void* TSK_STDCALL _tdav_producer_oss_record_thread(void *param)
+{
+    tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)param;
+    int err;
+    // the callback always receives 16-bit samples: for an 8-bit device we
+    // deliver the widened staging buffer instead of the raw capture buffer
+    const void* p_buffer = ((p_oss->n_bits_per_sample == 8) ? (const void*)p_oss->p_buff16_ptr: (const void*)p_oss->p_buff_ptr);
+    tsk_size_t n_buffer_in_bytes = (p_oss->n_bits_per_sample == 8) ? p_oss->n_buff16_size_in_bytes : p_oss->n_buff_size_in_bytes;
+
+    OSS_DEBUG_INFO("__record_thread -- START");
+
+    tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+    while (p_oss->b_started) {
+        tsk_safeobj_lock(p_oss);
+        if ((err = read(p_oss->fd, p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes)) != p_oss->n_buff_size_in_bytes) {
+            OSS_DEBUG_ERROR ("Failed to read data from audio interface failed (%d -> %s)", err , strerror(errno));
+            tsk_safeobj_unlock(p_oss);
+            goto bail;
+        }
+        if (p_oss->n_bits_per_sample == 8) {
+            if ((err = __oss_from_8bits_to_16bits(p_oss->p_buff_ptr, p_oss->p_buff16_ptr, p_oss->n_buff_size_in_samples))) {
+                tsk_safeobj_unlock(p_oss);
+                goto bail;
+            }
+        }
+        if (!p_oss->b_muted && TMEDIA_PRODUCER(p_oss)->enc_cb.callback) {
+            TMEDIA_PRODUCER(p_oss)->enc_cb.callback(TMEDIA_PRODUCER(p_oss)->enc_cb.callback_data, p_buffer, n_buffer_in_bytes);
+        }
+        tsk_safeobj_unlock(p_oss);
+    }
+bail:
+    OSS_DEBUG_INFO("__record_thread -- STOP");
+    return tsk_null;
+}
+
+
+/* ============ Media Producer Interface ================= */
+/* Apply a runtime parameter: handles "volume" (accepted, ignored) and "mute"
+ * locally; everything else is delegated to the audio-producer base. */
+static int tdav_producer_oss_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+    tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+    if (param->plugin_type == tmedia_ppt_producer) {
+        if (param->value_type == tmedia_pvt_int32) {
+            if (tsk_striequals(param->key, "volume")) {
+                return 0; // not supported by the OSS backend; report success
+            }
+            else if(tsk_striequals(param->key, "mute")){
+                p_oss->b_muted = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+                return 0;
+            }
+        }
+    }
+    return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+/* Open /dev/dsp for capture and negotiate format (bits/channels/rate) from
+ * the encoding codec. The device may answer with different values; those are
+ * written back to the producer for the resampler. Allocates the ptime-sized
+ * capture buffer (plus a 16-bit staging buffer when the device only does
+ * 8 bits). Returns 0 on success; on failure the device is closed again. */
+static int tdav_producer_oss_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+    tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+    int err = 0, channels, sample_rate, bits_per_sample;
+
+    // FIX: was "!codec && codec->plugin" which (a) never rejects a codec with a
+    // NULL plugin and (b) dereferences codec only when it is NULL.
+    if (!p_oss || !codec || !codec->plugin) {
+        OSS_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_oss);
+
+    if (p_oss->fd == -1) {
+        if ((p_oss->fd = open("/dev/dsp", O_RDONLY)) < 0) {
+            OSS_DEBUG_ERROR("open('/dev/dsp') failed: %s", strerror(errno));
+            err = -2;
+            goto bail;
+        }
+    }
+
+    // Set using requested
+    channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+    sample_rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+    bits_per_sample = TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample; // 16
+
+    // Prepare: each ioctl may rewrite its argument with the value the driver chose
+    if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_BITS, &bits_per_sample)) != 0) {
+        OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_BITS, %d) failed: %d->%s", bits_per_sample, err, strerror(errno));
+        goto bail;
+    }
+    if (bits_per_sample != 16 && bits_per_sample != 8) {
+        OSS_DEBUG_ERROR("bits_per_sample=%d not supported", bits_per_sample);
+        err = -3;
+        goto bail;
+    }
+    if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_CHANNELS, &channels)) != 0) {
+        OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_CHANNELS, %d) failed: %d->%s", channels, err, strerror(errno));
+        goto bail;
+    }
+    if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_RATE, &sample_rate)) != 0) {
+        OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_RATE, %d) failed: %d->%s", sample_rate, err, strerror(errno));
+        goto bail;
+    }
+
+    // one ptime worth of device-format audio
+    p_oss->n_buff_size_in_bytes = (TMEDIA_PRODUCER(p_oss)->audio.ptime * sample_rate * ((bits_per_sample >> 3) * channels)) / 1000;
+    if (!(p_oss->p_buff_ptr = tsk_realloc(p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes))) {
+        OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+        err = -4;
+        goto bail;
+    }
+    p_oss->n_buff_size_in_samples = (p_oss->n_buff_size_in_bytes / (bits_per_sample >> 3));
+    if (bits_per_sample == 8) {
+        // consumers of the callback expect 16-bit samples -> wide staging buffer
+        p_oss->n_buff16_size_in_bytes = p_oss->n_buff_size_in_bytes << 1;
+        if (!(p_oss->p_buff16_ptr = tsk_realloc(p_oss->p_buff16_ptr, p_oss->n_buff16_size_in_bytes))) {
+            // FIX: report the size actually requested (was n_buff_size_in_bytes)
+            OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff16_size_in_bytes);
+            err = -5;
+            goto bail;
+        }
+        p_oss->n_buff16_size_in_samples = p_oss->n_buff_size_in_samples;
+    }
+
+    OSS_DEBUG_INFO("prepared: req_bits_per_sample=%d; req_channels=%d; req_rate=%d, resp_bits_per_sample=%d; resp_channels=%d; resp_rate=%d /// n_buff_size_in_samples=%u;n_buff_size_in_bytes=%u",
+        TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample, TMEDIA_PRODUCER(p_oss)->audio.channels, TMEDIA_PRODUCER(p_oss)->audio.rate,
+        bits_per_sample, channels, sample_rate,
+        p_oss->n_buff_size_in_samples, p_oss->n_buff_size_in_bytes);
+
+    // Set using supported (up to the resampler to convert to requested)
+    TMEDIA_PRODUCER(p_oss)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+    TMEDIA_PRODUCER(p_oss)->audio.channels = channels;
+    TMEDIA_PRODUCER(p_oss)->audio.rate = sample_rate;
+    // TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample = bits_per_sample;
+
+    p_oss->n_bits_per_sample = bits_per_sample;
+    p_oss->b_prepared = tsk_true;
+
+bail:
+    if (err) {
+        if (p_oss->fd != -1) {
+            close(p_oss->fd);
+            p_oss->fd = -1;
+        }
+    }
+    tsk_safeobj_unlock(p_oss);
+
+    return err;
+}
+
+/* Spawn the record thread. Requires a prior successful prepare(); starting
+ * twice is a harmless no-op. Returns 0 on success, negative on error.
+ * NOTE(review): the tsk_thread_create() result is not checked — on failure
+ * b_started stays true; confirm whether that is acceptable. */
+static int tdav_producer_oss_start(tmedia_producer_t* self)
+{
+    tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+    int err = 0;
+
+    if (! p_oss) {
+        OSS_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_oss);
+
+    if (!p_oss->b_prepared) {
+        OSS_DEBUG_WARN("Not prepared");
+        err = -2;
+        goto bail;
+    }
+
+    if (p_oss->b_started) {
+        OSS_DEBUG_WARN("Already started");
+        goto bail;
+    }
+
+    /* start thread */
+    p_oss->b_started = tsk_true; // must be set before the thread tests it
+    tsk_thread_create(&p_oss->tid[0], _tdav_producer_oss_record_thread, p_oss);
+
+    OSS_DEBUG_INFO("started");
+
+bail:
+    tsk_safeobj_unlock(p_oss);
+    return err;
+}
+
+/* Pause is a no-op for the OSS producer (capture keeps running; use "mute" to
+ * suppress delivery). */
+static int tdav_producer_oss_pause(tmedia_producer_t* self)
+{
+    tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+
+    if (!p_oss) {
+        OSS_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    OSS_DEBUG_INFO("paused");
+
+    return 0;
+}
+
+/* Stop capture: clear the run flag, join the record thread, close the device
+ * and drop the prepared state. Always returns 0 (or -1 on NULL self). */
+static int tdav_producer_oss_stop(tmedia_producer_t* self)
+{
+    tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+    // FIX: removed unused local "err"
+
+    if (!p_oss) {
+        OSS_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_oss);
+
+    /* must be cleared first so the record thread's loop exits */
+    p_oss->b_started = tsk_false;
+
+    /* stop thread */
+    if (p_oss->tid[0]) {
+        tsk_thread_join(&(p_oss->tid[0]));
+    }
+    if (p_oss->fd != -1) {
+        close(p_oss->fd);
+        p_oss->fd = -1;
+    }
+    p_oss->b_prepared = tsk_false; // prepare() must run again before next start()
+
+    OSS_DEBUG_INFO("stopped");
+
+    tsk_safeobj_unlock(p_oss);
+
+    return 0;
+}
+
+
+//
+// Linux OSS producer object definition
+//
+/* constructor */
+/* constructor: initializes the audio-producer base, marks the device as
+ * closed (fd = -1) and creates the lock; buffers are allocated in prepare(). */
+static tsk_object_t* tdav_producer_oss_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_producer_oss_t *p_oss = (tdav_producer_oss_t*)self;
+    if (p_oss) {
+        /* init base */
+        tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(p_oss));
+        /* init self */
+        p_oss->fd = -1;
+        tsk_safeobj_init(p_oss);
+    }
+    return self;
+}
+/* destructor */
+/* destructor: stops capture if running, deinits the base, closes the device
+ * and frees both sample buffers. */
+static tsk_object_t* tdav_producer_oss_dtor(tsk_object_t * self)
+{
+    tdav_producer_oss_t *p_oss = (tdav_producer_oss_t *)self;
+    if (p_oss) {
+        /* stop */
+        if (p_oss->b_started) {
+            tdav_producer_oss_stop((tmedia_producer_t*)p_oss);
+        }
+
+        /* deinit base */
+        tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(p_oss));
+        /* deinit self */
+        if (p_oss->fd != -1) {
+            close(p_oss->fd);
+            p_oss->fd = -1;
+        }
+        TSK_FREE(p_oss->p_buff_ptr);
+        TSK_FREE(p_oss->p_buff16_ptr);
+        tsk_safeobj_deinit(p_oss);
+
+        OSS_DEBUG_INFO("*** destroyed ***");
+    }
+
+    return self;
+}
+/* object definition */
+/* object definition: size + ctor/dtor/cmp vtable consumed by the tsk object system */
+static const tsk_object_def_t tdav_producer_oss_def_s =
+{
+    sizeof(tdav_producer_oss_t),
+    tdav_producer_oss_ctor,
+    tdav_producer_oss_dtor,
+    tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+/* plugin definition: registers this producer (audio type) with the media layer;
+ * the exported pointer below is what plugin managers link against */
+static const tmedia_producer_plugin_def_t tdav_producer_oss_plugin_def_s =
+{
+    &tdav_producer_oss_def_s,
+
+    tmedia_audio,
+    "Linux OSS producer",
+
+    tdav_producer_oss_set,
+    tdav_producer_oss_prepare,
+    tdav_producer_oss_start,
+    tdav_producer_oss_pause,
+    tdav_producer_oss_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_oss_plugin_def_t = &tdav_producer_oss_plugin_def_s;
+
+#endif /* HAVE_LINUX_SOUNDCARD_H */
diff --git a/tinyDAV/src/audio/tdav_consumer_audio.c b/tinyDAV/src/audio/tdav_consumer_audio.c
new file mode 100644
index 0000000..73d9688
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_consumer_audio.c
@@ -0,0 +1,272 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+
+/**@file tdav_consumer_audio.c
+* @brief Base class for all Audio consumers.
+*/
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_defaults.h"
+#include "tinymedia/tmedia_denoise.h"
+#include "tinymedia/tmedia_resampler.h"
+#include "tinymedia/tmedia_jitterbuffer.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_time.h"
+#include "tsk_debug.h"
+
+#if TSK_UNDER_WINDOWS
+# include <Winsock2.h> // timeval
+#elif defined(__SYMBIAN32__)
+# include <_timeval.h>
+#else
+# include <sys/time.h>
+#endif
+
+#define TDAV_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_CHANNELS_DEFAULT 2
+#define TDAV_RATE_DEFAULT 8000
+#define TDAV_PTIME_DEFAULT 20
+
+#define TDAV_AUDIO_GAIN_MAX 15
+
+/** Initialize audio consumer */
+int tdav_consumer_audio_init(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ TSK_DEBUG_INFO("tdav_consumer_audio_init()");
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if ((ret = tmedia_consumer_init(TMEDIA_CONSUMER(self)))){
+ return ret;
+ }
+
+ /* self (should be update by prepare() by using the codec's info)*/
+ TMEDIA_CONSUMER(self)->audio.bits_per_sample = TDAV_BITS_PER_SAMPLE_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.ptime = TDAV_PTIME_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.in.channels = TDAV_CHANNELS_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.in.rate = TDAV_RATE_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_consumer_gain(), TDAV_AUDIO_GAIN_MAX);
+
+ tsk_safeobj_init(self);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two consumers.
+* @param consumer1 The first consumer to compare.
+* @param consumer2 The second consumer to compare.
+* @retval Returns an integral value indicating the relationship between the two consumers:
+* <0 : @a consumer1 less than @a consumer2.<br>
+* 0 : @a consumer1 identical to @a consumer2.<br>
+* >0 : @a consumer1 greater than @a consumer2.<br>
+*/
+/* Identity comparison: consumers compare by object pointer (saturated
+ * pointer subtraction), so two consumers are "equal" only if same instance. */
+int tdav_consumer_audio_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2)
+{
+    int ret;
+    tsk_subsat_int32_ptr(consumer1, consumer2, &ret);
+    return ret;
+}
+
+/* Apply a runtime parameter common to all audio consumers: "gain"
+ * (0..TDAV_AUDIO_GAIN_MAX-1) and "volume" (clamped to 0..100).
+ * Returns 0 on success, -1 on NULL self, -2 on out-of-range gain. */
+int tdav_consumer_audio_set(tdav_consumer_audio_t* self, const tmedia_param_t* param)
+{
+    if (!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if (param->plugin_type == tmedia_ppt_consumer){
+        if (param->value_type == tmedia_pvt_int32){
+            if (tsk_striequals(param->key, "gain")){
+                int32_t gain = *((int32_t*)param->value);
+                if (gain < TDAV_AUDIO_GAIN_MAX && gain >= 0){
+                    TMEDIA_CONSUMER(self)->audio.gain = (uint8_t)gain;
+                    // FIX: "%d" to match int32_t (was "%u" — format/arg mismatch is UB)
+                    TSK_DEBUG_INFO("audio consumer gain=%d", gain);
+                }
+                else{
+                    // FIX: "%d" to match int32_t (was "%u")
+                    TSK_DEBUG_ERROR("%d is invalid as gain value", gain);
+                    return -2;
+                }
+            }
+            else if (tsk_striequals(param->key, "volume")){
+                TMEDIA_CONSUMER(self)->audio.volume = TSK_TO_INT32((uint8_t*)param->value);
+                TMEDIA_CONSUMER(self)->audio.volume = TSK_CLAMP(0, TMEDIA_CONSUMER(self)->audio.volume, 100);
+            }
+        }
+    }
+
+    return 0;
+}
+
+/* put data (bytes not shorts) into the jitter buffer (consumers always have ptime of 20ms) */
+int tdav_consumer_audio_put(tdav_consumer_audio_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ int ret;
+
+ if (!self || !data || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+
+ if (!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ uint32_t rate = TMEDIA_CONSUMER(self)->audio.out.rate ? TMEDIA_CONSUMER(self)->audio.out.rate : TMEDIA_CONSUMER(self)->audio.in.rate;
+ uint32_t channels = TMEDIA_CONSUMER(self)->audio.out.channels ? TMEDIA_CONSUMER(self)->audio.out.channels : tmedia_defaults_get_audio_channels_playback();
+ if ((ret = tmedia_jitterbuffer_open(self->jitterbuffer, TMEDIA_CONSUMER(self)->audio.ptime, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return ret;
+ }
+ }
+
+ ret = tmedia_jitterbuffer_put(self->jitterbuffer, (void*)data, data_size, proto_hdr);
+
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* get data from the jitter buffer (consumers should always have ptime of 20ms) */
+tsk_size_t tdav_consumer_audio_get(tdav_consumer_audio_t* self, void* out_data, tsk_size_t out_size)
+{
+ tsk_size_t ret_size = 0;
+ if (!self || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ tsk_safeobj_lock(self);
+
+ if (!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ int ret;
+ uint32_t frame_duration = TMEDIA_CONSUMER(self)->audio.ptime;
+ uint32_t rate = TMEDIA_CONSUMER(self)->audio.out.rate ? TMEDIA_CONSUMER(self)->audio.out.rate : TMEDIA_CONSUMER(self)->audio.in.rate;
+ uint32_t channels = TMEDIA_CONSUMER(self)->audio.out.channels ? TMEDIA_CONSUMER(self)->audio.out.channels : tmedia_defaults_get_audio_channels_playback();
+ if ((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return 0;
+ }
+ }
+ ret_size = tmedia_jitterbuffer_get(TMEDIA_JITTER_BUFFER(self->jitterbuffer), out_data, out_size);
+
+ tsk_safeobj_unlock(self);
+
+ // denoiser
+ if (self->denoise && self->denoise->opened && (self->denoise->echo_supp_enabled || self->denoise->noise_supp_enabled)) {
+ if (self->denoise->echo_supp_enabled) {
+ // Echo process last frame
+ if (self->denoise->playback_frame && self->denoise->playback_frame->size) {
+ tmedia_denoise_echo_playback(self->denoise, self->denoise->playback_frame->data, (uint32_t)self->denoise->playback_frame->size);
+ }
+ if (ret_size){
+ // save
+ tsk_buffer_copy(self->denoise->playback_frame, 0, out_data, ret_size);
+ }
+ }
+
+#if 1 // suppress noise if not supported by remote party's encoder
+ // suppress noise
+ if (self->denoise->noise_supp_enabled && ret_size) {
+ tmedia_denoise_process_playback(self->denoise, out_data, (uint32_t)ret_size);
+ }
+#endif
+ }
+
+ return ret_size;
+}
+
+/* Advance the jitter buffer clock; call once per playback period.
+ * NOTE(review): returns 0 on invalid arguments, same as success — unlike the
+ * -1 convention used elsewhere in this file; confirm callers don't care. */
+int tdav_consumer_audio_tick(tdav_consumer_audio_t* self)
+{
+    if (!self || !self->jitterbuffer){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+    return tmedia_jitterbuffer_tick(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+}
+
+/* set denioiser */
+void tdav_consumer_audio_set_denoise(tdav_consumer_audio_t* self, struct tmedia_denoise_s* denoise)
+{
+ tsk_safeobj_lock(self);
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+ self->denoise = (struct tmedia_denoise_s*)tsk_object_ref(denoise);
+ tsk_safeobj_unlock(self);
+}
+
+/* Set the jitter buffer: drops any previous one and takes a new reference
+ * (jitterbuffer may be NULL to clear). */
+void tdav_consumer_audio_set_jitterbuffer(tdav_consumer_audio_t* self, struct tmedia_jitterbuffer_s* jitterbuffer)
+{
+    tsk_safeobj_lock(self);
+    TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
+    self->jitterbuffer = (struct tmedia_jitterbuffer_s*)tsk_object_ref(jitterbuffer);
+    tsk_safeobj_unlock(self);
+}
+
+/** Reset jitterbuffer */
+int tdav_consumer_audio_reset(tdav_consumer_audio_t* self){
+ int ret;
+ if (!self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+ ret = tmedia_jitterbuffer_reset(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* tsk_safeobj_lock(self); */
+/* tsk_safeobj_unlock(self); */
+
+/** DeInitialize audio consumer */
+int tdav_consumer_audio_deinit(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if ((ret = tmedia_consumer_deinit(TMEDIA_CONSUMER(self)))){
+ /* return ret; */
+ }
+
+ /* self */
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+ TSK_OBJECT_SAFE_FREE(self->resampler);
+ TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
+
+ tsk_safeobj_deinit(self);
+
+ return 0;
+}
+
diff --git a/tinyDAV/src/audio/tdav_jitterbuffer.c b/tinyDAV/src/audio/tdav_jitterbuffer.c
new file mode 100644
index 0000000..4fd1010
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_jitterbuffer.c
@@ -0,0 +1,1036 @@
+/* File from: http://cms.speakup.nl/tech/opensource/jitterbuffer/verslag-20051209.pdf/ */
+
+/*******************************************************
+* jitterbuffer:
+* an application-independent jitterbuffer, which tries
+* to achieve the maximum user perception during a call.
+* For more information look at:
+* http://www.speakup.nl/opensource/jitterbuffer/
+*
+* Copyright on this file is held by:
+* - Jesse Kaijen <jesse@speakup.nl>
+* - SpeakUp <info@speakup.nl>
+*
+* Contributors:
+* Jesse Kaijen <jesse@speakup.nl>
+*
+* This program is free software, distributed under the terms of:
+* - the GNU Lesser (Library) General Public License
+* - the Mozilla Public License
+*
+* if you are interested in an different licence type, please contact us.
+*
+* How to use the jitterbuffer, please look at the comments
+* in the headerfile.
+*
+* Further details on specific implementations,
+* please look at the comments in the code file.
+*/
+#include "tinydav/audio/tdav_jitterbuffer.h"
+
+#if !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
+
+#include "tsk_memory.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#define jb_warn(...) (warnf ? warnf(__VA_ARGS__) : (void)0)
+#define jb_err(...) (errf ? errf(__VA_ARGS__) : (void)0)
+#define jb_dbg(...) (dbgf ? dbgf(__VA_ARGS__) : (void)0)
+
+//public functions
+jitterbuffer *jb_new();
+void jb_reset(jitterbuffer *jb);
+void jb_reset_all(jitterbuffer *jb);
+void jb_destroy(jitterbuffer *jb);
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings);
+
+void jb_get_info(jitterbuffer *jb, jb_info *stats);
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings);
+float jb_guess_mos(float p, long d, int codec);
+int jb_has_frames(jitterbuffer *jb);
+
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec);
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl);
+
+
+
+//private functions
+static void set_default_settings(jitterbuffer *jb);
+static void reset(jitterbuffer *jb);
+static long find_pointer(long *array, long max_index, long value); static void frame_free(jb_frame *frame);
+
+static void put_control(jitterbuffer *jb, void *data, int type, long ts);
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec);
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec);
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec);
+
+static int get_control(jitterbuffer *jb, void **data);
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl);
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff);
+
+static int get_next_frametype(jitterbuffer *jb, long ts);
+static long get_next_framets(jitterbuffer *jb);
+static jb_frame *get_frame(jitterbuffer *jb, long ts);
+static jb_frame *get_all_frames(jitterbuffer *jb);
+
+//debug: optional logging hooks consumed by the jb_warn/jb_err/jb_dbg macros;
+//a NULL callback (the default for file-scope statics) disables that level.
+static jb_output_function_t warnf, errf, dbgf;
+/** Install warning/error/debug output callbacks (any of them may be NULL). */
+void jb_setoutput(jb_output_function_t warn, jb_output_function_t err, jb_output_function_t dbg) {
+ warnf = warn;
+ errf = err;
+ dbgf = dbg;
+}
+
+
+/***********
+ * create a new jitterbuffer
+ * return NULL if the allocation fails
+ * else return jb with default_settings, in silence mode, history empty.
+ */
+jitterbuffer *jb_new()
+{
+ jitterbuffer *jb;
+
+ jb_dbg("N");
+ jb = tsk_calloc(1, sizeof(jitterbuffer)); // zero-initialized
+ if (!jb) {
+ jb_err("cannot allocate jitterbuffer\n");
+ return NULL;
+ }
+ set_default_settings(jb);
+ reset(jb); // start in silence with an empty history
+ return jb;
+}
+
+
+/***********
+ * empty voice messages
+ * reset statistics
+ * keep the settings (control frames are also kept; see jb_reset_all)
+ */
+void jb_reset(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("R");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset()\n");
+ return;
+ }
+
+ //free voice: get_all_frames unlinks one frame per call
+ while(jb->voiceframes) {
+ frame = get_all_frames(jb);
+ frame_free(frame);
+ }
+ //reset stats
+ memset(&(jb->info),0,sizeof(jb_info) );
+ // back to silence mode with an empty history (settings are NOT touched)
+ reset(jb);
+}
+
+
+/***********
+ * empty nonvoice messages
+ * empty voice messages
+ * reset statistics
+ * reset settings to default
+ */
+void jb_reset_all(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("r");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset_all()\n");
+ return;
+ }
+
+ // free nonvoice: control frames form a singly-linked, NULL-terminated list
+ while(jb->controlframes) {
+ frame = jb->controlframes;
+ jb->controlframes = frame->next;
+ frame_free(frame);
+ }
+ // free voice and reset statistics is done by jb_reset
+ jb_reset(jb);
+ set_default_settings(jb);
+}
+
+
+/***********
+ * destroy the jitterbuffer
+ * free all the [non]voice frames with reset_all
+ * free the jitterbuffer
+ */
+void jb_destroy(jitterbuffer *jb)
+{
+ jb_dbg("D");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_destroy()\n");
+ return;
+ }
+
+ jb_reset_all(jb);
+ // NOTE(review): jb was allocated with tsk_calloc() in jb_new() but released
+ // with plain free() — only safe if tsk_calloc wraps calloc(); confirm.
+ free(jb);
+}
+
+
+/***********
+ * Set settings for the jitterbuffer.
+ * Only if a setting is defined (non-zero) it will be written
+ * in the jb->settings.
+ * This means that no setting can be set to zero
+ */
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings)
+{
+    jb_dbg("S");
+    if (jb == NULL) {
+        jb_err("no jitterbuffer in jb_set_settings()\n");
+        return;
+    }
+    if (settings == NULL) {
+        /* FIX: guard against a NULL settings pointer, mirroring the jb check
+         * above; previously 'settings' was dereferenced unconditionally */
+        jb_err("no settings in jb_set_settings()\n");
+        return;
+    }
+
+    if (settings->min_jb) {
+        jb->settings.min_jb = settings->min_jb;
+    }
+    if (settings->max_jb) {
+        jb->settings.max_jb = settings->max_jb;
+    }
+    if (settings->max_successive_interp) {
+        jb->settings.max_successive_interp = settings->max_successive_interp;
+    }
+    if (settings->extra_delay) {
+        jb->settings.extra_delay = settings->extra_delay;
+    }
+    if (settings->wait_grow) {
+        jb->settings.wait_grow = settings->wait_grow;
+    }
+    if (settings->wait_shrink) {
+        jb->settings.wait_shrink = settings->wait_shrink;
+    }
+    if (settings->max_diff) {
+        jb->settings.max_diff = settings->max_diff;
+    }
+}
+
+
+/***********
+ * validates the statistics
+ * the losspct due the jitterbuffer will be calculated.
+ * delay and delay_target will be calculated
+ * *stats = info
+ */
+void jb_get_info(jitterbuffer *jb, jb_info *stats)
+{
+ long max_index, pointer;
+
+ jb_dbg("I");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get_info()\n");
+ return;
+ }
+
+ // delays are reported relative to the minimum observed delay
+ jb->info.delay = jb->current - jb->min;
+ jb->info.delay_target = jb->target - jb->min;
+
+ //calculate the losspct: history entries whose delay exceeds the current
+ //buffer size would have arrived too late to play, i.e. counted as lost
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ?
+jb->hist_pointer : JB_HISTORY_SIZE-1;
+ if (max_index>1) {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index,
+jb->current);
+ jb->info.losspct = ((max_index - pointer)*100/max_index);
+ if (jb->info.losspct < 0) {
+ jb->info.losspct = 0;
+ }
+ } else {
+ jb->info.losspct = 0;
+ }
+
+ *stats = jb->info;
+}
+
+
+/***********
+ * copies the current settings of this jitterbuffer into *settings
+ */
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings)
+{
+    jb_dbg("S");
+    if (!jb) {
+        jb_err("no jitterbuffer in jb_get_settings()\n");
+        return;
+    }
+    /* plain struct copy out to the caller */
+    *settings = jb->settings;
+}
+
+
+/***********
+ * returns an estimate on the MOS with given loss percentage p,
+ * delay d and codec; unknown codecs fall back to the default formula
+ * (use JB_CODEC_OTHER if you want to define your own formula)
+ *
+ * every formula shares the same delay penalty (0.0071*d); only the
+ * base score and the loss coefficient differ per codec.
+ */
+float jb_guess_mos(float p, long d, int codec)
+{
+    float base, loss_coef;
+
+    switch (codec) {
+    case JB_CODEC_GSM_EFR:
+        base = 4.31f; loss_coef = 0.23f;
+        break;
+    case JB_CODEC_G723_1:
+        base = 3.99f; loss_coef = 0.16f;
+        break;
+    case JB_CODEC_G729:
+    case JB_CODEC_G729A:
+        base = 4.13f; loss_coef = 0.14f;
+        break;
+    case JB_CODEC_G711x_PLC:
+        base = 4.42f; loss_coef = 0.087f;
+        break;
+    case JB_CODEC_G711x:
+    case JB_CODEC_OTHER:
+    default:
+        base = 4.42f; loss_coef = 0.63f;
+        break;
+    }
+    return base - loss_coef*p - 0.0071f*d;
+}
+
+
+/***********
+ * if there are any frames left in JB returns JB_OK, otherwise returns JB_EMPTY
+ * (returns JB_NOJB when jb itself is NULL)
+ */
+int jb_has_frames(jitterbuffer *jb)
+{
+    jb_dbg("H");
+    if (!jb) {
+        jb_err("no jitterbuffer in jb_has_frames()\n");
+        return JB_NOJB;
+    }
+    /* control frames count as much as voice frames here */
+    return (jb->controlframes || jb->voiceframes) ? JB_OK : JB_EMPTY;
+}
+
+
+/***********
+ * Put a packet into the jitterbuffers
+ * Only the timestamps of voicepackets are put in the history
+ * this because the jitterbuffer only works for voicepackets
+ * don't put packets twice in history and queue (e.g. transmitting every frame twice)
+ * keep track of statistics
+ * Ownership of 'data' passes to the jitterbuffer; duplicates and unknown
+ * types are freed immediately. NOTE(review): 'data' leaks when jb is NULL
+ * or when an internal frame allocation fails — confirm callers.
+ */
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec)
+{
+ long pointer, max_index;
+
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_put()\n");
+ return;
+ }
+
+ jb->info.frames_received++;
+
+ if (type == JB_TYPE_CONTROL) {
+ //put the packet into the control-queue of the jitterbuffer
+ jb_dbg("pC");
+ put_control(jb,data,type,ts);
+
+ } else if (type == JB_TYPE_VOICE) {
+ // only add voice that aren't already in the buffer
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, ts);
+ if (jb->hist_sorted_timestamp[pointer]==ts) { //timestamp already in queue
+ jb_dbg("pT");
+ free(data);
+ jb->info.frames_dropped_twice++;
+ } else { //add
+ jb_dbg("pV");
+ /* add voicepacket to history */
+ put_history(jb,ts,now,ms,codec);
+ /*calculate jitterbuffer size*/
+ calculate_info(jb, ts, now, codec);
+ /*put the packet into the queue of the jitterbuffer*/
+ put_voice(jb,data,type,ms,ts,codec);
+ }
+
+ } else if (type == JB_TYPE_SILENCE){ //silence
+ jb_dbg("pS");
+ put_voice(jb,data,type,ms,ts,codec);
+
+ } else {//should NEVER happen
+ jb_err("jb_put(): type not known\n");
+ free(data);
+ }
+}
+
+
+/***********
+ * control frames have a higher priority then voice frames
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it's no time to play voice and no control available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ */
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl)
+{
+    int rc;
+
+    jb_dbg("A");
+    if (!jb) {
+        jb_err("no jitterbuffer in jb_get()\n");
+        return JB_NOJB;
+    }
+
+    /* control messages first; fall back to voice when none is pending */
+    rc = get_control(jb, data);
+    if (rc == JB_OK) {
+        return rc;
+    }
+    return get_voice(jb, data, now, interpl);
+}
+
+
+/***********
+ * set all the settings to their compile-time defaults
+ */
+static void set_default_settings(jitterbuffer *jb)
+{
+    jb_settings *s = &jb->settings;
+
+    s->min_jb = JB_MIN_SIZE;
+    s->max_jb = JB_MAX_SIZE;
+    s->max_successive_interp = JB_MAX_SUCCESSIVE_INTERP;
+    s->extra_delay = JB_ALLOW_EXTRA_DELAY;
+    s->wait_grow = JB_WAIT_GROW;
+    s->wait_shrink = JB_WAIT_SHRINK;
+    s->max_diff = JB_MAX_DIFF;
+}
+
+
+/***********
+ * reset the jitterbuffer so we can start in silence and
+ * we start with a new history
+ * (settings and statistics are left untouched — see jb_reset for those)
+ */
+static void reset(jitterbuffer *jb)
+{
+ jb->hist_pointer = 0; //start over
+ jb->silence_begin_ts = 0; //no begin_ts defined
+ jb->info.silence =1; //we always start in silence
+}
+
+
+/***********
+ * Binary search in a sorted array of longs.
+ * @REQUIRE max_index is within array
+ *
+ * Find the position of value in the array;
+ * if value doesn't exist return the first pointer where array[low] > value.
+ * When equal values are adjacent, the duplicate scan at the end moves the
+ * result onto the last element of the run of duplicates.
+ */
+static long find_pointer(long *array, long max_index, long value)
+{
+    /* FIX: dropped the obsolescent 'register' qualifier — it is ignored by
+     * modern compilers and removed as a storage hint in newer C standards */
+    long low = 0, high = max_index, mid;
+
+    while (low <= high) {
+        mid = (low + high) / 2;
+        if (array[mid] < value) {
+            low = mid + 1;
+        } else {
+            high = mid - 1;
+        }
+    }
+    /* skip over duplicates so 'low' lands on the last equal entry */
+    while (low < max_index && (array[low] == array[(low+1)])) {
+        low++;
+    }
+    return low;
+}
+
+
+/***********
+ * free the given frame (payload and node); afterwards the framepointer is
+ * undefined. The frame itself must be non-NULL.
+ */
+static void frame_free(jb_frame *frame)
+{
+    /* FIX: removed the redundant 'if (frame->data)' guard —
+     * free(NULL) is defined as a no-op by the C standard */
+    free(frame->data);
+    free(frame);
+}
+
+
+/***********
+ * put a nonvoice frame into the nonvoice queue
+ * the queue is a singly-linked, NULL-terminated list kept sorted by
+ * ascending timestamp; equal timestamps keep arrival order
+ */
+static void put_control(jitterbuffer *jb, void *data, int type, long ts)
+{
+ jb_frame *frame, *p;
+
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ // NOTE(review): 'data' leaks here — the caller assumes the queue took ownership
+ return;
+ }
+ frame->data = data;
+ frame->ts = ts;
+ frame->type = type;
+ frame->next = NULL;
+ data = NULL;//to avoid stealing memory
+
+ p = jb->controlframes;
+ if (p) { //there are already control messages
+ if (ts < p->ts) {
+ jb->controlframes = frame;
+ frame->next = p;
+ } else {
+ while (p->next && (ts >=p->next->ts)) {//sort on timestamps! so find place to put...
+ p = p->next;
+ }
+ if (p->next) {
+ frame->next = p->next;
+ }
+ p->next = frame;
+ }
+ } else {
+ jb->controlframes = frame;
+ }
+}
+
+
+/***********
+ * put a voice or silence frame into the jitterbuffer,
+ * inserted so the circular list stays sorted by timestamp
+ */
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec)
+{
+ jb_frame *frame, *p;
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ // NOTE(review): 'data' leaks here — the caller assumes the queue took ownership
+ return;
+ }
+
+ frame->data = data;
+ frame->ts = ts;
+ frame->ms = ms;
+ frame->type = type;
+ frame->codec = codec;
+
+ data = NULL; //to avoid stealing the memory location
+ /*
+ * frames are a circular list, jb->voiceframes points to the lowest ts,
+ * jb->voiceframes->prev points to the highest ts
+ */
+ if(!jb->voiceframes) { /* queue is empty */
+ jb->voiceframes = frame;
+ frame->next = frame;
+ frame->prev = frame;
+ } else {
+ p = jb->voiceframes;
+ if(ts < p->prev->ts) { //frame is out of order
+ jb->info.frames_ooo++;
+ }
+ if (ts < p->ts) { //frame is lowest, let voiceframes point to it!
+ jb->voiceframes = frame;
+ } else {
+ // walk backwards from the highest ts to find the insertion point
+ while(ts < p->prev->ts ) {
+ p = p->prev;
+ }
+ }
+ frame->next = p;
+ frame->prev = p->prev;
+ frame->next->prev = frame;
+ frame->prev->next = frame;
+ }
+}
+
+
+/***********
+ * puts the timestamps of a received packet in the history of *jb
+ * for later calculations of the size of jitterbuffer *jb.
+ *
+ * summary of function:
+ * - calculate delay difference
+ * - delete old value from hist & sorted_history_delay & sorted_history_timestamp if needed
+ * - add new value to history & sorted_history_delay & sorted_history_timestamp
+ * - we keep sorted_history_delay for calculations
+ * - we keep sorted_history_timestamp for ensuring each timestamp isn't put twice in the buffer.
+ *
+ * hist[] is a circular buffer of the last JB_HISTORY_SIZE packets; the two
+ * sorted arrays are maintained in parallel by memmove-based insert/delete.
+ */
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec)
+{
+ jb_hist_element out, in;
+ long max_index, pointer, location;
+
+ // max_index is the highest possible index
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ location = (jb->hist_pointer % JB_HISTORY_SIZE);
+
+ // we want to delete a value from the jitterbuffer
+ // only when we are through the history (i.e. the circular buffer wrapped).
+ if (jb->hist_pointer > JB_HISTORY_SIZE-1) {
+ /* the value we need to delete from sorted histories */
+ out = jb->hist[location];
+ //delete delay from hist_sorted_delay
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index, out.delay);
+ /* move over pointer is the position of kicked*/
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_delay[pointer]),
+ &(jb->hist_sorted_delay[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+
+ //delete timestamp from hist_sorted_timestamp
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, out.ts);
+ /* move over pointer is the position of kicked*/
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_timestamp[pointer]),
+ &(jb->hist_sorted_timestamp[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+ }
+
+ in.delay = now - ts; //delay of current packet
+ in.ts = ts; //timestamp of current packet
+ in.ms = ms; //length of current packet
+ in.codec = codec; //codec of current packet
+
+ /* adding the new delay to the sorted history
+ * first special cases:
+ * - delay is the first history stamp
+ * - delay > highest history stamp
+ */
+ if (max_index==0 || in.delay >= jb->hist_sorted_delay[max_index-1]) {
+ jb->hist_sorted_delay[max_index] = in.delay;
+ } else {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], (max_index-1), in.delay);
+ /* move over and add delay */
+ memmove( &(jb->hist_sorted_delay[pointer+1]),
+ &(jb->hist_sorted_delay[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_delay[pointer] = in.delay;
+ }
+
+ /* adding the new timestamp to the sorted history
+ * first special cases:
+ * - timestamp is the first history stamp
+ * - timestamp > highest history stamp
+ */
+ if (max_index==0 || in.ts >= jb->hist_sorted_timestamp[max_index-1]) {
+ jb->hist_sorted_timestamp[max_index] = in.ts;
+ } else {
+
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], (max_index-1), in.ts);
+ /* move over and add timestamp */
+ memmove( &(jb->hist_sorted_timestamp[pointer+1]),
+ &(jb->hist_sorted_timestamp[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_timestamp[pointer] = in.ts;
+ }
+
+ /* put the jb_hist_element in the history
+ * then increase hist_pointer for next time
+ */
+ jb->hist[location] = in;
+ jb->hist_pointer++;
+}
+
+
+/***********
+ * this tries to make a jitterbuffer that behaves like
+ * the jitterbuffer proposed in this article:
+ * Adaptive Playout Buffer Algorithm for Enhancing Perceived Quality of Streaming Applications
+ * by: Kouhei Fujimoto & Shingo Ata & Masayuki Murata
+ * http://www.nal.ics.es.osaka-u.ac.jp/achievements/web2002/pdf/journal/k-fujimo02TSJ-AdaptivePlayoutBuffer.pdf
+ *
+ * it calculates jitter and minimum delay, then derives jb->target,
+ * the delay giving the best estimated MOS for the specified codec
+ */
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec)
+{
+    long diff, size, max_index, d, d1, d2, n;
+    float p, p1, p2, A, B;
+
+    //size = how many items there are in the history
+    size = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE;
+    max_index = size-1;
+
+    /*
+     * the Inter-Quartile Range can be used for estimating jitter
+     * http://www.slac.stanford.edu/comp/net/wan-mon/tutorial.html#variable
+     * just take the square root of the iqr for jitter
+     */
+    jb->info.iqr = jb->hist_sorted_delay[max_index*3/4] - jb->hist_sorted_delay[max_index/4];
+
+    /*
+     * The RTP way of calculating jitter.
+     * This one is used at the moment, although it is not correct.
+     * But in this way the other side understands us.
+     */
+    diff = now - ts - jb->last_delay;
+    if (!jb->last_delay) {
+        diff = 0; //this to make sure we won't get odd jitter due first ts.
+    }
+    jb->last_delay = now - ts;
+    if (diff < 0) {
+        diff = -diff;
+    }
+    jb->info.jitter = jb->info.jitter + (diff - jb->info.jitter)/16;
+
+    /* jb->min is minimum delay in hist_sorted_delay, we don't look at the lowest 2% */
+    /* because sometimes there are odd delays in there */
+    jb->min = jb->hist_sorted_delay[(max_index*2/100)];
+
+    /*
+     * calculating the preferred size of the jitterbuffer:
+     * instead of calculating the optimum delay using the Pareto equation
+     * we look at the array of sorted delays and choose the optimum from there
+     * always walk through a percentage of the history because imagine the following tail:
+     * [...., 12, 300, 301 ,302]
+     * here we want to discard the last three but that won't happen unless we walk the array
+     * the number of frames we walk depends on how scattered the sorted delays are.
+     * For that we look at the iqr. The dependencies on the iqr are based on
+     * tests done in the lab, but are not optimized.
+     */
+    //init: start from the highest delay
+    d = d1 = d2 = jb->hist_sorted_delay[max_index] - jb->min;
+    A = B = LONG_MIN;
+    p = p2 = 0;
+    n = 0;
+    p1 = 5; //always look at the top 5%
+    if (jb->info.iqr > 200) { //with more jitter look at more delays
+        p1 = 25;
+    } else if (jb->info.iqr > 100) {
+        p1 = 20;
+    } else if (jb->info.iqr > 50) {
+        p1 = 11;
+    }
+
+    //find the optimum delay: keep the candidate with the best estimated MOS
+    while (max_index > 10 && (B > A || p2 < p1)) { // By MDI: from ">=" to ">"
+        //the packetloss with this delay
+        p2 = (n*100.0f/size);
+        // estimate MOS-value
+        B = jb_guess_mos(p2, d2, codec);
+        if (B > A) {
+            p = p2;
+            d = d2;
+            A = B;
+        }
+        d1 = d2;
+        //find next delay != delay so the same delay isn't calculated twice
+        //don't look further if we have seen half of the history
+        while ((d2 >= d1) && ((n*2) < max_index)) {
+            n++;
+            d2 = jb->hist_sorted_delay[(max_index-n)] - jb->min;
+        }
+    }
+
+    //the targeted size of the jitterbuffer: clamp d into [min_jb, max_jb]
+    if (jb->settings.min_jb && (jb->settings.min_jb > d)) {
+        jb->target = jb->min + jb->settings.min_jb;
+    } else if (jb->settings.max_jb && (jb->settings.max_jb < d)) {
+        // FIX: was 'max_jb > d', which inflated the buffer to its maximum
+        // whenever the computed optimum was BELOW the cap; max_jb is a
+        // ceiling and must only override d when d exceeds it.
+        jb->target = jb->min + jb->settings.max_jb;
+    } else {
+        jb->target = jb->min + d;
+    }
+}
+
+
+/***********
+ * if there is a nonvoice frame its payload is handed to the caller via
+ * *data (JB_OK) and the frame is unlinked and freed; JB_NOFRAME otherwise
+ */
+static int get_control(jitterbuffer *jb, void **data)
+{
+    jb_frame *head = jb->controlframes;
+
+    if (!head) {
+        return JB_NOFRAME;
+    }
+    jb_dbg("gC");
+    /* hand the payload to the caller, then unlink and release the node */
+    *data = head->data;
+    head->data = NULL;
+    jb->controlframes = head->next;
+    frame_free(head);
+    return JB_OK;
+}
+
+
+/***********
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it's no time to play voice and or no frame available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ *
+ * if the next frame is a silence frame we will go in silence-mode
+ * each new instance of the jitterbuffer will start in silence mode
+ * in silence mode we will set the jitterbuffer to the size we want
+ * when we are not in silence mode get_voicecase will handle the rest.
+ */
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl)
+{
+ jb_frame *frame;
+ long diff;
+ int result;
+
+ // diff > 0 means the buffer should grow, diff < 0 means it should shrink
+ diff = jb->target - jb->current;
+
+ //if the next frame is a silence frame, go in silence mode...
+ if((get_next_frametype(jb, now - jb->current) == JB_TYPE_SILENCE) ) {
+ jb_dbg("gs");
+ frame = get_frame(jb, now - jb->current);
+ *data = frame->data;
+ frame->data = NULL;
+ jb->info.silence =1;
+ jb->silence_begin_ts = frame->ts;
+ frame_free(frame);
+ result = JB_OK;
+ } else {
+ if(jb->info.silence) { // we are in silence
+ /*
+ * During silence we can set the jitterbuffer size to the size
+ * we want (nothing is playing, so the jump is inaudible)
+ */
+ if (diff) {
+ jb->current = jb->target;
+ }
+ frame = get_frame(jb, now - jb->current);
+ if (frame) {
+ if (jb->silence_begin_ts && frame->ts < jb->silence_begin_ts) {
+ jb_dbg("gL");
+ /* voice frame is late, next! (recurse to try the following frame) */
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("gP");
+ /* voice frame: leave silence mode */
+ jb->info.silence = 0;
+ jb->silence_begin_ts = 0;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->info.last_voice_ms = frame->ms;
+ *data = frame->data;
+ frame->data = NULL;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { //no frame
+ jb_dbg("gS");
+ result = JB_EMPTY;
+ }
+ } else { //voice case
+ result = get_voicecase(jb,data,now,interpl,diff);
+ }
+ }
+ return result;
+}
+
+
+/***********
+ * The voicecase has four 'options'
+ * - difference is way off, reset
+ * - diff > 0, we may need to grow
+ * - diff < 0, we may need to shrink
+ * - everything else
+ * grow/shrink paths recurse into get_voice after adjusting jb->current.
+ */
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff)
+{
+ jb_frame *frame;
+ int result;
+
+ // * - difference is way off, reset
+ if (diff > jb->settings.max_diff || -diff > jb->settings.max_diff) {
+ jb_err("wakko diff in get_voicecase\n");
+ reset(jb); //reset hist because the timestamps are wakko.
+ result = JB_NOFRAME;
+ //- diff > 0, we may need to grow
+ } else if ((diff > 0) &&
+ (now > (jb->last_adjustment + jb->settings.wait_grow)
+ || (now + jb->current + interpl) < get_next_framets(jb) ) ) { //grow
+ /* first try to grow; a small deficit is absorbed in one step,
+ * otherwise grow by one interpolation-frame length */
+ if (diff<interpl/2) {
+ jb_dbg("ag");
+ jb->current +=diff;
+ } else {
+ jb_dbg("aG");
+ /* grow by interp frame len */
+ jb->current += interpl;
+ }
+ jb->last_adjustment = now;
+ result = get_voice(jb, data, now, interpl);
+ //- diff < 0, we may need to shrink
+ } else if ( (diff < 0)
+ && (now > (jb->last_adjustment + jb->settings.wait_shrink))
+ && ((-diff) > jb->settings.extra_delay) ) {
+ /* now try to shrink
+ * if there is a frame shrink by frame length
+ * otherwise shrink by interpl
+ */
+ jb->last_adjustment = now;
+
+ frame = get_frame(jb, now - jb->current);
+ if(frame) {
+ jb_dbg("as");
+ /* shrink by frame size we're throwing out */
+ jb->info.frames_dropped++;
+ jb->current -= frame->ms;
+ frame_free(frame);
+ } else {
+ jb_dbg("aS");
+ /* shrink by interpl */
+ jb->current -= interpl;
+ }
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ /* if it is not the time to play a result = JB_NOFRAME
+ * else We try to play a frame if a frame is available
+ * and not late it is played otherwise
+ * if available it is dropped and the next is tried
+ * last option is interpolating
+ */
+ if (now - jb->current < jb->next_voice_time) {
+ jb_dbg("aN");
+ result = JB_NOFRAME;
+ } else {
+ frame = get_frame(jb, now - jb->current);
+ if (frame) { //there is a frame
+ /* voice frame is late */
+ if(frame->ts < jb->next_voice_time) { //late
+ jb_dbg("aL");
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("aP");
+ /* normal case; return the frame, increment stuff */
+ *data = frame->data;
+ frame->data = NULL;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->cnt_successive_interp = 0;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { // no frame, thus interpolate
+ jb->cnt_successive_interp++;
+ /* assume silence instead of continuing to interpolate */
+ if (jb->settings.max_successive_interp && jb->cnt_successive_interp >= jb->settings.max_successive_interp) {
+ jb->info.silence = 1;
+ jb->silence_begin_ts = jb->next_voice_time;
+ }
+ jb_dbg("aI");
+ jb->next_voice_time += interpl;
+ result = JB_INTERP;
+ }
+ }
+ }
+ return result;
+
+}
+
+
+/***********
+ * if there are frames and the head frame's ts is smaller or equal ts
+ * return the type of that frame, else return 0
+ */
+static int get_next_frametype(jitterbuffer *jb, long ts)
+{
+    const jb_frame *head = jb->voiceframes;
+
+    /* head of the circular list holds the lowest timestamp */
+    if (head && head->ts <= ts) {
+        return head->type;
+    }
+    return 0;
+}
+
+
+/***********
+ * returns ts from the next frame in jb->voiceframes,
+ * or LONG_MAX if there is no frame
+ */
+static long get_next_framets(jitterbuffer *jb)
+{
+    return jb->voiceframes ? jb->voiceframes->ts : LONG_MAX;
+}
+
+
+/***********
+ * if there is a frame in jb->voiceframes with a timestamp
+ * smaller/equal to ts, unlink it from the circular list and return it;
+ * NULL otherwise
+ */
+static jb_frame *get_frame(jitterbuffer *jb, long ts)
+{
+    jb_frame *head = jb->voiceframes;
+
+    if (!head || head->ts > ts) {
+        return NULL;
+    }
+    if (head->next == head) {
+        /* last remaining frame of the circular list */
+        jb->voiceframes = NULL;
+    } else {
+        /* unlink the head and advance to the next-lowest timestamp */
+        head->prev->next = head->next;
+        head->next->prev = head->prev;
+        jb->voiceframes = head->next;
+    }
+    return head;
+}
+
+/***********
+ * if there is a frame in jb->voiceframes, unconditionally unlink it
+ * from the circular list and return it; NULL when the queue is empty
+ */
+static jb_frame *get_all_frames(jitterbuffer *jb)
+{
+    jb_frame *head = jb->voiceframes;
+
+    if (!head) {
+        return NULL;
+    }
+    if (head->next == head) {
+        /* last remaining frame of the circular list */
+        jb->voiceframes = NULL;
+    } else {
+        head->prev->next = head->next;
+        head->next->prev = head->prev;
+        jb->voiceframes = head->next;
+    }
+    return head;
+}
+
+
+#endif // !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
diff --git a/tinyDAV/src/audio/tdav_producer_audio.c b/tinyDAV/src/audio/tdav_producer_audio.c
new file mode 100644
index 0000000..8c73c9f
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_producer_audio.c
@@ -0,0 +1,133 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_audio.c
+ * @brief Base class for all Audio producers.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/audio/tdav_producer_audio.h"
+
+#include "tinymedia/tmedia_defaults.h"
+
+#define TDAV_PRODUCER_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_PRODUCER_CHANNELS_DEFAULT 1
+#define TDAV_PRODUCER_RATE_DEFAULT 8000
+#define TDAV_PRODUCER_PTIME_DEFAULT 20
+#define TDAV_PRODUCER_AUDIO_GAIN_MAX 15
+
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+/** Initialize Audio producer.
+* Sets base audio defaults (16-bit, mono, 8kHz, 20ms ptime) which prepare()
+* is expected to overwrite with the negotiated codec's parameters.
+* @param self The producer to initialize
+* @retval 0 on success, negative error code otherwise
+*/
+int tdav_producer_audio_init(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if((ret = tmedia_producer_init(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ /* self (should be updated by prepare() using the codec's info)*/
+ TMEDIA_PRODUCER(self)->audio.bits_per_sample = TDAV_PRODUCER_BITS_PER_SAMPLE_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.channels = TDAV_PRODUCER_CHANNELS_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.rate = TDAV_PRODUCER_RATE_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.ptime = TDAV_PRODUCER_PTIME_DEFAULT;
+ // clamp the configured default gain so it never exceeds the supported maximum
+ TMEDIA_PRODUCER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_producer_gain(), TDAV_PRODUCER_AUDIO_GAIN_MAX);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two producers.
+* The comparison is by object identity (via tsk_subsat_int32_ptr on the
+* pointers), not by the producers' contents.
+* @param producer1 The first producer to compare.
+* @param producer2 The second producer to compare.
+* @retval Returns an integral value indicating the relationship between the two producers:
+* <0 : @a producer1 less than @a producer2.<br>
+* 0 : @a producer1 identical to @a producer2.<br>
+* >0 : @a producer1 greater than @a producer2.<br>
+*/
+int tdav_producer_audio_cmp(const tsk_object_t* producer1, const tsk_object_t* producer2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(producer1, producer2, &ret);
+ return ret;
+}
+
+/** Handles producer-level parameters ("gain", "volume").
+* @retval 0 on success (including unknown keys, which are ignored),
+* -1 on invalid parameter, -2 on out-of-range gain */
+int tdav_producer_audio_set(tdav_producer_audio_t* self, const tmedia_param_t* param)
+{
+    if (!self) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if (param->plugin_type == tmedia_ppt_producer) {
+        if (param->value_type == tmedia_pvt_int32) {
+            if (tsk_striequals(param->key, "gain")) {
+                int32_t gain = *((int32_t*)param->value);
+                /* FIX: accept the full [0, MAX] range — init() clamps the
+                 * default gain to MAX inclusive, so the setter must not
+                 * reject MAX (was 'gain < MAX') */
+                if (gain <= TDAV_PRODUCER_AUDIO_GAIN_MAX && gain >= 0) {
+                    TMEDIA_PRODUCER(self)->audio.gain = (uint8_t)gain;
+                    /* FIX: '%d' — gain is signed int32_t, '%u' was UB */
+                    TSK_DEBUG_INFO("audio producer gain=%d", gain);
+                }
+                else {
+                    TSK_DEBUG_ERROR("%d is invalid as gain value", gain);
+                    return -2;
+                }
+            }
+            else if (tsk_striequals(param->key, "volume")) {
+                TMEDIA_PRODUCER(self)->audio.volume = TSK_TO_INT32((uint8_t*)param->value);
+                TMEDIA_PRODUCER(self)->audio.volume = TSK_CLAMP(0, TMEDIA_PRODUCER(self)->audio.volume, 100);
+                TSK_DEBUG_INFO("audio producer volume=%u", TMEDIA_PRODUCER(self)->audio.volume);
+            }
+        }
+    }
+
+    return 0;
+}
+
+/** Deinitialize a producer.
+* Only the base producer is torn down; the audio producer holds no
+* additional resources of its own.
+* @retval 0 on success, negative error code from the base deinit otherwise
+*/
+int tdav_producer_audio_deinit(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if((ret = tmedia_producer_deinit(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ // ret is necessarily 0 here
+ return ret;
+} \ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_session_audio.c b/tinyDAV/src/audio/tdav_session_audio.c
new file mode 100644
index 0000000..f12e801
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_session_audio.c
@@ -0,0 +1,991 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_audio.c
+* @brief Audio Session plugin.
+*
+* @author Mamadou Diop <diopmamadou(at)doubango.org>
+* @contributors: See $(DOUBANGO_HOME)\contributors.txt
+*/
+#include "tinydav/audio/tdav_session_audio.h"
+
+//#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_resampler.h"
+#include "tinymedia/tmedia_denoise.h"
+#include "tinymedia/tmedia_jitterbuffer.h"
+#include "tinymedia/tmedia_consumer.h"
+#include "tinymedia/tmedia_producer.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tinyrtp/trtp_manager.h"
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_timer.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY 5
+
+static int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id);
+static struct tdav_session_audio_dtmfe_s* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E);
+static void _tdav_session_audio_apply_gain(void* buffer, int len, int bps, int gain);
+static tmedia_resampler_t* _tdav_session_audio_resampler_create(int32_t bytes_per_sample, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, void** resampler_buffer, tsk_size_t *resampler_buffer_size);
+
/* DTMF event object: one scheduled RTP "telephone-event" packet (RFC 4733). */
typedef struct tdav_session_audio_dtmfe_s
{
    TSK_DECLARE_OBJECT;

    tsk_timer_id_t timer_id;    // global timer-manager id used to schedule the send
    trtp_rtp_packet_t* packet;  // pre-built RTP packet carrying the event payload

    const tdav_session_audio_t* session; // owning session (borrowed pointer, not ref()'ed)
}
tdav_session_audio_dtmfe_t;
extern const tsk_object_def_t *tdav_session_audio_dtmfe_def_t;
+
// RTP/RTCP callback (From the network to the consumer)
/* Decodes an incoming RTP packet and delivers the resulting PCM to the consumer.
* Pipeline: find/ref the decoder codec (by RTP payload type) -> open() it if
* needed -> decode() -> optional resampling -> optional gain -> consume().
* @param callback_data The audio session (a tdav_session_audio_t*, which is also a tdav_session_av_t*).
* @param packet The parsed incoming RTP packet.
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_rtp_cb(const void* callback_data, const struct trtp_rtp_packet_s* packet)
{
    tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
    tmedia_codec_t* codec = tsk_null;
    tdav_session_av_t* base = (tdav_session_av_t*)callback_data; /* same object viewed as the audio/video base class */
    int ret = -1;

    if (!audio || !packet || !packet->header) {
        TSK_DEBUG_ERROR("Invalid parameter");
        goto bail;
    }

    if (audio->is_started && base->consumer && base->consumer->is_started) {
        tsk_size_t out_size = 0;

        // Find the codec to use to decode the RTP payload
        // (re-done whenever the payload type changes, e.g. after a reINVITE)
        if (!audio->decoder.codec || audio->decoder.payload_type != packet->header->payload_type) {
            tsk_istr_t format;
            TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
            tsk_itoa(packet->header->payload_type, &format);
            if (!(audio->decoder.codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->neg_codecs, format)) || !audio->decoder.codec->plugin || !audio->decoder.codec->plugin->decode){
                TSK_DEBUG_ERROR("%s is not a valid payload for this session", format);
                ret = -2;
                goto bail;
            }
            audio->decoder.payload_type = packet->header->payload_type;
        }
        // ref() the codec to be able to use it short time after stop(SAFE_FREE(codec))
        if (!(codec = tsk_object_ref(TSK_OBJECT(audio->decoder.codec)))) {
            TSK_DEBUG_ERROR("Failed to get decoder codec");
            goto bail;
        }

        // Open codec if not already done (under lock: open() may race with stop())
        if (!TMEDIA_CODEC(codec)->opened) {
            tsk_safeobj_lock(base);
            if ((ret = tmedia_codec_open(codec))) {
                tsk_safeobj_unlock(base);
                TSK_DEBUG_ERROR("Failed to open [%s] codec", codec->plugin->desc);
                TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
                goto bail;
            }
            tsk_safeobj_unlock(base);
        }
        // Decode data
        out_size = codec->plugin->decode(codec, packet->payload.data, packet->payload.size, &audio->decoder.buffer, &audio->decoder.buffer_size, packet->header);
        if (out_size && audio->is_started) { // check "is_started" again ...to be sure stop() not called by another thread
            void* buffer = audio->decoder.buffer;
            tsk_size_t size = out_size;

            // resample if needed (consumer playback rate/channels differ from the codec's decoding side)
            if ((base->consumer->audio.out.rate && base->consumer->audio.out.rate != codec->in.rate) || (base->consumer->audio.out.channels && base->consumer->audio.out.channels != TMEDIA_CODEC_AUDIO(codec)->in.channels)) {
                tsk_size_t resampler_result_size = 0;
                int bytesPerSample = (base->consumer->audio.bits_per_sample >> 3);

                // lazily create the resampler on the first frame that needs it
                if (!audio->decoder.resampler.instance) {
                    TSK_DEBUG_INFO("Create audio resampler(%s) for consumer: rate=%d->%d, channels=%d->%d, bytesPerSample=%d",
                        codec->plugin->desc,
                        codec->in.rate, base->consumer->audio.out.rate,
                        TMEDIA_CODEC_AUDIO(codec)->in.channels, base->consumer->audio.out.channels,
                        bytesPerSample);
                    audio->decoder.resampler.instance = _tdav_session_audio_resampler_create(
                        bytesPerSample,
                        codec->in.rate, base->consumer->audio.out.rate,
                        base->consumer->audio.ptime,
                        TMEDIA_CODEC_AUDIO(codec)->in.channels, base->consumer->audio.out.channels,
                        TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY,
                        &audio->decoder.resampler.buffer, &audio->decoder.resampler.buffer_size
                        );
                }
                if (!audio->decoder.resampler.instance) {
                    TSK_DEBUG_ERROR("No resampler to handle data");
                    ret = -5;
                    goto bail;
                }
                if (!(resampler_result_size = tmedia_resampler_process(audio->decoder.resampler.instance, buffer, size / bytesPerSample, audio->decoder.resampler.buffer, audio->decoder.resampler.buffer_size / bytesPerSample))){
                    TSK_DEBUG_ERROR("Failed to process audio resampler input buffer");
                    ret = -6;
                    goto bail;
                }

                buffer = audio->decoder.resampler.buffer;
                // NOTE(review): the whole temp-buffer size is consumed, not
                // resampler_result_size * bytesPerSample — assumes the resampler
                // always fills the buffer completely; confirm.
                size = audio->decoder.resampler.buffer_size;
            }

            // adjust the gain
            if (base->consumer->audio.gain) {
                _tdav_session_audio_apply_gain(buffer, (int)size, base->consumer->audio.bits_per_sample, base->consumer->audio.gain);
            }
            // consume the frame
            tmedia_consumer_consume(base->consumer, buffer, size, packet->header);
        }
    }
    else {
        TSK_DEBUG_INFO("Session audio not ready");
    }

    // everything is ok
    ret = 0;

bail:
    tsk_object_unref(TSK_OBJECT(codec));
    return ret;
}
+
// Producer callback (From the producer to the network). Will encode() data before sending
/* Encodes one captured PCM frame and sends it over RTP.
* Pipeline: pick/ref the best negotiated encoder codec -> open() it if needed ->
* skip the frame while DTMF events are being sent -> optional resampling ->
* denoise -> optional gain -> encode() -> trtp_manager_send_rtp().
* @param callback_data The audio session (a tdav_session_audio_t*, which is also a tdav_session_av_t*).
* @param buffer The captured PCM frame.
* @param size The frame size in bytes.
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_producer_enc_cb(const void* callback_data, const void* buffer, tsk_size_t size)
{
    int ret = 0;

    tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
    tdav_session_av_t* base = (tdav_session_av_t*)callback_data;

    if (!audio) {
        TSK_DEBUG_ERROR("Null session");
        return 0;
    }

    // do nothing if session is held
    // when the session is held the end user will get feedback he also has possibilities to put the consumer and producer on pause
    if (TMEDIA_SESSION(audio)->lo_held) {
        return 0;
    }

    // get best negotiated codec if not already done
    // the encoder codec could be null when session is renegotiated without re-starting (e.g. hold/resume)
    if (!audio->encoder.codec) {
        const tmedia_codec_t* codec;
        tsk_safeobj_lock(base);
        if (!(codec = tdav_session_av_get_best_neg_codec(base))) {
            TSK_DEBUG_ERROR("No codec matched");
            tsk_safeobj_unlock(base);
            return -2;
        }
        audio->encoder.codec = tsk_object_ref(TSK_OBJECT(codec));
        tsk_safeobj_unlock(base);
    }

    if (audio->is_started && base->rtp_manager && base->rtp_manager->is_started) {
        /* encode */
        tsk_size_t out_size = 0;

        // Open codec if not already done
        if (!audio->encoder.codec->opened) {
            tsk_safeobj_lock(base);
            if ((ret = tmedia_codec_open(audio->encoder.codec))) {
                tsk_safeobj_unlock(base);
                TSK_DEBUG_ERROR("Failed to open [%s] codec", audio->encoder.codec->plugin->desc);
                return -4;
            }
            tsk_safeobj_unlock(base);
        }
        // check if we're sending DTMF or not
        if (audio->is_sending_dtmf_events) {
            if (base->rtp_manager) {
                // keep the RTP timestamp advancing so the stream stays continuous once DTMF ends
                base->rtp_manager->rtp.timestamp += TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec)/*duration*/;
            }
            TSK_DEBUG_INFO("Skiping audio frame as we're sending DTMF...");
            return 0;
        }

        // resample if needed (producer capture rate/channels differ from the codec's encoding side)
        if (base->producer->audio.rate != audio->encoder.codec->out.rate || base->producer->audio.channels != TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels){
            tsk_size_t resampler_result_size = 0;
            int bytesPerSample = (base->producer->audio.bits_per_sample >> 3);

            // lazily create the resampler on the first frame that needs it
            if (!audio->encoder.resampler.instance){
                TSK_DEBUG_INFO("Create audio resampler(%s) for producer: rate=%d->%d, channels=%d->%d, bytesPerSample=%d",
                    audio->encoder.codec->plugin->desc,
                    base->producer->audio.rate, audio->encoder.codec->out.rate,
                    base->producer->audio.channels, TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels,
                    bytesPerSample);
                audio->encoder.resampler.instance = _tdav_session_audio_resampler_create(
                    bytesPerSample,
                    base->producer->audio.rate, audio->encoder.codec->out.rate,
                    base->producer->audio.ptime,
                    base->producer->audio.channels, TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels,
                    TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY,
                    &audio->encoder.resampler.buffer, &audio->encoder.resampler.buffer_size
                    );
            }
            if (!audio->encoder.resampler.instance){
                TSK_DEBUG_ERROR("No resampler to handle data");
                ret = -1;
                goto done;
            }
            if (!(resampler_result_size = tmedia_resampler_process(audio->encoder.resampler.instance, buffer, size / bytesPerSample, audio->encoder.resampler.buffer, audio->encoder.resampler.buffer_size / bytesPerSample))){
                TSK_DEBUG_ERROR("Failed to process audio resampler input buffer");
                ret = -1;
                goto done;
            }

            buffer = audio->encoder.resampler.buffer;
            // NOTE(review): the whole temp-buffer size is used, not
            // resampler_result_size * bytesPerSample — assumes the resampler
            // always fills the buffer completely; confirm.
            size = audio->encoder.resampler.buffer_size;
        }

        // Denoise (VAD, AGC, Noise suppression, ...)
        // Must be done after resampling
        if (audio->denoise){
            tsk_bool_t silence_or_noise = tsk_false; // NOTE(review): computed but not used afterwards
            if (audio->denoise->echo_supp_enabled){
                ret = tmedia_denoise_process_record(TMEDIA_DENOISE(audio->denoise), (void*)buffer, (uint32_t)size, &silence_or_noise);
            }
        }
        // adjust the gain
        // Must be done after resampling
        if (base->producer->audio.gain){
            _tdav_session_audio_apply_gain((void*)buffer, (int)size, base->producer->audio.bits_per_sample, base->producer->audio.gain);
        }

        // Encode data
        if ((audio->encoder.codec = tsk_object_ref(audio->encoder.codec))){ /* Thread safeness (SIP reINVITE or UPDATE could update the encoder) */
            out_size = audio->encoder.codec->plugin->encode(audio->encoder.codec, buffer, size, &audio->encoder.buffer, &audio->encoder.buffer_size);
            if (out_size){
                trtp_manager_send_rtp(base->rtp_manager, audio->encoder.buffer, out_size, TMEDIA_CODEC_FRAME_DURATION_AUDIO_ENCODING(audio->encoder.codec), tsk_false/*Marker*/, tsk_true/*lastPacket*/);
            }
            tsk_object_unref(audio->encoder.codec);
        }
        else{
            TSK_DEBUG_WARN("No encoder");
        }
    }

done:
    return ret;
}
+
+
+/* ============ Plugin interface ================= */
+
/* Sets a session-level parameter.
* Parameters are first offered to the audio/video base class; consumer/producer
* targeted parameters are not expected here and are only logged as errors.
* Supported int32 keys: "echo-supp" (toggles echo suppression on the denoiser)
* and "echo-tail" (forwarded to the denoiser).
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_set(tmedia_session_t* self, const tmedia_param_t* param)
{
    int ret = 0;
    tdav_session_audio_t* audio;

    if (!self){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    // let the base class try first; it returns tsk_true when it handled the parameter
    if (tdav_session_av_set(TDAV_SESSION_AV(self), param) == tsk_true){
        return 0;
    }

    audio = (tdav_session_audio_t*)self;

    if (param->plugin_type == tmedia_ppt_consumer){
        TSK_DEBUG_ERROR("Not expected consumer_set(%s)", param->key);
    }
    else if (param->plugin_type == tmedia_ppt_producer){
        TSK_DEBUG_ERROR("Not expected producer_set(%s)", param->key);
    }
    else{
        if (param->value_type == tmedia_pvt_int32){
            if (tsk_striequals(param->key, "echo-supp")){
                if (audio->denoise){
                    audio->denoise->echo_supp_enabled = (TSK_TO_INT32((uint8_t*)param->value) != 0);
                }
            }
            else if (tsk_striequals(param->key, "echo-tail")){
                if (audio->denoise){
                    return tmedia_denoise_set(audio->denoise, param);
                }
            }
        }
    }

    return ret;
}
+
/* Gets a session-level parameter.
* The request is first offered to the audio/video base class; otherwise the
* supported object keys are producer "codec" and session "codec-encoder", both
* returning a new reference to the encoder codec (caller must release it).
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_get(tmedia_session_t* self, tmedia_param_t* param)
{
    if (!self || !param){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    // try with the base class to see if this option is supported or not
    if (tdav_session_av_get(TDAV_SESSION_AV(self), param) == tsk_true){
        return 0;
    }
    else {
        // the codec information is held by the session even if the user is authorized to request it for the consumer/producer
        if (param->value_type == tmedia_pvt_pobject){
            if (param->plugin_type == tmedia_ppt_consumer){
                TSK_DEBUG_ERROR("Not implemented");
                return -4;
            }
            else if (param->plugin_type == tmedia_ppt_producer){
                if (tsk_striequals("codec", param->key)) {
                    const tmedia_codec_t* codec;
                    // fall back to the best negotiated codec when no encoder has been picked yet
                    if (!(codec = TDAV_SESSION_AUDIO(self)->encoder.codec)){
                        codec = tdav_session_av_get_best_neg_codec((const tdav_session_av_t*)self); // up to the caller to release the object
                    }
                    *((tsk_object_t**)param->value) = tsk_object_ref(TSK_OBJECT(codec));
                    return 0;
                }
            }
            else if (param->plugin_type == tmedia_ppt_session) {
                if (tsk_striequals(param->key, "codec-encoder")) {
                    *((tsk_object_t**)param->value) = tsk_object_ref(TDAV_SESSION_AUDIO(self)->encoder.codec); // up to the caller to release the object
                    return 0;
                }
            }
        }
    }

    TSK_DEBUG_WARN("This session doesn't support get(%s)", param->key);
    return -2;
}
+
+static int tdav_session_audio_prepare(tmedia_session_t* self)
+{
+ tdav_session_av_t* base = (tdav_session_av_t*)(self);
+ int ret;
+
+ if ((ret = tdav_session_av_prepare(base))){
+ TSK_DEBUG_ERROR("tdav_session_av_prepare(audio) failed");
+ return ret;
+ }
+
+ if (base->rtp_manager){
+ ret = trtp_manager_set_rtp_callback(base->rtp_manager, tdav_session_audio_rtp_cb, base);
+ }
+
+ return ret;
+}
+
/* Starts the audio session: picks the best negotiated codec as encoder,
* start()s the audio/video base (RTP manager, consumer, producer), then
* (re)opens the denoiser with the record side matching the encoding codec and
* the playback side matching the consumer output (falling back to the codec's
* decoding parameters when the consumer gives none).
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_start(tmedia_session_t* self)
{
    int ret;
    tdav_session_audio_t* audio;
    const tmedia_codec_t* codec;
    tdav_session_av_t* base;

    if (!self){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    audio = (tdav_session_audio_t*)self;
    base = (tdav_session_av_t*)self;

    if (audio->is_started) {
        TSK_DEBUG_INFO("Audio session already started");
        return 0;
    }

    if (!(codec = tdav_session_av_get_best_neg_codec(base))){
        TSK_DEBUG_ERROR("No codec matched");
        return -2;
    }

    // replace any previous encoder with the newly negotiated codec
    TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
    audio->encoder.codec = tsk_object_ref((tsk_object_t*)codec);

    if ((ret = tdav_session_av_start(base, codec))){
        TSK_DEBUG_ERROR("tdav_session_av_start(audio) failed");
        return ret;
    }

    if (base->rtp_manager){
        /* Denoise (AEC, Noise Suppression, AGC)
         * tmedia_denoise_process_record() is called after resampling and before encoding which means sampling rate is equal to codec's rate
         * tmedia_denoise_echo_playback() is called before playback which means sampling rate is equal to consumer's rate
         */
        if (audio->denoise){
            uint32_t record_frame_size_samples = TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec);
            uint32_t record_sampling_rate = TMEDIA_CODEC_RATE_ENCODING(audio->encoder.codec);
            uint32_t record_channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(audio->encoder.codec);

            // playback side: prefer the consumer's actual output parameters, fall back to the codec's decoding side
            uint32_t playback_frame_size_samples = (base->consumer && base->consumer->audio.ptime && base->consumer->audio.out.rate && base->consumer->audio.out.channels)
                ? ((base->consumer->audio.ptime * base->consumer->audio.out.rate) / 1000) * base->consumer->audio.out.channels
                : TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_DECODING(audio->encoder.codec);
            uint32_t playback_sampling_rate = (base->consumer && base->consumer->audio.out.rate)
                ? base->consumer->audio.out.rate
                : TMEDIA_CODEC_RATE_DECODING(audio->encoder.codec);
            uint32_t playback_channels = (base->consumer && base->consumer->audio.out.channels)
                ? base->consumer->audio.out.channels
                : TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(audio->encoder.codec);

            TSK_DEBUG_INFO("Audio denoiser to be opened(record_frame_size_samples=%u, record_sampling_rate=%u, record_channels=%u, playback_frame_size_samples=%u, playback_sampling_rate=%u, playback_channels=%u)",
                record_frame_size_samples, record_sampling_rate, record_channels, playback_frame_size_samples, playback_sampling_rate, playback_channels);

            // close()
            tmedia_denoise_close(audio->denoise);
            // open() with new values
            tmedia_denoise_open(audio->denoise,
                record_frame_size_samples, record_sampling_rate, TSK_CLAMP(1, record_channels, 2),
                playback_frame_size_samples, playback_sampling_rate, TSK_CLAMP(1, playback_channels, 2));
        }
    }

    audio->is_started = (ret == 0);

    return ret;
}
+
/* Stops the audio session, releases the encoder/decoder codecs and closes the
* jitter buffer and denoiser so they get reopened (with fresh parameters) on a
* reINVITE/UPDATE that may negotiate a codec with a different rate.
* NOTE(review): "ret" from tdav_session_av_stop() is overwritten by the
* close() calls below, which can mask a stop() failure — confirm intended.
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_stop(tmedia_session_t* self)
{
    tdav_session_audio_t* audio = TDAV_SESSION_AUDIO(self);
    tdav_session_av_t* base = TDAV_SESSION_AV(self);
    int ret = tdav_session_av_stop(base);
    audio->is_started = tsk_false;
    TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
    TSK_OBJECT_SAFE_FREE(audio->decoder.codec);

    // close the jitter buffer and denoiser to be sure it will be reopened and reinitialized if reINVITE or UPDATE
    // this is a "must" when the initial and updated sessions use codecs with different rate
    if (audio->jitterbuffer && audio->jitterbuffer->opened) {
        ret = tmedia_jitterbuffer_close(audio->jitterbuffer);
    }
    if (audio->denoise && audio->denoise->opened) {
        ret = tmedia_denoise_close(audio->denoise);
    }
    return ret;
}
+
/* Queues the RFC 4733 RTP packets needed to send one DTMF event.
* Four packets carry the event (the first with the Marker bit and growing
* durations), followed by two end-of-event retransmissions (E bit set). Each
* packet is scheduled on the global timer manager at "ptime" intervals; while
* any event is pending, is_sending_dtmf_events suppresses normal audio frames.
* @param self The audio session.
* @param event The DTMF event code (0-15, per RFC 4733).
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_send_dtmf(tmedia_session_t* self, uint8_t event)
{
    tdav_session_audio_t* audio;
    tdav_session_av_t* base;
    tmedia_codec_t* codec;
    int ret, rate = 8000, ptime = 20; // defaults used when no DTMF codec was negotiated
    uint16_t duration;
    tdav_session_audio_dtmfe_t *dtmfe, *copy;
    int format = 101; // default "telephone-event" payload type

    if (!self){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    audio = (tdav_session_audio_t*)self;
    base = (tdav_session_av_t*)self;

    // Find the DTMF codec to use to use the RTP payload
    if ((codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->codecs, TMEDIA_CODEC_FORMAT_DTMF))){
        rate = (int)codec->out.rate;
        format = atoi(codec->neg_format ? codec->neg_format : codec->format);
        TSK_OBJECT_SAFE_FREE(codec);
    }

    /* do we have an RTP manager? */
    if (!base->rtp_manager){
        TSK_DEBUG_ERROR("No RTP manager associated to this session");
        return -2;
    }

    /* Create Events list */
    if (!audio->dtmf_events){
        audio->dtmf_events = tsk_list_create();
    }

    /* Create global reference to the timer manager */
    if (!audio->timer.handle_mgr_global){
        if (!(audio->timer.handle_mgr_global = tsk_timer_mgr_global_ref())){
            TSK_DEBUG_ERROR("Failed to create Global Timer Manager");
            return -3;
        }
    }

    /* Start the timer manager */
    if (!audio->timer.started){
        if ((ret = tsk_timer_manager_start(audio->timer.handle_mgr_global))){
            TSK_DEBUG_ERROR("Failed to start Global Timer Manager");
            return ret;
        }
        audio->timer.started = tsk_true;
    }


    /* RFC 4733 - 5. Examples

    +-------+-----------+------+--------+------+--------+--------+------+
    | Time  | Event     | M    | Time-  | Seq  | Event  | Dura-  | E    |
    | (ms)  |           | bit  | stamp  | No   | Code   | tion   | bit  |
    +-------+-----------+------+--------+------+--------+--------+------+
    | 0     | "9"       |      |        |      |        |        |      |
    |       | starts    |      |        |      |        |        |      |
    | 50    | RTP       | "1"  | 0      | 1    | 9      | 400    | "0"  |
    |       | packet 1  |      |        |      |        |        |      |
    |       | sent      |      |        |      |        |        |      |
    | 100   | RTP       | "0"  | 0      | 2    | 9      | 800    | "0"  |
    |       | packet 2  |      |        |      |        |        |      |
    |       | sent      |      |        |      |        |        |      |
    | 150   | RTP       | "0"  | 0      | 3    | 9      | 1200   | "0"  |
    |       | packet 3  |      |        |      |        |        |      |
    |       | sent      |      |        |      |        |        |      |
    | 200   | RTP       | "0"  | 0      | 4    | 9      | 1600   | "0"  |
    |       | packet 4  |      |        |      |        |        |      |
    |       | sent      |      |        |      |        |        |      |
    | 200   | "9" ends  |      |        |      |        |        |      |
    | 250   | RTP       | "0"  | 0      | 5    | 9      | 1600   | "1"  |
    |       | packet 4  |      |        |      |        |        |      |
    |       | first     |      |        |      |        |        |      |
    |       | retrans-  |      |        |      |        |        |      |
    |       | mission   |      |        |      |        |        |      |
    | 300   | RTP       | "0"  | 0      | 6    | 9      | 1600   | "1"  |
    |       | packet 4  |      |        |      |        |        |      |
    |       | second    |      |        |      |        |        |      |
    |       | retrans-  |      |        |      |        |        |      |
    |       | mission   |      |        |      |        |        |      |
    =====================================================================
    | 880   | First "1" |      |        |      |        |        |      |
    |       | starts    |      |        |      |        |        |      |
    | 930   | RTP       | "1"  | 7040   | 7    | 1      | 400    | "0"  |
    |       | packet 5  |      |        |      |        |        |      |
    |       | sent      |      |        |      |        |        |      |
    */

    // ref()(thread safeness)
    audio = tsk_object_ref(audio);

    // says we're sending DTMF digits to avoid mixing with audio (SRTP won't let this happen because of senquence numbers)
    // flag will be turned OFF when the list is empty
    audio->is_sending_dtmf_events = tsk_true;

    // one frame worth of samples at the encoding rate
    // NOTE(review): assumes encoder.codec is non-null (i.e. the session was started) — confirm
    duration = TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec);

    // lock() list
    tsk_list_lock(audio->dtmf_events);

    // four event packets with cumulative durations; only the first has the Marker bit
    copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 1, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_true, tsk_false);
    tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
    tsk_timer_mgr_global_schedule(ptime * 0, _tdav_session_audio_dtmfe_timercb, copy);
    copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 2, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
    tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
    tsk_timer_mgr_global_schedule(ptime * 1, _tdav_session_audio_dtmfe_timercb, copy);
    copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 3, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
    tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
    tsk_timer_mgr_global_schedule(ptime * 2, _tdav_session_audio_dtmfe_timercb, copy);
    copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
    tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
    tsk_timer_mgr_global_schedule(ptime * 3, _tdav_session_audio_dtmfe_timercb, copy);

    // two end-of-event retransmissions (E bit set)
    copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_true);
    tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
    tsk_timer_mgr_global_schedule(ptime * 4, _tdav_session_audio_dtmfe_timercb, copy);
    copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_true);
    tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
    tsk_timer_mgr_global_schedule(ptime * 5, _tdav_session_audio_dtmfe_timercb, copy);

    // unlock() list
    tsk_list_unlock(audio->dtmf_events);

    // increment timestamp
    base->rtp_manager->rtp.timestamp += duration;

    // unref()(thread safeness)
    audio = tsk_object_unref(audio);

    return 0;
}
+
+static int tdav_session_audio_pause(tmedia_session_t* self)
+{
+ return tdav_session_av_pause(TDAV_SESSION_AV(self));
+}
+
+static const tsdp_header_M_t* tdav_session_audio_get_lo(tmedia_session_t* self)
+{
+ tsk_bool_t updated = tsk_false;
+ const tsdp_header_M_t* ret;
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+
+ if (!(ret = tdav_session_av_get_lo(base, &updated))){
+ TSK_DEBUG_ERROR("tdav_session_av_get_lo(audio) failed");
+ return tsk_null;
+ }
+
+ if (updated){
+ tsk_safeobj_lock(base);
+ TSK_OBJECT_SAFE_FREE(TDAV_SESSION_AUDIO(self)->encoder.codec);
+ tsk_safeobj_unlock(base);
+ }
+
+ return ret;
+}
+
/* Applies the remote SDP "m=audio" line to the session.
* When the negotiation changed, the jitter buffer is reset (the new offer
* probably comes with new sequence numbers/timestamps) and the encoder codec
* is dropped so it gets re-picked on the next captured frame.
* @retval Zero on success, negative error code otherwise.
*/
static int tdav_session_audio_set_ro(tmedia_session_t* self, const tsdp_header_M_t* m)
{
    int ret;
    tsk_bool_t updated = tsk_false;
    tdav_session_av_t* base = TDAV_SESSION_AV(self);

    if ((ret = tdav_session_av_set_ro(base, m, &updated))){
        TSK_DEBUG_ERROR("tdav_session_av_set_ro(audio) failed");
        return ret;
    }

    if (updated) {
        tsk_safeobj_lock(base);
        // reset audio jitter buffer (new Offer probably comes with new seq_nums or timestamps)
        if (base->consumer) {
            ret = tdav_consumer_audio_reset(TDAV_CONSUMER_AUDIO(base->consumer));
        }
        // destroy encoder to force requesting new one
        TSK_OBJECT_SAFE_FREE(TDAV_SESSION_AUDIO(self)->encoder.codec);
        tsk_safeobj_unlock(base);
    }

    return ret;
}
+
/* Applies a software gain of "gain" bits to a PCM buffer, in place.
* Samples whose amplified value would overflow are left untouched.
* @param buffer PCM samples (signed 8- or 16-bit, native endianness).
* @param len Buffer length in bytes.
* @param bps Bits per sample: 8 or 16 (anything else is a no-op).
* @param gain Amplification in bits.
*/
static void _tdav_session_audio_apply_gain(void* buffer, int len, int bps, int gain)
{
    register int i;
    int max_val;

    /* Guard the shift below: gain outside [1, bps-2] is either a no-op
     * (gain==0 multiplies by 1; gain==bps-1 gives max_val==0 so no sample
     * qualifies) or would be undefined behavior (negative/oversized shift). */
    if (gain <= 0 || gain >= (bps - 1)) {
        return;
    }

    /* largest input magnitude that can be amplified without overflow */
    max_val = (1 << (bps - 1 - gain)) - 1;

    if (bps == 8) {
        int8_t *buff = buffer;
        for (i = 0; i < len; i++) {
            if (buff[i] > -max_val && buff[i] < max_val) {
                /* FIX: multiply instead of "<<" — left-shifting a negative
                 * value is undefined behavior in C (same result otherwise) */
                buff[i] = (int8_t)(buff[i] * (1 << gain));
            }
        }
    }
    else if (bps == 16) {
        int16_t *buff = buffer;
        for (i = 0; i < len / 2; i++) {
            if (buff[i] > -max_val && buff[i] < max_val) {
                buff[i] = (int16_t)(buff[i] * (1 << gain));
            }
        }
    }
}
+
+
/* Internal function used to create a new DTMF event (RFC 4733 "telephone-event").
* Builds the RTP packet up front; it is sent later by the timer callback.
* @param session The owning audio session (stored as a borrowed pointer).
* @param event The DTMF event code.
* @param duration Event duration in timestamp units.
* @param seq RTP sequence number to stamp on the packet.
* @param timestamp RTP timestamp (identical for all packets of one event).
* @param format RTP payload type for telephone-event.
* @param M Marker bit (set on the first packet of an event).
* @param E End-of-event bit (set on the retransmitted final packets).
* @retval The new event on success, tsk_null otherwise.
*/
static tdav_session_audio_dtmfe_t* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E)
{
    tdav_session_audio_dtmfe_t* dtmfe;
    const tdav_session_av_t* base = (const tdav_session_av_t*)session;
    static uint8_t volume = 10;       // fixed volume field (attenuation in dBm0)
    static uint32_t ssrc = 0x5234A8;  // fallback SSRC when no RTP manager is available

    uint8_t pay[4] = { 0 };

    /* RFC 4733 - 2.3. Payload Format
    0                   1                   2                   3
    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |     event     |E|R| volume    |          duration             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    */

    if (!(dtmfe = tsk_object_new(tdav_session_audio_dtmfe_def_t))){
        TSK_DEBUG_ERROR("Failed to create new DTMF event");
        return tsk_null;
    }
    dtmfe->session = session;

    if (!(dtmfe->packet = trtp_rtp_packet_create((session && base->rtp_manager) ? base->rtp_manager->rtp.ssrc.local : ssrc, seq, timestamp, format, M))){
        TSK_DEBUG_ERROR("Failed to create DTMF RTP packet");
        TSK_OBJECT_SAFE_FREE(dtmfe);
        return tsk_null;
    }

    // pack the 4-byte payload: event | E,R,volume | 16-bit duration (big endian)
    pay[0] = event;
    pay[1] |= ((E << 7) | (volume & 0x3F));
    pay[2] = (duration >> 8);
    pay[3] = (duration & 0xFF);

    /* set data */
    if ((dtmfe->packet->payload.data = tsk_calloc(sizeof(pay), sizeof(uint8_t)))){
        memcpy(dtmfe->packet->payload.data, pay, sizeof(pay));
        dtmfe->packet->payload.size = sizeof(pay);
    }

    return dtmfe;
}
+
/* Timer callback: sends one scheduled DTMF RTP packet, then removes the event
* from the session's queue. When the queue becomes empty, normal audio frames
* are allowed to flow again (is_sending_dtmf_events cleared).
* @param arg The tdav_session_audio_dtmfe_t* to send.
* @retval Zero on success, negative error code otherwise.
*/
static int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id)
{
    tdav_session_audio_dtmfe_t* dtmfe = (tdav_session_audio_dtmfe_t*)arg;
    tdav_session_audio_t *audio;

    if (!dtmfe || !dtmfe->session || !dtmfe->session->dtmf_events){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    /* Send the data */
    TSK_DEBUG_INFO("Sending DTMF event...");
    trtp_manager_send_rtp_packet(TDAV_SESSION_AV(dtmfe->session)->rtp_manager, dtmfe->packet, tsk_false);


    audio = tsk_object_ref(TSK_OBJECT(dtmfe->session));
    tsk_list_lock(audio->dtmf_events);
    /* Remove and delete the event from the queue */
    tsk_list_remove_item_by_data(audio->dtmf_events, dtmfe);
    /* Check if there are pending events */
    audio->is_sending_dtmf_events = !TSK_LIST_IS_EMPTY(audio->dtmf_events);
    tsk_list_unlock(audio->dtmf_events);
    tsk_object_unref(audio);

    return 0;
}
+
/* Creates and opens an audio resampler and (re)allocates the caller's
* temporary output buffer to hold one resampled frame.
* @param bytes_per_sample Bytes per PCM sample (e.g. 2 for 16-bit).
* @param in_freq,out_freq Input/output sampling rates in Hz.
* @param frame_duration Frame duration in milliseconds.
* @param in_channels,out_channels Channel counts (max 2 each).
* @param quality Resampler quality hint.
* @param resampler_buffer In/out: realloc()'ed to the new frame size.
* @param resampler_buffer_size Out: new buffer size in bytes (0 on failure).
* @retval The opened resampler, or tsk_null on failure (TSK_OBJECT_SAFE_FREE
* nulls "resampler" before the goto, so the error paths return tsk_null).
*/
static tmedia_resampler_t* _tdav_session_audio_resampler_create(int32_t bytes_per_sample, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, void** resampler_buffer, tsk_size_t *resampler_buffer_size)
{
    uint32_t resampler_buff_size;
    tmedia_resampler_t* resampler;
    int ret;

    if (out_channels > 2 || in_channels > 2) {
        TSK_DEBUG_ERROR("Invalid parameter: out_channels=%u, in_channels=%u", out_channels, in_channels);
        return tsk_null;
    }

    // output bytes per frame: samples-per-frame * bytes-per-sample, doubled for stereo
    resampler_buff_size = (((out_freq * frame_duration) / 1000) * bytes_per_sample) << (out_channels == 2 ? 1 : 0);

    if (!(resampler = tmedia_resampler_create())) {
        TSK_DEBUG_ERROR("Failed to create audio resampler");
        return tsk_null;
    }
    else {
        if ((ret = tmedia_resampler_open(resampler, in_freq, out_freq, frame_duration, in_channels, out_channels, quality, 16))) {
            TSK_DEBUG_ERROR("Failed to open audio resampler (%d, %d, %d, %d, %d,%d) with retcode=%d", in_freq, out_freq, frame_duration, in_channels, out_channels, quality, ret);
            TSK_OBJECT_SAFE_FREE(resampler);
            goto done;
        }
    }
    // create temp resampler buffer
    if ((*resampler_buffer = tsk_realloc(*resampler_buffer, resampler_buff_size))) {
        *resampler_buffer_size = resampler_buff_size;
    }
    else {
        *resampler_buffer_size = 0;
        TSK_DEBUG_ERROR("Failed to allocate resampler buffer with size = %d", resampler_buff_size);
        TSK_OBJECT_SAFE_FREE(resampler);
        goto done;
    }
done:
    return resampler;
}
+
+//=================================================================================================
+// Session Audio Plugin object definition
+//
/* constructor: init() the audio/video base, wire the producer encode callback,
* and attach a denoiser + jitter buffer to the consumer. */
static tsk_object_t* tdav_session_audio_ctor(tsk_object_t * self, va_list * app)
{
    tdav_session_audio_t *audio = self;
    if (audio){
        int ret;
        tdav_session_av_t *base = TDAV_SESSION_AV(self);

        /* init() base */
        if ((ret = tdav_session_av_init(base, tmedia_audio)) != 0){
            TSK_DEBUG_ERROR("tdav_session_av_init(audio) failed");
            return tsk_null;
        }

        /* init() self */
        if (base->producer){
            // captured frames flow through tdav_session_audio_producer_enc_cb()
            tmedia_producer_set_enc_callback(base->producer, tdav_session_audio_producer_enc_cb, audio);
        }
        if (base->consumer){
            // It's important to create the denoiser and jitter buffer here as dynamic plugins (from shared libs) don't have access to the registry
            if (!(audio->denoise = tmedia_denoise_create())){
                TSK_DEBUG_WARN("No Audio denoiser found");
            }
            else{
                // IMPORTANT: This means that the consumer must be child of "tdav_consumer_audio_t" object
                tdav_consumer_audio_set_denoise(TDAV_CONSUMER_AUDIO(base->consumer), audio->denoise);
            }

            if (!(audio->jitterbuffer = tmedia_jitterbuffer_create(tmedia_audio))){
                TSK_DEBUG_ERROR("Failed to create jitter buffer");
            }
            else{
                ret = tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(audio->jitterbuffer));
                tdav_consumer_audio_set_jitterbuffer(TDAV_CONSUMER_AUDIO(base->consumer), audio->jitterbuffer);
            }
        }
    }
    return self;
}
/* destructor: stop() the session first, then release DTMF events/timer,
* denoiser, jitter buffer, codecs and resamplers before deinit() of the base. */
static tsk_object_t* tdav_session_audio_dtor(tsk_object_t * self)
{
    tdav_session_audio_t *audio = self;
    TSK_DEBUG_INFO("*** tdav_session_audio_t destroyed ***");
    if (audio){
        tdav_session_audio_stop((tmedia_session_t*)audio);
        // Do it in this order (deinit self first)

        /* Timer manager */
        if (audio->timer.started){
            if (audio->dtmf_events){
                /* Cancel all events */
                tsk_list_item_t* item;
                tsk_list_foreach(item, audio->dtmf_events){
                    tsk_timer_mgr_global_cancel(((tdav_session_audio_dtmfe_t*)item->data)->timer_id);
                }
            }
        }

        tsk_timer_mgr_global_unref(&audio->timer.handle_mgr_global);

        /* CleanUp the DTMF events */
        TSK_OBJECT_SAFE_FREE(audio->dtmf_events);

        TSK_OBJECT_SAFE_FREE(audio->denoise);
        TSK_OBJECT_SAFE_FREE(audio->jitterbuffer);

        TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
        TSK_FREE(audio->encoder.buffer);
        TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
        TSK_FREE(audio->decoder.buffer);

        // free resamplers
        TSK_FREE(audio->encoder.resampler.buffer);
        TSK_OBJECT_SAFE_FREE(audio->encoder.resampler.instance);
        TSK_FREE(audio->decoder.resampler.buffer);
        TSK_OBJECT_SAFE_FREE(audio->decoder.resampler.instance);

        /* deinit base */
        tdav_session_av_deinit(TDAV_SESSION_AV(self));

        TSK_DEBUG_INFO("*** Audio session destroyed ***");
    }

    return self;
}
/* object definition (tsk object vtable: size, ctor, dtor, cmp) */
static const tsk_object_def_t tdav_session_audio_def_s =
{
    sizeof(tdav_session_audio_t),
    tdav_session_audio_ctor,
    tdav_session_audio_dtor,
    tmedia_session_cmp,
};
/* plugin definition: registers this implementation for "audio" media sessions */
static const tmedia_session_plugin_def_t tdav_session_audio_plugin_def_s =
{
    &tdav_session_audio_def_s,

    tmedia_audio,
    "audio",

    tdav_session_audio_set,
    tdav_session_audio_get,
    tdav_session_audio_prepare,
    tdav_session_audio_start,
    tdav_session_audio_pause,
    tdav_session_audio_stop,

    /* Audio part */
    {
        tdav_session_audio_send_dtmf
    },

    tdav_session_audio_get_lo,
    tdav_session_audio_set_ro
};
const tmedia_session_plugin_def_t *tdav_session_audio_plugin_def_t = &tdav_session_audio_plugin_def_s;
/* same implementation, re-registered for BFCP audio sessions */
static const tmedia_session_plugin_def_t tdav_session_bfcpaudio_plugin_def_s =
{
    &tdav_session_audio_def_s,

    tmedia_bfcp_audio,
    "audio",

    tdav_session_audio_set,
    tdav_session_audio_get,
    tdav_session_audio_prepare,
    tdav_session_audio_start,
    tdav_session_audio_pause,
    tdav_session_audio_stop,

    /* Audio part */
    {
        tdav_session_audio_send_dtmf
    },

    tdav_session_audio_get_lo,
    tdav_session_audio_set_ro
};
const tmedia_session_plugin_def_t *tdav_session_bfcpaudio_plugin_def_t = &tdav_session_bfcpaudio_plugin_def_s;
+
+
+
+//=================================================================================================
+// DTMF event object definition
+//
+static tsk_object_t* tdav_session_audio_dtmfe_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if (event){
+ event->timer_id = TSK_INVALID_TIMER_ID;
+ }
+ return self;
+}
+
+static tsk_object_t* tdav_session_audio_dtmfe_dtor(tsk_object_t * self)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if (event){
+ TSK_OBJECT_SAFE_FREE(event->packet);
+ }
+
+ return self;
+}
+
+static int tdav_session_audio_dtmfe_cmp(const tsk_object_t *_e1, const tsk_object_t *_e2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(_e1, _e2, &ret);
+ return ret;
+}
+
/* DTMF event object definition (tsk object vtable) */
static const tsk_object_def_t tdav_session_audio_dtmfe_def_s =
{
    sizeof(tdav_session_audio_dtmfe_t),
    tdav_session_audio_dtmfe_ctor,
    tdav_session_audio_dtmfe_dtor,
    tdav_session_audio_dtmfe_cmp,
};
const tsk_object_def_t *tdav_session_audio_dtmfe_def_t = &tdav_session_audio_dtmfe_def_s;
diff --git a/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c b/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c
new file mode 100644
index 0000000..cccc235
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c
@@ -0,0 +1,281 @@
+/*
+* Copyright (C) 2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speakup_jitterbuffer.c
+ * @brief Speakup Audio jitterbuffer Plugin
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+
+ */
+#include "tinydav/audio/tdav_speakup_jitterbuffer.h"
+
+#if !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
+
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <string.h>
+
+#if TSK_UNDER_WINDOWS
+# include <Winsock2.h> // timeval
+#elif defined(__SYMBIAN32__)
+# include <_timeval.h>
+#else
+# include <sys/time.h>
+#endif
+
+#define TDAV_SPEAKUP_10MS 10
+#define TDAV_SPEAKUP_10MS_FRAME_SIZE(self) (((self)->rate * TDAV_SPEAKUP_10MS)/1000)
+#define TDAV_SPEAKUP_PTIME_FRAME_SIZE(self) (((self)->rate * (self)->framesize)/1000)
+
+/* set(): runtime parameter updates are not supported by this plugin. */
+static int tdav_speakup_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
+{
+ TSK_DEBUG_ERROR("Not implemented");
+ return -2;
+}
+
+/* open(): lazily creates the underlying SpeakUp "jb" instance (the codec type
+ * starts as JB_CODEC_OTHER and is refined on the first put()) and caches the
+ * stream parameters. _10ms_size_bytes is 160 bytes per 10ms at 8kHz (16-bit
+ * samples), scaled linearly with the sampling rate.
+ * NOTE(review): "channels" is stored but not used in the size computation --
+ * looks like mono is assumed; confirm behavior for stereo streams. */
+static int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate, uint32_t channels)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(!jitterbuffer->jbuffer){
+ if(!(jitterbuffer->jbuffer = jb_new())){
+ TSK_DEBUG_ERROR("Failed to create new buffer");
+ return -1;
+ }
+ jitterbuffer->jcodec = JB_CODEC_OTHER;
+ }
+ jitterbuffer->ref_timestamp = 0;
+ jitterbuffer->frame_duration = frame_duration;
+ jitterbuffer->rate = rate;
+ jitterbuffer->channels = channels;
+ jitterbuffer->_10ms_size_bytes = 160 * (rate/8000);
+
+ return 0;
+}
+
+/* tick(): no-op for this implementation. */
+static int tdav_speakup_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
+{
+ return 0;
+}
+
+/* put(): feeds one incoming RTP payload into the jitter buffer.
+ * On the very first packet a wall-clock reference timestamp is derived from
+ * the RTP timestamp so later put()/get() calls can express time as
+ * milliseconds since that reference; the codec type is also guessed from the
+ * static RTP payload type. The payload is then split into 10ms chunks, each
+ * copied into its own heap buffer and handed to jb_put() (the jb takes
+ * ownership of the chunk).
+ * NOTE(review): the inner "now"/"ts" declarations in the sync block shadow
+ * the outer ones declared below -- intentional-looking but fragile.
+ * NOTE(review): trailing bytes smaller than one 10ms chunk are silently
+ * dropped by the integer division in the for-loop bound. */
+static int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+ int i;
+ long now, ts;
+ void* _10ms_buf;
+ uint8_t* pdata;
+
+ if(!self || !data || !data_size || !jitterbuffer->jbuffer || !rtp_hdr){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* synchronize the reference timestamp */
+ if(!jitterbuffer->ref_timestamp){
+ uint64_t now = tsk_time_now();
+ struct timeval tv;
+ long ts = (rtp_hdr->timestamp/(jitterbuffer->rate/1000));
+ //=> Do not use (see clock_gettime() on linux): tsk_gettimeofday(&tv, tsk_null);
+ tv.tv_sec = (long)(now)/1000;
+ tv.tv_usec = (long)(now - (tv.tv_sec*1000))*1000;
+
+ /* rewind the reference by the RTP timestamp, then round down to a 10ms boundary */
+ tv.tv_sec -= (ts / jitterbuffer->rate);
+ tv.tv_usec -= (ts % jitterbuffer->rate) * 125;
+ if((tv.tv_usec -= (tv.tv_usec % (TDAV_SPEAKUP_10MS * 10000))) <0){
+ tv.tv_usec += 1000000;
+ tv.tv_sec -= 1;
+ }
+ jitterbuffer->ref_timestamp = tsk_time_get_ms(&tv);
+
+ /* map static RTP payload types to the jb's codec hints */
+ switch(rtp_hdr->payload_type){
+ case 8: /*TMEDIA_CODEC_FORMAT_G711a*/
+ case 0: /* TMEDIA_CODEC_FORMAT_G711u */
+ jitterbuffer->jcodec = JB_CODEC_G711x;
+ break;
+ case 18: /* TMEDIA_CODEC_FORMAT_G729 */
+ jitterbuffer->jcodec = JB_CODEC_G729A;
+ break;
+ case 3: /* TMEDIA_CODEC_FORMAT_GSM */
+ jitterbuffer->jcodec = JB_CODEC_GSM_EFR;
+ break;
+
+ default:
+ jitterbuffer->jcodec = JB_CODEC_OTHER;
+ break;
+ }
+ }
+
+ // split as several 10ms frames
+ now = (long) (tsk_time_now()-jitterbuffer->ref_timestamp);
+ ts = (long)(rtp_hdr->timestamp/(jitterbuffer->rate/1000));
+ pdata = (uint8_t*)data;
+ for(i=0; i<(int)(data_size/jitterbuffer->_10ms_size_bytes);i++){
+ if((_10ms_buf = tsk_calloc(jitterbuffer->_10ms_size_bytes, 1))){
+ memcpy(_10ms_buf, &pdata[i*jitterbuffer->_10ms_size_bytes], jitterbuffer->_10ms_size_bytes);
+ jb_put(jitterbuffer->jbuffer, _10ms_buf, JB_TYPE_VOICE, TDAV_SPEAKUP_10MS, ts, now, jitterbuffer->jcodec);
+ _10ms_buf = tsk_null;
+ }
+ ts += TDAV_SPEAKUP_10MS;
+ }
+
+ return 0;
+}
+
+/* get(): pulls out_size bytes (must be a whole number of 10ms chunks) from
+ * the jitter buffer, one 10ms chunk per jb_get() call. JB_INTERP resets the
+ * whole jb and fills the remainder of the output with silence; JB_OK copies
+ * the decoded chunk; any other result yields 10ms of silence. Always returns
+ * the full requested size (silence included). */
+static tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ int jret;
+
+ int i, _10ms_count;
+ long now;
+ short* _10ms_buf = tsk_null;
+ uint8_t* pout_data = (uint8_t*)out_data;
+
+ if(!out_data || (out_size % jitterbuffer->_10ms_size_bytes)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ _10ms_count = (out_size/jitterbuffer->_10ms_size_bytes);
+ now = (long) (tsk_time_now() - jitterbuffer->ref_timestamp);
+ for(i=0; i<_10ms_count; i++){
+
+ jret = jb_get(jitterbuffer->jbuffer, (void**)&_10ms_buf, now, TDAV_SPEAKUP_10MS);
+ switch(jret){
+ case JB_INTERP:
+ TSK_DEBUG_INFO("JB_INTERP");
+ jb_reset_all(jitterbuffer->jbuffer);
+ /* zero-fill everything from the current chunk to the end */
+ memset(&pout_data[i*jitterbuffer->_10ms_size_bytes], 0, (_10ms_count*jitterbuffer->_10ms_size_bytes)-(i*jitterbuffer->_10ms_size_bytes));
+ i = _10ms_count; // for exit
+ break;
+ case JB_OK:
+ case JB_EMPTY:
+ case JB_NOFRAME:
+ case JB_NOJB:
+ {
+ if(_10ms_buf && (jret == JB_OK)){
+ /* copy data */
+ memcpy(&pout_data[i*jitterbuffer->_10ms_size_bytes], _10ms_buf, jitterbuffer->_10ms_size_bytes);
+ }
+ else{
+ /* copy silence */
+ memset(&pout_data[i*jitterbuffer->_10ms_size_bytes], 0, jitterbuffer->_10ms_size_bytes);
+ }
+ }
+
+ /* fallthrough: default only breaks, so this is harmless */
+ default:
+ break;
+ }
+ TSK_FREE(_10ms_buf);
+ }
+
+ return (_10ms_count * jitterbuffer->_10ms_size_bytes);
+}
+
+/* reset(): flushes all queued frames; the jb instance itself is kept. */
+static int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(jitterbuffer->jbuffer){
+ jb_reset_all(jitterbuffer->jbuffer);
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+}
+
+/* close(): destroys the jb instance; safe to call when already closed. */
+static int tdav_speakup_jitterbuffer_close(tmedia_jitterbuffer_t* self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(jitterbuffer->jbuffer){
+ jb_destroy(jitterbuffer->jbuffer);
+ jitterbuffer->jbuffer = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speakup jitterbuffer Plugin definition
+//
+
+/* constructor: initializes the tmedia_jitterbuffer base; no plugin-specific
+ * state to set up here (the jb instance is created lazily in open()).
+ * NOTE(review): "speekup" typo in the log message below -- runtime string,
+ * left unchanged. */
+static tsk_object_t* tdav_speakup_jitterbuffer_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = self;
+ TSK_DEBUG_INFO("Create speekup jitter buffer");
+ if(jitterbuffer){
+ /* init base */
+ tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor: deinitializes the base and destroys the jb instance if close()
+ * was never called. */
+static tsk_object_t* tdav_speakup_jitterbuffer_dtor(tsk_object_t * self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = self;
+ if(jitterbuffer){
+ /* deinit base */
+ tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* deinit self */
+ if(jitterbuffer->jbuffer){
+ jb_destroy(jitterbuffer->jbuffer);
+ jitterbuffer->jbuffer = tsk_null;
+ }
+ }
+
+ return self;
+}
+/* object definition (size + ctor/dtor; no comparator) */
+static const tsk_object_def_t tdav_speakup_jitterbuffer_def_s =
+{
+ sizeof(tdav_speakup_jitterbuffer_t),
+ tdav_speakup_jitterbuffer_ctor,
+ tdav_speakup_jitterbuffer_dtor,
+ tsk_null,
+};
+/* plugin definition: wires the callbacks above into the tinyMEDIA
+ * jitterbuffer plugin interface */
+static const tmedia_jitterbuffer_plugin_def_t tdav_speakup_jitterbuffer_plugin_def_s =
+{
+ &tdav_speakup_jitterbuffer_def_s,
+ tmedia_audio,
+ "Audio/video JitterBuffer based on Speakup",
+
+ tdav_speakup_jitterbuffer_set,
+ tdav_speakup_jitterbuffer_open,
+ tdav_speakup_jitterbuffer_tick,
+ tdav_speakup_jitterbuffer_put,
+ tdav_speakup_jitterbuffer_get,
+ tdav_speakup_jitterbuffer_reset,
+ tdav_speakup_jitterbuffer_close,
+};
+const tmedia_jitterbuffer_plugin_def_t *tdav_speakup_jitterbuffer_plugin_def_t = &tdav_speakup_jitterbuffer_plugin_def_s;
+
+#endif /* !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB) */
diff --git a/tinyDAV/src/audio/tdav_speex_denoise.c b/tinyDAV/src/audio/tdav_speex_denoise.c
new file mode 100644
index 0000000..4f344dd
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_denoise.c
@@ -0,0 +1,312 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speex_denoise.c
+* @brief Speex Denoiser (Noise suppression, AGC, AEC) Plugin
+*/
+#include "tinydav/audio/tdav_speex_denoise.h"
+
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_DENOISE) || HAVE_SPEEX_DENOISE)
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include <string.h>
+
+#include <speex/speex_preprocess.h>
+#include <speex/speex_echo.h>
+
+/** Speex denoiser context: one preprocess state per direction (record and
+ * playback), an optional AEC state, and a scratch frame for echo-cancelled
+ * capture output. Frame sizes are kept both in samples and in bytes
+ * (bytes = samples * 2, i.e. 16-bit samples). */
+typedef struct tdav_speex_denoise_s
+{
+ TMEDIA_DECLARE_DENOISE;
+
+ SpeexPreprocessState *preprocess_state_record; // capture-direction preprocess (denoise/AGC/VAD/AEC post-filter)
+ SpeexPreprocessState *preprocess_state_playback; // playback-direction preprocess (denoise only)
+ SpeexEchoState *echo_state; // AEC state; tsk_null when echo supp disabled
+
+ spx_int16_t* echo_output_frame; // scratch buffer for speex_echo_capture() output
+ uint32_t record_frame_size_samples, record_frame_size_bytes;
+ uint32_t playback_frame_size_samples, playback_frame_size_bytes;
+}
+tdav_speex_denoise_t;
+
+/* set(): only recognizes the int32 "echo-tail" key, and deliberately ignores
+ * its value (Speex AEC is considered broken here; WebRTC AEC is preferred).
+ * Returns 0 for that key, -1 for anything else. */
+static int tdav_speex_denoise_set(tmedia_denoise_t* _self, const tmedia_param_t* param)
+{
+ tdav_speex_denoise_t *self = (tdav_speex_denoise_t *)_self;
+ if(!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "echo-tail")){
+ int32_t echo_tail = *((int32_t*)param->value);
+ TSK_DEBUG_INFO("speex_set_echo_tail(%d) ignore", echo_tail); // because Speex AEC just do not work (use WebRTC)
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* open(): one-time initialization of the AEC and both preprocess states.
+ * Idempotent in practice: each sub-init only runs if the corresponding state
+ * is still null, and an already-open denoiser returns 0 without changes.
+ * Returns 0 on success, -2 if a preprocess state could not be created. */
+static int tdav_speex_denoise_open(tmedia_denoise_t* self, uint32_t record_frame_size_samples, uint32_t record_sampling_rate, uint32_t record_channels, uint32_t playback_frame_size_samples, uint32_t playback_sampling_rate, uint32_t playback_channels)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ float f;
+ int i;
+
+ /* AEC: create the echo state sized by the configured tail, at the record rate */
+ if (!denoiser->echo_state && TMEDIA_DENOISE(denoiser)->echo_supp_enabled) {
+ TSK_DEBUG_INFO("Init Aec frame_size[%u] filter_length[%u] SampleRate[%u]",
+ (uint32_t)(record_frame_size_samples),TMEDIA_DENOISE(denoiser)->echo_tail*record_frame_size_samples, record_sampling_rate);
+ if((denoiser->echo_state = speex_echo_state_init(record_frame_size_samples, TMEDIA_DENOISE(denoiser)->echo_tail))){
+ speex_echo_ctl(denoiser->echo_state, SPEEX_ECHO_SET_SAMPLING_RATE, &record_sampling_rate);
+ }
+ }
+
+ if (!denoiser->preprocess_state_record && !denoiser->preprocess_state_playback) {
+ denoiser->record_frame_size_samples = record_frame_size_samples;
+ denoiser->record_frame_size_bytes = (record_frame_size_samples << 1);
+ denoiser->playback_frame_size_samples = playback_frame_size_samples;
+ denoiser->playback_frame_size_bytes = (playback_frame_size_samples << 1);
+
+ if((denoiser->preprocess_state_record = speex_preprocess_state_init(record_frame_size_samples, record_sampling_rate))
+ && (denoiser->preprocess_state_playback = speex_preprocess_state_init(playback_frame_size_samples, playback_sampling_rate))
+ ){
+
+ // Echo suppression
+ if(denoiser->echo_state){
+ int echo_supp , echo_supp_active = 0;
+
+ /* attach the AEC to the capture-side preprocessor (residual echo filter) */
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_STATE, denoiser->echo_state);
+
+ TSK_FREE(denoiser->echo_output_frame);
+ denoiser->echo_output_frame = tsk_calloc(denoiser->record_frame_size_samples, sizeof(spx_int16_t));
+
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ TSK_DEBUG_INFO("AEC echo_supp level [%d] echo_supp_active level[%d] ", echo_supp , echo_supp_active);
+ /* force aggressive residual-echo attenuation (dB values) */
+ echo_supp = -60 ;
+ echo_supp_active = -60 ;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ // TRACES
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ TSK_DEBUG_INFO("New aec echo_supp level [%d] echo_supp_active level[%d] ", echo_supp , echo_supp_active);
+ }
+
+ // Noise suppression (applied to both directions)
+ if(TMEDIA_DENOISE(denoiser)->noise_supp_enabled){
+ TSK_DEBUG_INFO("SpeexDSP: Noise supp enabled");
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ i = TMEDIA_DENOISE(denoiser)->noise_supp_level;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ }
+ else{
+ i = 0;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ }
+
+ // Automatic gain control (capture side only)
+ if(TMEDIA_DENOISE(denoiser)->agc_enabled){
+ float agc_level = TMEDIA_DENOISE(denoiser)->agc_level;
+ TSK_DEBUG_INFO("SpeexDSP: AGC enabled");
+
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC_LEVEL, &agc_level);
+ }
+ else{
+ i = 0, f = 8000.0f;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC_LEVEL, &f);
+ }
+
+ // Voice Activity detection (capture side only)
+ i = TMEDIA_DENOISE(denoiser)->vad_enabled ? 1 : 0;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_VAD, &i);
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to create Speex preprocessor state");
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+/* echo_playback(): feeds one played-back (far-end) frame to the AEC so it can
+ * model the echo path. The frame must be exactly one record-sized frame.
+ * No-op (success) when AEC is disabled. */
+static int tdav_speex_denoise_echo_playback(tmedia_denoise_t* self, const void* echo_frame, uint32_t echo_frame_size_bytes)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->record_frame_size_bytes != echo_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->record_frame_size_bytes, echo_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->echo_state){
+ speex_echo_playback(denoiser->echo_state, echo_frame);
+ }
+ return 0;
+}
+
+
+
+/* process_record(): in-place processing of one captured (near-end) frame.
+ * Runs AEC first (result copied back into audio_frame), then the preprocess
+ * chain (denoise/AGC/VAD). Sets *silence_or_noise when VAD is enabled and the
+ * frame contains no speech. */
+static int tdav_speex_denoise_process_record(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes, tsk_bool_t* silence_or_noise)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ int vad;
+
+ if(denoiser->record_frame_size_bytes != audio_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->record_frame_size_bytes, audio_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->preprocess_state_record){
+ if(denoiser->echo_state && denoiser->echo_output_frame){
+ speex_echo_capture(denoiser->echo_state, audio_frame, denoiser->echo_output_frame);
+ memcpy(audio_frame, denoiser->echo_output_frame, denoiser->record_frame_size_bytes);
+ }
+ vad = speex_preprocess_run(denoiser->preprocess_state_record, audio_frame);
+ if(!vad && TMEDIA_DENOISE(denoiser)->vad_enabled){
+ *silence_or_noise = tsk_true;
+ }
+ }
+
+ return 0;
+}
+
+/* process_playback(): in-place denoise of one frame about to be played back. */
+static int tdav_speex_denoise_process_playback(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->playback_frame_size_bytes != audio_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->playback_frame_size_bytes, audio_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->preprocess_state_playback){
+ speex_preprocess_run(denoiser->preprocess_state_playback, audio_frame);
+ }
+ return 0;
+}
+
+/* close(): destroys all Speex states and the AEC scratch buffer; safe to
+ * call more than once (every teardown is null-guarded and re-nulled). */
+static int tdav_speex_denoise_close(tmedia_denoise_t* self)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->preprocess_state_record){
+ speex_preprocess_state_destroy(denoiser->preprocess_state_record);
+ denoiser->preprocess_state_record = tsk_null;
+ }
+ if(denoiser->preprocess_state_playback){
+ speex_preprocess_state_destroy(denoiser->preprocess_state_playback);
+ denoiser->preprocess_state_playback = tsk_null;
+ }
+ if(denoiser->echo_state){
+ speex_echo_state_destroy(denoiser->echo_state);
+ denoiser->echo_state = tsk_null;
+ }
+ TSK_FREE(denoiser->echo_output_frame);
+
+ return 0;
+}
+
+
+
+//
+// Speex denoiser Plugin definition
+//
+
+/* constructor: initializes the tmedia_denoise base; all plugin state is
+ * created lazily in open(). */
+static tsk_object_t* tdav_speex_denoise_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* init base */
+ tmedia_denoise_init(TMEDIA_DENOISE(denoise));
+ /* init self */
+
+ TSK_DEBUG_INFO("Create SpeexDSP denoiser");
+ }
+ return self;
+}
+/* destructor: deinitializes the base and releases any Speex states that
+ * close() did not already destroy (same teardown as close()). */
+static tsk_object_t* tdav_speex_denoise_dtor(tsk_object_t * self)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* deinit base */
+ tmedia_denoise_deinit(TMEDIA_DENOISE(denoise));
+ /* deinit self */
+ if(denoise->preprocess_state_record){
+ speex_preprocess_state_destroy(denoise->preprocess_state_record);
+ denoise->preprocess_state_record = tsk_null;
+ }
+ if(denoise->preprocess_state_playback){
+ speex_preprocess_state_destroy(denoise->preprocess_state_playback);
+ denoise->preprocess_state_playback = tsk_null;
+ }
+ if(denoise->echo_state){
+ speex_echo_state_destroy(denoise->echo_state);
+ denoise->echo_state = tsk_null;
+ }
+ TSK_FREE(denoise->echo_output_frame);
+
+ TSK_DEBUG_INFO("*** SpeexDSP denoiser destroyed ***");
+ }
+
+ return self;
+}
+/* object definition (size + ctor/dtor; no comparator) */
+static const tsk_object_def_t tdav_speex_denoise_def_s =
+{
+ sizeof(tdav_speex_denoise_t),
+ tdav_speex_denoise_ctor,
+ tdav_speex_denoise_dtor,
+ tsk_null,
+};
+/* plugin definition: wires the callbacks above into the tinyMEDIA denoiser
+ * plugin interface */
+static const tmedia_denoise_plugin_def_t tdav_speex_denoise_plugin_def_s =
+{
+ &tdav_speex_denoise_def_s,
+
+ "Audio Denoiser based on SpeexDSP",
+
+ tdav_speex_denoise_set,
+ tdav_speex_denoise_open,
+ tdav_speex_denoise_echo_playback,
+ tdav_speex_denoise_process_record,
+ tdav_speex_denoise_process_playback,
+ tdav_speex_denoise_close,
+};
+const tmedia_denoise_plugin_def_t *tdav_speex_denoise_plugin_def_t = &tdav_speex_denoise_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_speex_jitterbuffer.c b/tinyDAV/src/audio/tdav_speex_jitterbuffer.c
new file mode 100644
index 0000000..d4639b9
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_jitterbuffer.c
@@ -0,0 +1,319 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speex_jitterbuffer.c
+ * @brief Speex Audio jitterbuffer Plugin
+ */
+#include "tinydav/audio/tdav_speex_jitterbuffer.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#if HAVE_SPEEX_DSP && HAVE_SPEEX_JB
+
+// rfc3551 - 4.5 Audio Encodings: all frames length are multiple of 10ms
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <speex/speex_jitter.h>
+
+/** Speex JitterBuffer context. "buff" is a reassembly accumulator used only
+ * when the incoming packet size does not match the negotiated ptime. */
+typedef struct tdav_speex_jitterBuffer_s
+{
+ TMEDIA_DECLARE_JITTER_BUFFER;
+
+ JitterBuffer* state;
+ uint32_t rate;
+ uint32_t frame_duration;
+ uint32_t channels;
+ uint32_t x_data_size; // expected data size
+ uint16_t fake_seqnum; // if ptime mismatch then, reassembled pkt will have invalid seqnum
+ struct {
+ uint8_t* ptr;
+ tsk_size_t size;
+ tsk_size_t index;
+ } buff;
+
+ uint64_t num_pkt_in; // Number of incoming pkts since the last reset
+ uint64_t num_pkt_miss; // Number of times we got consecutive "JITTER_BUFFER_MISSING" results
+ uint64_t num_pkt_miss_max; // Max value for "num_pkt_miss" before reset()ing the jitter buffer
+}
+tdav_speex_jitterbuffer_t;
+
+/* set(): runtime parameter updates are not supported by this plugin. */
+static int tdav_speex_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
+{
+ TSK_DEBUG_ERROR("Not implemented");
+ return -2;
+}
+
+/* open(): creates the Speex jitter buffer with the frame duration as its
+ * "step", caches stream parameters, and applies the configured margin /
+ * max-late-rate defaults when set (>= 0).
+ * x_data_size = frame_duration(ms) * rate / 500, i.e. duration * samples/ms
+ * * 2 bytes per 16-bit sample, doubled again for stereo. */
+static int tdav_speex_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate, uint32_t channels)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ spx_int32_t tmp;
+
+ TSK_DEBUG_INFO("Open speex jb (ptime=%u, rate=%u)", frame_duration, rate);
+
+ if (!(jitterbuffer->state = jitter_buffer_init((int)frame_duration))) {
+ TSK_DEBUG_ERROR("jitter_buffer_init() failed");
+ return -2;
+ }
+ jitterbuffer->rate = rate;
+ jitterbuffer->frame_duration = frame_duration;
+ jitterbuffer->channels = channels;
+ jitterbuffer->x_data_size = ((frame_duration * jitterbuffer->rate) / 500) << (channels == 2 ? 1 : 0);
+
+ jitterbuffer->num_pkt_in = 0;
+ jitterbuffer->num_pkt_miss = 0;
+ jitterbuffer->num_pkt_miss_max = (1000 / frame_duration) * 2; // 2 seconds missing --> "Houston, we have a problem"
+
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_GET_MARGIN, &tmp);
+ TSK_DEBUG_INFO("Default Jitter buffer margin=%d", tmp);
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_GET_MAX_LATE_RATE, &tmp);
+ TSK_DEBUG_INFO("Default Jitter max late rate=%d", tmp);
+
+ if ((tmp = tmedia_defaults_get_jb_margin()) >= 0) {
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_SET_MARGIN, &tmp);
+ TSK_DEBUG_INFO("New Jitter buffer margin=%d", tmp);
+ }
+ if ((tmp = tmedia_defaults_get_jb_max_late_rate()) >= 0) {
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_SET_MAX_LATE_RATE, &tmp);
+ TSK_DEBUG_INFO("New Jitter buffer max late rate=%d", tmp);
+ }
+
+ return 0;
+}
+
+/* tick(): advances the Speex jb clock by one step; must be called once per
+ * frame interval. */
+static int tdav_speex_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ if (!jitterbuffer->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return -1;
+ }
+ jitter_buffer_tick(jitterbuffer->state);
+ return 0;
+}
+
+/* put(): pushes one incoming RTP payload into the jitter buffer.
+ * When the payload size matches the negotiated ptime (x_data_size) the packet
+ * is queued directly with its real RTP sequence number. Otherwise the payload
+ * is accumulated in "buff" and re-framed into x_data_size chunks that carry
+ * fake sequence numbers (the real ordering cannot be preserved once frames
+ * are re-cut).
+ * NOTE(review): a realloc failure in the mismatch path drops the pending data
+ * and still returns 0 (best-effort, no error surfaced to the caller). */
+static int tdav_speex_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ const trtp_rtp_header_t* rtp_hdr;
+ JitterBufferPacket jb_packet;
+
+ if (!data || !data_size || !proto_hdr) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!jb->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return -2;
+ }
+
+ rtp_hdr = TRTP_RTP_HEADER(proto_hdr);
+
+ jb_packet.user_data = 0;
+ jb_packet.span = jb->frame_duration;
+ jb_packet.len = jb->x_data_size;
+
+ if (jb->x_data_size == data_size) { /* ptime match */
+ jb_packet.data = data;
+ jb_packet.sequence = rtp_hdr->seq_num;
+ jb_packet.timestamp = (rtp_hdr->seq_num * jb_packet.span);
+ jitter_buffer_put(jb->state, &jb_packet);
+ }
+ else { /* ptime mismatch */
+ tsk_size_t i;
+ jb_packet.sequence = 0; // Ignore
+ /* grow the reassembly buffer if the pending bytes would overflow it */
+ if ((jb->buff.index + data_size) > jb->buff.size) {
+ if (!(jb->buff.ptr = tsk_realloc(jb->buff.ptr, (jb->buff.index + data_size)))) {
+ jb->buff.size = 0;
+ jb->buff.index = 0;
+ return 0;
+ }
+ jb->buff.size = (jb->buff.index + data_size);
+ }
+
+ memcpy(&jb->buff.ptr[jb->buff.index], data, data_size);
+ jb->buff.index += data_size;
+
+ /* flush every complete x_data_size chunk, keep the remainder pending */
+ if (jb->buff.index >= jb->x_data_size) {
+ tsk_size_t copied = 0;
+ for (i = 0; (i + jb->x_data_size) <= jb->buff.index; i += jb->x_data_size) {
+ jb_packet.data = (char*)&jb->buff.ptr[i];
+ jb_packet.timestamp = (++jb->fake_seqnum * jb_packet.span);// reassembled pkt will have fake seqnum
+ jitter_buffer_put(jb->state, &jb_packet);
+ copied += jb->x_data_size;
+ }
+ if (copied == jb->buff.index) {
+ // all copied
+ jb->buff.index = 0;
+ }
+ else {
+ memmove(&jb->buff.ptr[0], &jb->buff.ptr[copied], (jb->buff.index - copied));
+ jb->buff.index -= copied;
+ }
+ }
+ }
+ ++jb->num_pkt_in;
+
+ return 0;
+}
+
+/* get(): pulls exactly one ptime-sized frame (out_size must equal
+ * x_data_size) from the Speex jb. Returns the number of bytes written, or 0
+ * on miss/error (the caller is expected to play silence then). After too
+ * many consecutive misses while packets keep arriving, the whole jb is
+ * reset via the plugin's own reset() callback. */
+static tsk_size_t tdav_speex_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ JitterBufferPacket jb_packet;
+ int ret, miss = 0;
+ tsk_size_t ret_size = 0;
+
+ if (!out_data || !out_size) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ if (!jb->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return 0;
+ }
+ if (jb->x_data_size != out_size) { // consumer must request PTIME data
+ TSK_DEBUG_WARN("%d not expected as frame size. %u<>%u", out_size, jb->frame_duration, (out_size * 500) / jb->rate);
+ return 0;
+ }
+
+ jb_packet.data = out_data;
+ jb_packet.len = (spx_uint32_t)out_size;
+
+ if ((ret = jitter_buffer_get(jb->state, &jb_packet, jb->frame_duration/*(out_size * 500)/jb->rate*/, tsk_null)) != JITTER_BUFFER_OK) {
+ ++jb->num_pkt_miss;
+ switch (ret) {
+ case JITTER_BUFFER_MISSING:
+ /*TSK_DEBUG_INFO("JITTER_BUFFER_MISSING - %d", ret);*/
+ if (jb->num_pkt_miss > jb->num_pkt_miss_max /*too much missing pkts*/ && jb->num_pkt_in > jb->num_pkt_miss_max/*we're really receiving pkts*/) {
+ jb->num_pkt_miss = 0;
+ self->plugin->reset(self);
+ TSK_DEBUG_WARN("Too much missing audio pkts");
+ }
+ break;
+ case JITTER_BUFFER_INSERTION:
+ /*TSK_DEBUG_INFO("JITTER_BUFFER_INSERTION - %d", ret);*/
+ break;
+ default:
+ TSK_DEBUG_INFO("jitter_buffer_get() failed - %d", ret);
+ break;
+ }
+ // jitter_buffer_update_delay(jb->state, &jb_packet, NULL);
+ //return 0;
+ }
+ else {
+ jb->num_pkt_miss = 0; // reset
+ ret_size = jb_packet.len;
+ }
+ //jitter_buffer_update_delay(jb->state, &jb_packet, NULL);
+
+ return ret_size;
+}
+
+/* reset(): flushes the Speex jb (if created) and clears the in/miss packet
+ * counters; the jb instance itself is kept. */
+static int tdav_speex_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ if (jb->state) {
+ jitter_buffer_reset(jb->state);
+ }
+ jb->num_pkt_in = 0;
+ jb->num_pkt_miss = 0;
+ return 0;
+}
+
+/* close(): destroys the Speex jb; safe to call when already closed. */
+static int tdav_speex_jitterbuffer_close(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ if (jitterbuffer->state) {
+ jitter_buffer_destroy(jitterbuffer->state);
+ jitterbuffer->state = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speex jitterbuffer Plugin definition
+//
+
+/* constructor: initializes the tmedia_jitterbuffer base; the Speex jb state
+ * is created lazily in open(). */
+static tsk_object_t* tdav_speex_jitterbuffer_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = self;
+ TSK_DEBUG_INFO("Create SpeexDSP jitter buffer");
+ if (jitterbuffer){
+ /* init base */
+ tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor: deinitializes the base, destroys the Speex jb if close() was
+ * never called, and frees the reassembly buffer. */
+static tsk_object_t* tdav_speex_jitterbuffer_dtor(tsk_object_t * self)
+{
+ tdav_speex_jitterbuffer_t *jb = self;
+ if (jb){
+ /* deinit base */
+ tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jb));
+ /* deinit self */
+ if (jb->state){
+ jitter_buffer_destroy(jb->state);
+ jb->state = tsk_null;
+ }
+ TSK_FREE(jb->buff.ptr);
+
+ TSK_DEBUG_INFO("*** SpeexDSP jb destroyed ***");
+ }
+
+ return self;
+}
+/* object definition (size + ctor/dtor; no comparator) */
+static const tsk_object_def_t tdav_speex_jitterbuffer_def_s =
+{
+ sizeof(tdav_speex_jitterbuffer_t),
+ tdav_speex_jitterbuffer_ctor,
+ tdav_speex_jitterbuffer_dtor,
+ tsk_null,
+};
+/* plugin definition: wires the callbacks above into the tinyMEDIA
+ * jitterbuffer plugin interface */
+static const tmedia_jitterbuffer_plugin_def_t tdav_speex_jitterbuffer_plugin_def_s =
+{
+ &tdav_speex_jitterbuffer_def_s,
+ tmedia_audio,
+ "Audio JitterBuffer based on Speex",
+
+ tdav_speex_jitterbuffer_set,
+ tdav_speex_jitterbuffer_open,
+ tdav_speex_jitterbuffer_tick,
+ tdav_speex_jitterbuffer_put,
+ tdav_speex_jitterbuffer_get,
+ tdav_speex_jitterbuffer_reset,
+ tdav_speex_jitterbuffer_close,
+};
+const tmedia_jitterbuffer_plugin_def_t *tdav_speex_jitterbuffer_plugin_def_t = &tdav_speex_jitterbuffer_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */
diff --git a/tinyDAV/src/audio/tdav_speex_resampler.c b/tinyDAV/src/audio/tdav_speex_resampler.c
new file mode 100644
index 0000000..f71ddd2
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_resampler.c
@@ -0,0 +1,254 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+#include "tinydav/audio/tdav_speex_resampler.h"
+
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_RESAMPLER) || HAVE_SPEEX_RESAMPLER)
+
+#include <speex/speex_resampler.h>
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_SPEEX_RESAMPLER_MAX_QUALITY 10
+
+/** Speex resampler context: wraps a SpeexResamplerState plus the frame sizes negotiated in open(). */
+typedef struct tdav_speex_resampler_s
+{
+ TMEDIA_DECLARE_RESAMPLER;
+
+ tsk_size_t in_size; /* expected input size, in samples (interleaved count when stereo) */
+ tsk_size_t out_size; /* expected output size, in samples (interleaved count when stereo) */
+ uint32_t in_channels; /* 1 or 2 (validated in open()) */
+ uint32_t out_channels; /* 1 or 2 (validated in open()) */
+ uint32_t bytes_per_sample; /* sizeof(spx_int16_t) or sizeof(float) */
+
+ struct{
+ void* ptr; /* scratch buffer, allocated only when in_channels != out_channels */
+ tsk_size_t size_in_samples;
+ } tmp_buffer;
+
+ SpeexResamplerState *state; /* underlying SpeexDSP resampler state */
+}
+tdav_speex_resampler_t;
+
+/* Opens the resampler.
+ * Validates channel counts (mono/stereo only) and sample width (int16 or float),
+ * creates the SpeexDSP state and precomputes the expected input/output frame
+ * sizes in samples (interleaved count when stereo). A scratch buffer is
+ * (re)allocated only when the channel counts differ.
+ * @retval 0 on success, negative on failure. */
+static int tdav_speex_resampler_open(tmedia_resampler_t* self, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, uint32_t bits_per_sample)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ int ret = 0;
+ uint32_t bytes_per_sample = (bits_per_sample >> 3);
+
+ if (in_channels != 1 && in_channels != 2) {
+ TSK_DEBUG_ERROR("%d not valid as input channel", in_channels);
+ return -1;
+ }
+ if (out_channels != 1 && out_channels != 2) {
+ TSK_DEBUG_ERROR("%d not valid as output channel", out_channels);
+ return -1;
+ }
+ if (bytes_per_sample != sizeof(spx_int16_t) && bytes_per_sample != sizeof(float)) {
+ TSK_DEBUG_ERROR("%d not valid as bits_per_sample", bits_per_sample);
+ return -1;
+ }
+
+ /* quality is clamped to SpeexDSP's supported [0..10] range */
+ if (!(resampler->state = speex_resampler_init(in_channels, in_freq, out_freq, TSK_CLAMP(0, quality, TDAV_SPEEX_RESAMPLER_MAX_QUALITY), &ret))) {
+ TSK_DEBUG_ERROR("speex_resampler_init() returned %d", ret);
+ return -2;
+ }
+
+ /* sizes in samples: (rate * duration / 1000), doubled for stereo interleaving */
+ resampler->bytes_per_sample = bytes_per_sample;
+ resampler->in_size = ((in_freq * frame_duration) / 1000) << (in_channels == 2 ? 1 : 0);
+ resampler->out_size = ((out_freq * frame_duration) / 1000) << (out_channels == 2 ? 1 : 0);
+ resampler->in_channels = in_channels;
+ resampler->out_channels = out_channels;
+
+ if (in_channels != out_channels) {
+ /* scratch buffer sized for the worst case (highest rate, widest channel count) */
+ resampler->tmp_buffer.size_in_samples = ((TSK_MAX(in_freq, out_freq) * frame_duration) / 1000) << (TSK_MAX(in_channels, out_channels) == 2 ? 1 : 0);
+ /* NOTE(review): old pointer is overwritten on tsk_realloc() failure — whether
+ * this leaks or dangles depends on tsk_realloc semantics; confirm in tsk_memory.c */
+ if (!(resampler->tmp_buffer.ptr = tsk_realloc(resampler->tmp_buffer.ptr, resampler->tmp_buffer.size_in_samples * resampler->bytes_per_sample))) {
+ resampler->tmp_buffer.size_in_samples = 0;
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+
+/* Resamples a single audio frame.
+ * @param in_data input samples (interleaved when stereo); must hold exactly
+ *        resampler->in_size samples as negotiated in open()
+ * @param in_size_in_sample number of input samples
+ * @param out_data destination buffer
+ * @param out_size_in_sample capacity of out_data in samples (>= resampler->out_size)
+ * @retval number of samples written to out_data (== resampler->out_size on success), 0 on error */
+static tsk_size_t tdav_speex_resampler_process(tmedia_resampler_t* self, const void* in_data, tsk_size_t in_size_in_sample, void* out_data, tsk_size_t out_size_in_sample)
+{
+    tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+    int err = RESAMPLER_ERR_SUCCESS;
+    spx_uint32_t _out_size_in_sample = (spx_uint32_t)out_size_in_sample;
+    if (!resampler->state || !out_data) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    if (in_size_in_sample != resampler->in_size) {
+        TSK_DEBUG_ERROR("Input data has wrong size");
+        return 0;
+    }
+
+    if (out_size_in_sample < resampler->out_size) {
+        TSK_DEBUG_ERROR("Output data is too short");
+        return 0;
+    }
+
+    if (resampler->in_channels == resampler->out_channels) {
+        /* same channel count: resample straight into the caller's buffer */
+        if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+            err = speex_resampler_process_int(resampler->state, 0,
+                (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample,
+                (spx_int16_t *)out_data, &_out_size_in_sample);
+        }
+        else {
+            err = speex_resampler_process_float(resampler->state, 0,
+                (const float *)in_data, (spx_uint32_t *)&in_size_in_sample,
+                (float *)out_data, &_out_size_in_sample);
+        }
+    }
+    else {
+        spx_uint32_t i, j;
+        if (resampler->in_channels == 1) {
+            /* in=1, out=2: resample mono into the scratch buffer, then duplicate
+             * each sample to produce interleaved stereo in out_data */
+            if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+                err = speex_resampler_process_int(resampler->state, 0, (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample, resampler->tmp_buffer.ptr, &_out_size_in_sample);
+                if (err == RESAMPLER_ERR_SUCCESS) {
+                    spx_int16_t* pout_data = (spx_int16_t*)(out_data);
+                    for (i = 0, j = 0; i < _out_size_in_sample; ++i, j += 2) {
+                        pout_data[j] = pout_data[j + 1] = *(((const spx_int16_t*)resampler->tmp_buffer.ptr) + i);
+                    }
+                    /* FIX: report the interleaved stereo count actually written
+                     * (previously returned the mono count — half the real size) */
+                    _out_size_in_sample = (spx_uint32_t)resampler->out_size;
+                }
+            }
+            else {
+                err = speex_resampler_process_float(resampler->state, 0, (const float *)in_data, (spx_uint32_t *)&in_size_in_sample, resampler->tmp_buffer.ptr, &_out_size_in_sample);
+                if (err == RESAMPLER_ERR_SUCCESS) {
+                    float* pout_data = (float*)(out_data);
+                    for (i = 0, j = 0; i < _out_size_in_sample; ++i, j += 2) {
+                        pout_data[j] = pout_data[j + 1] = *(((const float*)resampler->tmp_buffer.ptr) + i);
+                    }
+                    /* FIX: same stereo-count correction as the int16 path */
+                    _out_size_in_sample = (spx_uint32_t)resampler->out_size;
+                }
+            }
+        }
+        else {
+            /* in=2, out=1: resample the interleaved pair into the scratch buffer,
+             * then keep only the first (left) channel */
+            spx_uint32_t _out_size2_in_sample = (_out_size_in_sample << 1);
+            if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+                err = speex_resampler_process_int(resampler->state, 0,
+                    (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample,
+                    (spx_int16_t *)resampler->tmp_buffer.ptr, &_out_size2_in_sample);
+                if (err == RESAMPLER_ERR_SUCCESS) {
+                    spx_int16_t* pout_data = (spx_int16_t*)(out_data);
+                    _out_size_in_sample = (spx_uint32_t)resampler->out_size;
+                    for (i = 0, j = 0; j < _out_size2_in_sample; ++i, j += 2) {
+                        pout_data[i] = *(((const spx_int16_t*)resampler->tmp_buffer.ptr) + j);
+                    }
+                }
+            }
+            else {
+                err = speex_resampler_process_float(resampler->state, 0,
+                    (const float *)in_data, (spx_uint32_t *)&in_size_in_sample,
+                    (float *)resampler->tmp_buffer.ptr, &_out_size2_in_sample);
+                if (err == RESAMPLER_ERR_SUCCESS) {
+                    float* pout_data = (float*)(out_data);
+                    for (i = 0, j = 0; j < _out_size2_in_sample; ++i, j += 2) {
+                        pout_data[i] = *(((const float*)resampler->tmp_buffer.ptr) + j);
+                    }
+                    /* FIX: mirror the int16 path, which resets the reported count to the
+                     * negotiated mono size (was left at the caller's raw capacity) */
+                    _out_size_in_sample = (spx_uint32_t)resampler->out_size;
+                }
+            }
+        }
+    }
+
+    if (err != RESAMPLER_ERR_SUCCESS) {
+        TSK_DEBUG_ERROR("speex_resampler_process_int() failed with error code %d", err);
+        return 0;
+    }
+    return (tsk_size_t)_out_size_in_sample;
+}
+
+/* Closes the resampler: destroys the SpeexDSP state only; the scratch buffer is kept for reuse */
+static int tdav_speex_resampler_close(tmedia_resampler_t* self)
+{
+    tdav_speex_resampler_t *p_self = (tdav_speex_resampler_t *)self;
+    if (p_self->state) {
+        speex_resampler_destroy(p_self->state);
+        p_self->state = tsk_null;
+    }
+    return 0;
+}
+
+
+
+//
+// Speex resampler Plugin definition
+//
+
+/* tsk object constructor: base init only; members are zeroed by the allocator */
+static tsk_object_t* tdav_speex_resampler_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_speex_resampler_t *p_self = (tdav_speex_resampler_t *)self;
+    if (p_self) {
+        tmedia_resampler_init(TMEDIA_RESAMPLER(p_self));
+    }
+    return self;
+}
+/* tsk object destructor: base deinit, then free the SpeexDSP state and scratch buffer */
+static tsk_object_t* tdav_speex_resampler_dtor(tsk_object_t * self)
+{
+    tdav_speex_resampler_t *p_self = (tdav_speex_resampler_t *)self;
+    if (p_self) {
+        /* deinit base first */
+        tmedia_resampler_deinit(TMEDIA_RESAMPLER(p_self));
+        /* release owned resources */
+        if (p_self->state) {
+            speex_resampler_destroy(p_self->state);
+            p_self->state = tsk_null;
+        }
+        TSK_FREE(p_self->tmp_buffer.ptr);
+        TSK_DEBUG_INFO("*** SpeexDSP resampler (plugin) destroyed ***");
+    }
+    return self;
+}
+/* tsk object definition (size + ctor/dtor/comparator) */
+static const tsk_object_def_t tdav_speex_resampler_def_s =
+{
+ sizeof(tdav_speex_resampler_t), /* object size */
+ tdav_speex_resampler_ctor, /* constructor */
+ tdav_speex_resampler_dtor, /* destructor */
+ tsk_null, /* no comparator */
+};
+/* plugin definition: wires the generic tmedia resampler interface to this Speex implementation */
+static const tmedia_resampler_plugin_def_t tdav_speex_resampler_plugin_def_s =
+{
+ &tdav_speex_resampler_def_s,
+
+ "Audio Resampler based on Speex",
+
+ tdav_speex_resampler_open,
+ tdav_speex_resampler_process,
+ tdav_speex_resampler_close,
+};
+/* exported plugin descriptor */
+const tmedia_resampler_plugin_def_t *tdav_speex_resampler_plugin_def_t = &tdav_speex_resampler_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */
diff --git a/tinyDAV/src/audio/tdav_webrtc_denoise.c b/tinyDAV/src/audio/tdav_webrtc_denoise.c
new file mode 100644
index 0000000..598470a
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_webrtc_denoise.c
@@ -0,0 +1,627 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP
+* Copyright (C) 2011-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_webrtc_denoise.c
+* @brief Google WebRTC Denoiser (Noise suppression, AGC, AEC) Plugin
+*/
+#include "tinydav/audio/tdav_webrtc_denoise.h"
+
+#if HAVE_WEBRTC && (!defined(HAVE_WEBRTC_DENOISE) || HAVE_WEBRTC_DENOISE)
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "tinymedia/tmedia_defaults.h"
+#include "tinymedia/tmedia_resampler.h"
+
+#include <string.h>
+
+#if !defined(WEBRTC_AEC_AGGRESSIVE)
+# define WEBRTC_AEC_AGGRESSIVE 0
+#endif
+#if !defined(WEBRTC_MAX_ECHO_TAIL)
+# define WEBRTC_MAX_ECHO_TAIL 500
+#endif
+#if !defined(WEBRTC_MIN_ECHO_TAIL)
+# define WEBRTC_MIN_ECHO_TAIL 20 // 0 will cause random crashes
+#endif
+
+#if TDAV_UNDER_MOBILE || 1 // FIXME
+typedef int16_t sample_t;
+#else
+typedef float sample_t;
+#endif
+
+/* Describes one audio "pin" (format endpoint): frame duration, rate, channels, sample width. */
+typedef struct tdav_webrtc_pin_xs
+{
+ uint32_t n_duration; /* frame duration in milliseconds */
+ uint32_t n_rate; /* sampling rate in Hz */
+ uint32_t n_channels; /* number of interleaved channels */
+ uint32_t n_sample_size; /* bytes per sample: sizeof(int16_t) or sizeof(sample_t/float) */
+}
+tdav_webrtc_pin_xt;
+
+/* Resampler wrapper converting between an input pin and an output pin,
+ * including int16<->float sample-width conversion via p_bufftmp_ptr. */
+typedef struct tdav_webrtc_resampler_s
+{
+ TSK_DECLARE_OBJECT;
+
+ tmedia_resampler_t* p_resampler; /* wrapped tmedia resampler */
+ void* p_bufftmp_ptr; // used to convert float <->int16
+ tsk_size_t n_bufftmp_size_in_bytes;
+
+ struct {
+ tdav_webrtc_pin_xt x_pin; /* input format */
+ tsk_size_t n_buff_size_in_bytes; /* expected input frame size */
+ tsk_size_t n_buff_size_in_samples;
+ } in;
+ struct {
+ tdav_webrtc_pin_xt x_pin; /* output format */
+ void* p_buff_ptr; /* owned output buffer filled by _tdav_webrtc_resampler_process() */
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ } out;
+}
+tdav_webrtc_resampler_t;
+
+static int _tdav_webrtc_resampler_create(const tdav_webrtc_pin_xt* p_pin_in, const tdav_webrtc_pin_xt* p_pin_out, tdav_webrtc_resampler_t **pp_resampler);
+static int _tdav_webrtc_resampler_process(tdav_webrtc_resampler_t* p_self, const void* p_buff_ptr, tsk_size_t n_buff_size_in_bytes);
+
+/** WebRTC denoiser (AEC, NS, AGC...) context. */
+typedef struct tdav_webrtc_denoise_s
+{
+ TMEDIA_DECLARE_DENOISE;
+
+ void *AEC_inst; /* WebRTC echo canceller instance */
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ SpeexPreprocessState *SpeexDenoiser_proc; /* Speex noise suppressor (preferred build) */
+#else
+ TDAV_NsHandle *NS_inst; /* WebRTC noise suppressor */
+#endif
+
+ uint32_t echo_tail; /* clamped to [WEBRTC_MIN_ECHO_TAIL, WEBRTC_MAX_ECHO_TAIL] */
+ uint32_t echo_skew;
+
+ /* per-direction resamplers between device format and denoiser format */
+ struct {
+ tdav_webrtc_resampler_t* p_rpl_in2den; // input -> denoiser
+ tdav_webrtc_resampler_t* p_rpl_den2in; // denoiser -> input
+ } record;
+ struct {
+ tdav_webrtc_resampler_t* p_rpl_in2den; // input -> denoiser
+ tdav_webrtc_resampler_t* p_rpl_den2in; // denoiser -> input
+ } playback;
+
+ /* format negotiated for the AEC/NS engines in open() */
+ struct {
+ uint32_t nb_samples_per_process; /* 10ms worth of samples at neg.sampling_rate */
+ uint32_t sampling_rate;
+ uint32_t channels; // always "1"
+ } neg;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_webrtc_denoise_t;
+
+/* Runtime option setter. Only the int32 "echo-tail" key is recognized.
+ * @retval 0 when the option was applied, -1 otherwise. */
+static int tdav_webrtc_denoise_set(tmedia_denoise_t* _self, const tmedia_param_t* param)
+{
+    tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)_self;
+    if (!p_self || !param) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+    /* only 32-bit integer options are supported */
+    if (param->value_type != tmedia_pvt_int32) {
+        return -1;
+    }
+    if (tsk_striequals(param->key, "echo-tail")) {
+        int32_t echo_tail = *((int32_t*)param->value);
+        /* keep the tail within the range accepted by the AEC */
+        p_self->echo_tail = TSK_CLAMP(WEBRTC_MIN_ECHO_TAIL, echo_tail, WEBRTC_MAX_ECHO_TAIL);
+        TSK_DEBUG_INFO("set_echo_tail (%d->%d)", echo_tail, p_self->echo_tail);
+        return 0;
+    }
+    return -1;
+}
+
+/* Opens the denoiser for the given record/playback formats.
+ * Picks a working rate for the engines (max of record/playback, capped at 16kHz),
+ * creates in->denoiser and denoiser->in resamplers for each direction whose pin
+ * format differs from the engine format, then creates/initializes the AEC and,
+ * when enabled, the noise suppressor.
+ * @retval 0 on success; negative or a WebRTC error code on failure. */
+static int tdav_webrtc_denoise_open(tmedia_denoise_t* self, uint32_t record_frame_size_samples, uint32_t record_sampling_rate, uint32_t record_channels, uint32_t playback_frame_size_samples, uint32_t playback_sampling_rate, uint32_t playback_channels)
+{
+ tdav_webrtc_denoise_t *denoiser = (tdav_webrtc_denoise_t *)self;
+ int ret;
+ tdav_webrtc_pin_xt pin_record_in = { 0 }, pin_record_den = { 0 }, pin_playback_in = { 0 }, pin_playback_den = { 0 };
+
+ if (!denoiser) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* refuse to re-open while engine instances still exist */
+ if (denoiser->AEC_inst ||
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ denoiser->SpeexDenoiser_proc
+#else
+ denoiser->NS_inst
+#endif
+ ){
+ TSK_DEBUG_ERROR("Denoiser already initialized");
+ return -2;
+ }
+
+ denoiser->echo_tail = TSK_CLAMP(WEBRTC_MIN_ECHO_TAIL, TMEDIA_DENOISE(denoiser)->echo_tail, WEBRTC_MAX_ECHO_TAIL);
+ denoiser->echo_skew = TMEDIA_DENOISE(denoiser)->echo_skew;
+ TSK_DEBUG_INFO("echo_tail=%d, echo_skew=%d, echo_supp_enabled=%d, noise_supp_enabled=%d", denoiser->echo_tail, denoiser->echo_skew, self->echo_supp_enabled, self->noise_supp_enabled);
+
+ //
+ // DENOISER
+ //
+#if TDAV_UNDER_MOBILE // AECM= [8-16]k, AEC=[8-32]k
+ denoiser->neg.sampling_rate = TSK_MIN(TSK_MAX(record_sampling_rate, playback_sampling_rate), 16000);
+#else
+ denoiser->neg.sampling_rate = TSK_MIN(TSK_MAX(record_sampling_rate, playback_sampling_rate), 16000); // FIXME: 32000 accepted by echo_process fails
+#endif
+ /* 10ms worth of samples at the negotiated rate */
+ denoiser->neg.nb_samples_per_process = /*TSK_CLAMP(80,*/ ((denoiser->neg.sampling_rate * 10) / 1000)/*, 160)*/; // Supported by the module: "80"(10ms) and "160"(20ms)
+ denoiser->neg.channels = 1;
+
+ //
+ // RECORD
+ //
+ TSK_OBJECT_SAFE_FREE(denoiser->record.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(denoiser->record.p_rpl_in2den);
+ pin_record_in.n_sample_size = sizeof(int16_t);
+ pin_record_in.n_rate = record_sampling_rate;
+ pin_record_in.n_channels = record_channels;
+ pin_record_in.n_duration = (((record_frame_size_samples * 1000) / record_sampling_rate)) / record_channels;
+ pin_record_den.n_sample_size = sizeof(sample_t);
+ pin_record_den.n_rate = denoiser->neg.sampling_rate;
+
+ pin_record_den.n_channels = 1;
+ pin_record_den.n_duration = pin_record_in.n_duration;
+ /* create record-side resamplers only when the device and engine pins differ */
+ if (pin_record_in.n_sample_size != pin_record_den.n_sample_size || pin_record_in.n_rate != pin_record_den.n_rate || pin_record_in.n_channels != pin_record_den.n_channels) {
+ if ((ret = _tdav_webrtc_resampler_create(&pin_record_in, &pin_record_den, &denoiser->record.p_rpl_in2den))) {
+ return ret;
+ }
+ if ((ret = _tdav_webrtc_resampler_create(&pin_record_den, &pin_record_in, &denoiser->record.p_rpl_den2in))) {
+ return ret;
+ }
+ }
+ //
+ // PLAYBACK
+ //
+ TSK_OBJECT_SAFE_FREE(denoiser->playback.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(denoiser->playback.p_rpl_in2den);
+ pin_playback_in.n_sample_size = sizeof(int16_t);
+ pin_playback_in.n_rate = playback_sampling_rate;
+ pin_playback_in.n_channels = playback_channels;
+ pin_playback_in.n_duration = (((playback_frame_size_samples * 1000) / playback_sampling_rate)) / playback_channels;
+ pin_playback_den.n_sample_size = sizeof(sample_t);
+ pin_playback_den.n_rate = denoiser->neg.sampling_rate;
+ pin_playback_den.n_channels = 1;
+ pin_playback_den.n_duration = pin_playback_in.n_duration;
+ /* create playback-side resamplers only when the device and engine pins differ */
+ if (pin_playback_in.n_sample_size != pin_playback_den.n_sample_size || pin_playback_in.n_rate != pin_playback_den.n_rate || pin_playback_in.n_channels != pin_playback_den.n_channels) {
+ if ((ret = _tdav_webrtc_resampler_create(&pin_playback_in, &pin_playback_den, &denoiser->playback.p_rpl_in2den))) {
+ return ret;
+ }
+ if ((ret = _tdav_webrtc_resampler_create(&pin_playback_den, &pin_playback_in, &denoiser->playback.p_rpl_den2in))) {
+ return ret;
+ }
+ }
+
+ //
+ // AEC instance
+ //
+ if ((ret = TDAV_WebRtcAec_Create(&denoiser->AEC_inst))) {
+ TSK_DEBUG_ERROR("WebRtcAec_Create failed with error code = %d", ret);
+ return ret;
+ }
+ if ((ret = TDAV_WebRtcAec_Init(denoiser->AEC_inst, denoiser->neg.sampling_rate, denoiser->neg.sampling_rate))) {
+ TSK_DEBUG_ERROR("WebRtcAec_Init failed with error code = %d", ret);
+ return ret;
+ }
+
+#if TDAV_UNDER_MOBILE
+#else
+ {
+ AecConfig aecConfig;
+#if WEBRTC_AEC_AGGRESSIVE
+ aecConfig.nlpMode = kAecNlpAggressive;
+#else
+ aecConfig.nlpMode = kAecNlpModerate;
+#endif
+ aecConfig.skewMode = kAecFalse;
+ aecConfig.metricsMode = kAecTrue;
+ aecConfig.delay_logging = kAecFalse;
+ /* non-fatal: keep going with default config on failure */
+ if ((ret = WebRtcAec_set_config(denoiser->AEC_inst, aecConfig))) {
+ TSK_DEBUG_ERROR("WebRtcAec_set_config failed with error code = %d", ret);
+ }
+ }
+#endif
+
+
+ //
+ // Noise Suppression instance
+ //
+ if (TMEDIA_DENOISE(denoiser)->noise_supp_enabled) {
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if ((denoiser->SpeexDenoiser_proc = speex_preprocess_state_init((pin_record_den.n_rate / 1000) * pin_record_den.n_duration, pin_record_den.n_rate))) {
+ int i = 1;
+ speex_preprocess_ctl(denoiser->SpeexDenoiser_proc, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ i = TMEDIA_DENOISE(denoiser)->noise_supp_level;
+ speex_preprocess_ctl(denoiser->SpeexDenoiser_proc, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ }
+#else
+ if ((ret = TDAV_WebRtcNs_Create(&denoiser->NS_inst))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Create failed with error code = %d", ret);
+ return ret;
+ }
+ /* NOTE(review): NS is initialized with a fixed 80 (10ms@8kHz) regardless of
+ * neg.sampling_rate — confirm this is intended for 16kHz operation */
+ if ((ret = TDAV_WebRtcNs_Init(denoiser->NS_inst, 80))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Init failed with error code = %d", ret);
+ return ret;
+ }
+#endif
+ }
+
+ TSK_DEBUG_INFO("WebRTC denoiser opened: record:%uHz,%uchannels // playback:%uHz,%uchannels // neg:%uHz,%uchannels",
+ record_sampling_rate, record_channels,
+ playback_sampling_rate, playback_channels,
+ denoiser->neg.sampling_rate, denoiser->neg.channels);
+
+ return ret;
+}
+
+/* Feeds the far-end (speaker/playback) signal to the AEC so it can model the echo.
+ * The frame is first resampled to the engine format when a playback in->den
+ * resampler exists, then pushed to the AEC in 10ms chunks.
+ * @retval 0 on success, non-zero WebRTC/resampler error otherwise. */
+static int tdav_webrtc_denoise_echo_playback(tmedia_denoise_t* self, const void* echo_frame, uint32_t echo_frame_size_bytes)
+{
+ tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)self;
+ int ret = 0;
+
+ tsk_safeobj_lock(p_self);
+ if (p_self->AEC_inst && echo_frame && echo_frame_size_bytes) {
+ const sample_t* _echo_frame = (const sample_t*)echo_frame;
+ tsk_size_t _echo_frame_size_bytes = echo_frame_size_bytes;
+ tsk_size_t _echo_frame_size_samples = (_echo_frame_size_bytes / sizeof(int16_t));
+ // IN -> DEN
+ if (p_self->playback.p_rpl_in2den) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->playback.p_rpl_in2den, _echo_frame, _echo_frame_size_bytes))) {
+ goto bail;
+ }
+ /* continue with the resampler's output buffer */
+ _echo_frame = p_self->playback.p_rpl_in2den->out.p_buff_ptr;
+ _echo_frame_size_bytes = p_self->playback.p_rpl_in2den->out.n_buff_size_in_bytes;
+ _echo_frame_size_samples = p_self->playback.p_rpl_in2den->out.n_buff_size_in_samples;
+ }
+ // PROCESS
+ if (_echo_frame_size_samples && _echo_frame) {
+ uint32_t _samples;
+ /* the AEC only accepts nb_samples_per_process (10ms) chunks */
+ for (_samples = 0; _samples < _echo_frame_size_samples; _samples += p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcAec_BufferFarend(p_self->AEC_inst, &_echo_frame[_samples], p_self->neg.nb_samples_per_process))){
+ TSK_DEBUG_ERROR("WebRtcAec_BufferFarend failed with error code = %d, nb_samples_per_process=%u", ret, p_self->neg.nb_samples_per_process);
+ goto bail;
+ }
+ }
+ }
+ }
+bail:
+ tsk_safeobj_unlock(p_self);
+ return ret;
+}
+
+/* Denoises one captured (near-end) frame in place:
+ * resample to engine format -> noise suppression -> AEC -> resample back,
+ * then copy the result over the caller's buffer when needed.
+ * @param silence_or_noise always set to tsk_false (VAD not implemented here).
+ * @retval 0 on success, non-zero on error. */
+static int tdav_webrtc_denoise_process_record(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes, tsk_bool_t* silence_or_noise)
+{
+ tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)self;
+ int ret = 0;
+
+ *silence_or_noise = tsk_false;
+
+ tsk_safeobj_lock(p_self);
+
+ if (p_self->AEC_inst && audio_frame && audio_frame_size_bytes) {
+ tsk_size_t _samples;
+ const sample_t* _audio_frame = (const sample_t*)audio_frame;
+ tsk_size_t _audio_frame_size_bytes = audio_frame_size_bytes;
+ tsk_size_t _audio_frame_size_samples = (_audio_frame_size_bytes / sizeof(int16_t));
+ // IN -> DEN
+ if (p_self->record.p_rpl_in2den) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->record.p_rpl_in2den, _audio_frame, _audio_frame_size_bytes))) {
+ goto bail;
+ }
+ /* continue with the resampler's output buffer */
+ _audio_frame = p_self->record.p_rpl_in2den->out.p_buff_ptr;
+ _audio_frame_size_bytes = p_self->record.p_rpl_in2den->out.n_buff_size_in_bytes;
+ _audio_frame_size_samples = p_self->record.p_rpl_in2den->out.n_buff_size_in_samples;
+ }
+ // NOISE SUPPRESSION
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if (p_self->SpeexDenoiser_proc) {
+ speex_preprocess_run(p_self->SpeexDenoiser_proc, (spx_int16_t*)_audio_frame);
+ }
+#else
+ // WebRTC NoiseSupp only accept 10ms frames
+ // Our encoder will always output 20ms frames ==> execute 2x noise_supp
+ if (p_self->NS_inst) {
+ for (_samples = 0; _samples < _audio_frame_size_samples; _samples+= p_self->neg.nb_samples_per_process) {
+ /* NOTE(review): the output pointer is '_audio_frame' (frame start), not
+ * '&_audio_frame[_samples]' — each subsequent 10ms pass appears to
+ * overwrite the beginning of the buffer, and the call writes through a
+ * const-qualified pointer; confirm against upstream WebRtcNs usage */
+ if ((ret = TDAV_WebRtcNs_Process(p_self->NS_inst, &_audio_frame[_samples], tsk_null, _audio_frame, tsk_null))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Process with error code = %d", ret);
+ goto bail;
+ }
+ }
+ }
+#endif
+ // PROCESS
+ if (_audio_frame_size_samples && _audio_frame) {
+ /* AEC runs in-place on 10ms chunks (const is cast away for the output) */
+ for (_samples = 0; _samples < _audio_frame_size_samples; _samples += p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcAec_Process(p_self->AEC_inst, &_audio_frame[_samples], tsk_null, (sample_t*)&_audio_frame[_samples], tsk_null, p_self->neg.nb_samples_per_process, p_self->echo_tail, p_self->echo_skew))){
+ TSK_DEBUG_ERROR("WebRtcAec_Process with error code = %d, nb_samples_per_process=%u", ret, p_self->neg.nb_samples_per_process);
+ goto bail;
+ }
+ }
+ }
+ // DEN -> IN
+ if (p_self->record.p_rpl_den2in) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->record.p_rpl_den2in, _audio_frame, _audio_frame_size_bytes))) {
+ goto bail;
+ }
+ _audio_frame = p_self->record.p_rpl_den2in->out.p_buff_ptr;
+ _audio_frame_size_bytes = p_self->record.p_rpl_den2in->out.n_buff_size_in_bytes;
+ _audio_frame_size_samples = p_self->record.p_rpl_den2in->out.n_buff_size_in_samples;
+ }
+ // Sanity check
+ if (_audio_frame_size_bytes != audio_frame_size_bytes) {
+ TSK_DEBUG_ERROR("Size mismatch: %u <> %u", _audio_frame_size_bytes, audio_frame_size_bytes);
+ ret = -3;
+ goto bail;
+ }
+ /* copy back only when processing went through an intermediate buffer */
+ if (audio_frame != (const void*)_audio_frame) {
+ memcpy(audio_frame, _audio_frame, _audio_frame_size_bytes);
+ }
+ }
+
+bail:
+ tsk_safeobj_unlock(p_self);
+ return ret;
+}
+
+/* Playback-side processing is intentionally a no-op: denoising before playback
+ * is not mandatory and all Doubango clients support noise suppression. */
+static int tdav_webrtc_denoise_process_playback(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes)
+{
+    (void)(self);
+    (void)(audio_frame);
+    (void)(audio_frame_size_bytes);
+    return 0;
+}
+
+/* Closes the denoiser: frees the AEC and noise-suppression engine instances.
+ * The resamplers are kept; they are released by the destructor. */
+static int tdav_webrtc_denoise_close(tmedia_denoise_t* self)
+{
+    tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)self;
+
+    /* serialize against the record/playback processing callbacks */
+    tsk_safeobj_lock(p_self);
+    if (p_self->AEC_inst) {
+        TDAV_WebRtcAec_Free(p_self->AEC_inst);
+        p_self->AEC_inst = tsk_null;
+    }
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+    if (p_self->SpeexDenoiser_proc) {
+        speex_preprocess_state_destroy(p_self->SpeexDenoiser_proc);
+        p_self->SpeexDenoiser_proc = tsk_null;
+    }
+#else
+    if (p_self->NS_inst) {
+        TDAV_WebRtcNs_Free(p_self->NS_inst);
+        p_self->NS_inst = tsk_null;
+    }
+#endif
+    tsk_safeobj_unlock(p_self);
+
+    return 0;
+}
+
+/* Allocates and opens a resampler between two pins.
+ * Opens the wrapped tmedia resampler (bit depth taken from the OUTPUT pin, since
+ * int16<->float conversion is done beforehand in _tdav_webrtc_resampler_process),
+ * allocates the output buffer sized from the output pin, and a temporary
+ * conversion buffer sized for the worst case (48kHz, stereo, float).
+ * @param pp_resampler out-parameter; must point to tsk_null on entry.
+ * @retval 0 on success; on failure the partially-built object is freed. */
+static int _tdav_webrtc_resampler_create(const tdav_webrtc_pin_xt* p_pin_in, const tdav_webrtc_pin_xt* p_pin_out, tdav_webrtc_resampler_t **pp_resampler)
+{
+ extern const tsk_object_def_t *tdav_webrtc_resampler_def_t;
+ int ret = 0;
+ if (!p_pin_in || !p_pin_out || !pp_resampler || *pp_resampler) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (!(*pp_resampler = tsk_object_new(tdav_webrtc_resampler_def_t))) {
+ TSK_DEBUG_ERROR("Failed to create resampler object");
+ ret = -3;
+ goto bail;
+ }
+ if (!((*pp_resampler)->p_resampler = tmedia_resampler_create())) {
+ ret = -3;
+ goto bail;
+ }
+ ret = tmedia_resampler_open((*pp_resampler)->p_resampler,
+ p_pin_in->n_rate, p_pin_out->n_rate,
+ p_pin_in->n_duration,
+ p_pin_in->n_channels, p_pin_out->n_channels,
+ TMEDIA_RESAMPLER_QUALITY,
+ (p_pin_out->n_sample_size << 3));
+ if (ret) {
+ TSK_DEBUG_ERROR("Failed to open resampler: in_rate=%u,in_duration=%u,in_channels=%u /// out_rate=%u,out_duration=%u,out_channels=%u",
+ p_pin_in->n_rate, p_pin_in->n_duration, p_pin_in->n_channels,
+ p_pin_out->n_rate, p_pin_out->n_duration, p_pin_out->n_channels);
+ goto bail;
+ }
+
+ /* output buffer: (rate * duration / 1000) * channels * sample_size */
+ (*pp_resampler)->out.n_buff_size_in_bytes = ((((p_pin_out->n_rate * p_pin_out->n_duration) / 1000)) * p_pin_out->n_channels) * p_pin_out->n_sample_size;
+ (*pp_resampler)->out.p_buff_ptr = tsk_malloc((*pp_resampler)->out.n_buff_size_in_bytes);
+ if (!(*pp_resampler)->out.p_buff_ptr) {
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size=%u", (*pp_resampler)->out.n_buff_size_in_bytes);
+ ret = -3;
+ goto bail;
+ }
+ (*pp_resampler)->out.n_buff_size_in_samples = (*pp_resampler)->out.n_buff_size_in_bytes / p_pin_out->n_sample_size;
+ /* input side only records the expected sizes; the caller owns the input buffer */
+ (*pp_resampler)->in.n_buff_size_in_bytes = ((((p_pin_in->n_rate * p_pin_in->n_duration) / 1000)) * p_pin_in->n_channels) * p_pin_in->n_sample_size;
+ (*pp_resampler)->in.n_buff_size_in_samples = (*pp_resampler)->in.n_buff_size_in_bytes / p_pin_in->n_sample_size;
+
+ (*pp_resampler)->n_bufftmp_size_in_bytes = (((48000 * TSK_MAX(p_pin_in->n_duration, p_pin_out->n_duration)) / 1000) * 2/*channels*/) * sizeof(float); // Max
+ (*pp_resampler)->p_bufftmp_ptr = tsk_malloc((*pp_resampler)->n_bufftmp_size_in_bytes);
+ if (!(*pp_resampler)->p_bufftmp_ptr) {
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size:%u", (*pp_resampler)->n_bufftmp_size_in_bytes);
+ ret = -3;
+ goto bail;
+ }
+
+ memcpy(&(*pp_resampler)->in.x_pin, p_pin_in, sizeof(tdav_webrtc_pin_xt));
+ memcpy(&(*pp_resampler)->out.x_pin, p_pin_out, sizeof(tdav_webrtc_pin_xt));
+bail:
+ if (ret) {
+ TSK_OBJECT_SAFE_FREE((*pp_resampler));
+ }
+ return ret;
+}
+
+/* Pushes one frame through the wrapped resampler, converting between int16 and
+ * float in p_bufftmp_ptr first when the input and output pins use different
+ * sample widths. The resampled frame is left in p_self->out.p_buff_ptr.
+ * @param p_buff_ptr input frame; must be exactly in.n_buff_size_in_bytes long.
+ * @retval 0 on success, negative on failure. */
+static int _tdav_webrtc_resampler_process(tdav_webrtc_resampler_t *p_self, const void* p_buff_ptr, tsk_size_t n_buff_size_in_bytes)
+{
+    tsk_size_t n_out_size;
+    const void* _p_buff_ptr = p_buff_ptr;
+    tsk_size_t _n_buff_size_in_samples;
+
+    if (!p_self || !p_buff_ptr || !n_buff_size_in_bytes) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+    if (p_self->in.n_buff_size_in_bytes != n_buff_size_in_bytes) {
+        TSK_DEBUG_ERROR("Invalid input size: %u <> %u", p_self->in.n_buff_size_in_bytes, n_buff_size_in_bytes);
+        return -2;
+    }
+    _n_buff_size_in_samples = p_self->in.n_buff_size_in_samples;
+    if (p_self->in.x_pin.n_sample_size != p_self->out.x_pin.n_sample_size) {
+        tsk_size_t index;
+        if (p_self->in.x_pin.n_sample_size == sizeof(int16_t)) {
+            /* int16_t -> float */
+            const int16_t* p_src = (const int16_t*)p_buff_ptr;
+            float* p_dst = (float*)p_self->p_bufftmp_ptr;
+            for (index = 0; index < _n_buff_size_in_samples; ++index) {
+                p_dst[index] = (float)p_src[index];
+            }
+        }
+        else {
+            /* float -> int16_t
+             * NOTE(review): the cast does not saturate; values outside
+             * [-32768, 32767] would be mishandled — confirm the engine
+             * guarantees int16 range before adding a clamp */
+            const float* p_src = (const float*)p_buff_ptr;
+            int16_t* p_dst = (int16_t*)p_self->p_bufftmp_ptr;
+            for (index = 0; index < _n_buff_size_in_samples; ++index) {
+                p_dst[index] = (int16_t)p_src[index];
+            }
+        }
+        /* resample from the converted copy (removed a dead byte-size local here) */
+        _p_buff_ptr = p_self->p_bufftmp_ptr;
+    }
+    n_out_size = tmedia_resampler_process(p_self->p_resampler, _p_buff_ptr, _n_buff_size_in_samples, (int16_t*)p_self->out.p_buff_ptr, p_self->out.n_buff_size_in_samples);
+    if (n_out_size != p_self->out.n_buff_size_in_samples) {
+        /* FIX: log the expected SAMPLE count (was printing the byte count while
+         * comparing sample counts, making the message misleading) */
+        TSK_DEBUG_ERROR("Invalid output size: %u <> %u", n_out_size, p_self->out.n_buff_size_in_samples);
+        return -4;
+    }
+    return 0;
+}
+
+//
+// WEBRTC resampler object definition
+//
+/* tsk object constructor: nothing to initialize — members are zeroed by the allocator */
+static tsk_object_t* tdav_webrtc_resampler_ctor(tsk_object_t * self, va_list * app)
+{
+    return self;
+}
+/* tsk object destructor: releases the wrapped resampler and both owned buffers */
+static tsk_object_t* tdav_webrtc_resampler_dtor(tsk_object_t * self)
+{
+    tdav_webrtc_resampler_t *p_self = (tdav_webrtc_resampler_t*)self;
+    if (p_self) {
+        TSK_OBJECT_SAFE_FREE(p_self->p_resampler);
+        TSK_FREE(p_self->out.p_buff_ptr);
+        TSK_FREE(p_self->p_bufftmp_ptr);
+    }
+    return self;
+}
+/* tsk object definition for the resampler wrapper */
+static const tsk_object_def_t tdav_webrtc_resampler_def_s =
+{
+ sizeof(tdav_webrtc_resampler_t), /* object size */
+ tdav_webrtc_resampler_ctor, /* constructor */
+ tdav_webrtc_resampler_dtor, /* destructor */
+ tsk_object_cmp, /* default comparator */
+};
+const tsk_object_def_t *tdav_webrtc_resampler_def_t = &tdav_webrtc_resampler_def_s;
+
+
+//
+// WEBRTC denoiser Plugin definition
+//
+
+/* tsk object constructor: base init, safe-object init, mono negotiation default */
+static tsk_object_t* tdav_webrtc_denoise_ctor(tsk_object_t * _self, va_list * app)
+{
+    tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)_self;
+    if (p_self) {
+        /* base first, then self */
+        tmedia_denoise_init(TMEDIA_DENOISE(p_self));
+        tsk_safeobj_init(p_self);
+        p_self->neg.channels = 1; /* the denoiser always negotiates mono */
+        TSK_DEBUG_INFO("Create WebRTC denoiser");
+    }
+    return _self;
+}
+/* tsk object destructor: deinit base, close the engines, free the resamplers */
+static tsk_object_t* tdav_webrtc_denoise_dtor(tsk_object_t * _self)
+{
+    tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)_self;
+    if (p_self) {
+        /* deinit base (will close the denoise if not done yet) */
+        tmedia_denoise_deinit(TMEDIA_DENOISE(p_self));
+        /* close again defensively, then drop the four resamplers */
+        tdav_webrtc_denoise_close(TMEDIA_DENOISE(p_self));
+        TSK_OBJECT_SAFE_FREE(p_self->record.p_rpl_in2den);
+        TSK_OBJECT_SAFE_FREE(p_self->record.p_rpl_den2in);
+        TSK_OBJECT_SAFE_FREE(p_self->playback.p_rpl_in2den);
+        TSK_OBJECT_SAFE_FREE(p_self->playback.p_rpl_den2in);
+        tsk_safeobj_deinit(p_self);
+        TSK_DEBUG_INFO("*** Destroy WebRTC denoiser ***");
+    }
+    return _self;
+}
+/* tsk object definition (size + ctor/dtor/comparator) */
+static const tsk_object_def_t tdav_webrtc_denoise_def_s =
+{
+ sizeof(tdav_webrtc_denoise_t), /* object size */
+ tdav_webrtc_denoise_ctor, /* constructor */
+ tdav_webrtc_denoise_dtor, /* destructor */
+ tsk_null, /* no comparator */
+};
+/* plugin definition: wires the generic tmedia denoise interface to this WebRTC implementation */
+static const tmedia_denoise_plugin_def_t tdav_webrtc_denoise_plugin_def_s =
+{
+ &tdav_webrtc_denoise_def_s,
+
+ "Audio Denoiser based on Google WebRTC",
+
+ tdav_webrtc_denoise_set,
+ tdav_webrtc_denoise_open,
+ tdav_webrtc_denoise_echo_playback,
+ tdav_webrtc_denoise_process_record,
+ tdav_webrtc_denoise_process_playback,
+ tdav_webrtc_denoise_close,
+};
+/* exported plugin descriptor */
+const tmedia_denoise_plugin_def_t *tdav_webrtc_denoise_plugin_def_t = &tdav_webrtc_denoise_plugin_def_s;
+
+
+#endif /* HAVE_WEBRTC */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx b/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx
new file mode 100644
index 0000000..c3a88e3
--- /dev/null
+++ b/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx
@@ -0,0 +1,676 @@
+/*Copyright (C) 2013 Mamadou DIOP
+* Copyright (C) 2013-2014 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_consumer_wasapi.cxx
+ * @brief Microsoft Windows Audio Session API (WASAPI) consumer.
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
+ */
+#include "tinydav/audio/wasapi/tdav_consumer_wasapi.h"
+
+#if HAVE_WASAPI
+
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_condwait.h"
+#include "tsk_debug.h"
+
+#include <windows.h>
+#include <audioclient.h>
+#include <phoneaudioclient.h>
+
+#include <speex/speex_buffer.h>
+
+#if !defined(TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT)
+# define TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT 4
+#endif
+#define WASAPI_MILLIS_TO_100NS(MILLIS) (((LONGLONG)(MILLIS)) * 10000ui64)
+#define WASAPI_100NS_TO_MILLIS(NANOS) (((LONGLONG)(NANOS)) / 10000ui64)
+
+#define WASAPI_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+
+struct tdav_consumer_wasapi_s;
+
+// WinRT wrapper that owns the WASAPI render device, its render client and the
+// playback thread; bridges the C consumer (tdav_consumer_wasapi_s) to WASAPI.
+namespace Doubango
+{
+ namespace VoIP
+ {
+ ref class AudioRender sealed
+ {
+ public:
+ virtual ~AudioRender();
+ internal:
+ AudioRender();
+
+ int Prepare(struct tdav_consumer_wasapi_s* wasapi, const tmedia_codec_t* codec);
+ int UnPrepare();
+ int Start();
+ int Stop();
+ int Pause();
+ int Consume(const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr);
+ private:
+ // Fills 'data' for the device from the jitter buffer / speex ring.
+ tsk_size_t Read(void* data, tsk_size_t size);
+ // Playback loop body, scheduled on the WinRT thread pool.
+ void AsyncThread(Windows::Foundation::IAsyncAction^ operation);
+
+ private:
+ tsk_mutex_handle_t* m_hMutex;
+ const struct tdav_consumer_wasapi_s* m_pWrappedConsumer; // Must not take ref() otherwise dtor() will be never called (circular reference)
+ IAudioClient2* m_pDevice;
+ IAudioRenderClient* m_pClient;
+ HANDLE m_hEvent; // buffer-ready event set by the device (event-driven mode)
+ Windows::Foundation::IAsyncAction^ m_pAsyncThread;
+ INT32 m_nBytesPerNotif;
+ INT32 m_nSourceFrameSizeInBytes;
+ UINT32 m_nMaxFrameCount; // device buffer size in frames (GetBufferSize)
+ UINT32 m_nPtime;
+
+ // Staging chunk (one ptime of audio) + speex ring buffer feeding the device.
+ struct {
+ struct {
+ void* buffer;
+ tsk_size_t size;
+ } chunck;
+ tsk_ssize_t leftBytes;
+ SpeexBuffer* buffer;
+ tsk_size_t size;
+ } m_ring;
+
+ bool m_bStarted;
+ bool m_bPrepared;
+ bool m_bPaused;
+ };
+ }
+}
+
+// C-side consumer object: base audio consumer plus the WinRT render wrapper.
+typedef struct tdav_consumer_wasapi_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ Doubango::VoIP::AudioRender ^AudioRender;
+}
+tdav_consumer_wasapi_t;
+
+extern "C" void tdav_win32_print_error(const char* func, HRESULT hr);
+
+
+/* ============ Media consumer Interface ================= */
+
+static int tdav_consumer_wasapi_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+    // No WASAPI-specific parameters: everything is delegated to the base audio consumer.
+    return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+// Record the decoding parameters negotiated by the codec, then let the
+// AudioRender object activate and configure the WASAPI device.
+// Returns 0 on success, negative on failure.
+static int tdav_consumer_wasapi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+    tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+    if (!wasapi || !codec || !wasapi->AudioRender) {
+        WASAPI_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    TMEDIA_CONSUMER(wasapi)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(wasapi)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+    TMEDIA_CONSUMER(wasapi)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+    // (fixed typo: "out.channles" -> "out.channels")
+    WASAPI_DEBUG_INFO("in.channels=%d, out.channels=%d, in.rate=%d, out.rate=%d, ptime=%d",
+        TMEDIA_CONSUMER(wasapi)->audio.in.channels,
+        TMEDIA_CONSUMER(wasapi)->audio.out.channels,
+        TMEDIA_CONSUMER(wasapi)->audio.in.rate,
+        TMEDIA_CONSUMER(wasapi)->audio.out.rate,
+        TMEDIA_CONSUMER(wasapi)->audio.ptime);
+
+    return wasapi->AudioRender->Prepare(wasapi, codec);
+}
+
+static int tdav_consumer_wasapi_start(tmedia_consumer_t* self)
+{
+    tdav_consumer_wasapi_t* consumer = (tdav_consumer_wasapi_t*)self;
+
+    WASAPI_DEBUG_INFO("tdav_consumer_wasapi_start()");
+
+    // Forward to the WinRT render object once the wrapper is validated.
+    if (consumer && consumer->AudioRender) {
+        return consumer->AudioRender->Start();
+    }
+    WASAPI_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+
+static int tdav_consumer_wasapi_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+    tdav_consumer_wasapi_t* consumer = (tdav_consumer_wasapi_t*)self;
+    // Reject null wrapper, render object, or empty payloads up-front.
+    if (consumer && consumer->AudioRender && buffer && size) {
+        return consumer->AudioRender->Consume(buffer, size, proto_hdr);
+    }
+    WASAPI_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+static int tdav_consumer_wasapi_pause(tmedia_consumer_t* self)
+{
+    tdav_consumer_wasapi_t* consumer = (tdav_consumer_wasapi_t*)self;
+
+    // Forward to the WinRT render object once the wrapper is validated.
+    if (consumer && consumer->AudioRender) {
+        return consumer->AudioRender->Pause();
+    }
+    WASAPI_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+static int tdav_consumer_wasapi_stop(tmedia_consumer_t* self)
+{
+    tdav_consumer_wasapi_t* consumer = (tdav_consumer_wasapi_t*)self;
+
+    WASAPI_DEBUG_INFO("tdav_consumer_wasapi_stop()");
+
+    // Forward to the WinRT render object once the wrapper is validated.
+    if (consumer && consumer->AudioRender) {
+        return consumer->AudioRender->Stop();
+    }
+    WASAPI_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+
+
+
+
+
+
+// Constructor: zero all state and create the mutex serializing
+// Prepare/Start/Stop/UnPrepare against the playback thread.
+// Throws Platform::FailureException if the mutex cannot be created.
+Doubango::VoIP::AudioRender::AudioRender()
+ : m_pDevice(nullptr)
+ , m_hMutex(nullptr)
+ , m_pClient(nullptr)
+ , m_hEvent(nullptr)
+ , m_pAsyncThread(nullptr)
+ , m_pWrappedConsumer(nullptr)
+ , m_nBytesPerNotif(0)
+ , m_nSourceFrameSizeInBytes(0)
+ , m_nMaxFrameCount(0)
+ , m_nPtime(0)
+ , m_bStarted(false)
+ , m_bPrepared(false)
+ , m_bPaused(false)
+{
+ memset(&m_ring, 0, sizeof(m_ring));
+
+ if (!(m_hMutex = tsk_mutex_create())) {
+ throw ref new Platform::FailureException(L"Failed to create mutex");
+ }
+}
+
+// Destructor: stop the playback thread, release all WASAPI resources,
+// then destroy the mutex (must be last -- Stop/UnPrepare lock it).
+Doubango::VoIP::AudioRender::~AudioRender()
+{
+ Stop();
+ UnPrepare();
+
+ tsk_mutex_destroy(&m_hMutex);
+}
+
+// Create and configure the WASAPI render device for the negotiated codec:
+// activate the default communications render endpoint, pick the closest
+// supported PCM format (requesting a resampler on fallback), initialize the
+// client in event-driven shared mode, and allocate the chunk + ring buffers.
+// Returns 0 on success, a negative code on failure (resources torn down via
+// UnPrepare() in the bail path).
+int Doubango::VoIP::AudioRender::Prepare(tdav_consumer_wasapi_t* wasapi, const tmedia_codec_t* codec)
+{
+ HRESULT hr = E_FAIL;
+ int ret = 0;
+ WAVEFORMATEX wfx = {0};
+ AudioClientProperties properties = {0};
+ LPCWSTR pwstrRenderId = nullptr;
+
+ // NOTE: expands to two statements -- only safe inside a braced block,
+ // which is how it is used below.
+ #define WASAPI_SET_ERROR(code) ret = (code); goto bail;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_bPrepared) {
+ WASAPI_DEBUG_INFO("Already prepared");
+ goto bail;
+ }
+
+ if (!wasapi || !codec) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ WASAPI_SET_ERROR(-1);
+ }
+
+ if (m_pDevice || m_pClient) {
+ WASAPI_DEBUG_ERROR("consumer already prepared");
+ WASAPI_SET_ERROR(-2);
+ }
+
+ pwstrRenderId = GetDefaultAudioRenderId(AudioDeviceRole::Communications);
+
+ if (NULL == pwstrRenderId) {
+ tdav_win32_print_error("GetDefaultAudioRenderId", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-3);
+ }
+
+ hr = ActivateAudioInterface(pwstrRenderId, __uuidof(IAudioClient2), (void**)&m_pDevice);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-4);
+ }
+
+ if (SUCCEEDED(hr)) {
+ // Tag the stream as "communications" so the OS applies the right policy.
+ properties.cbSize = sizeof AudioClientProperties;
+ properties.eCategory = AudioCategory_Communications;
+ hr = m_pDevice->SetClientProperties(&properties);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("SetClientProperties", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-5);
+ }
+ }
+ else {
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-6);
+ }
+
+ /* Set best format */
+ {
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_CONSUMER(wasapi)->audio.in.channels;
+ wfx.nSamplesPerSec = TMEDIA_CONSUMER(wasapi)->audio.in.rate;
+ wfx.wBitsPerSample = TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ PWAVEFORMATEX pwfxClosestMatch = NULL;
+ hr = m_pDevice->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &wfx, &pwfxClosestMatch);
+ if (hr != S_OK && hr != S_FALSE) {
+ tdav_win32_print_error("IsFormatSupported", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-8);
+ }
+
+ // S_FALSE: the exact format is unsupported but the device proposed a close match.
+ if (hr == S_FALSE) {
+ if (!pwfxClosestMatch) {
+ // NOTE(review): %d with a size_t argument -- should be %zu or a cast; confirm.
+ WASAPI_DEBUG_ERROR("malloc(%d) failed", sizeof(WAVEFORMATEX));
+ WASAPI_SET_ERROR(-7);
+ }
+
+ wfx.nSamplesPerSec = pwfxClosestMatch->nSamplesPerSec;
+ wfx.nChannels = pwfxClosestMatch->nChannels;
+#if 0
+ wfx.wBitsPerSample = pwfxClosestMatch->wBitsPerSample;
+#endif
+ wfx.nBlockAlign = wfx.nChannels * (wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
+ // Request resampler
+ TMEDIA_CONSUMER(wasapi)->audio.out.rate = (uint32_t)wfx.nSamplesPerSec;
+ TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample = (uint8_t)wfx.wBitsPerSample;
+ TMEDIA_CONSUMER(wasapi)->audio.out.channels = (uint8_t)wfx.nChannels;
+
+ WASAPI_DEBUG_INFO("Audio device format fallback: rate=%d, bps=%d, channels=%d", wfx.nSamplesPerSec, wfx.wBitsPerSample, wfx.nChannels);
+ }
+ if (pwfxClosestMatch) {
+ CoTaskMemFree(pwfxClosestMatch);
+ }
+ }
+
+ m_nSourceFrameSizeInBytes = (wfx.wBitsPerSample >> 3) * wfx.nChannels;
+ m_nBytesPerNotif = ((wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(wasapi)->audio.ptime) / 1000);
+
+ // Initialize
+ hr = m_pDevice->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+ WASAPI_MILLIS_TO_100NS(TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT * TMEDIA_CONSUMER(wasapi)->audio.ptime),
+ 0,
+ &wfx,
+ NULL);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("#WASAPI: Render::Initialize", hr);
+ WASAPI_SET_ERROR(-9);
+ }
+
+ REFERENCE_TIME DefaultDevicePeriod, MinimumDevicePeriod;
+ hr = m_pDevice->GetDevicePeriod(&DefaultDevicePeriod, &MinimumDevicePeriod);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetDevicePeriod", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+ hr = m_pDevice->GetBufferSize(&m_nMaxFrameCount);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetBufferSize", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+
+ WASAPI_DEBUG_INFO("#WASAPI (Playback): BufferSize=%u, DefaultDevicePeriod=%lld ms, MinimumDevicePeriod=%lldms", m_nMaxFrameCount, WASAPI_100NS_TO_MILLIS(DefaultDevicePeriod), WASAPI_100NS_TO_MILLIS(MinimumDevicePeriod));
+
+ // Event signalled by the device when render buffer space is available.
+ if (!m_hEvent) {
+ if (!(m_hEvent = CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE))) {
+ tdav_win32_print_error("CreateEventEx(EVENT_MODIFY_STATE | SYNCHRONIZE)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-11);
+ }
+ }
+
+ hr = m_pDevice->SetEventHandle(m_hEvent);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("SetEventHandle", hr);
+ WASAPI_SET_ERROR(-12);
+ }
+
+ hr = m_pDevice->GetService(__uuidof(IAudioRenderClient), (void**)&m_pClient);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetService", hr);
+ WASAPI_SET_ERROR(-14);
+ }
+
+ // One chunk = one ptime worth of audio in the device (out.*) format.
+ m_ring.chunck.size = (TMEDIA_CONSUMER(wasapi)->audio.ptime * TMEDIA_CONSUMER(wasapi)->audio.out.rate * ((TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample >> 3) * TMEDIA_CONSUMER(wasapi)->audio.out.channels)) / 1000;
+ m_ring.size = TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT * m_ring.chunck.size;
+ // NOTE(review): on realloc failure the previous chunck.buffer pointer is
+ // overwritten and leaks (p = realloc(p, ...) pattern) -- confirm/fix upstream.
+ if (!(m_ring.chunck.buffer = tsk_realloc(m_ring.chunck.buffer, m_ring.chunck.size))) {
+ m_ring.size = 0;
+ WASAPI_DEBUG_ERROR("Failed to allocate new buffer");
+ WASAPI_SET_ERROR(-15);
+ }
+ if (!m_ring.buffer) {
+ m_ring.buffer = speex_buffer_init(m_ring.size);
+ }
+ else {
+ int sret;
+ if ((sret = speex_buffer_resize(m_ring.buffer, m_ring.size)) < 0) {
+ WASAPI_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", m_ring.size, sret);
+ WASAPI_SET_ERROR(-16);
+ }
+ }
+ if (!m_ring.buffer) {
+ WASAPI_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", m_ring.size);
+ WASAPI_SET_ERROR(-17);
+ }
+
+bail:
+ if (pwstrRenderId) {
+ CoTaskMemFree((LPVOID)pwstrRenderId);
+ }
+ if (ret != 0) {
+ UnPrepare();
+ }
+
+ // On success remember the wrapped consumer (no ref taken) and its ptime.
+ if ((m_bPrepared = (ret == 0))) {
+ m_pWrappedConsumer = wasapi;
+ m_nPtime = TMEDIA_CONSUMER(wasapi)->audio.ptime;
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return ret;
+}
+
+int Doubango::VoIP::AudioRender::UnPrepare()
+{
+    tsk_mutex_lock(m_hMutex);
+
+    // Close the OS event and release the COM interfaces.
+    if (m_hEvent) {
+        CloseHandle(m_hEvent);
+        m_hEvent = nullptr;
+    }
+    if (m_pDevice) {
+        m_pDevice->Release();
+        m_pDevice = nullptr;
+    }
+    if (m_pClient) {
+        m_pClient->Release();
+        m_pClient = nullptr;
+    }
+
+    // Drop the staging chunk and destroy the ring buffer.
+    TSK_FREE(m_ring.chunck.buffer);
+    if (m_ring.buffer) {
+        speex_buffer_destroy(m_ring.buffer);
+        m_ring.buffer = nullptr;
+    }
+
+    // Forget the wrapped consumer (we never took a reference on it).
+    m_pWrappedConsumer = nullptr;
+    m_bPrepared = false;
+
+    tsk_mutex_unlock(m_hMutex);
+    return 0;
+}
+
+// Spawn the playback thread on the WinRT thread pool and start the device.
+// Returns 0 when started, -2 otherwise (also when not prepared: the early
+// 'goto bail' leaves m_bStarted false).
+int Doubango::VoIP::AudioRender::Start()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_bStarted) {
+ WASAPI_DEBUG_INFO("already started");
+ goto bail;
+ }
+ if (!m_bPrepared) {
+ WASAPI_DEBUG_ERROR("not prepared");
+ goto bail;
+ }
+
+ m_pAsyncThread = Windows::System::Threading::ThreadPool::RunAsync(ref new Windows::System::Threading::WorkItemHandler(this, &Doubango::VoIP::AudioRender::AsyncThread),
+ Windows::System::Threading::WorkItemPriority::High,
+ Windows::System::Threading::WorkItemOptions::TimeSliced);
+
+ if ((m_bStarted = (m_pAsyncThread != nullptr))) {
+ HRESULT hr = m_pDevice->Start();
+ if(!SUCCEEDED(hr)) {
+ // NOTE(review): Stop() re-locks m_hMutex while we already hold it;
+ // this relies on tsk_mutex being recursive -- confirm.
+ tdav_win32_print_error("Device::Start", hr);
+ Stop();
+ }
+ m_bPaused = false;
+ }
+
+bail:
+ tsk_mutex_unlock(m_hMutex);
+
+ return (m_bStarted ? 0 : -2);
+}
+
+// Stop playback: clear the started flag (polled by AsyncThread), wake the
+// thread via the event, cancel the work item, stop the device and release
+// everything through UnPrepare() -- Prepare() must run again before the
+// next Start().
+int Doubango::VoIP::AudioRender::Stop()
+{
+ // Cleared before taking the lock so the playback thread can exit even
+ // while we wait for the mutex.
+ m_bStarted = false;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_hEvent) {
+ SetEvent(m_hEvent);
+ }
+
+ if (m_pAsyncThread) {
+ m_pAsyncThread->Cancel();
+ m_pAsyncThread->Close();
+ m_pAsyncThread = nullptr;
+ }
+
+ if (m_pDevice) {
+ m_pDevice->Stop();
+ }
+
+ // will be prepared again before next start()
+ UnPrepare();
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+// Mark the stream as paused (flag only; the device keeps running).
+// Takes the mutex for consistency with AudioCapture::Pause() and the other
+// state-changing methods of this class.
+int Doubango::VoIP::AudioRender::Pause()
+{
+    tsk_mutex_lock(m_hMutex);
+
+    m_bPaused = true;
+
+    tsk_mutex_unlock(m_hMutex);
+
+    return 0;
+}
+
+// Push one decoded frame into the base-class jitter buffer; the playback
+// thread drains it through Read(). tdav_consumer_audio_put() is itself
+// thread-safe, hence the commented-out external lock.
+int Doubango::VoIP::AudioRender::Consume(const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ int ret;
+ // tsk_mutex_lock(m_hMutex);
+ ret = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer), buffer, size, proto_hdr); // thread-safe
+ // tsk_mutex_unlock(m_hMutex);
+ return ret;
+}
+
+// Fill 'data' with up to 'size' bytes for the device. Strategy: account the
+// request in leftBytes, pull whole ptime-sized chunks from the jitter buffer
+// into the speex ring, then serve the request from the ring; any shortfall
+// is zero-filled (silence).
+// NOTE(review): the value returned is the size of the LAST speex read/write,
+// not necessarily the number of bytes placed in 'data'; the only caller
+// (AsyncThread) just tests it against 0, so this is currently harmless.
+tsk_size_t Doubango::VoIP::AudioRender::Read(void* data, tsk_size_t size)
+{
+ tsk_ssize_t retSize = 0, availSize;
+
+ m_ring.leftBytes += size;
+ while (m_ring.leftBytes >= (tsk_ssize_t)m_ring.chunck.size) {
+ m_ring.leftBytes -= m_ring.chunck.size;
+ retSize = (tsk_ssize_t)tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer), m_ring.chunck.buffer, m_ring.chunck.size);
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer));
+ speex_buffer_write(m_ring.buffer, m_ring.chunck.buffer, retSize);
+ }
+ // IMPORTANT: looks like there is a bug in speex: continously trying to read more than avail
+ // many times can corrupt the buffer. At least on OS X 1.5
+#if 0
+ if (speex_buffer_get_available(m_ring.buffer) >= (tsk_ssize_t)size) {
+ retSize = speex_buffer_read(m_ring.buffer, data, size);
+ }
+ else{
+ memset(data, 0, size);
+ }
+#else
+ availSize = speex_buffer_get_available(m_ring.buffer);
+ if (availSize == 0) {
+ memset(data, 0, size);
+ }
+ else {
+ retSize = speex_buffer_read(m_ring.buffer, data, min(availSize, (tsk_ssize_t)size));
+ if (availSize < (tsk_ssize_t)size) {
+ memset(((uint8_t*)data) + availSize, 0, (size - availSize));
+ }
+ }
+
+#endif
+
+ return retSize;
+}
+
+// Playback thread: waits for the device's buffer-available event, pulls
+// decoded audio from the jitter buffer (via Read()) and hands it to the
+// render client. Exits when m_bStarted goes false or a WASAPI call fails.
+void Doubango::VoIP::AudioRender::AsyncThread(Windows::Foundation::IAsyncAction^ operation)
+{
+    HRESULT hr = S_OK;
+    INT32 nFramesToWrite;
+    UINT32 nPadding, nRead;
+    // FIX: 'retval' is read after the loop for the final trace and was
+    // indeterminate (undefined behavior) if the loop never ran.
+    DWORD retval = WAIT_OBJECT_0;
+
+    WASAPI_DEBUG_INFO("#WASAPI: __playback_thread -- START");
+
+    // Expands to two statements (unlock + break) -- only usable inside a braced block.
+    #define BREAK_WHILE tsk_mutex_unlock(m_hMutex); break;
+
+    while (m_bStarted && SUCCEEDED(hr)) {
+        retval = WaitForSingleObjectEx(m_hEvent, /*m_nPtime*/INFINITE, FALSE);
+
+        tsk_mutex_lock(m_hMutex);
+
+        if (!m_bStarted) {
+            BREAK_WHILE;
+        }
+
+        if (retval == WAIT_OBJECT_0) {
+            hr = m_pDevice->GetCurrentPadding(&nPadding);
+            if (SUCCEEDED(hr)) {
+                BYTE* pRenderBuffer = NULL;
+                nFramesToWrite = m_nMaxFrameCount - nPadding;
+
+                if (nFramesToWrite > 0) {
+                    hr = m_pClient->GetBuffer(nFramesToWrite, &pRenderBuffer);
+                    if (SUCCEEDED(hr)) {
+                        nRead = Read(pRenderBuffer, (nFramesToWrite * m_nSourceFrameSizeInBytes));
+
+                        // Flag silence when nothing was available from the jitter buffer.
+                        hr = m_pClient->ReleaseBuffer(nFramesToWrite, (nRead == 0) ? AUDCLNT_BUFFERFLAGS_SILENT : 0);
+                    }
+                }
+            }
+        }
+
+        tsk_mutex_unlock(m_hMutex);
+    }// end-of-while
+
+    if (!SUCCEEDED(hr)) {
+        tdav_win32_print_error("AsyncThread: ", hr);
+    }
+
+
+    WASAPI_DEBUG_INFO("__playback_thread(%s) -- STOP", (SUCCEEDED(hr) && retval == WAIT_OBJECT_0) ? "OK" : "NOK");
+}
+
+
+
+
+
+
+
+//
+// WaveAPI consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_wasapi_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_consumer_wasapi_t *consumer = (tdav_consumer_wasapi_t*)self;
+    if (!consumer) {
+        return self;
+    }
+    /* base: generic audio consumer state */
+    tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+    /* self: the WinRT object that owns the WASAPI render device */
+    consumer->AudioRender = ref new Doubango::VoIP::AudioRender();
+    return self;
+}
+/* destructor */
+// Destructor: stop rendering, tear down the base audio consumer, then
+// delete the WinRT render object (its dtor releases the WASAPI resources).
+static tsk_object_t* tdav_consumer_wasapi_dtor(tsk_object_t * self)
+{
+ tdav_consumer_wasapi_t *wasapi = (tdav_consumer_wasapi_t*)self;
+ if (wasapi) {
+ /* stop */
+ tdav_consumer_wasapi_stop((tmedia_consumer_t*)self);
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(wasapi));
+ /* deinit self */
+ if (wasapi->AudioRender) {
+ delete wasapi->AudioRender;
+ wasapi->AudioRender = nullptr;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+// Object definition: size, constructor, destructor, comparator.
+static const tsk_object_def_t tdav_consumer_wasapi_def_s =
+{
+ sizeof(tdav_consumer_wasapi_t),
+ tdav_consumer_wasapi_ctor,
+ tdav_consumer_wasapi_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+// Wires the wrapper functions into the tmedia consumer plugin interface.
+static const tmedia_consumer_plugin_def_t tdav_consumer_wasapi_plugin_def_s =
+{
+ &tdav_consumer_wasapi_def_s,
+
+ tmedia_audio,
+ "Microsoft Windows Audio Session API (WASAPI) consumer",
+
+ tdav_consumer_wasapi_set,
+ tdav_consumer_wasapi_prepare,
+ tdav_consumer_wasapi_start,
+ tdav_consumer_wasapi_consume,
+ tdav_consumer_wasapi_pause,
+ tdav_consumer_wasapi_stop
+};
+// Exported plugin handle registered by the media framework.
+const tmedia_consumer_plugin_def_t *tdav_consumer_wasapi_plugin_def_t = &tdav_consumer_wasapi_plugin_def_s;
+
+
+
+
+#endif /* HAVE_WASAPI */
diff --git a/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx b/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx
new file mode 100644
index 0000000..7d172a2
--- /dev/null
+++ b/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx
@@ -0,0 +1,681 @@
+/*Copyright (C) 2013 Mamadou DIOP
+* Copyright (C) 2013-2014 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_producer_wasapi.cxx
+ * @brief Microsoft Windows Audio Session API (WASAPI) producer.
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
+ */
+#include "tinydav/audio/wasapi/tdav_producer_wasapi.h"
+
+#if HAVE_WASAPI
+
+#include "tinydav/audio/tdav_producer_audio.h"
+
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#include <windows.h>
+#include <audioclient.h>
+#include <phoneaudioclient.h>
+
+#include <speex/speex_buffer.h>
+
+#if !defined(TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT)
+# define TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT 10
+#endif
+#define WASAPI_MILLIS_TO_100NS(MILLIS) (((LONGLONG)(MILLIS)) * 10000ui64)
+#define WASAPI_100NS_TO_MILLIS(NANOS) (((LONGLONG)(NANOS)) / 10000ui64)
+
+#define WASAPI_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+
+struct tdav_producer_wasapi_s;
+
+// WinRT wrapper that owns the WASAPI capture device, its capture client and
+// the capture thread; bridges the C producer (tdav_producer_wasapi_s) to WASAPI.
+namespace Doubango
+{
+ namespace VoIP
+ {
+ ref class AudioCapture sealed
+ {
+ public:
+ virtual ~AudioCapture();
+ internal:
+ AudioCapture();
+
+ int Prepare(struct tdav_producer_wasapi_s* wasapi, const tmedia_codec_t* codec);
+ int UnPrepare();
+ int Start();
+ int Stop();
+ int Pause();
+
+ private:
+ // Capture loop body, scheduled on the WinRT thread pool.
+ void AsyncThread(Windows::Foundation::IAsyncAction^ operation);
+
+ private:
+ tsk_mutex_handle_t* m_hMutex;
+ IAudioClient2* m_pDevice;
+ IAudioCaptureClient* m_pClient;
+ HANDLE m_hCaptureEvent; // data-available event set by the device
+ HANDLE m_hShutdownEvent; // manual-reset event used to stop the thread
+ Windows::Foundation::IAsyncAction^ m_pAsyncThread;
+ INT32 m_nBytesPerNotif;
+ INT32 m_nSourceFrameSizeInBytes;
+
+ // Encoded-frame callback copied from the base producer in Prepare().
+ struct{
+ tmedia_producer_enc_cb_f fn;
+ const void* pcData;
+ } m_callback;
+
+ // Staging chunk (one ptime of audio) + speex ring buffer for captured data.
+ struct {
+ struct {
+ void* buffer;
+ tsk_size_t size;
+ } chunck;
+ SpeexBuffer* buffer;
+ tsk_size_t size;
+ } m_ring;
+ bool m_bStarted;
+ bool m_bPrepared;
+ bool m_bPaused;
+ };
+ }
+}
+
+// C-side producer object: base audio producer plus the WinRT capture wrapper.
+typedef struct tdav_producer_wasapi_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ Doubango::VoIP::AudioCapture ^audioCapture;
+}
+tdav_producer_wasapi_t;
+
+extern "C" void tdav_win32_print_error(const char* func, HRESULT hr);
+
+
+/* ============ Media Producer Interface ================= */
+// Handle producer parameters. "volume" and "mute" are accepted but not (yet)
+// applied to the WASAPI device; everything else is delegated to the base
+// audio producer. (Removed stale commented-out DirectSound mute handling and
+// the unused local it referenced.)
+static int tdav_producer_wasapi_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+    if (param->plugin_type == tmedia_ppt_producer && param->value_type == tmedia_pvt_int32) {
+        if (tsk_striequals(param->key, "volume") || tsk_striequals(param->key, "mute")) {
+            return 0;
+        }
+    }
+    return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+
+
+static int tdav_producer_wasapi_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+    tdav_producer_wasapi_t* producer = (tdav_producer_wasapi_t*)self;
+
+    if (!producer || !codec || !producer->audioCapture) {
+        WASAPI_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* Copy the negotiated settings (the codec carries the ptime). */
+    TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+    TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+    TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+    WASAPI_DEBUG_INFO("channels=%d, rate=%d, ptime=%d",
+        TMEDIA_PRODUCER(producer)->audio.channels,
+        TMEDIA_PRODUCER(producer)->audio.rate,
+        TMEDIA_PRODUCER(producer)->audio.ptime);
+
+    return producer->audioCapture->Prepare(producer, codec);
+}
+
+static int tdav_producer_wasapi_start(tmedia_producer_t* self)
+{
+    tdav_producer_wasapi_t* producer = (tdav_producer_wasapi_t*)self;
+
+    WASAPI_DEBUG_INFO("tdav_producer_wasapi_start()");
+
+    // Forward to the WinRT capture object once the wrapper is validated.
+    if (producer && producer->audioCapture) {
+        return producer->audioCapture->Start();
+    }
+    WASAPI_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+static int tdav_producer_wasapi_pause(tmedia_producer_t* self)
+{
+    tdav_producer_wasapi_t* producer = (tdav_producer_wasapi_t*)self;
+
+    // Forward to the WinRT capture object once the wrapper is validated.
+    if (producer && producer->audioCapture) {
+        return producer->audioCapture->Pause();
+    }
+    WASAPI_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+static int tdav_producer_wasapi_stop(tmedia_producer_t* self)
+{
+    tdav_producer_wasapi_t* producer = (tdav_producer_wasapi_t*)self;
+
+    WASAPI_DEBUG_INFO("tdav_producer_wasapi_stop()");
+
+    // Forward to the WinRT capture object once the wrapper is validated.
+    if (producer && producer->audioCapture) {
+        return producer->audioCapture->Stop();
+    }
+    WASAPI_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+
+
+
+
+
+
+// Constructor: zero all state and create the mutex serializing
+// Prepare/Start/Stop/UnPrepare against the capture thread.
+// Throws Platform::FailureException if the mutex cannot be created.
+Doubango::VoIP::AudioCapture::AudioCapture()
+ : m_pDevice(nullptr)
+ , m_hMutex(nullptr)
+ , m_pClient(nullptr)
+ , m_hCaptureEvent(nullptr)
+ , m_hShutdownEvent(nullptr)
+ , m_pAsyncThread(nullptr)
+ , m_nBytesPerNotif(0)
+ , m_nSourceFrameSizeInBytes(0)
+ , m_bStarted(false)
+ , m_bPrepared(false)
+ , m_bPaused(false)
+{
+ m_callback.fn = nullptr, m_callback.pcData = nullptr;
+ memset(&m_ring, 0, sizeof(m_ring));
+
+ if(!(m_hMutex = tsk_mutex_create())){
+ throw ref new Platform::FailureException(L"Failed to create mutex");
+ }
+}
+
+// Destructor: stop the capture thread, release all WASAPI resources,
+// then destroy the mutex (must be last -- Stop/UnPrepare lock it).
+Doubango::VoIP::AudioCapture::~AudioCapture()
+{
+ Stop();
+ UnPrepare();
+
+ tsk_mutex_destroy(&m_hMutex);
+}
+
+// Create and configure the WASAPI capture device for the negotiated codec:
+// activate the default communications capture endpoint, pick the closest
+// supported PCM format (requesting a resampler on fallback), initialize the
+// client in event-driven shared mode, create the capture/shutdown events and
+// allocate the chunk + ring buffers. Returns 0 on success, negative on
+// failure (resources torn down via UnPrepare() in the bail path).
+int Doubango::VoIP::AudioCapture::Prepare(tdav_producer_wasapi_t* wasapi, const tmedia_codec_t* codec)
+{
+ HRESULT hr = E_FAIL;
+ int ret = 0;
+ WAVEFORMATEX wfx = {0};
+ AudioClientProperties properties = {0};
+ LPCWSTR pwstrCaptureId = nullptr;
+
+ // NOTE: expands to two statements -- only safe inside a braced block,
+ // which is how it is used below.
+ #define WASAPI_SET_ERROR(code) ret = (code); goto bail;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_bPrepared)
+ {
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer already prepared");
+ goto bail;
+ }
+
+ if(!wasapi || !codec)
+ {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ WASAPI_SET_ERROR(-1);
+ }
+
+ if(m_pDevice || m_pClient){
+ WASAPI_DEBUG_ERROR("Producer already prepared");
+ WASAPI_SET_ERROR(-2);
+ }
+
+ pwstrCaptureId = GetDefaultAudioCaptureId(AudioDeviceRole::Communications);
+
+ if (NULL == pwstrCaptureId){
+ tdav_win32_print_error("GetDefaultAudioCaptureId", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-3);
+ }
+
+ hr = ActivateAudioInterface(pwstrCaptureId, __uuidof(IAudioClient2), (void**)&m_pDevice);
+ if(!SUCCEEDED(hr)){
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-4);
+ }
+
+ if (SUCCEEDED(hr)){
+ // Tag the stream as "communications" so the OS applies the right policy.
+ properties.cbSize = sizeof AudioClientProperties;
+ properties.eCategory = AudioCategory_Communications;
+ hr = m_pDevice->SetClientProperties(&properties);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("SetClientProperties", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-5);
+ }
+ }
+ else{
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-6);
+ }
+
+ /* Set best format */
+ {
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_PRODUCER(wasapi)->audio.channels;
+ wfx.nSamplesPerSec = TMEDIA_PRODUCER(wasapi)->audio.rate;
+ wfx.wBitsPerSample = TMEDIA_PRODUCER(wasapi)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ PWAVEFORMATEX pwfxClosestMatch = NULL;
+ hr = m_pDevice->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &wfx, &pwfxClosestMatch);
+ if(hr != S_OK && hr != S_FALSE)
+ {
+ tdav_win32_print_error("IsFormatSupported", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-8);
+ }
+
+ // S_FALSE: the exact format is unsupported but the device proposed a close match.
+ if(hr == S_FALSE)
+ {
+ if(!pwfxClosestMatch)
+ {
+ // NOTE(review): %d with a size_t argument -- should be %zu or a cast; confirm.
+ WASAPI_DEBUG_ERROR("malloc(%d) failed", sizeof(WAVEFORMATEX));
+ WASAPI_SET_ERROR(-7);
+ }
+ wfx.nChannels = pwfxClosestMatch->nChannels;
+ wfx.nSamplesPerSec = pwfxClosestMatch->nSamplesPerSec;
+#if 0
+ wfx.wBitsPerSample = pwfxClosestMatch->wBitsPerSample;
+#endif
+ wfx.nBlockAlign = wfx.nChannels * (wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
+ // Request resampler
+ TMEDIA_PRODUCER(wasapi)->audio.rate = (uint32_t)wfx.nSamplesPerSec;
+ TMEDIA_PRODUCER(wasapi)->audio.bits_per_sample = (uint8_t)wfx.wBitsPerSample;
+ TMEDIA_PRODUCER(wasapi)->audio.channels = (uint8_t)wfx.nChannels;
+
+ WASAPI_DEBUG_INFO("Audio device format fallback: rate=%d, bps=%d, channels=%d", wfx.nSamplesPerSec, wfx.wBitsPerSample, wfx.nChannels);
+ }
+ if(pwfxClosestMatch)
+ {
+ CoTaskMemFree(pwfxClosestMatch);
+ }
+ }
+
+ m_nSourceFrameSizeInBytes = (wfx.wBitsPerSample >> 3) * wfx.nChannels;
+ m_nBytesPerNotif = ((wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(wasapi)->audio.ptime)/1000);
+
+ // Initialize
+ hr = m_pDevice->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+ (TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT * WASAPI_MILLIS_TO_100NS(TMEDIA_PRODUCER(wasapi)->audio.ptime)),
+ 0,
+ &wfx,
+ NULL);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("#WASAPI: Capture::SetClientProperties", hr);
+ WASAPI_SET_ERROR(-9);
+ }
+
+ REFERENCE_TIME DefaultDevicePeriod, MinimumDevicePeriod;
+ hr = m_pDevice->GetDevicePeriod(&DefaultDevicePeriod, &MinimumDevicePeriod);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("GetDevicePeriod", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+ WASAPI_DEBUG_INFO("#WASAPI(Capture): DefaultDevicePeriod=%lld ms, MinimumDevicePeriod=%lldms", WASAPI_100NS_TO_MILLIS(DefaultDevicePeriod), WASAPI_100NS_TO_MILLIS(MinimumDevicePeriod));
+
+ if(!m_hCaptureEvent){
+ if(!(m_hCaptureEvent = CreateEventEx(NULL, NULL, 0, EVENT_ALL_ACCESS))){
+ tdav_win32_print_error("CreateEventEx(Capture)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-11);
+ }
+ }
+ if(!m_hShutdownEvent){
+ if(!(m_hShutdownEvent = CreateEventEx(NULL, NULL, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS))){
+ tdav_win32_print_error("CreateEventEx(Shutdown)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-12);
+ }
+ }
+
+ hr = m_pDevice->SetEventHandle(m_hCaptureEvent);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("SetEventHandle", hr);
+ WASAPI_SET_ERROR(-13);
+ }
+
+ hr = m_pDevice->GetService(__uuidof(IAudioCaptureClient), (void**)&m_pClient);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("GetService", hr);
+ WASAPI_SET_ERROR(-14);
+ }
+
+ // NOTE(review): assumes ptime is non-zero (division) and the chunk size
+ // omits the nChannels factor, unlike the consumer's computation -- looks
+ // mono-only; confirm for multi-channel capture.
+ int packetperbuffer = (1000 / TMEDIA_PRODUCER(wasapi)->audio.ptime);
+ m_ring.chunck.size = wfx.nSamplesPerSec * (wfx.wBitsPerSample >> 3) / packetperbuffer;
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer ring chunk size = %u", m_ring.chunck.size);
+ // allocate our chunck buffer
+ // NOTE(review): on realloc failure the previous chunck.buffer pointer is
+ // overwritten and leaks (p = realloc(p, ...) pattern) -- confirm/fix upstream.
+ if(!(m_ring.chunck.buffer = tsk_realloc(m_ring.chunck.buffer, m_ring.chunck.size))){
+ WASAPI_DEBUG_ERROR("Failed to allocate new buffer");
+ WASAPI_SET_ERROR(-15);
+ }
+ // create ringbuffer
+ m_ring.size = TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT * m_ring.chunck.size;
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer ring size = %u", m_ring.size);
+ if(!m_ring.buffer){
+ m_ring.buffer = speex_buffer_init(m_ring.size);
+ }
+ else {
+ int sret;
+ if((sret = speex_buffer_resize(m_ring.buffer, m_ring.size)) < 0){
+ WASAPI_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", m_ring.size, sret);
+ WASAPI_SET_ERROR(-16);
+ }
+ }
+ if(!m_ring.buffer){
+ WASAPI_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", m_ring.size);
+ WASAPI_SET_ERROR(-17);
+ }
+
+ // Cache the encoded-frame callback from the base producer.
+ m_callback.fn = TMEDIA_PRODUCER(wasapi)->enc_cb.callback;
+ m_callback.pcData = TMEDIA_PRODUCER(wasapi)->enc_cb.callback_data;
+
+bail:
+ if (pwstrCaptureId){
+ CoTaskMemFree((LPVOID)pwstrCaptureId);
+ }
+ if(ret != 0){
+ UnPrepare();
+ }
+ m_bPrepared = (ret == 0);
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return ret;
+}
+
+int Doubango::VoIP::AudioCapture::UnPrepare()
+{
+    tsk_mutex_lock(m_hMutex);
+
+    // Close the OS events, then release the COM interfaces.
+    if (m_hCaptureEvent) {
+        CloseHandle(m_hCaptureEvent);
+        m_hCaptureEvent = nullptr;
+    }
+    if (m_hShutdownEvent) {
+        CloseHandle(m_hShutdownEvent);
+        m_hShutdownEvent = nullptr;
+    }
+    if (m_pDevice) {
+        m_pDevice->Release();
+        m_pDevice = nullptr;
+    }
+    if (m_pClient) {
+        m_pClient->Release();
+        m_pClient = nullptr;
+    }
+
+    // Drop the staging chunk and destroy the ring buffer.
+    TSK_FREE(m_ring.chunck.buffer);
+    if (m_ring.buffer) {
+        speex_buffer_destroy(m_ring.buffer);
+        m_ring.buffer = nullptr;
+    }
+
+    // Forget the encode callback and mark as unprepared.
+    m_callback.fn = nullptr;
+    m_callback.pcData = nullptr;
+    m_bPrepared = false;
+
+    tsk_mutex_unlock(m_hMutex);
+    return 0;
+}
+
+// Starts audio capture: spawns the capture loop on the WinRT thread pool,
+// then starts the WASAPI device. Requires a prior successful Prepare().
+// Returns 0 on success, -2 otherwise.
+int Doubango::VoIP::AudioCapture::Start()
+{
+	tsk_mutex_lock(m_hMutex);
+
+	if(m_bStarted)
+	{
+		WASAPI_DEBUG_INFO("#WASAPI: Audio producer already started");
+		goto bail;
+	}
+	if(!m_bPrepared)
+	{
+		WASAPI_DEBUG_ERROR("Audio producer not prepared");
+		goto bail;
+	}
+
+	// Capture loop runs in AsyncThread() until the shutdown event is signaled.
+	m_pAsyncThread = Windows::System::Threading::ThreadPool::RunAsync(ref new Windows::System::Threading::WorkItemHandler(this, &Doubango::VoIP::AudioCapture::AsyncThread),
+		Windows::System::Threading::WorkItemPriority::High,
+		Windows::System::Threading::WorkItemOptions::TimeSliced);
+
+	if((m_bStarted = (m_pAsyncThread != nullptr)))
+	{
+		HRESULT hr = m_pDevice->Start();
+		if(!SUCCEEDED(hr))
+		{
+			tdav_win32_print_error("Device::Start", hr);
+			// NOTE(review): Stop() locks m_hMutex which is already held here --
+			// this deadlocks unless tsk_mutex is recursive; confirm.
+			Stop();
+		}
+		m_bPaused = false;
+	}
+
+bail:
+	tsk_mutex_unlock(m_hMutex);
+
+	return (m_bStarted ? 0 : -2);
+}
+
+// Stops capture: signals the shutdown event so AsyncThread() exits, cancels
+// the thread-pool work item, stops the device and releases all resources
+// via UnPrepare(). Always returns 0.
+int Doubango::VoIP::AudioCapture::Stop()
+{
+	// NOTE(review): m_bStarted is cleared before the mutex is taken so the
+	// capture loop sees the flag as soon as possible -- verify this is the
+	// intended ordering (AsyncThread re-checks it under the lock).
+	m_bStarted = false;
+
+	tsk_mutex_lock(m_hMutex);
+
+	if (m_hShutdownEvent)
+	{
+		SetEvent(m_hShutdownEvent);
+	}
+
+	if (m_pAsyncThread)
+	{
+		m_pAsyncThread->Cancel();
+		m_pAsyncThread->Close();
+		m_pAsyncThread = nullptr;
+	}
+
+	if(m_pDevice)
+	{
+		m_pDevice->Stop();
+	}
+
+	// will be prepared again before next start()
+	UnPrepare();
+
+	tsk_mutex_unlock(m_hMutex);
+
+	return 0;
+}
+
+// Marks the capture as paused. Note: only the flag is set here; the capture
+// device and the async thread keep running. Always returns 0.
+int Doubango::VoIP::AudioCapture::Pause()
+{
+	tsk_mutex_lock(m_hMutex);
+
+	m_bPaused = true;
+
+	tsk_mutex_unlock(m_hMutex);
+
+	return 0;
+}
+
+// Capture loop (runs on the WinRT thread pool, see Start()).
+// Waits on the capture event; on each wake it drains all pending WASAPI
+// packets into the speex ring buffer, then delivers fixed-size chunks
+// (m_ring.chunck.size bytes) to the registered media callback.
+// Exits when the shutdown event fires, m_bStarted is cleared, or an HRESULT
+// failure occurs.
+void Doubango::VoIP::AudioCapture::AsyncThread(Windows::Foundation::IAsyncAction^ operation)
+{
+	HRESULT hr = S_OK;
+	BYTE* pbData = nullptr;
+	UINT32 nFrames = 0;
+	DWORD dwFlags = 0;
+	UINT32 incomingBufferSize;
+	INT32 avail;
+	UINT32 nNextPacketSize;
+
+	HANDLE eventHandles[] = {
+		m_hCaptureEvent, // WAIT_OBJECT0
+		m_hShutdownEvent // WAIT_OBJECT1
+	};
+
+	WASAPI_DEBUG_INFO("#WASAPI: __record_thread -- START");
+
+	// Helper: the mutex is held inside the loop body, so any break must
+	// release it first.
+	#define BREAK_WHILE tsk_mutex_unlock(m_hMutex); break;
+
+	while(m_bStarted && SUCCEEDED(hr)){
+		DWORD waitResult = WaitForMultipleObjectsEx(SIZEOF_ARRAY(eventHandles), eventHandles, FALSE, INFINITE, FALSE);
+
+		tsk_mutex_lock(m_hMutex);
+
+		// re-check under the lock: Stop() may have run while we were waiting
+		if(!m_bStarted){
+			BREAK_WHILE;
+		}
+
+		if(waitResult == WAIT_OBJECT_0 && m_callback.fn) {
+			hr = m_pClient->GetNextPacketSize(&nNextPacketSize);
+			// drain every pending packet before waiting again
+			while(SUCCEEDED(hr) && nNextPacketSize >0){
+				hr = m_pClient->GetBuffer(&pbData, &nFrames, &dwFlags, nullptr, nullptr);
+				if(SUCCEEDED(hr) && pbData && nFrames){
+					// silent packets are dropped (not written to the ring)
+					if((dwFlags & AUDCLNT_BUFFERFLAGS_SILENT) != AUDCLNT_BUFFERFLAGS_SILENT){
+						incomingBufferSize = nFrames * m_nSourceFrameSizeInBytes;
+						speex_buffer_write(m_ring.buffer, pbData, incomingBufferSize);
+						avail = speex_buffer_get_available(m_ring.buffer);
+						// deliver as many full chunks as the ring holds
+						while (m_bStarted && avail >= (INT32)m_ring.chunck.size) {
+							avail -= speex_buffer_read(m_ring.buffer, m_ring.chunck.buffer, m_ring.chunck.size);
+							m_callback.fn(m_callback.pcData, m_ring.chunck.buffer, m_ring.chunck.size);
+						}
+					}
+
+					if (SUCCEEDED(hr)){
+						hr = m_pClient->ReleaseBuffer(nFrames);
+					}
+					if (SUCCEEDED(hr)){
+						hr = m_pClient->GetNextPacketSize(&nNextPacketSize);
+					}
+				}
+			}
+		}
+		else if(waitResult != WAIT_OBJECT_0){
+			// shutdown event (WAIT_OBJECT_0 + 1) or wait failure
+			BREAK_WHILE;
+		}
+
+		tsk_mutex_unlock(m_hMutex);
+	}// end-of-while
+
+	if (!SUCCEEDED(hr)){
+		tdav_win32_print_error("AsyncThread: ", hr);
+	}
+
+
+	WASAPI_DEBUG_INFO("WASAPI: __record_thread(%s) -- STOP", SUCCEEDED(hr) ? "OK": "NOK");
+}
+
+
+
+
+
+
+
+//
+// WaveAPI producer object definition
+//
+/* constructor */
+// tsk_object constructor: initializes the audio-producer base and creates
+// the C++/CX AudioCapture instance that does the actual WASAPI work.
+static tsk_object_t* tdav_producer_wasapi_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_producer_wasapi_t *wasapi = (tdav_producer_wasapi_t*)self;
+	if(wasapi){
+		/* init base */
+		tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(wasapi));
+		/* init self */
+
+		wasapi->audioCapture = ref new Doubango::VoIP::AudioCapture();
+	}
+	return self;
+}
+/* destructor */
+// tsk_object destructor: stops capture, deinitializes the base and destroys
+// the AudioCapture instance.
+static tsk_object_t* tdav_producer_wasapi_dtor(tsk_object_t * self)
+{
+	tdav_producer_wasapi_t *wasapi = (tdav_producer_wasapi_t*)self;
+	if(wasapi){
+		/* stop */
+		tdav_producer_wasapi_stop((tmedia_producer_t*)self);
+		/* deinit base */
+		tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(wasapi));
+		/* deinit self */
+		if(wasapi->audioCapture){
+			// NOTE(review): 'delete' on a C++/CX hat pointer runs the
+			// destructor without freeing ref-counted storage -- confirm this
+			// is the intended release pattern here.
+			delete wasapi->audioCapture;
+			wasapi->audioCapture = nullptr;
+		}
+	}
+
+	return self;
+}
+/* object definition */
+/* object definition: size, constructor, destructor, comparator */
+static const tsk_object_def_t tdav_producer_wasapi_def_s =
+{
+	sizeof(tdav_producer_wasapi_t),
+	tdav_producer_wasapi_ctor,
+	tdav_producer_wasapi_dtor,
+	tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+/* plugin definition: media type, description, then the producer interface
+ * entry points (set/prepare/start/pause/stop) */
+static const tmedia_producer_plugin_def_t tdav_producer_wasapi_plugin_def_s =
+{
+	&tdav_producer_wasapi_def_s,
+
+	tmedia_audio,
+	"Microsoft Windows Audio Session API (WASAPI) producer",
+
+	tdav_producer_wasapi_set,
+	tdav_producer_wasapi_prepare,
+	tdav_producer_wasapi_start,
+	tdav_producer_wasapi_pause,
+	tdav_producer_wasapi_stop
+};
+// Exported plugin descriptor registered by the tinyDAV core.
+const tmedia_producer_plugin_def_t *tdav_producer_wasapi_plugin_def_t = &tdav_producer_wasapi_plugin_def_s;
+
+
+
+
+#endif /* HAVE_WASAPI */
diff --git a/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
new file mode 100644
index 0000000..1883fa4
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
@@ -0,0 +1,402 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_waveapi.c
+ * @brief Audio Consumer for Win32 and WinCE platforms.
+ *
+ */
+#include "tinydav/audio/waveapi/tdav_consumer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_consumer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT];
+
+ waveOutGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+/* Frees the WAVEHDR (and its data buffer) at the given slot.
+ * Returns 0 on success, -1 on invalid parameters. */
+static int free_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+	if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* FIX: the header may never have been created (e.g. destructor running
+	 * before prepare()); dereferencing the NULL slot crashed here. */
+	if(consumer->hWaveHeaders[index]){
+		TSK_FREE(consumer->hWaveHeaders[index]->lpData);
+		TSK_FREE(consumer->hWaveHeaders[index]);
+	}
+
+	return 0;
+}
+
+/* (Re)allocates the WAVEHDR at the given slot, sized for one notification
+ * period (consumer->bytes_per_notif bytes).
+ * Returns 0 on success, negative on invalid parameters or OOM. */
+static int create_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+	if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(consumer->hWaveHeaders[index]){
+		free_wavehdr(consumer, index);
+	}
+
+	/* FIX: allocation results were dereferenced unchecked (NULL deref on OOM) */
+	consumer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+	if(!consumer->hWaveHeaders[index]){
+		TSK_DEBUG_ERROR("Failed to allocate WAVEHDR");
+		return -2;
+	}
+	consumer->hWaveHeaders[index]->lpData = tsk_calloc(1, consumer->bytes_per_notif);
+	if(!consumer->hWaveHeaders[index]->lpData){
+		TSK_DEBUG_ERROR("Failed to allocate WAVEHDR data");
+		TSK_FREE(consumer->hWaveHeaders[index]);
+		return -3;
+	}
+	consumer->hWaveHeaders[index]->dwBufferLength = (DWORD)consumer->bytes_per_notif;
+	consumer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+	consumer->hWaveHeaders[index]->dwLoops = 0x01;
+	consumer->hWaveHeaders[index]->dwUser = index;
+
+	return 0;
+}
+
+/* Prepares and queues the WAVEHDR at the given slot for playback.
+ * Returns 0 on success, negative on error (details are logged). */
+static int write_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+	MMRESULT result;
+
+	if(!consumer || !consumer->hWaveHeaders[index] || !consumer->hWaveOut){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	result = waveOutPrepareHeader(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveOutPrepareHeader");
+		return -2;
+	}
+
+	result = waveOutWrite(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveOutWrite");
+		return -3;
+	}
+
+	return 0;
+}
+
+/* Refills a completed WAVEHDR with decoded audio from the jitter buffer
+ * (silence when none is available) and requeues it for playback.
+ * Called from the playback thread whenever WHDR_DONE is set.
+ * Returns 0 on success, negative on error. */
+static int play_wavehdr(tdav_consumer_waveapi_t* consumer, LPWAVEHDR lpHdr)
+{
+	MMRESULT result;
+	tsk_size_t out_size;
+
+	if(!consumer || !lpHdr || !consumer->hWaveOut){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	result = waveOutUnprepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveOutUnprepareHeader");
+		return -2;
+	}
+
+	//
+	//
+	// Fill lpHdr->Data with decoded data
+	//
+	//
+	// tdav_consumer_audio_get() writes directly into lpHdr->lpData
+	if((out_size = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer), lpHdr->lpData, lpHdr->dwBufferLength))){
+		//memcpy(lpHdr->lpData, data, lpHdr->dwBufferLength);
+		//TSK_FREE(data);
+	}
+	else{
+		/* Put silence */
+		memset(lpHdr->lpData, 0, lpHdr->dwBufferLength);
+	}
+
+	// do not requeue the buffer once the consumer has been stopped
+	if(!consumer->started){
+		return 0;
+	}
+
+	result = waveOutPrepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveOutPrepareHeader");
+		return -3;
+	}
+
+	result = waveOutWrite(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveOutWrite");
+		return -4;
+	}
+
+	return 0;
+}
+
+/* Playback thread: waits on events[0] (device callback: a buffer finished)
+ * and events[1] (stop request). On each wake, requeues every WHDR_DONE
+ * buffer via play_wavehdr() under the critical section. */
+static void* TSK_STDCALL __playback_thread(void *param)
+{
+	tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)param;
+	DWORD dwEvent;
+	tsk_size_t i;
+
+	TSK_DEBUG_INFO("__playback_thread -- START");
+
+	SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+	for(;;){
+		dwEvent = WaitForMultipleObjects(2, consumer->events, FALSE, INFINITE);
+
+		if (dwEvent == WAIT_OBJECT_0){
+			EnterCriticalSection(&consumer->cs);
+			for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+				if(consumer->hWaveHeaders[i] && (consumer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+					play_wavehdr(consumer, consumer->hWaveHeaders[i]);
+				}
+			}
+			LeaveCriticalSection(&consumer->cs);
+		}
+		else {
+			/* FIX: stop event (WAIT_OBJECT_0 + 1) or wait failure -- the old
+			 * code only broke on the literal value 1 and busy-spun forever
+			 * on WAIT_FAILED */
+			break;
+		}
+	}
+
+	TSK_DEBUG_INFO("__playback_thread -- STOP");
+
+
+	return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Consumer Interface ================= */
+/* Configures the consumer from the negotiated codec: PCM format, bytes per
+ * notification period (ptime), and the playback WAVEHDR buffers.
+ * Returns 0 on success, -1 on invalid parameters. */
+int tdav_consumer_waveapi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+	tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+	tsk_size_t i;
+
+	/* FIX: was "!consumer || !codec && codec->plugin" -- operator precedence
+	 * made it dereference a NULL codec and accept a codec without plugin */
+	if(!consumer || !codec || !codec->plugin){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+	TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+	TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+	/* codec should have ptime */
+
+
+	/* Format: linear PCM; prefer the (resampled) output rate when set */
+	ZeroMemory(&consumer->wfx, sizeof(WAVEFORMATEX));
+	consumer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+	consumer->wfx.nChannels = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+	consumer->wfx.nSamplesPerSec = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+	consumer->wfx.wBitsPerSample = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+	consumer->wfx.nBlockAlign = (consumer->wfx.nChannels * consumer->wfx.wBitsPerSample/8);
+	consumer->wfx.nAvgBytesPerSec = (consumer->wfx.nSamplesPerSec * consumer->wfx.nBlockAlign);
+
+	/* Average bytes (count) for each notification */
+	consumer->bytes_per_notif = ((consumer->wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(consumer)->audio.ptime)/1000);
+
+	/* create buffers */
+	for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(consumer->hWaveHeaders[0]); i++){
+		create_wavehdr(consumer, i);
+	}
+
+	return 0;
+}
+
+/* Opens the default playback device, queues the prepared buffers and spawns
+ * the playback thread. Returns 0 on success, negative on error. */
+int tdav_consumer_waveapi_start(tmedia_consumer_t* self)
+{
+	tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+	MMRESULT result;
+	tsk_size_t i;
+
+	if(!consumer){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(consumer->started || consumer->hWaveOut){
+		TSK_DEBUG_WARN("Consumer already started");
+		return 0;
+	}
+
+	/* create events: [0] = device callback, [1] = stop request */
+	if(!consumer->events[0]){
+		consumer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+	}
+	if(!consumer->events[1]){
+		consumer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+	}
+
+	/* open. FIX: the callback event must be passed as DWORD_PTR -- casting
+	 * the HANDLE to DWORD truncates it on 64-bit builds. */
+	result = waveOutOpen((HWAVEOUT *)&consumer->hWaveOut, WAVE_MAPPER, &consumer->wfx, (DWORD_PTR)consumer->events[0], (DWORD_PTR)consumer, CALLBACK_EVENT);
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveOutOpen");
+		return -2;
+	}
+
+	/* write */
+	for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(consumer->hWaveHeaders[0]); i++){
+		write_wavehdr(consumer, i);
+	}
+
+	/* start thread */
+	consumer->started = tsk_true;
+	tsk_thread_create(&consumer->tid[0], __playback_thread, consumer);
+
+	return 0;
+}
+
+/* Pushes one already-decoded audio frame into the jitter buffer; the
+ * playback thread pulls it back out in play_wavehdr(). */
+int tdav_consumer_waveapi_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+	tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+
+	if(consumer && buffer && size){
+		/* buffer is already decoded */
+		return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+	}
+	TSK_DEBUG_ERROR("Invalid parameter");
+	return -1;
+}
+
+/* No-op pause: playback keeps running; present only to satisfy the
+ * consumer plugin interface. Returns 0, or -1 on NULL self. */
+int tdav_consumer_waveapi_pause(tmedia_consumer_t* self)
+{
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	return 0;
+}
+
+/* Stops the consumer: signals events[1] so __playback_thread exits, joins
+ * it, then resets the device (returns all queued buffers).
+ * NOTE(review): hWaveOut is intentionally NOT closed here (the destructor
+ * closes it), but since start() refuses to run while hWaveOut is set, a
+ * stopped consumer cannot be restarted -- confirm this is intended. */
+int tdav_consumer_waveapi_stop(tmedia_consumer_t* self)
+{
+	tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+	MMRESULT result;
+
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(!consumer->started){
+		TSK_DEBUG_WARN("Consumer not started");
+		return 0;
+	}
+
+	/* stop thread */
+	if(consumer->tid[0]){
+		SetEvent(consumer->events[1]);
+		tsk_thread_join(&(consumer->tid[0]));
+	}
+
+	/* should be done here */
+	consumer->started = tsk_false;
+
+	if(consumer->hWaveOut && ((result = waveOutReset(consumer->hWaveOut)) != MMSYSERR_NOERROR)){
+		print_last_error(result, "waveOutReset");
+	}
+
+	return 0;
+}
+
+
+//
+// WaveAPI consumer object definition
+//
+/* constructor */
+// tsk_object constructor: initializes the audio-consumer base and the
+// critical section shared with the playback thread.
+static tsk_object_t* tdav_consumer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_consumer_waveapi_t *consumer = self;
+	if(consumer){
+		/* init base */
+		tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+		/* init self */
+		InitializeCriticalSection(&consumer->cs);
+	}
+	return self;
+}
+/* destructor */
+// tsk_object destructor: stops playback if running, then frees the wave
+// headers, closes the device and event handles, and deletes the critical
+// section.
+static tsk_object_t* tdav_consumer_waveapi_dtor(tsk_object_t * self)
+{
+	tdav_consumer_waveapi_t *consumer = self;
+	if(consumer){
+		tsk_size_t i;
+
+		/* stop */
+		if(consumer->started){
+			tdav_consumer_waveapi_stop(self);
+		}
+
+		/* deinit base */
+		tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+		/* deinit self */
+		for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+			free_wavehdr(consumer, i);
+		}
+		if(consumer->hWaveOut){
+			waveOutClose(consumer->hWaveOut);
+		}
+		if(consumer->events[0]){
+			CloseHandle(consumer->events[0]);
+		}
+		if(consumer->events[1]){
+			CloseHandle(consumer->events[1]);
+		}
+		DeleteCriticalSection(&consumer->cs);
+	}
+
+	return self;
+}
+/* object definition */
+/* object definition: size, constructor, destructor, comparator */
+static const tsk_object_def_t tdav_consumer_waveapi_def_s =
+{
+	sizeof(tdav_consumer_waveapi_t),
+	tdav_consumer_waveapi_ctor,
+	tdav_consumer_waveapi_dtor,
+	tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+/* plugin definition: media type, description, then the consumer interface
+ * entry points (set/prepare/start/consume/pause/stop) */
+static const tmedia_consumer_plugin_def_t tdav_consumer_waveapi_plugin_def_s =
+{
+	&tdav_consumer_waveapi_def_s,
+
+	tmedia_audio,
+	"Microsoft WaveAPI consumer",
+
+	tdav_consumer_waveapi_set,
+	tdav_consumer_waveapi_prepare,
+	tdav_consumer_waveapi_start,
+	tdav_consumer_waveapi_consume,
+	tdav_consumer_waveapi_pause,
+	tdav_consumer_waveapi_stop
+};
+// Exported plugin descriptor registered by the tinyDAV core.
+const tmedia_consumer_plugin_def_t *tdav_consumer_waveapi_plugin_def_t = &tdav_consumer_waveapi_plugin_def_s;
+
+#endif /* HAVE_WAVE_API */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
new file mode 100644
index 0000000..d077790
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
@@ -0,0 +1,393 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_waveapi.c
+ * @brief Audio Producer for Win32 and WinCE platforms.
+ */
+#include "tinydav/audio/waveapi/tdav_producer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_producer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT];
+
+ waveInGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+/* Frees the WAVEHDR (and its data buffer) at the given slot.
+ * Returns 0 on success, -1 on invalid parameters. */
+static int free_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+	if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* FIX: the header may never have been created (e.g. destructor running
+	 * before prepare()); dereferencing the NULL slot crashed here. */
+	if(producer->hWaveHeaders[index]){
+		TSK_FREE(producer->hWaveHeaders[index]->lpData);
+		TSK_FREE(producer->hWaveHeaders[index]);
+	}
+
+	return 0;
+}
+
+/* (Re)allocates the WAVEHDR at the given slot, sized for one notification
+ * period (producer->bytes_per_notif bytes).
+ * Returns 0 on success, negative on invalid parameters or OOM. */
+static int create_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+	if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(producer->hWaveHeaders[index]){
+		free_wavehdr(producer, index);
+	}
+
+	/* FIX: allocation results were dereferenced unchecked (NULL deref on OOM) */
+	producer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+	if(!producer->hWaveHeaders[index]){
+		TSK_DEBUG_ERROR("Failed to allocate WAVEHDR");
+		return -2;
+	}
+	producer->hWaveHeaders[index]->lpData = tsk_calloc(1, producer->bytes_per_notif);
+	if(!producer->hWaveHeaders[index]->lpData){
+		TSK_DEBUG_ERROR("Failed to allocate WAVEHDR data");
+		TSK_FREE(producer->hWaveHeaders[index]);
+		return -3;
+	}
+	producer->hWaveHeaders[index]->dwBufferLength = (DWORD)producer->bytes_per_notif;
+	producer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+	producer->hWaveHeaders[index]->dwLoops = 0x01;
+	producer->hWaveHeaders[index]->dwUser = index;
+
+	return 0;
+}
+
+/* Prepares and queues the WAVEHDR at the given slot for capture.
+ * Returns 0 on success, negative on error (details are logged). */
+static int add_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+	MMRESULT result;
+
+	if(!producer || !producer->hWaveHeaders[index] || !producer->hWaveIn){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	result = waveInPrepareHeader(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveInPrepareHeader");
+		return -2;
+	}
+
+	result = waveInAddBuffer(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveInAddBuffer");
+		return -3;
+	}
+
+	return 0;
+}
+
+/* Delivers a filled capture buffer to the encoder callback and requeues it
+ * for the next capture cycle. Called from the record thread whenever
+ * WHDR_DONE is set. Returns 0 on success, negative on error. */
+static int record_wavehdr(tdav_producer_waveapi_t* producer, LPWAVEHDR lpHdr)
+{
+	MMRESULT result;
+
+	if(!producer || !lpHdr || !producer->hWaveIn){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	//
+	// Alert the session that there is new data to send over the network
+	//
+	if(TMEDIA_PRODUCER(producer)->enc_cb.callback){
+#if 0
+		{
+			static FILE* f = NULL;
+			if(!f) f = fopen("./waveapi_producer.raw", "w+");
+			fwrite(lpHdr->lpData, 1, lpHdr->dwBytesRecorded, f);
+		}
+#endif
+		TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data, lpHdr->lpData, lpHdr->dwBytesRecorded);
+	}
+
+	// do not requeue the buffer once the producer has been stopped
+	if(!producer->started){
+		return 0;
+	}
+
+	result = waveInUnprepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveInUnprepareHeader");
+		return -2;
+	}
+
+	result = waveInPrepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveInPrepareHeader");
+		return -3;
+	}
+
+	result = waveInAddBuffer(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveInAddBuffer");
+		return -4;
+	}
+
+	return 0;
+}
+
+/* Record thread: waits on events[0] (device callback: a buffer is full)
+ * and events[1] (stop request). On each wake, delivers and requeues every
+ * WHDR_DONE buffer via record_wavehdr() under the critical section. */
+static void* TSK_STDCALL __record_thread(void *param)
+{
+	tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)param;
+	DWORD dwEvent;
+	tsk_size_t i;
+
+	TSK_DEBUG_INFO("__record_thread -- START");
+
+	// SetPriorityClass(GetCurrentThread(), REALTIME_PRIORITY_CLASS);
+
+	for(;;){
+		dwEvent = WaitForMultipleObjects(2, producer->events, FALSE, INFINITE);
+
+		if (dwEvent == WAIT_OBJECT_0){
+			EnterCriticalSection(&producer->cs);
+			for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(producer->hWaveHeaders[0]); i++){
+				if(producer->hWaveHeaders[i] && (producer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+					record_wavehdr(producer, producer->hWaveHeaders[i]);
+				}
+			}
+			LeaveCriticalSection(&producer->cs);
+		}
+		else {
+			/* FIX: stop event (WAIT_OBJECT_0 + 1) or wait failure -- the old
+			 * code only broke on the literal value 1 and busy-spun forever
+			 * on WAIT_FAILED */
+			break;
+		}
+	}
+
+	TSK_DEBUG_INFO("__record_thread() -- STOP");
+
+
+	return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Producer Interface ================= */
+/* Configures the producer from the negotiated codec: PCM format, bytes per
+ * notification period (ptime), and the capture WAVEHDR buffers.
+ * Returns 0 on success, -1 on invalid parameters. */
+int tdav_producer_waveapi_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+	tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+	tsk_size_t i;
+
+	/* FIX: was "!producer || !codec && codec->plugin" -- operator precedence
+	 * made it dereference a NULL codec and accept a codec without plugin */
+	if(!producer || !codec || !codec->plugin){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+	TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+	TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+	/* codec should have ptime */
+
+
+	/* Format: linear PCM at the codec's encoding rate */
+	ZeroMemory(&producer->wfx, sizeof(WAVEFORMATEX));
+	producer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+	producer->wfx.nChannels = TMEDIA_PRODUCER(producer)->audio.channels;
+	producer->wfx.nSamplesPerSec = TMEDIA_PRODUCER(producer)->audio.rate;
+	producer->wfx.wBitsPerSample = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+	producer->wfx.nBlockAlign = (producer->wfx.nChannels * producer->wfx.wBitsPerSample/8);
+	producer->wfx.nAvgBytesPerSec = (producer->wfx.nSamplesPerSec * producer->wfx.nBlockAlign);
+
+	/* Average bytes (count) for each notification */
+	producer->bytes_per_notif = ((producer->wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(producer)->audio.ptime)/1000);
+
+	/* create buffers */
+	for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(producer->hWaveHeaders[0]); i++){
+		create_wavehdr(producer, i);
+	}
+
+	return 0;
+}
+
+/* Opens the capture device, queues the capture buffers, starts the device
+ * and spawns the record thread. Returns 0 on success, negative on error. */
+int tdav_producer_waveapi_start(tmedia_producer_t* self)
+{
+	tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+	MMRESULT result;
+	tsk_size_t i;
+
+	if(!producer){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(producer->started || producer->hWaveIn){
+		TSK_DEBUG_WARN("Producer already started");
+		return 0;
+	}
+
+	/* create events: [0] = device callback, [1] = stop request */
+	if(!producer->events[0]){
+		producer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+	}
+	if(!producer->events[1]){
+		producer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+	}
+
+	/* open. FIX: the callback event must be passed as DWORD_PTR -- casting
+	 * the HANDLE to DWORD truncates it on 64-bit builds. */
+	result = waveInOpen((HWAVEIN *)&producer->hWaveIn, /*WAVE_MAPPER*/0, &producer->wfx, (DWORD_PTR)producer->events[0], (DWORD_PTR)producer, CALLBACK_EVENT);
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveInOpen");
+		return -2;
+	}
+
+	/* FIX: queue the capture buffers BEFORE waveInStart() so the very first
+	 * audio is not lost (buffers were previously added after starting) */
+	for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+		add_wavehdr(producer, i);
+	}
+
+	/* start */
+	result = waveInStart(producer->hWaveIn);
+	if(result != MMSYSERR_NOERROR){
+		print_last_error(result, "waveInStart");
+		return -2;
+	}
+
+	/* start thread */
+	producer->started = tsk_true;
+	tsk_thread_create(&producer->tid[0], __record_thread, producer);
+
+	return 0;
+}
+
+/* No-op pause: capture keeps running; present only to satisfy the
+ * producer plugin interface. Returns 0, or -1 on NULL self. */
+int tdav_producer_waveapi_pause(tmedia_producer_t* self)
+{
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	return 0;
+}
+
+/* Stops the producer: signals events[1] so __record_thread exits, joins it,
+ * then resets and closes the capture device.
+ * Returns 0 on success, -1 on invalid parameter. */
+int tdav_producer_waveapi_stop(tmedia_producer_t* self)
+{
+	tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+	MMRESULT result;
+
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(!producer->started){
+		TSK_DEBUG_WARN("Producer not started");
+		return 0;
+	}
+
+	/* stop thread */
+	if(producer->tid[0]){
+		SetEvent(producer->events[1]);
+		tsk_thread_join(&(producer->tid[0]));
+	}
+
+	/* should be done here */
+	producer->started = tsk_false;
+
+	if(producer->hWaveIn){
+		/* FIX: with the old "reset || close" short-circuit, waveInClose()
+		 * was skipped whenever waveInReset() failed, and the handle was left
+		 * set, so the destructor closed it a second time. */
+		if((result = waveInReset(producer->hWaveIn)) != MMSYSERR_NOERROR){
+			print_last_error(result, "waveInReset");
+		}
+		if((result = waveInClose(producer->hWaveIn)) != MMSYSERR_NOERROR){
+			print_last_error(result, "waveInClose");
+		}
+		producer->hWaveIn = tsk_null;
+	}
+
+	return 0;
+}
+
+
+//
+// WaveAPI producer object definition
+//
+/* constructor */
+// tsk_object constructor: initializes the audio-producer base and the
+// critical section shared with the record thread.
+static tsk_object_t* tdav_producer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_producer_waveapi_t *producer = self;
+	if(producer){
+		/* init base */
+		tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+		/* init self */
+		InitializeCriticalSection(&producer->cs);
+	}
+	return self;
+}
+/* destructor */
+// tsk_object destructor: stops capture if running, then frees the wave
+// headers, closes the device and event handles, and deletes the critical
+// section.
+static tsk_object_t* tdav_producer_waveapi_dtor(tsk_object_t * self)
+{
+	tdav_producer_waveapi_t *producer = self;
+	if(producer){
+		tsk_size_t i;
+
+		/* stop */
+		if(producer->started){
+			tdav_producer_waveapi_stop(self);
+		}
+
+		/* deinit base */
+		tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+		/* deinit self */
+		for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+			free_wavehdr(producer, i);
+		}
+		if(producer->hWaveIn){
+			waveInClose(producer->hWaveIn);
+		}
+		if(producer->events[0]){
+			CloseHandle(producer->events[0]);
+		}
+		if(producer->events[1]){
+			CloseHandle(producer->events[1]);
+		}
+		DeleteCriticalSection(&producer->cs);
+	}
+
+	return self;
+}
+/* object definition */
+/* object definition: size, constructor, destructor, comparator */
+static const tsk_object_def_t tdav_producer_waveapi_def_s =
+{
+	sizeof(tdav_producer_waveapi_t),
+	tdav_producer_waveapi_ctor,
+	tdav_producer_waveapi_dtor,
+	tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+/* plugin definition: media type, description, then the producer interface
+ * entry points (set/prepare/start/pause/stop) */
+static const tmedia_producer_plugin_def_t tdav_producer_waveapi_plugin_def_s =
+{
+	&tdav_producer_waveapi_def_s,
+
+	tmedia_audio,
+	"Microsoft WaveAPI producer",
+
+	tdav_producer_waveapi_set,
+	tdav_producer_waveapi_prepare,
+	tdav_producer_waveapi_start,
+	tdav_producer_waveapi_pause,
+	tdav_producer_waveapi_stop
+};
+// Exported plugin descriptor registered by the tinyDAV core.
+const tmedia_producer_plugin_def_t *tdav_producer_waveapi_plugin_def_t = &tdav_producer_waveapi_plugin_def_s;
+#endif /* HAVE_WAVE_API */ \ No newline at end of file
diff --git a/tinyDAV/src/bfcp/tdav_session_bfcp.c b/tinyDAV/src/bfcp/tdav_session_bfcp.c
new file mode 100644
index 0000000..07e770b
--- /dev/null
+++ b/tinyDAV/src/bfcp/tdav_session_bfcp.c
@@ -0,0 +1,741 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_bfcp.c
+ * @brief The The Binary Floor Control Protocol (BFCP, rfc4582) session.
+ */
+#include "tinydav/bfcp/tdav_session_bfcp.h"
+
+#if !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP
+
+#include "tinybfcp/tbfcp_session.h"
+#include "tinybfcp/tbfcp_pkt.h"
+#include "tinybfcp/tbfcp_utils.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h" /* TSK_FREE */
+#include "tsk_debug.h"
+
+/*
+ * https://tools.ietf.org/html/rfc4574
+ * https://tools.ietf.org/html/rfc4582
+ * https://tools.ietf.org/html/rfc4583
+ * http://tools.ietf.org/html/rfc4796
+ * https://tools.ietf.org/html/draft-ietf-bfcpbis-rfc4582bis-1
+*/
+
+// BFCP media-session state. Extends the generic tmedia BFCP session with the
+// tinyBFCP transport session, the in-flight packets kept for transaction
+// matching, negotiation flags, addressing info, and the RFC 4583 SDP attributes.
+typedef struct tdav_session_bfcp_s
+{
+ TMEDIA_DECLARE_SESSION_BFCP;
+
+ struct tbfcp_session_s* p_bfcp_s; // underlying tinyBFCP session (socket/transport + ids)
+ struct tbfcp_pkt_s* p_pkt_FloorRequest; // last FloorRequest sent; matched against incoming status
+ struct tbfcp_pkt_s* p_pkt_FloorRelease; // FloorRelease paired with the FloorRequest above
+ struct tbfcp_pkt_s* p_pkt_Hello; // outstanding Hello awaiting HelloAck
+
+ tsk_bool_t b_started; // set by start(), cleared by stop()
+ tsk_bool_t b_use_ipv6;
+ tsk_bool_t b_revoked_handled; // ensures a Revoke is re-negotiated only once
+ tsk_bool_t b_conf_idf_changed; // set by set_ro() when confid/userid/floorid/mstrm changed
+ tsk_bool_t b_stop_to_reconf; // stop() is part of a re-INVITE: keep the FloorRequest
+
+ char* p_local_ip;
+ //uint16_t local_port;
+
+ /* NAT Traversal context */
+ struct tnet_nat_ctx_s* p_natt_ctx;
+
+ char* p_remote_ip;
+ uint16_t u_remote_port;
+
+ // https://tools.ietf.org/html/rfc4583 attributes
+ struct {
+ char* confid;
+ char* floorid;
+ char* mstrm;
+ char* userid;
+ } rfc4583;
+}
+tdav_session_bfcp_t;
+
+static int _tdav_session_bfcp_notif(const struct tbfcp_session_event_xs *e);
+static int _tdav_session_bfcp_send_Hello(tdav_session_bfcp_t* p_bfcp);
+
+/* ============ Plugin interface ================= */
+
+// Session "set" hook: applies a single tmedia parameter to the BFCP session.
+// Recognized keys: "remote-ip"/"local-ip"/"local-ipver" (strings),
+// "natt-ctx" (object, ref-counted), "stop-to-reconf" (int32).
+// Returns 0 on success, -1 on invalid argument. Unknown keys are ignored.
+// NOTE(review): 'param' is dereferenced without a null check while 'p_self'
+// is checked -- presumably the caller guarantees it; confirm.
+static int _tdav_session_bfcp_set(tmedia_session_t* p_self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("tdav_session_bfcp_set");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (param->value_type == tmedia_pvt_pchar) {
+ if (tsk_striequals(param->key, "remote-ip")) {
+ // only if no ip associated to the "m=" line
+ if (param->value && !p_bfcp->p_remote_ip) {
+ p_bfcp->p_remote_ip = tsk_strdup(param->value);
+ }
+ }
+ else if (tsk_striequals(param->key, "local-ip")) {
+ tsk_strupdate(&p_bfcp->p_local_ip, param->value);
+ }
+ else if (tsk_striequals(param->key, "local-ipver")) {
+ p_bfcp->b_use_ipv6 = tsk_striequals(param->value, "ipv6");
+ }
+ }
+ else if (param->value_type == tmedia_pvt_pobject) {
+ if (tsk_striequals(param->key, "natt-ctx")) {
+ // take a new reference; the old context (if any) is released first
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_natt_ctx);
+ p_bfcp->p_natt_ctx = tsk_object_ref(param->value);
+ }
+ }
+ else if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "stop-to-reconf")) {
+ p_bfcp->b_stop_to_reconf = TSK_TO_INT32((uint8_t*)param->value) ? tsk_true : tsk_false;
+ }
+ }
+
+
+ return ret;
+}
+
+// Session "get" hook: no BFCP parameter is currently gettable, so this only
+// validates the argument and returns 0. 'p_bfcp' is assigned but unused --
+// kept as a placeholder for future parameters.
+static int _tdav_session_bfcp_get(tmedia_session_t* p_self, tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_get");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ return ret;
+}
+
+// Session "prepare" hook: lazily creates the tinyBFCP session (default
+// transport), wires the NAT context and the incoming-event callback, and
+// prepares the transport. Also drops any stale Hello/FloorRequest/FloorRelease
+// packets so a re-negotiation starts from a clean transaction state.
+// Returns 0 on success, a tinyBFCP error code otherwise.
+static int _tdav_session_bfcp_prepare(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_prepare");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (!p_bfcp->p_bfcp_s) {
+ enum tnet_socket_type_e e_socket_type = kBfcpTransportDefault;
+ if ((ret = tbfcp_session_create(e_socket_type, p_bfcp->p_local_ip, &p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+ }
+ if ((ret = tbfcp_session_set_natt_ctx(p_bfcp->p_bfcp_s, p_bfcp->p_natt_ctx))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_prepare(p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_set_callback(p_bfcp->p_bfcp_s, _tdav_session_bfcp_notif, p_bfcp))) {
+ return ret;
+ }
+ // reset transaction bookkeeping for the (re-)negotiation
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_Hello);
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRequest);
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRelease);
+ p_bfcp->b_revoked_handled = tsk_false;
+
+ return ret;
+}
+
+// Session "start" hook: points the tinyBFCP session at the negotiated remote
+// address, starts the transport, and kicks off the BFCP handshake by sending
+// Hello (the FloorRequest is sent later, from the notif callback, once the
+// HelloAck arrives). Returns 0 on success.
+static int _tdav_session_bfcp_start(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_start");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if ((ret = tbfcp_session_set_remote_address(p_bfcp->p_bfcp_s, p_bfcp->p_remote_ip, p_bfcp->u_remote_port))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_start(p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+ if ((ret = _tdav_session_bfcp_send_Hello(p_bfcp))) {
+ return ret;
+ }
+
+ p_bfcp->b_started = tsk_true;
+
+ return ret;
+}
+
+// Session "pause" hook: delegates to tbfcp_session_pause() when a tinyBFCP
+// session exists; a missing session is not an error.
+static int _tdav_session_bfcp_pause(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_pause");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (p_bfcp->p_bfcp_s && (ret = tbfcp_session_pause(p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+
+ return ret;
+}
+
+// Session "stop" hook. Two modes:
+// - full stop (default): forget the conference id so the next offer is
+//   treated as a new conference; the FloorRelease send is intentionally
+//   disabled (commented out) -- some gateways re-use the request instead.
+// - stop-to-reconf: keep the FloorRequest/rfc4583 state so the session can
+//   resume after a re-INVITE; the flag is one-shot and reset below.
+static int _tdav_session_bfcp_stop(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_stop");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (!p_bfcp->b_stop_to_reconf) { // If stop-to-reconf then do not release the FloorRequest but reuse it
+ if (p_bfcp->b_started) {
+ /*if (p_bfcp->p_bfcp_s)*/ {
+ /*if (!p_bfcp->p_pkt_FloorRelease) {
+ ret = tbfcp_session_create_pkt_FloorRelease(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_FloorRelease);
+ }
+ if (ret == 0 && p_bfcp->p_pkt_FloorRelease && (ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_FloorRelease))) {
+ //!\ do not exit
+ }*/
+ }
+ }
+ // clear the remembered conference id so set_ro() sees the next offer as changed
+ tsk_strupdate(&p_bfcp->rfc4583.confid, "");
+ }
+
+ if (p_bfcp->p_bfcp_s) {
+ ret = tbfcp_session_stop(p_bfcp->p_bfcp_s);
+ }
+
+ p_bfcp->b_started = tsk_false;
+ p_bfcp->b_stop_to_reconf = tsk_false; // reset
+
+ return ret;
+}
+
+// Builds (or refreshes) the local SDP "m=" line for the BFCP stream per
+// RFC 4583: transport address/profile from the tinyBFCP session plus the
+// "floorctrl" and "setup" attributes. Returns the cached header unchanged
+// when neither the remote offer nor the local state changed; tsk_null on error.
+static const tsdp_header_M_t* _tdav_session_bfcp_get_lo(tmedia_session_t* p_self)
+{
+ tdav_session_bfcp_t* p_bfcp;
+ tsk_bool_t b_changed = tsk_false;
+ const char *pc_local_ip, *pc_local_profile, *pc_local_role, *pc_local_setup;
+ tnet_port_t u_local_port;
+ enum tbfcp_role_e e_local_role;
+ enum tbfcp_setup_e e_local_setup;
+ int ret;
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_get_lo");
+
+ if (!p_self || !p_self->plugin) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ b_changed = (p_self->ro_changed || !p_self->M.lo);
+
+ if (!b_changed) {
+ TSK_DEBUG_INFO("No changes to the BFCP session...skip SDP update");
+ return p_self->M.lo;
+ }
+
+ if (b_changed && p_self->M.lo) {
+ static const char* k_fields[] = { "floorctrl", "setup", "connection", "curr", "des", "conf" };
+ // remove media-level attributes before re-adding the up-to-date ones
+ tsdp_header_A_removeAll_by_fields(p_self->M.lo->Attributes, k_fields, sizeof(k_fields)/sizeof(k_fields[0]));
+ // Codec list never change and FMTs always a single star (*) value. Radvision TelePresence System reject a BFCP session whithout the single FMT (*)
+ // The Codecs and formats are never rebuilt which means we must not clear them
+#if 0
+ tsk_list_clear_items(p_self->M.lo->FMTs);
+#endif
+ }
+
+ // get local address
+ if ((ret = tbfcp_session_get_local_address(p_bfcp->p_bfcp_s, &pc_local_ip, &u_local_port))) {
+ TSK_DEBUG_ERROR("Failed to get local address from BFCP session");
+ return tsk_null;
+ }
+ // get local profile
+ if ((ret = tbfcp_session_get_profile(p_bfcp->p_bfcp_s, &pc_local_profile))) {
+ TSK_DEBUG_ERROR("Failed to get local profile from BFCP session");
+ return tsk_null;
+ }
+ // get local role
+ if ((ret = tbfcp_session_get_local_role(p_bfcp->p_bfcp_s, &e_local_role))) {
+ TSK_DEBUG_ERROR("Failed to get local role from BFCP session");
+ return tsk_null;
+ }
+ if ((ret = tbfcp_utils_get_role(e_local_role, &pc_local_role))) {
+ return tsk_null;
+ }
+ // get local setup
+ if ((ret = tbfcp_session_get_local_setup(p_bfcp->p_bfcp_s, &e_local_setup))) {
+ TSK_DEBUG_ERROR("Failed to get local setup from BFCP session");
+ return tsk_null;
+ }
+ // FIX: pass the setup enum (the original passed 'e_local_role', leaving
+ // 'e_local_setup' unused and stringifying the wrong value)
+ if ((ret = tbfcp_utils_get_setup(e_local_setup, &pc_local_setup))) {
+ return tsk_null;
+ }
+
+ if (!p_self->M.lo){
+ if (!(p_self->M.lo = tsdp_header_M_create(p_self->plugin->media, u_local_port, pc_local_profile))) {
+ TSK_DEBUG_ERROR("Failed to create BFCP SDP media header");
+ return tsk_null;
+ }
+ // NOTE(review): "connection:new" is also added unconditionally below,
+ // so the very first offer carries the attribute twice -- confirm whether
+ // remote endpoints tolerate the duplicate before removing either one.
+ tsdp_header_M_add_headers(p_self->M.lo,
+ TSDP_FMT_VA_ARGS("*"),
+ TSDP_HEADER_A_VA_ARGS("connection", "new"),
+ TSDP_HEADER_A_VA_ARGS("lib", "tinyBFCP"),
+ tsk_null);
+ // If NATT is active, do not rely on the global IP address Connection line
+ if (p_bfcp->p_natt_ctx) {
+ tsdp_header_M_add_headers(p_self->M.lo,
+ TSDP_HEADER_C_VA_ARGS("IN", p_bfcp->b_use_ipv6 ? "IP6" : "IP4", pc_local_ip),
+ tsk_null);
+ }
+ }
+ else {
+ p_self->M.lo->port = u_local_port;
+ tsk_strupdate(&p_self->M.lo->proto, pc_local_profile);
+ }
+
+ // add "floorctrl" and "setup" attributes
+ tsdp_header_M_add_headers(p_self->M.lo,
+ TSDP_HEADER_A_VA_ARGS("connection", "new"),
+ TSDP_HEADER_A_VA_ARGS("floorctrl", pc_local_role),
+ TSDP_HEADER_A_VA_ARGS("setup", pc_local_setup),
+ tsk_null);
+
+ return p_self->M.lo;
+}
+
+// Applies the remote SDP offer: stores it, parses the RFC 4583 attributes
+// ("floorctrl", "confid", "userid", "floorid ... mstrm:..."), pushes the
+// remote role/ids into the tinyBFCP session, and records the remote
+// address/port from the "m="/"c=" lines. Returns 0 on success.
+static int _tdav_session_bfcp_set_ro(tmedia_session_t* p_self, const tsdp_header_M_t* m)
+{
+ int ret = 0;
+ const tsdp_header_A_t* A;
+ tdav_session_bfcp_t* p_bfcp;
+ enum tbfcp_role_e e_remote_role = tbfcp_role_c_s;
+ uint32_t u_remote_conf_id = 0xFFFF;
+ uint16_t u_remote_user_id = 0xFFFF, u_remote_floor_id = 0xFFFF;
+
+ if (!p_self || !m) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_set_ro");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ /* update remote offer */
+ TSK_OBJECT_SAFE_FREE(p_self->M.ro);
+ p_self->M.ro = tsk_object_ref(TSK_OBJECT(m));
+
+
+ // https://tools.ietf.org/html/rfc4583
+ {
+ p_bfcp->b_conf_idf_changed = tsk_false;
+ if ((A = tsdp_header_M_findA(m, "floorctrl"))) {
+ if ((ret = tbfcp_utils_parse_role(A->value, &e_remote_role))) {
+ return ret;
+ }
+ }
+ if ((A = tsdp_header_M_findA(m, "confid"))) {
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.confid, A->value);
+ tsk_strupdate(&p_bfcp->rfc4583.confid, A->value);
+ u_remote_conf_id = (uint32_t)tsk_atoi64(p_bfcp->rfc4583.confid);
+ }
+ if ((A = tsdp_header_M_findA(m, "userid"))) {
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.userid, A->value);
+ tsk_strupdate(&p_bfcp->rfc4583.userid, A->value);
+ u_remote_user_id = (uint16_t)tsk_atoi64(p_bfcp->rfc4583.userid);
+ }
+ if ((A = tsdp_header_M_findA(m, "floorid"))) {
+ char tmp_str[256];
+ // FIX: require exactly one conversion ("== 1") instead of "!= EOF":
+ // a matching failure returns 0 and would have consumed an
+ // uninitialized tmp_str below.
+ if (sscanf(A->value, "%255s %*s", tmp_str) == 1) {
+ char *pch, *saveptr;
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.floorid, tmp_str);
+ tsk_strupdate(&p_bfcp->rfc4583.floorid, tmp_str);
+ u_remote_floor_id = (uint16_t)tsk_atoi64(p_bfcp->rfc4583.floorid);
+ // FIX: only scan past the first token when the attribute actually
+ // has trailing data; "&A->value[len + 1]" would otherwise point
+ // past the terminating NUL.
+ if (tsk_strlen(A->value) > tsk_strlen(tmp_str)) {
+ pch = tsk_strtok_r(&A->value[tsk_strlen(tmp_str) + 1], " ", &saveptr);
+ while (pch) {
+ // FIX: "== 1" for the same reason as above -- a token that does
+ // not start with "mstrm:" returns 0 and previously copied the
+ // stale floorid token into rfc4583.mstrm.
+ if (sscanf(pch, "mstrm: %255s", tmp_str) == 1) {
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.mstrm, tmp_str);
+ tsk_strupdate(&p_bfcp->rfc4583.mstrm, tmp_str);
+ break;
+ }
+ pch = tsk_strtok_r(tsk_null, " ", &saveptr);
+ }
+ }
+ }
+ }
+ // set remote role
+ if ((ret = tbfcp_session_set_remote_role(p_bfcp->p_bfcp_s, e_remote_role))) {
+ return ret;
+ }
+ if ((e_remote_role & tbfcp_role_s_only)) {
+ // local = client
+ if ((ret = tbfcp_session_set_conf_ids(p_bfcp->p_bfcp_s, u_remote_conf_id, u_remote_user_id, u_remote_floor_id))) {
+ return ret;
+ }
+ }
+ else {
+ // local = remote: Not supported yet and will never happen
+ }
+ }//end-of-rfc4583
+
+ /* get connection associated to this media line
+ * If the connnection is global, then the manager will call tdav_session_audio_set() */
+ if (m->C && m->C->addr) {
+ tsk_strupdate(&p_bfcp->p_remote_ip, m->C->addr);
+ p_bfcp->b_use_ipv6 = tsk_striequals(m->C->addrtype, "IP6");
+ }
+ /* set remote port */
+ p_bfcp->u_remote_port = m->port;
+
+ return ret;
+}
+
+// Sends a BFCP Hello, creating the packet lazily on first use; the cached
+// packet is kept so the notif callback can match the HelloAck by
+// transaction/user/conference id. Returns 0 on success.
+static int _tdav_session_bfcp_send_Hello(tdav_session_bfcp_t* p_bfcp)
+{
+ int ret = 0;
+ if (!p_bfcp) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!p_bfcp->p_pkt_Hello && (ret = tbfcp_session_create_pkt_Hello(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_Hello))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_Hello))) {
+ return ret;
+ }
+ return ret;
+}
+
+// tinyBFCP event callback (registered in prepare()). Drives the client-side
+// state machine:
+//   HelloAck      -> send FloorRequest (only when the conference ids changed);
+//   FloorRequest/ FloorStatus -> walk the grouped attributes down to
+//                  RequestStatus and either report the status to the
+//                  application, handle a Revoke (release + re-Hello), or
+//                  release a floor granted to a stale request;
+//   send timeout  -> report an error event.
+// Takes a temporary reference on the session for the duration of the call.
+// NOTE(review): the two macros below expand to multiple statements without a
+// do{}while(0) wrapper and declare a local 'e' that shadows the parameter --
+// they are only safe in the exact positions used here.
+static int _tdav_session_bfcp_notif(const struct tbfcp_session_event_xs *e)
+{
+ tdav_session_bfcp_t* p_bfcp = tsk_object_ref(TSK_OBJECT(e->pc_usr_data));
+ int ret = 0;
+ static const char* kErrTextGlobalError = "Global error";
+ static const int kErrCodeGlobalError = -56;
+ static const char* kErrTextTimeout = "Timeout";
+ static const int kErrCodeTimeout = -57;
+ static const char* kErrTextUnExpectedIncomingMsg = "Unexpected incoming BFCP message";
+ static const int kErrCodeUnExpectedIncomingMsg = -58;
+ static const char* kErrTextBadRequest = "Bad Request";
+ static const int kErrCodeBadRequest = -59;
+ static const char* kInfoTextFloorReqStatus = "FloorRequestStatus";
+
+#define _RAISE_ERR_AND_GOTO_BAIL(_code, _reason) \
+ if (TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun) { \
+ tmedia_session_bfcp_evt_xt e; \
+ e.type = tmedia_session_bfcp_evt_type_err; e.err.code = _code; e.reason = _reason; \
+ TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun(TMEDIA_SESSION(p_bfcp)->bfcp_cb.usrdata, TMEDIA_SESSION(p_bfcp), &e); \
+ } \
+ ret = _code; goto bail;
+#define _RAISE_FLREQ(_status, _reason) \
+ if (TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun) { \
+ tmedia_session_bfcp_evt_xt e; \
+ e.type = tmedia_session_bfcp_evt_type_flreq_status; e.flreq.status = _status; e.reason = _reason; \
+ TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun(TMEDIA_SESSION(p_bfcp)->bfcp_cb.usrdata, TMEDIA_SESSION(p_bfcp), &e); \
+ } \
+
+ switch (e->e_type) {
+ case tbfcp_session_event_type_inf_inc_msg:
+ {
+ // Match the incoming packet against the outstanding Hello (transaction +
+ // user + conference id); on HelloAck, kick off the FloorRequest.
+ if (p_bfcp->p_pkt_Hello && p_bfcp->p_pkt_Hello->hdr.transac_id == e->pc_pkt->hdr.transac_id && p_bfcp->p_pkt_Hello->hdr.user_id == e->pc_pkt->hdr.user_id && p_bfcp->p_pkt_Hello->hdr.conf_id == e->pc_pkt->hdr.conf_id) {
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_Hello);
+ if (e->pc_pkt->hdr.primitive == tbfcp_primitive_HelloAck) {
+ if (!p_bfcp->p_pkt_FloorRequest) {
+ if (p_bfcp->b_conf_idf_changed || 0) {
+ // Create the "FloorRelease" for this "FloorRequest"
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRelease);
+ if ((ret = tbfcp_session_create_pkt_FloorRelease(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_FloorRelease))) {
+ goto raise_err;
+ }
+ if ((ret = tbfcp_session_create_pkt_FloorRequest(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_FloorRequest))) {
+ goto raise_err;
+ }
+ if ((ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_FloorRequest))) {
+ goto raise_err;
+ }
+ }
+ else {
+ TSK_DEBUG_INFO("No change to BFCP session... do not send FloorRequest");
+ }
+ }
+ }
+ else {
+ TSK_DEBUG_ERROR("%s", kErrTextUnExpectedIncomingMsg);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeUnExpectedIncomingMsg, kErrTextUnExpectedIncomingMsg);
+ }
+ }
+ // Otherwise match against the outstanding FloorRequest (transaction id
+ // deliberately NOT required to match: async FloorStatus uses new ids).
+ else if(p_bfcp->p_pkt_FloorRequest /*&& p_bfcp->p_pkt_FloorRequest->hdr.transac_id == e->pc_pkt->hdr.transac_id*/ && p_bfcp->p_pkt_FloorRequest->hdr.user_id == e->pc_pkt->hdr.user_id && p_bfcp->p_pkt_FloorRequest->hdr.conf_id == e->pc_pkt->hdr.conf_id) {
+ tsk_bool_t transac_id_matched = (p_bfcp->p_pkt_FloorRequest->hdr.transac_id == e->pc_pkt->hdr.transac_id);
+ if (e->pc_pkt->hdr.primitive == tbfcp_primitive_FloorRequestStatus || e->pc_pkt->hdr.primitive == tbfcp_primitive_FloorStatus) {
+ tsk_size_t u_index0, u_index1, u_index2, u_index3;
+ const tbfcp_attr_grouped_t *pc_attr_FloorRequestInformation = tsk_null,
+ *pc_attr_FloorRequestStatus = tsk_null,
+ *pc_attr_OverallRequestStatus = tsk_null;
+ const tbfcp_attr_octetstring16_t *pc_attr_RequestStatus = tsk_null;
+
+ u_index0 = 0;
+ // Find "FloorRequestInformation"
+ while ((ret = tbfcp_pkt_attr_find_at(e->pc_pkt, tbfcp_attribute_format_Grouped, u_index0++, (const tbfcp_attr_t **)&pc_attr_FloorRequestInformation)) == 0 && pc_attr_FloorRequestInformation) {
+ if (TBFCP_ATTR(pc_attr_FloorRequestInformation)->hdr.type != tbfcp_attribute_type_FLOOR_REQUEST_INFORMATION) {
+ continue;
+ }
+ // Find "FloorRequestStatus"
+ u_index1 = 0;
+ while ((ret = tbfcp_attr_grouped_find_at(pc_attr_FloorRequestInformation, tbfcp_attribute_format_Grouped, u_index1++, (const tbfcp_attr_t **)&pc_attr_FloorRequestStatus)) == 0 && pc_attr_FloorRequestStatus) {
+ if (TBFCP_ATTR(pc_attr_FloorRequestStatus)->hdr.type != tbfcp_attribute_type_FLOOR_REQUEST_STATUS) {
+ continue;
+ }
+ // only the floor we asked for (rfc4583 "floorid") is of interest
+ if (pc_attr_FloorRequestStatus->extra_hdr.FloorID != atoi(p_bfcp->rfc4583.floorid)) {
+ continue;
+ }
+ break;
+ }
+ if (!pc_attr_FloorRequestStatus) {
+ continue;
+ }
+ // Find "OverallRequestStatus"
+ u_index2 = 0;
+ while ((ret = tbfcp_attr_grouped_find_at(pc_attr_FloorRequestInformation, tbfcp_attribute_format_Grouped, u_index2++, (const tbfcp_attr_t **)&pc_attr_OverallRequestStatus)) == 0 && pc_attr_OverallRequestStatus) {
+ if (TBFCP_ATTR(pc_attr_OverallRequestStatus)->hdr.type != tbfcp_attribute_type_OVERALL_REQUEST_STATUS) {
+ continue;
+ }
+
+ // Find "RequestStatus"
+ u_index3 = 0;
+ while ((ret = tbfcp_attr_grouped_find_at(pc_attr_OverallRequestStatus, tbfcp_attribute_format_OctetString16, u_index3++, (const tbfcp_attr_t **)&pc_attr_RequestStatus)) == 0 && pc_attr_RequestStatus) {
+ if (TBFCP_ATTR(pc_attr_RequestStatus)->hdr.type != tbfcp_attribute_type_REQUEST_STATUS) {
+ continue;
+ }
+ break;
+ }
+ }
+ if (pc_attr_RequestStatus) {
+ break;
+ }
+ }
+
+ if (pc_attr_RequestStatus) {
+ // https://tools.ietf.org/html/rfc4582#section-5.2.5
+ uint16_t u_status = pc_attr_RequestStatus->OctetString16[0] + (pc_attr_RequestStatus->OctetString16[1] << 8);
+ if (transac_id_matched) {
+ if (u_status == tbfcp_reqstatus_Revoked && !p_bfcp->b_revoked_handled) { // revoked
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRequest); // free the FloorRequest and ask new one once HelloAck is received
+ // Radvision sends a Revoke after a reINVITE to ask for new negotiation.
+ if (p_bfcp->p_pkt_FloorRelease) {
+ if ((ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_FloorRelease))) {
+ goto raise_err;
+ }
+ }
+ if ((ret = _tdav_session_bfcp_send_Hello(p_bfcp))) {
+ goto raise_err;
+ }
+ p_bfcp->b_revoked_handled = tsk_true;
+ }
+ else {
+ _RAISE_FLREQ(u_status, kInfoTextFloorReqStatus);
+ }
+ }
+ else { //!transac_id_matched
+ // Status from old FloorRequest
+ tbfcp_pkt_t* p_pkt = tsk_null;
+ TSK_DEBUG_INFO("Status from old Request");
+ // a stale request that is still pending/accepted/granted gets released
+ if (u_status == tbfcp_reqstatus_Pending || u_status == tbfcp_reqstatus_Accepted || u_status == tbfcp_reqstatus_Granted) {
+ if ((ret = tbfcp_pkt_create_FloorRelease_2(e->pc_pkt->hdr.conf_id, e->pc_pkt->hdr.transac_id, e->pc_pkt->hdr.user_id, pc_attr_FloorRequestStatus->extra_hdr.FloorID, &p_pkt))) {
+ goto raise_err;
+ }
+ ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_pkt);
+ TSK_OBJECT_SAFE_FREE(p_pkt);
+ if (ret) {
+ goto raise_err;
+ }
+ }
+ }
+ }
+ else {
+ /* /!\ No RequestStatus attribute in FloorRequestStatus */
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRequest);
+ TSK_DEBUG_ERROR("%s", kErrTextBadRequest);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeBadRequest, kErrTextBadRequest);
+ }
+ }
+ else {
+ switch (e->pc_pkt->hdr.primitive) {
+ case tbfcp_primitive_Hello: break; // already handled in "_tbfcp_session_process_incoming_pkt()"
+ default:
+ {
+ TSK_DEBUG_ERROR("%s", kErrTextUnExpectedIncomingMsg);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeUnExpectedIncomingMsg, kErrTextUnExpectedIncomingMsg);
+ break;
+ }
+ }
+ }
+ }
+ break;
+ }
+ case tbfcp_session_event_type_err_send_timedout:
+ {
+ /* /!\ Sending BFCP message timedout */
+ TSK_DEBUG_ERROR("%s", kErrTextTimeout);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeTimeout, kErrTextTimeout);
+ break;
+ }
+ }
+ // NOTE(review): no 'default' case above -- other event types fall through
+ // here with ret == 0, which appears intentional (ignore-unknown).
+raise_err:
+ if (ret) {
+ TSK_DEBUG_ERROR("%s", kErrTextGlobalError);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeGlobalError, kErrTextGlobalError);
+ }
+bail:
+
+ TSK_OBJECT_SAFE_FREE(p_bfcp);
+ return ret;
+}
+
+
+/* ============ Public functions ================= */
+
+
+
+
+
+//=================================================================================================
+// Session BFCP Plugin object definition
+//
+/* constructor */
+// Object constructor. The tmedia base is initialized by tmedia_session_create();
+// all BFCP-specific fields start zeroed by the allocator, so nothing to do here.
+// The commented-out lines are leftovers from the MSRP session this file was
+// modeled on.
+static tsk_object_t* _tdav_session_bfcp_ctor(tsk_object_t * p_self, va_list * app)
+{
+ tdav_session_bfcp_t *p_session = (tdav_session_bfcp_t *)p_self;
+ if (p_session) {
+ /* init base: called by tmedia_session_create() */
+ /* init self */
+ // TMEDIA_SESSION_BFCP(session)->send_file = tdav_session_bfcp_send_file;
+ // TMEDIA_SESSION_BFCP(session)->send_message = tdav_session_bfcp_send_message;
+
+ // session->config = tbfcp_config_create();
+ // session->setup = bfcp_setup_actpass;
+ // session->dir = tdav_bfcp_dir_none;
+ }
+ return p_self;
+}
+/* destructor */
+// Releases everything the session owns: the tinyBFCP session, cached packets,
+// IP strings, rfc4583 attribute copies and the NAT context, then deinits the
+// tmedia base.
+static tsk_object_t* _tdav_session_bfcp_dtor(tsk_object_t * p_self)
+{
+ tdav_session_bfcp_t *p_session = (tdav_session_bfcp_t *)p_self;
+ if (p_session) {
+ /* deinit self */
+
+ TSK_OBJECT_SAFE_FREE(p_session->p_bfcp_s);
+ TSK_OBJECT_SAFE_FREE(p_session->p_pkt_FloorRequest);
+ TSK_OBJECT_SAFE_FREE(p_session->p_pkt_FloorRelease);
+ TSK_OBJECT_SAFE_FREE(p_session->p_pkt_Hello);
+
+ TSK_FREE(p_session->p_local_ip);
+ TSK_FREE(p_session->p_remote_ip);
+
+ /* rfc4583 */
+ TSK_FREE(p_session->rfc4583.confid);
+ TSK_FREE(p_session->rfc4583.floorid);
+ TSK_FREE(p_session->rfc4583.mstrm);
+ TSK_FREE(p_session->rfc4583.userid);
+
+ /* NAT Traversal context */
+ TSK_OBJECT_SAFE_FREE(p_session->p_natt_ctx);
+
+ /* deinit base */
+ tmedia_session_deinit(p_self);
+
+ TSK_DEBUG_INFO("*** tdav_session_bfcp_t destroyed ***");
+ }
+
+ return p_self;
+}
+/* object definition */
+// tsk_object vtable for the BFCP session.
+static const tsk_object_def_t tdav_session_bfcp_def_s =
+{
+ sizeof(tdav_session_bfcp_t),
+ _tdav_session_bfcp_ctor,
+ _tdav_session_bfcp_dtor,
+ tmedia_session_cmp,
+};
+/* plugin definition*/
+// Session plugin registered with tinyMEDIA; "application" is the SDP media
+// name used for the BFCP "m=" line.
+static const tmedia_session_plugin_def_t tdav_session_bfcp_plugin_def_s =
+{
+ &tdav_session_bfcp_def_s,
+
+ tmedia_bfcp,
+ "application",
+
+ _tdav_session_bfcp_set,
+ _tdav_session_bfcp_get,
+ _tdav_session_bfcp_prepare,
+ _tdav_session_bfcp_start,
+ _tdav_session_bfcp_pause,
+ _tdav_session_bfcp_stop,
+
+ /* Audio part */
+ { tsk_null },
+
+ _tdav_session_bfcp_get_lo,
+ _tdav_session_bfcp_set_ro
+};
+const tmedia_session_plugin_def_t *tdav_session_bfcp_plugin_def_t = &tdav_session_bfcp_plugin_def_s;
+
+#endif /* !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP */ \ No newline at end of file
diff --git a/tinyDAV/src/codecs/amr/tdav_codec_amr.c b/tinyDAV/src/codecs/amr/tdav_codec_amr.c
new file mode 100644
index 0000000..9304f85
--- /dev/null
+++ b/tinyDAV/src/codecs/amr/tdav_codec_amr.c
@@ -0,0 +1,816 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_amr.c
+ * @brief AMR-NB and AMR-WB codecs.
+ * RTP payloader/depayloader are based on RFC 4867
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/amr/tdav_codec_amr.h"
+
+#include "tsk_params.h"
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <stdlib.h> /* atoi() */
+
+#if HAVE_OPENCORE_AMR
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "..\\thirdparties\\win32\\lib\\opencore\\libopencore-amrnb.a")
+#endif
+
+#define NO_DATA 15
+#define DEFAULT_ENC_MODE ((enum Mode)MR122) /* Higher, could be changed by remote party by using CMR */
+
+/* From WmfDecBytesPerFrame in dec_input_format_tab.cpp */
+static const int tdav_codec_amr_nb_sizes[] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 6, 5, 5, 0, 0, 0, 0 };
+/* From pvamrwbdecoder_api.h, by dividing by 8 and rounding up */
+static const int tdav_codec_amr_wb_sizes[] = { 17, 23, 32, 36, 40, 46, 50, 58, 60, 5, -1, -1, -1, -1, -1, -1 };
+
+/* ============ Common ================= */
+static int tdav_codec_amr_init(tdav_codec_amr_t* self, tdav_codec_amr_type_t type, tdav_codec_amr_mode_t mode);
+static int tdav_codec_amr_deinit(tdav_codec_amr_t* self);
+static tdav_codec_amr_mode_t tdav_codec_amr_get_mode(const char* fmtp);
+static int tdav_codec_amr_parse_fmtp(tdav_codec_amr_t* self, const char* fmtp);
+static tsk_size_t tdav_codec_amr_oa_decode(tdav_codec_amr_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr);
+static tsk_size_t tdav_codec_amr_be_decode(tdav_codec_amr_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr);
+static tsk_size_t tdav_codec_amr_be_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size);
+static tsk_size_t tdav_codec_amr_oa_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size);
+static uint8_t tdav_codec_amr_bitbuffer_read(const void* bits, tsk_size_t size, tsk_size_t start, tsk_size_t count);
+
+/* ============ AMR-NB Plugin interface =================
+ The AMR codec was originally developed and standardized by the
+ European Telecommunications Standards Institute (ETSI) for GSM
+ cellular systems. It is now chosen by the Third Generation
+ Partnership Project (3GPP) as the mandatory codec for third
+ generation (3G) cellular systems [1].
+
+ The AMR codec is a multi-mode codec that supports eight narrow band
+ speech encoding modes with bit rates between 4.75 and 12.2 kbps. The
+ sampling frequency used in AMR is 8000 Hz and the speech encoding is
+ performed on 20 ms speech frames. Therefore, each encoded AMR speech
+ frame represents 160 samples of the original speech.
+
+ Among the eight AMR encoding modes, three are already separately
+ adopted as standards of their own. Particularly, the 6.7 kbps mode
+ is adopted as PDC-EFR [18], the 7.4 kbps mode as IS-641 codec in TDMA
+ [17], and the 12.2 kbps mode as GSM-EFR [16].
+*/
+
+// Opens the AMR-NB codec: lazily creates the opencore-amr encoder (DTX
+// disabled -- the 0 argument) and decoder instances. Idempotent: existing
+// instances are kept. Returns 0 on success, -2 when opencore-amr fails.
+int tdav_codec_amrnb_open(tmedia_codec_t* self)
+{
+ tdav_codec_amr_t* amrnb = (tdav_codec_amr_t*)self;
+
+ if(!TDAV_CODEC_AMR(amrnb)->encoder){
+ if(!(TDAV_CODEC_AMR(amrnb)->encoder = Encoder_Interface_init(0))){
+ TSK_DEBUG_ERROR("Failed to initialize AMR-NB encoder");
+ return -2;
+ }
+ }
+
+ if(!TDAV_CODEC_AMR(amrnb)->decoder){
+ if(!(TDAV_CODEC_AMR(amrnb)->decoder = Decoder_Interface_init())){
+ // FIX: this branch previously logged "encoder" (copy-paste error)
+ TSK_DEBUG_ERROR("Failed to initialize AMR-NB decoder");
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+// Closes the AMR-NB codec: releases the opencore-amr encoder and decoder and
+// nulls the handles so a later open() can re-create them. Always returns 0.
+int tdav_codec_amrnb_close(tmedia_codec_t* self)
+{
+ tdav_codec_amr_t* amrnb = (tdav_codec_amr_t*)self;
+
+ if(TDAV_CODEC_AMR(amrnb)->encoder){
+ Encoder_Interface_exit(TDAV_CODEC_AMR(amrnb)->encoder);
+ TDAV_CODEC_AMR(amrnb)->encoder = tsk_null;
+ }
+
+ if(TDAV_CODEC_AMR(amrnb)->decoder){
+ Decoder_Interface_exit(TDAV_CODEC_AMR(amrnb)->decoder);
+ TDAV_CODEC_AMR(amrnb)->decoder = tsk_null;
+ }
+
+ return 0;
+}
+
+// Encode entry point: dispatches to the bandwidth-efficient or octet-aligned
+// RTP payloader (RFC 4867) depending on the negotiated mode.
+tsk_size_t tdav_codec_amrnb_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tdav_codec_amr_t* amr = (tdav_codec_amr_t*)self;
+
+ switch(amr->mode){
+ case tdav_codec_amr_mode_be:
+ return tdav_codec_amr_be_encode(amr, in_data, in_size, out_data, out_max_size);
+ default:
+ return tdav_codec_amr_oa_encode(amr, in_data, in_size, out_data, out_max_size);
+ }
+
+ // unreachable: both switch branches return (default covers all other modes)
+ return 0;
+}
+
+// Decode entry point: dispatches to the bandwidth-efficient or octet-aligned
+// RTP depayloader (RFC 4867) depending on the negotiated mode.
+tsk_size_t tdav_codec_amrnb_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tdav_codec_amr_t* amr = (tdav_codec_amr_t*)self;
+
+ switch(amr->mode){
+ case tdav_codec_amr_mode_be:
+ return tdav_codec_amr_be_decode(amr, in_data, in_size, out_data, out_max_size, proto_hdr);
+ default:
+ return tdav_codec_amr_oa_decode(amr, in_data, in_size, out_data, out_max_size, proto_hdr);
+ }
+}
+
+// Returns the SDP attribute value advertising the codec's mode as a
+// newly-allocated string (caller frees). 'att_name' is ignored -- the only
+// attribute produced is the "octet-align" fmtp flag.
+char* tdav_codec_amrnb_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ const tdav_codec_amr_t* amr = (const tdav_codec_amr_t*)codec;
+
+ /* We support all modes, all ... */
+ if(amr){
+ switch(amr->mode){
+ case tdav_codec_amr_mode_be:
+ return tsk_strdup("octet-align=0");
+ default:
+ return tsk_strdup("octet-align=1");
+ }
+ }
+ return tsk_null;
+}
+
+// SDP negotiation hook: accepts the remote "fmtp" attribute only when its
+// octet-align flag matches this codec instance's mode AND its parameters
+// parse cleanly (which also applies the remote's mode-set/CMR preferences).
+// Any other attribute name is rejected.
+tsk_bool_t tdav_codec_amrnb_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ tdav_codec_amr_t* amr;
+ if(!(amr = (tdav_codec_amr_t*)codec) || !att_name){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_false;
+ }
+
+ if(amr && tsk_striequals(att_name, "fmtp")){
+ /* Match mode */
+ if(tdav_codec_amr_get_mode(att_value) != amr->mode){
+ TSK_DEBUG_INFO("Failed to match [%s]", att_value);
+ return tsk_false;
+ }
+ /* check parameters validity */
+ if(tdav_codec_amr_parse_fmtp(amr, att_value)){
+ TSK_DEBUG_INFO("Failed to match [%s]", att_value);
+ return tsk_false;
+ }
+
+ return tsk_true;
+ }
+ return tsk_false;
+}
+
+
+//
+// AMR-NB OA Plugin definition
+//
+
+/* constructor */
+// AMR-NB octet-aligned variant: shares the tdav_codec_amr_t state; only the
+// mode passed to init differs from the BE variant below.
+static tsk_object_t* tdav_codec_amrnb_oa_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_amr_t *amrnb_oa = self;
+ if(amrnb_oa){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ tdav_codec_amr_init(TDAV_CODEC_AMR(amrnb_oa), tdav_codec_amr_type_nb, tdav_codec_amr_mode_oa);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_amrnb_oa_dtor(tsk_object_t * self)
+{
+ tdav_codec_amr_t *amrnb_oa = self;
+ if(amrnb_oa){
+ /* deinit base */
+ tmedia_codec_audio_deinit(amrnb_oa);
+ /* deinit self */
+ tdav_codec_amr_deinit(TDAV_CODEC_AMR(amrnb_oa));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_amrnb_oa_def_s =
+{
+ sizeof(tdav_codec_amr_t),
+ tdav_codec_amrnb_oa_ctor,
+ tdav_codec_amrnb_oa_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+// Codec plugin descriptor: 8 kHz mono, 20 ms ptime, dynamic payload format.
+static const tmedia_codec_plugin_def_t tdav_codec_amrnb_oa_plugin_def_s =
+{
+ &tdav_codec_amrnb_oa_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_amr_nb_oa,
+ "AMR",
+ "AMR Narrow Band - Octet Aligned (libopencore-amr)",
+ TMEDIA_CODEC_FORMAT_AMR_NB_OA,
+ tsk_true,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 20 // ptime
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_amrnb_open,
+ tdav_codec_amrnb_close,
+ tdav_codec_amrnb_encode,
+ tdav_codec_amrnb_decode,
+ tdav_codec_amrnb_sdp_att_match,
+ tdav_codec_amrnb_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_amrnb_oa_plugin_def_t = &tdav_codec_amrnb_oa_plugin_def_s;
+
+//
+// AMR-NB BE Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_amrnb_be_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_amr_t *amrnb_be = self;
+ if(amrnb_be){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ tdav_codec_amr_init(TDAV_CODEC_AMR(amrnb_be), tdav_codec_amr_type_nb, tdav_codec_amr_mode_be);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_amrnb_be_dtor(tsk_object_t * self)
+{
+ tdav_codec_amr_t *amrnb_be = self;
+ if(amrnb_be){
+ /* deinit base */
+ tmedia_codec_audio_deinit(amrnb_be);
+ /* deinit self */
+ tdav_codec_amr_deinit(TDAV_CODEC_AMR(amrnb_be));
+ }
+
+ return self;
+}
/* object definition: tsk runtime type descriptor for the AMR-NB Bandwidth-Efficient codec */
static const tsk_object_def_t tdav_codec_amrnb_be_def_s =
{
	sizeof(tdav_codec_amr_t),
	tdav_codec_amrnb_be_ctor,
	tdav_codec_amrnb_be_dtor,
	tmedia_codec_cmp,
};
/* plugin definition */
static const tmedia_codec_plugin_def_t tdav_codec_amrnb_be_plugin_def_s =
{
	&tdav_codec_amrnb_be_def_s,

	tmedia_audio,
	tmedia_codec_id_amr_nb_be,
	"AMR", /* SDP encoding name (RFC 4867) */
	"AMR Narrow Band - Bandwidth-Efficient (libopencore-amr)",
	TMEDIA_CODEC_FORMAT_AMR_NB_BE,
	tsk_true, /* presumably the "dynamic payload type" flag — confirm against tmedia_codec_plugin_def_t */
	8000, // rate

	{ /* audio */
		1, // channels
		0 // ptime @deprecated — NOTE(review): the OA sibling uses 20 here; confirm the asymmetry is intended
	},

	/* video */
	{0},

	tsk_null, // set()
	tdav_codec_amrnb_open,
	tdav_codec_amrnb_close,
	tdav_codec_amrnb_encode,
	tdav_codec_amrnb_decode,
	tdav_codec_amrnb_sdp_att_match,
	tdav_codec_amrnb_sdp_att_get
};
/* exported plugin entry for the AMR-NB BE codec */
const tmedia_codec_plugin_def_t *tdav_codec_amrnb_be_plugin_def_t = &tdav_codec_amrnb_be_plugin_def_s;
+
+
+
+
+
+
+
+
+
+//
+// Common functions
+//
+
+static int tdav_codec_amr_init(tdav_codec_amr_t* self, tdav_codec_amr_type_t type, tdav_codec_amr_mode_t mode)
+{
+ if(self){
+ self->type = type;
+ self->mode = mode;
+ self->encoder_mode = DEFAULT_ENC_MODE;
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Invalid Parameter");
+ return -1;
+ }
+}
+
+static int tdav_codec_amr_deinit(tdav_codec_amr_t* self)
+{
+ if(self){
+ switch(self->type){
+ case tdav_codec_amr_type_nb:
+ { /* AMR-NB */
+ if(self->encoder){
+ Encoder_Interface_exit(self->encoder);
+ self->encoder = tsk_null;
+ }
+ if(self->decoder){
+ Decoder_Interface_exit(self->decoder);
+ self->decoder = tsk_null;
+ }
+ break;
+ }
+ case tdav_codec_amr_type_wb:
+ { /* AMR-WB */
+ break;
+ }
+ }
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Invalid Parameter");
+ return -1;
+ }
+}
+
+static tdav_codec_amr_mode_t tdav_codec_amr_get_mode(const char* fmtp)
+{
+ /* RFC 4867 - 8.1. AMR Media Type Registration
+ octet-align: Permissible values are 0 and 1. If 1, octet-aligned
+ operation SHALL be used. If 0 or if not present, bandwidth-efficient operation is employed.
+ */
+ tdav_codec_amr_mode_t mode = tdav_codec_amr_mode_be;
+ tsk_size_t size = tsk_strlen(fmtp);
+ int start, end;
+
+ if((start = tsk_strindexOf(fmtp, size, "octet-align")) !=-1){
+ tsk_param_t* param;
+ if((end = tsk_strindexOf((fmtp+start), (size-start), ";")) == -1){
+ end = size;
+ }
+ if((param = tsk_params_parse_param((fmtp+start), (end-start)))){
+ if(param->value && tsk_strequals(param->value, "1")){
+ mode = tdav_codec_amr_mode_oa;
+ }
+ TSK_OBJECT_SAFE_FREE(param);
+ }
+ }
+ return mode;
+}
+
/**
 * Parses an AMR "a=fmtp" value (RFC 4867 section 8.1) and updates the codec context.
 * "octet-align" is deliberately NOT handled here: the caller has already matched it.
 * @param self AMR context updated in place (modes bitmask, mcp, mcc, mcn)
 * @param fmtp attribute value, e.g. "mode-set=0,2,5,7; mode-change-period=2"
 * @return 0 on success, -1 on an invalid or unsupported parameter
 */
int tdav_codec_amr_parse_fmtp(tdav_codec_amr_t* self, const char* fmtp)
{
	int ret = 0;
	int val_int;
	const char* val_str;
	tsk_params_L_t* params = tsk_null;

	if((params = tsk_params_fromstring(fmtp, ";", tsk_true))){
		/* Do not check "octet-align" => already done by the caller of this function */

		/* === mode-set === comma-separated list of allowed modes, folded into a bitmask */
		if((val_str = tsk_params_get_param_value(params, "mode-set"))){
			char* modes = tsk_strdup(val_str); /* strtok_r mutates its input -> work on a copy */
			char *pch, *saveptr;
			int mode_int;
			pch = tsk_strtok_r(modes, ", ", &saveptr);
			while(pch){
				mode_int = atoi(pch);
				self->modes |= 0x0001 << mode_int; /* bit i set <=> mode i allowed */
				pch = tsk_strtok_r(tsk_null, ", ", &saveptr);
			}

			TSK_FREE(modes);
		}
		else{
			self->modes = 0xFFFF; /* mode-set absent -> all modes allowed */
		}

		/* === interleaving === not implemented: reject the offer
		   (NOTE: -1 is presumably the "parameter absent" sentinel of
		   tsk_params_get_param_value_as_int — confirm in tinySAK) */
		if((val_int = tsk_params_get_param_value_as_int(params, "interleaving")) != -1){
			TSK_DEBUG_WARN("interleaving not supported");
			ret = -1; goto bail;
		}
		/* === mode-change-period === only 1 or 2 are legal per RFC 4867 */
		if((val_int = tsk_params_get_param_value_as_int(params, "mode-change-period")) != -1){
			if(val_int != 1 && val_int != 2){
				TSK_DEBUG_ERROR("Invalid [mode-change-period]");
				ret = -1; goto bail;
			}
			self->mcp = (unsigned)val_int;
		}
		/* === mode-change-capability === only 1 or 2 are legal */
		if((val_int = tsk_params_get_param_value_as_int(params, "mode-change-capability")) != -1){
			if(val_int != 1 && val_int != 2){
				TSK_DEBUG_ERROR("Invalid [mode-change-capability]");
				ret = -1; goto bail;
			}
			self->mcc = (unsigned)val_int;
		}
		/* === mode-change-neighbor === boolean (0/1) */
		if((val_int = tsk_params_get_param_value_as_int(params, "mode-change-neighbor")) != -1){
			if(val_int != 0 && val_int != 1){
				TSK_DEBUG_ERROR("Invalid [mode-change-neighbor]");
				ret = -1; goto bail;
			}
			self->mcn = (unsigned)val_int;
		}
	}

bail:
	TSK_OBJECT_SAFE_FREE(params);
	return ret;
}
+
+
+/* RFC 4867 - 4.2. Payload Structure
+ +----------------+-------------------+----------------
+ | payload header | table of contents | speech data ...
+ +----------------+-------------------+----------------
+*/
+/* RFC 4867 - 4.4.2. The Payload Table of Contents and Frame CRCs
+ The table of contents (ToC) consists of a list of ToC entries, each representing a speech frame.
+ +---------------------+
+ | list of ToC entries |
+ +---------------------+
+ | list of frame CRCs | (optional)
+ - - - - - - - - - - -
+ Note, for ToC entries with FT=14 or 15, there will be no
+ corresponding speech frame or frame CRC present in the payload.
+*/
+
+
/**
 * Encodes one 20ms PCM frame into a Bandwidth-Efficient AMR payload (RFC 4867 4.3):
 * the 4-bit CMR, one 6-bit ToC entry and the speech bits are packed back-to-back,
 * so every byte after the first straddles two source bytes (2-bit shift below).
 * @return the payload size in bytes, or 0 on failure.
 */
static tsk_size_t tdav_codec_amr_be_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
{
	tsk_size_t out_size = 0, i;
	int ret_size;
	uint8_t ToC;
	static uint8_t CMR = NO_DATA /* No interleaving */;

	uint8_t outbuf[60 + 1]; /* enough for both NB and WB at ptime=20ms */
	if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_be)){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	/* Encode: opencore-amr writes the frame into outbuf with the ToC in octet-aligned
	   layout in outbuf[0] (hence the >>2 below to drop the two padding bits) */
	if((ret_size = Encoder_Interface_Encode(amr->encoder, amr->encoder_mode, in_data, outbuf, 0)) <= 0){
		TSK_DEBUG_ERROR("Encoder_Interface_Encode() failed");
		goto bail;
	}


	/* allocate output buffer (grow-only; caller owns *out_data) */
	if((int)*out_max_size <ret_size){
		if(!(*out_data = tsk_realloc(*out_data, ret_size))){
			*out_max_size = 0;
			TSK_DEBUG_ERROR("Failed to allocate new buffer");
			goto bail;
		}
		*out_max_size = ret_size;
	}

	out_size = ret_size;

	/* CMR (4bits) */
	((uint8_t*)*out_data)[0] = (CMR<<4);
	/* ToC (Always ONE Frame, don't need to test for [F]) (6bits)*/
	ToC = outbuf[0]>>2/*2*[P]*/;
	((uint8_t*)*out_data)[0] |= (ToC >> 2/*[Q],[1-FT]*/) & 0xF; /* 4bits */
	((uint8_t*)*out_data)[1] = (ToC & 0x3/*[1-FT],[Q]*/)<<6; /* 2bits */

	/* === THERE ARE 2 EXTRA BITS === the 4-bit CMR + 6-bit ToC occupy 10 bits,
	   so every speech byte is shifted left across a byte boundary by 2 bits */

	for(i=1; i<out_size-1; i++){
		((uint8_t*)*out_data)[i] |= outbuf[i]>>2;/* 6bits */
		((uint8_t*)*out_data)[i+1] = outbuf[i]<<6;/* 2bits */
	}

bail:
	return out_size;
}
+
+tsk_size_t tdav_codec_amr_be_decode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size = 0, pcm_frame_size = 0, index = 0;
+ const uint8_t* pdata = (const uint8_t*)in_data;
+ //--const uint8_t* pend = (pdata + in_size);
+ uint8_t CMR;
+ int toc_entries = 0, i, k; // ToC entries count
+
+ if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_be)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* compute PCM frame size */
+ switch(TDAV_CODEC_AMR(amr)->type){
+ case tdav_codec_amr_type_nb:
+ pcm_frame_size = 160 * sizeof(short);
+ break;
+ case tdav_codec_amr_type_wb:
+ pcm_frame_size = 320 * sizeof(short);
+ break;
+ default:
+ TSK_DEBUG_ERROR("Invalid AMR type");
+ return 0;
+ }
+
+ /* CMR (4bits) */
+ CMR = tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), index, 4);
+ index += 4;
+ if(CMR != NO_DATA){
+ amr->encoder_mode = (enum Mode)CMR;
+ }
+
+ /* F(1bit), FT(4bits), Q(1bit) */
+ /* count ToC entries */
+ do{ /* At least ONE ToC */
+ ++toc_entries;
+ ++pdata;
+ index += 6;
+ }
+ while((index < (in_size*8)) && (tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), (index-6), 1)/* F */));
+
+ for(i = 0; (i<toc_entries && (in_size < (in_size*8))) ; i++){
+ int size = -1;
+ uint8_t* speech_data = tsk_null;
+ //--int speech_data_size = 0;
+ uint8_t ToC = tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), 4/*CMR*/ + (i*6), 6);
+
+ switch(TDAV_CODEC_AMR(amr)->type){
+ case tdav_codec_amr_type_nb:
+ size = tdav_codec_amr_nb_sizes[(ToC>>1)&0x0F/* FT */];
+ break;
+ case tdav_codec_amr_type_wb:
+ size = tdav_codec_amr_wb_sizes[(ToC>>1)&0x0F/* FT */];
+ break;
+ }
+
+ if((speech_data = tsk_calloc((size + 2/* ToC + '\0' */), sizeof(uint8_t)))){
+ /* copy ToC */
+ speech_data[0] = (ToC & 0x1F)<<2/* 2*[P] */; /* ToC as OA layout */
+ /* copy speech data */
+ for(k=0; k<size; k++){
+ speech_data[1 + k] = tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), index, 8);
+ index+=8;
+ if((k==size-1) && (index%8)){
+ speech_data[1 + k] <<= (8-(index%8)); //clean
+ }
+ }
+
+ /* allocate/reallocate speech data */
+ if(*out_max_size <(out_size + pcm_frame_size)){
+ if(!(*out_data = tsk_realloc(*out_data, (out_size + pcm_frame_size)))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ TSK_FREE(speech_data);
+ goto bail;
+ }
+ *out_max_size = out_size + pcm_frame_size;
+ }
+
+ /* decode speech data */
+ Decoder_Interface_Decode(amr->decoder, speech_data, &((short*)*out_data)[out_size/sizeof(short)], 0);
+ out_size += pcm_frame_size, pdata+= size;
+
+ TSK_FREE(speech_data);
+ }
+ }
+
+bail:
+ return out_size;
+}
+
+static tsk_size_t tdav_codec_amr_oa_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t out_size = 0;
+ int ret_size;
+ static uint8_t CMR = NO_DATA /* No interleaving */;
+
+ uint8_t outbuf[60 + 1]; /* enought for both NB and WB at ptime=20ms */
+ if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_oa)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* Encode */
+ if((ret_size = Encoder_Interface_Encode(amr->encoder, amr->encoder_mode, in_data, outbuf, 0)) <= 0){
+ TSK_DEBUG_ERROR("Encoder_Interface_Encode() failed");
+ goto bail;
+ }
+
+ out_size = ret_size + 1 /* CMR without interleaving */;
+ /* allocate output buffer */
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = out_size = 0;
+ goto bail;
+ }
+ *out_max_size = out_size;
+ }
+
+ /* CMR */
+ ((uint8_t*)*out_data)[0] = (CMR << 4);
+ /* Only ONE ToC --> believe me */
+ memcpy(&((uint8_t*)*out_data)[1], outbuf, ret_size);
+
+bail:
+ return out_size;
+}
+
/**
 * Decodes an Octet-Aligned AMR payload (RFC 4867 4.4) into 16-bit PCM.
 * Walks the ToC list (one byte per entry, chained by the F bit), then feeds each
 * frame (ToC + speech) to opencore-amr, appending PCM to the grow-only *out_data.
 * @return total PCM bytes produced, or 0 on error.
 */
static tsk_size_t tdav_codec_amr_oa_decode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
	tsk_size_t out_size = 0, pcm_frame_size = 0;
	const uint8_t* pdata = (const uint8_t*)in_data;
	const uint8_t* pend = (pdata + in_size);
	uint8_t CMR;
	int toc_entries = 0, i; // ToC entries count

	if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_oa)){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	/* compute PCM frame size (one 20ms frame of 16-bit samples) */
	switch(TDAV_CODEC_AMR(amr)->type){
		case tdav_codec_amr_type_nb:
			pcm_frame_size = 160 * sizeof(short);
			break;
		case tdav_codec_amr_type_wb:
			pcm_frame_size = 320 * sizeof(short);
			break;
		default:
			TSK_DEBUG_ERROR("Invalid AMR type");
			return 0;
	}

	/* RFC 4867 - 4.4. Octet-Aligned Mode
		In octet-aligned mode, the payload header consists of a 4-bit CMR, 4
		reserved bits, and optionally, an 8-bit interleaving header, as shown
		below:

		0 1
		0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
		+-+-+-+-+-+-+-+-+- - - - - - - -
		| CMR |R|R|R|R| ILL | ILP |
		+-+-+-+-+-+-+-+-+- - - - - - - -

		CMR (4 bits): same as defined in Section 4.3.1.

		"interleaving" not supported ==> could ignore ILL and ILP (which are optional)
	*/

	CMR = (*pdata++ >> 4);
	if(CMR != NO_DATA){
		/* The codec mode request received in the CMR field is valid until the
		next codec mode request is received, i.e., a newly received CMR value
		corresponding to a speech mode, or NO_DATA overrides the previously
		received CMR value corresponding to a speech mode or NO_DATA. */
		amr->encoder_mode = (enum Mode)CMR; // As we support all modes, do not check for validity
	}

	/*
		A ToC entry takes the following format in octet-aligned mode:
		0 1 2 3 4 5 6 7
		+-+-+-+-+-+-+-+-+
		|F| FT |Q|P|P|
		+-+-+-+-+-+-+-+-+

		F (1 bit): see definition in Section 4.3.2.
		FT (4 bits, unsigned integer): see definition in Section 4.3.2.
		Q (1 bit): see definition in Section 4.3.2.
		P bits: padding bits, MUST be set to zero, and MUST be ignored on reception.
	*/

	/* count ToC entries; the F bit of each entry says whether another follows.
	   After this loop, pdata points at the first speech byte. */
	do{ /* At least ONE ToC */
		++toc_entries;
		++pdata;
	}
	while(pdata && (pdata < pend) && (pdata[-1] >> 7/* F */));

	for(i = 0; (i<toc_entries && (pdata < pend)) ; i++){
		int size = -1;
		uint8_t* speech_data = tsk_null;
		/* ToC entries sit right after the CMR byte, one byte each */
		uint8_t ToC = ((const uint8_t*)in_data)[1/*CMR...*/ + i];
		switch(TDAV_CODEC_AMR(amr)->type){
			case tdav_codec_amr_type_nb:
				size = tdav_codec_amr_nb_sizes[(ToC>>3) & 0x0F/* FT */];
				break;
			case tdav_codec_amr_type_wb:
				size = tdav_codec_amr_wb_sizes[(ToC>>3) & 0x0F/* FT */];
				break;
		}

		/* check size: reject unknown FT values and frames overrunning the payload */
		if(size <0 || ((pdata + size) > pend)){
			TSK_DEBUG_ERROR("Invalid size");
			break;
		}

		if((speech_data = tsk_calloc((size + 2/* ToC + '\0' */), sizeof(uint8_t)))){
			/* copy ToC (clear F so opencore-amr sees a single-frame header) */
			*speech_data = ToC & 0x7F/* with 'F'=0 */;
			/* copy speech data */
			memcpy((speech_data + 1), pdata, size);
			/* allocate/reallocate the output buffer (grow-only) */
			if(*out_max_size <(out_size + pcm_frame_size)){
				if(!(*out_data = tsk_realloc(*out_data, (out_size + pcm_frame_size)))){
					TSK_DEBUG_ERROR("Failed to allocate new buffer");
					*out_max_size = 0;
					TSK_FREE(speech_data);
					goto bail;
				}
				*out_max_size = (out_size + pcm_frame_size);
			}
			/* decode one frame straight into the output buffer */
			Decoder_Interface_Decode(amr->decoder, speech_data, &((short*)*out_data)[out_size/sizeof(short)], 0);
			out_size += pcm_frame_size, pdata+= size;

			TSK_FREE(speech_data);
		}
	}

bail:
	return out_size;
}
+
+
/**
 * Reads up to 8 bits from a bit buffer.
 * @param bits  source buffer
 * @param size  buffer length in BITS
 * @param start first bit to read (0-based, from the MSB of byte 0)
 * @param count number of bits to read (<= 8); clamped if it would run past 'size'
 * @return the bits right-aligned in a byte, or 0 on invalid arguments
 * NOTE(review): the left/right recombination below looks fragile for reads that
 * straddle a byte boundary with start%8 == (start+count)%8 — verify against a
 * reference bit-reader before relying on it outside the call sites in this file.
 */
static uint8_t tdav_codec_amr_bitbuffer_read(const void* bits, tsk_size_t size, tsk_size_t start, tsk_size_t count)
{
	uint8_t byte, left, right, pad;

	if(!bits || !size || count>8){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	/* clamp the read to the end of the buffer */
	if((start + count) > size){
		count = (size - start);
	}

	pad = start ? (8 - (start % 8)) : count; /* bits of the first byte that belong to us */
	left = ((uint8_t*)bits)[start/8] << (8-pad);
	right = ((uint8_t*)bits)[((start+count)<size ? (start+count) : start)/8] >> pad;

	if((start && (start % 8) != ((start+count)%8)) || (!start && count>8)){
		/* overlap: the requested bits straddle two bytes */
		byte = (left | right) >> (8-count);
	}
	else{
		byte = (left | right) & (0xFF >> (8-count));
	}

	return byte;
}
+
+#endif /* HAVE_OPENCORE_AMR */
diff --git a/tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c b/tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c
new file mode 100644
index 0000000..3495295
--- /dev/null
+++ b/tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c
@@ -0,0 +1,104 @@
+/*
+* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_bfcp.c
 * @brief The Binary Floor Control Protocol (BFCP, RFC 4582) fake codec.
+ */
+#include "tinydav/codecs/bfcp/tdav_codec_bfcp.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
/* ============ BFCP Plugin interface =================
   BFCP is a fake codec: every plugin hook is stubbed out with tsk_null.
   FIX: "tdav_codec_bfcp_sdp_att_get" was #defined twice; the duplicate is removed. */
#define tdav_codec_bfcp_open tsk_null
#define tdav_codec_bfcp_close tsk_null
#define tdav_codec_bfcp_sdp_att_get tsk_null
#define tdav_codec_bfcp_encode tsk_null
#define tdav_codec_bfcp_decode tsk_null
+
+static tsk_bool_t tdav_codec_bfcp_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// BFCP Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_bfcp_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_bfcp_t *bfcp = self;
+ if (bfcp) {
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_bfcp_dtor(tsk_object_t * self)
+{
+ tdav_codec_bfcp_t *bfcp = self;
+ if (bfcp) {
+ /* deinit base */
+ tmedia_codec_bfcp_deinit(bfcp);
+ /* deinit self */
+ }
+
+ return self;
+}
/* object definition: tsk runtime type descriptor for the BFCP fake codec */
static const tsk_object_def_t tdav_codec_bfcp_def_s =
{
	sizeof(tdav_codec_bfcp_t),
	tdav_codec_bfcp_ctor,
	tdav_codec_bfcp_dtor,
	tmedia_codec_cmp,
};
/* plugin definition: all media hooks are tsk_null (see the #defines above) —
   this entry only exists so BFCP shows up during SDP negotiation */
static const tmedia_codec_plugin_def_t tdav_codec_bfcp_plugin_def_s =
{
	&tdav_codec_bfcp_def_s,

	tmedia_bfcp,
	tmedia_codec_id_none, // fake codec without real id
	"application",
	"BFCP fake codec",
	TMEDIA_CODEC_FORMAT_BFCP,
	tsk_false,
	0, // rate

	/* audio */
	{0},

	/* video */
	{0},

	tsk_null, // set()
	tdav_codec_bfcp_open,
	tdav_codec_bfcp_close,
	tdav_codec_bfcp_encode,
	tdav_codec_bfcp_decode,
	tdav_codec_bfcp_sdp_att_match,
	tdav_codec_bfcp_sdp_att_get
};
/* exported plugin entry for the BFCP fake codec */
const tmedia_codec_plugin_def_t *tdav_codec_bfcp_plugin_def_t = &tdav_codec_bfcp_plugin_def_s;
diff --git a/tinyDAV/src/codecs/bv/tdav_codec_bv16.c b/tinyDAV/src/codecs/bv/tdav_codec_bv16.c
new file mode 100644
index 0000000..21850fb
--- /dev/null
+++ b/tinyDAV/src/codecs/bv/tdav_codec_bv16.c
@@ -0,0 +1,250 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_bv16.c
+ * @brief BroadVoice16 codec
+ * The payloader/depayloader follow RFC 4298
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/bv/tdav_codec_bv16.h"
+
+#if HAVE_BV16
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "..\\thirdparties\\win32\\lib\\BroadVoice16\\libbv16.a")
+#endif
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "typedef.h"
+#include "bvcommon.h"
+#include "bv16cnst.h"
+#include "bv16strct.h"
+#include "bv16.h"
+#include "utility.h"
+#if G192BITSTREAM
+#include "g192.h"
+#else
+#include "bitpack.h"
+#endif
+#include "memutil.h"
+
+/* RFC 4298 - 3.1. BroadVoice16 Bit Stream Definition */
+#define TDAV_BV16_FRAME_SIZE 10
+#define FRSZ_IN_U8 (FRSZ*2)
+
+/* ============ BV16 Plugin interface ================= */
+
+#define tdav_codec_bv16_sdp_att_get tsk_null
+#define tdav_codec_bv16_fmtp_set tsk_null
+
+static int sizestate = sizeof(struct BV16_Encoder_State);
+static int sizebitstream = sizeof(struct BV16_Bit_Stream);
+
+int tdav_codec_bv16_open(tmedia_codec_t* self)
+{
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+
+ if(!bv16->encoder.state){
+ bv16->encoder.state = allocWord16(0, sizestate/2-1);
+ Reset_BV16_Encoder((struct BV16_Encoder_State*)bv16->encoder.state);
+ }
+ if(!bv16->encoder.bs){
+ bv16->encoder.bs = allocWord16(0, sizebitstream/2-1);
+ }
+ if(!bv16->encoder.x){
+ bv16->encoder.x = allocWord16(0, FRSZ-1);
+ }
+
+ if(!bv16->decoder.state){
+ bv16->decoder.state = allocWord16(0, sizestate/2-1);
+ Reset_BV16_Decoder((struct BV16_Decoder_State*)bv16->decoder.state);
+ }
+ if(!bv16->decoder.bs){
+ bv16->decoder.bs = allocWord16(0, sizebitstream/2-1);
+ }
+ if(!bv16->decoder.x){
+ bv16->decoder.x = allocWord16(0, FRSZ-1);
+ }
+
+ return 0;
+}
+
+int tdav_codec_bv16_close(tmedia_codec_t* self)
+{
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+
+ if(bv16->encoder.state){
+ deallocWord16(bv16->encoder.state, 0, sizestate/2-1);
+ bv16->encoder.state = tsk_null;
+ }
+ if(bv16->encoder.bs){
+ deallocWord16(bv16->encoder.bs, 0, sizebitstream/2-1);
+ bv16->encoder.bs = tsk_null;
+ }
+ if(bv16->encoder.x){
+ deallocWord16(bv16->encoder.x, 0, FRSZ-1);
+ bv16->encoder.x = tsk_null;
+ }
+
+ if(bv16->decoder.state){
+ deallocWord16(bv16->decoder.state, 0, sizestate/2-1);
+ bv16->decoder.state = tsk_null;
+ }
+ if(bv16->encoder.bs){
+ deallocWord16(bv16->decoder.bs, 0, sizebitstream/2-1);
+ bv16->decoder.bs = tsk_null;
+ }
+ if(bv16->decoder.x){
+ deallocWord16(bv16->decoder.x, 0, FRSZ-1);
+ bv16->decoder.x = tsk_null;
+ }
+
+
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_bv16_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ //tsk_size_t out_size = 0;
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_bv16_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size = 0;
+ int i;
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+ uint8_t mama[600];
+
+ if(!self || !in_data || !in_size || !out_data || (in_size % TDAV_BV16_FRAME_SIZE)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ for(i=0; i<(int)in_size; i+=TDAV_BV16_FRAME_SIZE){
+ BV16_BitUnPack(mama, (struct BV16_Bit_Stream*)bv16->decoder.bs);
+ //BV16_BitUnPack(&((UWord8 *)in_data)[i], (struct BV16_Bit_Stream*)bv16->decoder.bs);
+ BV16_Decode((struct BV16_Bit_Stream*)bv16->decoder.bs, (struct BV16_Decoder_State*)bv16->decoder.state, bv16->decoder.x);
+
+
+ if(*out_max_size<(out_size + FRSZ_IN_U8)){
+ if(!(*out_data = tsk_realloc(*out_data, (out_size + FRSZ_IN_U8)))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = (out_size + FRSZ_IN_U8);
+ }
+ memcpy(&((uint8_t*)* out_data)[out_size], bv16->decoder.x, FRSZ_IN_U8);
+ out_size += FRSZ_IN_U8;
+ }
+
+
+ return out_size;
+}
+
+tsk_bool_t tdav_codec_bv16_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// BV16 Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_bv16_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_bv16_t *bv16 = self;
+ if(bv16){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_bv16_dtor(tsk_object_t * self)
+{
+ tdav_codec_bv16_t *bv16 = self;
+ if(bv16){
+ /* deinit base */
+ tmedia_codec_audio_deinit(bv16);
+ /* deinit self */
+
+ }
+
+ return self;
+}
/* object definition: tsk runtime type descriptor for the BV16 codec */
static const tsk_object_def_t tdav_codec_bv16_def_s =
{
	sizeof(tdav_codec_bv16_t),
	tdav_codec_bv16_ctor,
	tdav_codec_bv16_dtor,
	tmedia_codec_cmp,
};
/* plugin definition
   NOTE(review): the member order here differs from the sibling plugins in this
   tree (no leading set() slot, trailing fmtp_set) — confirm it matches the
   tmedia_codec_plugin_def_t layout this file is compiled against. */
static const tmedia_codec_plugin_def_t tdav_codec_bv16_plugin_def_s =
{
	&tdav_codec_bv16_def_s,

	tmedia_audio,
	tmedia_codec_id_bv16,
	"BV16",
	"BroadVoice16 Rate",
	TMEDIA_CODEC_FORMAT_BV16,
	tsk_true,
	8000, // rate

	{ /* audio */
		1, // channels
		0 // ptime @deprecated
	},

	/* video */
	{0},

	tdav_codec_bv16_open,
	tdav_codec_bv16_close,
	tdav_codec_bv16_encode,
	tdav_codec_bv16_decode,
	tdav_codec_bv16_sdp_att_match,
	tdav_codec_bv16_sdp_att_get,
	tdav_codec_bv16_fmtp_set
};
/* exported plugin entry for the BV16 codec */
const tmedia_codec_plugin_def_t *tdav_codec_bv16_plugin_def_t = &tdav_codec_bv16_plugin_def_s;
+
+
+#endif /* HAVE_BV16 */
diff --git a/tinyDAV/src/codecs/bv/tdav_codec_bv32.c b/tinyDAV/src/codecs/bv/tdav_codec_bv32.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tinyDAV/src/codecs/bv/tdav_codec_bv32.c
diff --git a/tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c b/tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c
new file mode 100644
index 0000000..103ac8d
--- /dev/null
+++ b/tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c
@@ -0,0 +1,126 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_dtmf.c
+ * @brief DTMF (RFC 4733) codec plugins.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+
+/* ============ DTMF Plugin interface ================= */
+
+tsk_size_t tdav_codec_dtmf_fmtp_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ return 0;
+}
+
+tsk_size_t tdav_codec_dtmf_fmtp_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ return 0;
+}
+
+char* tdav_codec_dtmf_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+ if(tsk_striequals(att_name, "fmtp")){
+ return tsk_strdup("0-16");
+ }
+ return tsk_null;
+}
+
+tsk_bool_t tdav_codec_dtmf_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// DTMF Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_dtmf_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_dtmf_t *dtmf = self;
+ if(dtmf){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_dtmf_dtor(tsk_object_t * self)
+{
+ tdav_codec_dtmf_t *dtmf = self;
+ if(dtmf){
+ /* deinit base */
+ tmedia_codec_audio_deinit(dtmf);
+ /* deinit self */
+ }
+
+ return self;
+}
/* object definition: tsk runtime type descriptor for the DTMF fake codec */
static const tsk_object_def_t tdav_codec_dtmf_def_s =
{
	sizeof(tdav_codec_dtmf_t),
	tdav_codec_dtmf_ctor,
	tdav_codec_dtmf_dtor,
	tmedia_codec_cmp,
};
/* plugin definition: open/close are tsk_null and encode/decode are stubs —
   this entry exists so "telephone-event" is offered/answered during negotiation */
static const tmedia_codec_plugin_def_t tdav_codec_dtmf_plugin_def_s =
{
	&tdav_codec_dtmf_def_s,

	tmedia_audio,
	tmedia_codec_id_none, // fake codec without real identifier
	"telephone-event",
	"DTMF Codec (RFC 4733)",
	TMEDIA_CODEC_FORMAT_DTMF,
	tsk_true,
	8000, // rate

	{ /* audio */
		1, // channels
		20 // ptime
	},

	/* video */
	{0},

	tsk_null, // set()
	tsk_null, // open
	tsk_null, // close
	tdav_codec_dtmf_fmtp_encode,
	tdav_codec_dtmf_fmtp_decode,
	tdav_codec_dtmf_sdp_att_match,
	tdav_codec_dtmf_sdp_att_get
};
/* exported plugin entry for the DTMF fake codec */
const tmedia_codec_plugin_def_t *tdav_codec_dtmf_plugin_def_t = &tdav_codec_dtmf_plugin_def_s;
diff --git a/tinyDAV/src/codecs/fec/tdav_codec_red.c b/tinyDAV/src/codecs/fec/tdav_codec_red.c
new file mode 100644
index 0000000..2fb6f27
--- /dev/null
+++ b/tinyDAV/src/codecs/fec/tdav_codec_red.c
@@ -0,0 +1,263 @@
+/*
+* Copyright (C) 2012 Doubango Telecom <http://www.doubango.org>
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango[dot]org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_red.c
+ * @brief RTP Payload for Redundant Audio Data as per RFC 2198
+ */
+#include "tinydav/codecs/fec/tdav_codec_red.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_memory.h"
+#include "tsk_time.h"
+#include "tsk_debug.h"
+
/* RED codec context (RFC 2198).
 * NOTE(review): declared with the VIDEO codec base although the plugin below
 * registers it for tmedia_t140 only — confirm this is intentional. */
typedef struct tdav_codec_red_s
{
    TMEDIA_DECLARE_CODEC_VIDEO;

    tdav_codec_red_rtppacket_cb_f callback; // invoked from decode() for each RTP packet rebuilt from a RED block
    const void* callback_data;              // opaque user data passed back as first argument of 'callback'
}
tdav_codec_red_t;
+
/* Registers the function (and its opaque user data) that decode() will call
 * for every RTP packet reconstructed from a redundant block.
 * Returns 0 on success, -1 when 'self' is null. */
int tdav_codec_red_set_callback(tdav_codec_red_t *self, tdav_codec_red_rtppacket_cb_f callback, const void* callback_data)
{
    if(!self){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    self->callback = callback;
    self->callback_data = callback_data;

    return 0;
}
+
/* open(): stateless codec, nothing to initialize */
static int tdav_codec_red_open(tmedia_codec_t* self)
{
    return 0;
}

/* close(): nothing to release */
static int tdav_codec_red_close(tmedia_codec_t* self)
{
    return 0;
}
+
+static tsk_size_t tdav_codec_red_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tdav_codec_red_t *red = (tdav_codec_red_t *)self;
+ tsk_size_t xsize = (in_size + 1);
+ static const uint8_t __first_octet = 0x00; // F=1, PT=0. Up to the caller to update this first octet with the right PT.
+
+ if(!red || !in_data || !in_size || !out_data || !out_max_size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if(*out_max_size < xsize){
+ if(!(*out_data = tsk_realloc(*out_data, xsize))){
+ TSK_DEBUG_ERROR("Failed to realloc data");
+ *out_max_size = 0;
+ }
+ *out_max_size = xsize;
+ }
+
+ ((uint8_t*)*out_data)[0] = __first_octet;
+ memcpy(&((uint8_t*)*out_data)[1], in_data, in_size);
+
+ return xsize;
+}
+
/* Unpacks an RFC 2198 RED payload: walks the chain of RED block headers, then
 * rebuilds one RTP packet per redundant block and hands each to the registered
 * callback (packets are queued in the jitter buffer and decoded later, async).
 * Always returns 0 because no decoded media is produced directly. */
static tsk_size_t tdav_codec_red_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
    tdav_codec_red_t* red = (tdav_codec_red_t*)self;
    const trtp_rtp_header_t* rtp_hdr = proto_hdr;
    trtp_rtp_packet_t* red_rtp_pkt = tsk_null;
    const uint8_t* pdata = in_data;   // cursor over the block payloads
    const uint8_t* red_hdr = in_data; // cursor over the RED block headers
    tsk_size_t red_hdrs_count, i;
    tsk_bool_t last;
    uint8_t F; // "more headers follow" flag: top bit of each RED header octet
    uint16_t timestamp_offset, block_length;

    if(!red || !in_data || (in_size < TDAV_CODEC_RED_MIN_PKT_SIZE)|| !out_data){
        TSK_DEBUG_ERROR("Invalid parameter");
        return 0;
    }

    if(!red->callback){
        TSK_DEBUG_WARN("Not callback installed for RED data");
        return 0;
    }

    /* Count the RED headers and compute the offset 'i' of the first payload
     * byte: F=1 headers are 4 bytes, the final F=0 header is 1 byte. */
    if((F = (pdata[0] & 0x80)) == 0){
        i = 1;
        red_hdrs_count = 1;
    }
    else{
        for(i = 0, red_hdrs_count = 0; i < in_size; i+= 4, ++red_hdrs_count){
            if((F = (pdata[i] & 0x80)) == 0){ ++i; ++red_hdrs_count; break; }
        }
    }

    if(i >= in_size){
        TSK_DEBUG_ERROR("Invalid data");
        return 0;
    }

    /* skip past the header chain: pdata now points at the first block payload */
    pdata += i;
    in_size -= i;

    for(i = 0; i < red_hdrs_count && in_size > 0; ++i){
        TSK_OBJECT_SAFE_FREE(red_rtp_pkt);
        if(!(red_rtp_pkt = trtp_rtp_packet_create_null())){
            TSK_DEBUG_ERROR("Failed to create RTP packet");
            continue;
        }
        /* clone the outer RTP header; PT and timestamp are patched per block below */
        if(!(red_rtp_pkt->header = trtp_rtp_header_create(rtp_hdr->ssrc, rtp_hdr->seq_num, rtp_hdr->timestamp, rtp_hdr->payload_type, rtp_hdr->marker))){
            TSK_DEBUG_ERROR("Failed to create RTP header");
            continue;
        }

        // Must create an RTP packet for each RED chunck as they will be saved in the JB
        last = (i == (red_hdrs_count - 1));
        F = (red_hdr[0] & 0x80);
        red_rtp_pkt->header->payload_type = (red_hdr[0] & 0x7F);

        if(last || !F){
            /* final 1-byte header: the block runs to the end of the payload
            0 1 2 3 4 5 6 7
            +-+-+-+-+-+-+-+-+
            |0| Block PT |
            +-+-+-+-+-+-+-+-+
            */
            block_length = (uint16_t)in_size;
        }
        else{
            /* 4-byte header: 14-bit timestamp offset + 10-bit block length
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |1| block PT=7 | timestamp offset | block length |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            */
            timestamp_offset = ((red_hdr[1] << 8) | red_hdr[2]) >> 2;
            block_length = ((red_hdr[2] & 0x03) << 8) | red_hdr[3];
            if(block_length > in_size){
                TSK_DEBUG_ERROR("Invalid 'block length'");
                break;
            }
            /* NOTE(review): RFC 2198 defines the offset as an unsigned offset
               of this block's timestamp from the packet timestamp — adding it
               here assumes that convention; confirm. */
            red_rtp_pkt->header->timestamp += timestamp_offset;
            red_hdr += 4;
        }

        // decode
        if(red->callback){
            // do not use "data_const" as payload will be saved in the jitter buffer and decoded later (async)
            if((red_rtp_pkt->payload.data = tsk_malloc(block_length))){
                memcpy(red_rtp_pkt->payload.data, pdata, block_length);
                red_rtp_pkt->payload.size = block_length;
                red->callback(red->callback_data, red_rtp_pkt);
            }
        }

        pdata += block_length;
        in_size -= block_length;
    }

    TSK_OBJECT_SAFE_FREE(red_rtp_pkt);

    return 0; // must be always zero
}
+
/* SDP negotiation: accept any remote attribute value */
static tsk_bool_t tdav_codec_red_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
{
    return tsk_true;
}

/* SDP attributes: none advertised by this codec */
static char* tdav_codec_red_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
{
    return tsk_null;
}
+
+
+/* ============ red object definition ================= */
+
/* constructor: base init done by tmedia_codec_create(); no private state */
static tsk_object_t* tdav_codec_red_ctor(tsk_object_t * self, va_list * app)
{
    tdav_codec_red_t *red = self;
    if(red){
        /* init base: called by tmedia_codec_create() */
        /* init self */

    }
    return self;
}
/* destructor: releases the video-codec base; no private state to free */
static tsk_object_t* tdav_codec_red_dtor(tsk_object_t * self)
{
    tdav_codec_red_t *red = self;
    if(red){
        /* deinit base */
        tmedia_codec_video_deinit(red);
        /* deinit self */

    }

    return self;
}
/* object definition (size + ctor/dtor/comparator, positional) */
static const tsk_object_def_t tdav_codec_red_def_s =
{
    sizeof(tdav_codec_red_t),
    tdav_codec_red_ctor,
    tdav_codec_red_dtor,
    tmedia_codec_cmp,
};
/* plugin definition: positional initialization of tmedia_codec_plugin_def_t */
static const tmedia_codec_plugin_def_t tdav_codec_red_plugin_def_s =
{
    &tdav_codec_red_def_s,

    (/* tmedia_video | tmedia_audio | */tmedia_t140), // FIXME: for now is only supported with T.140
    tmedia_codec_id_red,
    "red",
    "red codec",
    TMEDIA_CODEC_FORMAT_RED,
    tsk_true,
    1000, // rate: FIXME: for now it's only for T.140

    /* audio */
    { 0 },

    /* video (defaul width,height,fps) */
    {176, 144, 15},

    tsk_null, // set()
    tdav_codec_red_open,
    tdav_codec_red_close,
    tdav_codec_red_encode,
    tdav_codec_red_decode,
    tdav_codec_red_sdp_att_match,
    tdav_codec_red_sdp_att_get
};
const tmedia_codec_plugin_def_t *tdav_codec_red_plugin_def_t = &tdav_codec_red_plugin_def_s;
diff --git a/tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c b/tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c
new file mode 100644
index 0000000..f492a52
--- /dev/null
+++ b/tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c
@@ -0,0 +1,424 @@
+/*
+* Copyright (C) 2012-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_ulpfec.c
+ * @brief Forward Error Correction (FEC) implementation as per RFC 5109
+ */
+#include "tinydav/codecs/fec/tdav_codec_ulpfec.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_FEC_PKT_HDR_SIZE 10
+
/* ULPFEC codec context (RFC 5109) */
typedef struct tdav_codec_ulpfec_s
{
    TMEDIA_DECLARE_CODEC_VIDEO;

    struct{
        struct tdav_fec_pkt_s* pkt; // FEC packet under construction; XOR-accumulates protected media packets
    } encoder;
}
tdav_codec_ulpfec_t;
+
//
// FEC LEVEL
//
typedef struct tdav_fec_level_s
{
    TSK_DECLARE_OBJECT;

    struct{ // 7.4. FEC Level Header for FEC Packets
        uint16_t length;     // protection length (max protected payload size seen)
        uint64_t mask;       // bitmask of protected packets relative to SN base
        tsk_size_t mask_size; // in bits
    } hdr;
    struct{
        uint8_t* ptr;        // XOR accumulator over the protected payloads
        tsk_size_t size;     // allocated size of 'ptr' in bytes
    }payload;
}tdav_fec_level_t;
typedef tsk_list_t tdav_fec_levels_L_t;
/* constructor: default to the short (16-bit, L=0) mask */
static tsk_object_t* tdav_fec_level_ctor(tsk_object_t * self, va_list * app)
{
    tdav_fec_level_t *level = self;
    if (level){
        level->hdr.mask_size = 16; // L=0
    }
    return self;
}
/* destructor: frees the payload accumulator */
static tsk_object_t* tdav_fec_level_dtor(tsk_object_t * self)
{
    tdav_fec_level_t *level = self;
    if (level){
        TSK_FREE(level->payload.ptr);
    }

    return self;
}
static const tsk_object_def_t tdav_fec_level_def_s =
{
    sizeof(tdav_fec_level_t),
    tdav_fec_level_ctor,
    tdav_fec_level_dtor,
    tsk_null,
};
const tsk_object_def_t *tdav_fec_level_def_t = &tdav_fec_level_def_s;
+
+
//
// FEC PACKET
//
typedef struct tdav_fec_pkt_s
{
    TSK_DECLARE_OBJECT;

    struct{ // RFC 5109 - 7.3. FEC Header for FEC Packets
        unsigned E : 1;
        unsigned L : 1; // 0 => 16-bit masks, 1 => 48-bit masks
        unsigned P : 1;
        unsigned X : 1;
        unsigned CC : 4;
        unsigned M : 1;
        unsigned PT : 7;
        struct{
            uint16_t value;  // lowest sequence number among protected packets
            unsigned set : 1; // whether 'value' has been initialized yet
        }SN_base;
        uint32_t TS;
        uint16_t length;
    }hdr;

    tdav_fec_levels_L_t* levels; // list of tdav_fec_level_t (currently one)
}
tdav_fec_pkt_t;
/* constructor: allocates the (initially empty) level list */
static tsk_object_t* tdav_fec_pkt_ctor(tsk_object_t * self, va_list * app)
{
    tdav_fec_pkt_t *pkt = self;
    if (pkt){
        if (!(pkt->levels = tsk_list_create())){
            TSK_DEBUG_ERROR("Failed to create levels");
            return tsk_null;
        }
    }
    return self;
}
/* destructor: releases the level list (and its levels) */
static tsk_object_t* tdav_fec_pkt_dtor(tsk_object_t * self)
{
    tdav_fec_pkt_t *pkt = self;
    if (pkt){
        TSK_OBJECT_SAFE_FREE(pkt->levels);
    }

    return self;
}
/* comparator: orders packets by SN base (list sorting) */
static int tdav_fec_pkt_cmp(const tsk_object_t *_p1, const tsk_object_t *_p2)
{
    const tdav_fec_pkt_t *p1 = _p1;
    const tdav_fec_pkt_t *p2 = _p2;

    if (p1 && p2){
        return (int)(p1->hdr.SN_base.value - p2->hdr.SN_base.value);
    }
    else if (!p1 && !p2) return 0;
    else return -1;
}
static const tsk_object_def_t tdav_fec_pkt_def_s =
{
    sizeof(tdav_fec_pkt_t),
    tdav_fec_pkt_ctor,
    tdav_fec_pkt_dtor,
    tdav_fec_pkt_cmp,
};
const tsk_object_def_t *tdav_fec_pkt_def_t = &tdav_fec_pkt_def_s;
+
+
+tsk_size_t tdav_codec_ulpfec_guess_serialbuff_size(const tdav_codec_ulpfec_t* self)
+{
+ tsk_size_t size = TDAV_FEC_PKT_HDR_SIZE;
+ tsk_list_item_t *item;
+ tdav_fec_level_t* level;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ tsk_list_foreach(item, self->encoder.pkt->levels){
+ if (!(level = item->data)){
+ continue;
+ }
+ size += 2 /* Protection length */ + (level->hdr.mask_size >> 3) + level->hdr.length;
+ }
+
+ return size;
+}
+
+int tdav_codec_ulpfec_enc_reset(tdav_codec_ulpfec_t* self)
+{
+ tsk_list_item_t *item;
+ tdav_fec_level_t* level;
+
+ if (!self || !self->encoder.pkt){
+ TSK_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+
+ // reset packet
+ memset(&self->encoder.pkt->hdr, 0, sizeof(self->encoder.pkt->hdr));
+
+ // reset levels
+ tsk_list_foreach(item, self->encoder.pkt->levels){
+ if ((level = item->data)){
+ memset(&level->hdr, 0, sizeof(level->hdr));
+ if (level->payload.ptr){
+ memset(level->payload.ptr, 0, level->payload.size);
+ }
+ }
+ }
+ return 0;
+}
+
/* XOR-folds one media packet into the pending FEC packet (RFC 5109):
 * header fields are XORed into the FEC header, the payload into level 0.
 * Returns 0 on success, negative on error. */
int tdav_codec_ulpfec_enc_protect(tdav_codec_ulpfec_t* self, const trtp_rtp_packet_t* rtp_packet)
{
    if (!self || !self->encoder.pkt || !rtp_packet || !rtp_packet->header){
        TSK_DEBUG_ERROR("invalid parameter");
        return -1;
    }

    // Packet: XOR the recovery fields, track the smallest sequence number as SN base
    self->encoder.pkt->hdr.P ^= rtp_packet->header->padding;
    self->encoder.pkt->hdr.X ^= rtp_packet->header->extension;
    self->encoder.pkt->hdr.CC ^= rtp_packet->header->csrc_count;
    self->encoder.pkt->hdr.M ^= rtp_packet->header->marker;
    self->encoder.pkt->hdr.PT ^= rtp_packet->header->payload_type;
    if (!self->encoder.pkt->hdr.SN_base.set){
        self->encoder.pkt->hdr.SN_base.value = rtp_packet->header->seq_num;
        self->encoder.pkt->hdr.SN_base.set = 1;
    }
    else{
        self->encoder.pkt->hdr.SN_base.value = TSK_MIN(self->encoder.pkt->hdr.SN_base.value, rtp_packet->header->seq_num);
    }
    self->encoder.pkt->hdr.TS ^= rtp_packet->header->timestamp;
    self->encoder.pkt->hdr.length ^= (trtp_rtp_packet_guess_serialbuff_size(rtp_packet) - TRTP_RTP_HEADER_MIN_SIZE);

    // Level
    // For now, always single-level protection
    {
        tdav_fec_level_t* level0 = TSK_LIST_FIRST_DATA(self->encoder.pkt->levels);
        const uint8_t* rtp_payload = (const uint8_t*)(rtp_packet->payload.data_const ? rtp_packet->payload.data_const : rtp_packet->payload.data);
        tsk_size_t i;
        if (!level0){
            /* lazily create level 0; the list takes ownership of '_level0' */
            tdav_fec_level_t* _level0;
            if (!(_level0 = tsk_object_new(tdav_fec_level_def_t))){
                TSK_DEBUG_ERROR("Failed to create level");
                return -2;
            }
            level0 = _level0;
            tsk_list_push_back_data(self->encoder.pkt->levels, (void**)&_level0);
        }
        /* grow the XOR accumulator to the largest protected payload */
        if (level0->payload.size < rtp_packet->payload.size){
            if (!(level0->payload.ptr = tsk_realloc(level0->payload.ptr, rtp_packet->payload.size))){
                TSK_DEBUG_ERROR("Failed to realloc size %d", rtp_packet->payload.size);
                level0->payload.size = 0;
                return -3;
            }
            level0->payload.size = rtp_packet->payload.size;
        }
        for (i = 0; i < rtp_packet->payload.size; ++i){
            level0->payload.ptr[i] ^= rtp_payload[i];
        }
        level0->hdr.mask_size = self->encoder.pkt->hdr.L ? 48 : 16;
        /* NOTE(review): the bit index is (mask_size - offset); for offset 0 this
         * sets bit 'mask_size', one past the serialized mask (serializer emits
         * bits 0..mask_size-1). RFC 5109 maps offset i to bit mask_size-1-i —
         * looks like an off-by-one; confirm against the receiver before changing. */
        level0->hdr.mask |= (uint64_t)((uint64_t)1 << (level0->hdr.mask_size - (rtp_packet->header->seq_num - self->encoder.pkt->hdr.SN_base.value)));
        level0->hdr.length = (uint16_t)(TSK_MAX(level0->hdr.length, rtp_packet->payload.size));
    }

    return 0;
}
+
+tsk_size_t tdav_codec_ulpfec_enc_serialize(const tdav_codec_ulpfec_t* self, void** out_data, tsk_size_t* out_max_size)
+{
+ uint8_t* pdata;
+ tsk_size_t xsize;
+ int32_t i;
+ tsk_list_item_t* item;
+ tdav_fec_level_t* level;
+
+ if (!self || !self->encoder.pkt || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ xsize = tdav_codec_ulpfec_guess_serialbuff_size(self);
+
+ if (*out_max_size < xsize){
+ if (!(*out_data = tsk_realloc(*out_data, xsize))){
+ TSK_DEBUG_ERROR("Failed to reallocate buffer with size =%d", xsize);
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = xsize;
+ }
+ pdata = (uint8_t*)*out_data;
+
+ // E(1), L(1), P(1), X(1), CC(4)
+ pdata[0] =
+ (self->encoder.pkt->hdr.E << 7) |
+ (self->encoder.pkt->hdr.L << 6) |
+ (self->encoder.pkt->hdr.P << 5) |
+ (self->encoder.pkt->hdr.X << 4) |
+ (self->encoder.pkt->hdr.CC & 0x0F);
+ // M(1), PT(7)
+ pdata[1] = (self->encoder.pkt->hdr.M << 7) | (self->encoder.pkt->hdr.PT & 0x7F);
+ // SN base (16)
+ pdata[2] = (self->encoder.pkt->hdr.SN_base.value >> 8);
+ pdata[3] = (self->encoder.pkt->hdr.SN_base.value & 0xFF);
+ // TS (32)
+ pdata[4] = self->encoder.pkt->hdr.TS >> 24;
+ pdata[5] = (self->encoder.pkt->hdr.TS >> 16) & 0xFF;
+ pdata[6] = (self->encoder.pkt->hdr.TS >> 8) & 0xFF;
+ pdata[7] = (self->encoder.pkt->hdr.TS & 0xFF);
+ // Length (16)
+ pdata[8] = (self->encoder.pkt->hdr.length >> 8);
+ pdata[9] = (self->encoder.pkt->hdr.length & 0xFF);
+
+ pdata += 10;
+
+ tsk_list_foreach(item, self->encoder.pkt->levels){
+ if (!(level = item->data)){
+ continue;
+ }
+ // Protection length (16)
+ pdata[0] = (level->hdr.length >> 8);
+ pdata[1] = (level->hdr.length & 0xFF);
+ pdata += 2;
+ // mask (16 or 48)
+ for (i = (int32_t)(level->hdr.mask_size - 8); i >= 0; i -= 8){
+ *pdata = ((level->hdr.mask >> i) & 0xFF); ++pdata;
+ }
+ // payload
+ memcpy(pdata, level->payload.ptr, level->hdr.length);
+ }
+
+ return xsize;
+}
+
+
+
/* open(): nothing to initialize */
static int tdav_codec_ulpfec_open(tmedia_codec_t* self)
{
    return 0;
}

/* close(): nothing to release */
static int tdav_codec_ulpfec_close(tmedia_codec_t* self)
{
    return 0;
}

/* encode()/decode() are never used directly: callers go through
 * tdav_codec_ulpfec_enc_protect()/enc_serialize() instead */
static tsk_size_t tdav_codec_ulpfec_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
{
    TSK_DEBUG_ERROR("Not expected to be called");
    return 0;
}

static tsk_size_t tdav_codec_ulpfec_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
    TSK_DEBUG_ERROR("Not expected to be called");
    return 0;
}

/* SDP negotiation: accept any remote attribute value */
static tsk_bool_t tdav_codec_ulpfec_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
{
    return tsk_true;
}

/* SDP attributes: none advertised */
static char* tdav_codec_ulpfec_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
{
    return tsk_null;
}
+
+
/* ============ ULPFEC object definition ================= */

/* constructor: base init done by tmedia_codec_create(); allocates the FEC
 * packet used by the encoder */
static tsk_object_t* tdav_codec_ulpfec_ctor(tsk_object_t * self, va_list * app)
{
    tdav_codec_ulpfec_t *ulpfec = self;
    if (ulpfec){
        /* init base: called by tmedia_codec_create() */
        /* init self */
        if (!(ulpfec->encoder.pkt = tsk_object_new(tdav_fec_pkt_def_t))){
            TSK_DEBUG_ERROR("Failed to create FEC packet");
            return tsk_null;
        }
    }
    return self;
}
/* destructor: releases the video-codec base and the encoder FEC packet */
static tsk_object_t* tdav_codec_ulpfec_dtor(tsk_object_t * self)
{
    tdav_codec_ulpfec_t *ulpfec = self;
    if (ulpfec){
        /* deinit base */
        tmedia_codec_video_deinit(ulpfec);
        /* deinit self */
        TSK_OBJECT_SAFE_FREE(ulpfec->encoder.pkt);
    }

    return self;
}
/* object definition (size + ctor/dtor/comparator, positional) */
static const tsk_object_def_t tdav_codec_ulpfec_def_s =
{
    sizeof(tdav_codec_ulpfec_t),
    tdav_codec_ulpfec_ctor,
    tdav_codec_ulpfec_dtor,
    tmedia_codec_cmp,
};
/* plugin definition: positional initialization of tmedia_codec_plugin_def_t */
static const tmedia_codec_plugin_def_t tdav_codec_ulpfec_plugin_def_s =
{
    &tdav_codec_ulpfec_def_s,

    tmedia_video,
    tmedia_codec_id_none, // fake codec
    "ulpfec",
    "ulpfec codec",
    TMEDIA_CODEC_FORMAT_ULPFEC,
    tsk_true,
    90000, // rate

    /* audio */
    { 0 },

    /* video (defaul width,height,fps) */
    { 176, 144, 15 },

    tsk_null, // set()
    tdav_codec_ulpfec_open,
    tdav_codec_ulpfec_close,
    tdav_codec_ulpfec_encode,
    tdav_codec_ulpfec_decode,
    tdav_codec_ulpfec_sdp_att_match,
    tdav_codec_ulpfec_sdp_att_get
};
const tmedia_codec_plugin_def_t *tdav_codec_ulpfec_plugin_def_t = &tdav_codec_ulpfec_plugin_def_s;
diff --git a/tinyDAV/src/codecs/g711/g711.c b/tinyDAV/src/codecs/g711/g711.c
new file mode 100644
index 0000000..fa7c8be
--- /dev/null
+++ b/tinyDAV/src/codecs/g711/g711.c
@@ -0,0 +1,295 @@
+/*
+ * This source code is a product of Sun Microsystems, Inc. and is provided
+ * for unrestricted use. Users may copy or modify this source code without
+ * charge.
+ *
+ * SUN SOURCE CODE IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING
+ * THE WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun source code is provided with no support and without any obligation on
+ * the part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY THIS SOFTWARE
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+/*
+ * g711.c
+ *
+ * u-law, A-law and linear PCM conversions.
+ */
+
+/*
+ * December 30, 1994:
+ * Functions linear2alaw, linear2ulaw have been updated to correctly
+ * convert unquantized 16 bit values.
+ * Tables for direct u- to A-law and A- to u-law conversions have been
+ * corrected.
+ * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
+ * bli@cpk.auc.dk
+ *
+ */
+
+#include "tinydav/codecs/g711/g711.h"
+
+#define SIGN_BIT (0x80) /* Sign bit for a A-law byte. */
+#define QUANT_MASK (0xf) /* Quantization field mask. */
+#define NSEGS (8) /* Number of A-law segments. */
+#define SEG_SHIFT (4) /* Left shift for segment number. */
+#define SEG_MASK (0x70) /* Segment field mask. */
+
/* Segment end-points: map a linear magnitude to its A-law (seg_aend) or
 * u-law (seg_uend) segment number via search(). */
static short seg_aend[8] = {0x1F, 0x3F, 0x7F, 0xFF,
                0x1FF, 0x3FF, 0x7FF, 0xFFF};
static short seg_uend[8] = {0x3F, 0x7F, 0xFF, 0x1FF,
                0x3FF, 0x7FF, 0xFFF, 0x1FFF};

/* copy from CCITT G.711 specifications */
unsigned char _u2a[128] = { /* u- to A-law conversions */
    1, 1, 2, 2, 3, 3, 4, 4,
    5, 5, 6, 6, 7, 7, 8, 8,
    9, 10, 11, 12, 13, 14, 15, 16,
    17, 18, 19, 20, 21, 22, 23, 24,
    25, 27, 29, 31, 33, 34, 35, 36,
    37, 38, 39, 40, 41, 42, 43, 44,
    46, 48, 49, 50, 51, 52, 53, 54,
    55, 56, 57, 58, 59, 60, 61, 62,
    64, 65, 66, 67, 68, 69, 70, 71,
    72, 73, 74, 75, 76, 77, 78, 79,
/* corrected:
    81, 82, 83, 84, 85, 86, 87, 88,
   should be: */
    80, 82, 83, 84, 85, 86, 87, 88,
    89, 90, 91, 92, 93, 94, 95, 96,
    97, 98, 99, 100, 101, 102, 103, 104,
    105, 106, 107, 108, 109, 110, 111, 112,
    113, 114, 115, 116, 117, 118, 119, 120,
    121, 122, 123, 124, 125, 126, 127, 128};

unsigned char _a2u[128] = { /* A- to u-law conversions */
    1, 3, 5, 7, 9, 11, 13, 15,
    16, 17, 18, 19, 20, 21, 22, 23,
    24, 25, 26, 27, 28, 29, 30, 31,
    32, 32, 33, 33, 34, 34, 35, 35,
    36, 37, 38, 39, 40, 41, 42, 43,
    44, 45, 46, 47, 48, 48, 49, 49,
    50, 51, 52, 53, 54, 55, 56, 57,
    58, 59, 60, 61, 62, 63, 64, 64,
    65, 66, 67, 68, 69, 70, 71, 72,
/* corrected:
    73, 74, 75, 76, 77, 78, 79, 79,
   should be: */
    73, 74, 75, 76, 77, 78, 79, 80,

    80, 81, 82, 83, 84, 85, 86, 87,
    88, 89, 90, 91, 92, 93, 94, 95,
    96, 97, 98, 99, 100, 101, 102, 103,
    104, 105, 106, 107, 108, 109, 110, 111,
    112, 113, 114, 115, 116, 117, 118, 119,
    120, 121, 122, 123, 124, 125, 126, 127};
+
+static short search(short val, short *table, short size)
+{
+ short i;
+
+ for (i = 0; i < size; i++) {
+ if (val <= *table++)
+ return (i);
+ }
+ return (size);
+}
+
+/*
+ * linear2alaw() - Convert a 16-bit linear PCM value to 8-bit A-law
+ *
+ * linear2alaw() accepts an 16-bit integer and encodes it as A-law data.
+ *
+ * Linear Input Code Compressed Code
+ * ------------------------ ---------------
+ * 0000000wxyza 000wxyz
+ * 0000001wxyza 001wxyz
+ * 000001wxyzab 010wxyz
+ * 00001wxyzabc 011wxyz
+ * 0001wxyzabcd 100wxyz
+ * 001wxyzabcde 101wxyz
+ * 01wxyzabcdef 110wxyz
+ * 1wxyzabcdefg 111wxyz
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
+unsigned char linear2alaw(short pcm_val) /* 2's complement (16-bit range) */
+{
+ short mask;
+ short seg;
+ unsigned char aval;
+
+ pcm_val = pcm_val >> 3;
+
+ if (pcm_val >= 0) {
+ mask = 0xD5; /* sign (7th) bit = 1 */
+ } else {
+ mask = 0x55; /* sign bit = 0 */
+ pcm_val = -pcm_val - 1;
+ }
+
+ /* Convert the scaled magnitude to segment number. */
+ seg = search(pcm_val, seg_aend, 8);
+
+ /* Combine the sign, segment, and quantization bits. */
+
+ if (seg >= 8) /* out of range, return maximum value. */
+ return (unsigned char) (0x7F ^ mask);
+ else {
+ aval = (unsigned char) seg << SEG_SHIFT;
+ if (seg < 2)
+ aval |= (pcm_val >> 1) & QUANT_MASK;
+ else
+ aval |= (pcm_val >> seg) & QUANT_MASK;
+ return (aval ^ mask);
+ }
+}
+
+/*
+ * alaw2linear() - Convert an A-law value to 16-bit linear PCM
+ *
+ */
short alaw2linear(unsigned char a_val)
{
    short t;
    short seg;

    a_val ^= 0x55; /* undo the even-bit inversion applied by linear2alaw */

    /* rebuild the quantized magnitude from the 4 quantization bits ... */
    t = (a_val & QUANT_MASK) << 4;
    seg = ((unsigned)a_val & SEG_MASK) >> SEG_SHIFT;
    /* ... then scale it by the segment number (piecewise-linear expansion) */
    switch (seg) {
    case 0:
        t += 8;
        break;
    case 1:
        t += 0x108;
        break;
    default:
        t += 0x108;
        t <<= seg - 1;
    }
    /* sign bit set means positive in A-law */
    return ((a_val & SIGN_BIT) ? t : -t);
}
+
+#define BIAS (0x84) /* Bias for linear code. */
+#define CLIP 8159
+
+/*
+ * linear2ulaw() - Convert a linear PCM value to u-law
+ *
+ * In order to simplify the encoding process, the original linear magnitude
+ * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
+ * (33 - 8191). The result can be seen in the following encoding table:
+ *
+ * Biased Linear Input Code Compressed Code
+ * ------------------------ ---------------
+ * 00000001wxyza 000wxyz
+ * 0000001wxyzab 001wxyz
+ * 000001wxyzabc 010wxyz
+ * 00001wxyzabcd 011wxyz
+ * 0001wxyzabcde 100wxyz
+ * 001wxyzabcdef 101wxyz
+ * 01wxyzabcdefg 110wxyz
+ * 1wxyzabcdefgh 111wxyz
+ *
+ * Each biased linear code has a leading 1 which identifies the segment
+ * number. The value of the segment number is equal to 7 minus the number
+ * of leading 0's. The quantization interval is directly available as the
+ * four bits wxyz. * The trailing bits (a - h) are ignored.
+ *
+ * Ordinarily the complement of the resulting code word is used for
+ * transmission, and so the code word is complemented before it is returned.
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
unsigned char linear2ulaw(short pcm_val) /* 2's complement (16-bit range) */
{
    short mask;
    short seg;
    unsigned char uval;

    /* Get the sign and the magnitude of the value. */
    pcm_val = pcm_val >> 2; /* scale 16-bit input down to the 14-bit u-law range */
    if (pcm_val < 0) {
        pcm_val = -pcm_val;
        mask = 0x7F;
    } else {
        mask = 0xFF;
    }
    if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
    pcm_val += (BIAS >> 2); /* bias so the segment number is encoded by the leading 1 */

    /* Convert the scaled magnitude to segment number. */
    seg = search(pcm_val, seg_uend, 8);

    /*
     * Combine the sign, segment, quantization bits;
     * and complement the code word.
     */
    if (seg >= 8) /* out of range, return maximum value. */
        return (unsigned char) (0x7F ^ mask);
    else {
        uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
        return (uval ^ mask);
    }

}
+
+/*
+ * ulaw2linear() - Convert a u-law value to 16-bit linear PCM
+ *
+ * First, a biased linear code is derived from the code word. An unbiased
+ * output can then be obtained by subtracting 33 from the biased code.
+ *
+ * Note that this function expects to be passed the complement of the
+ * original code word. This is in keeping with ISDN conventions.
+ */
short ulaw2linear(unsigned char u_val)
{
    short t;

    /* Complement to obtain normal u-law value. */
    u_val = ~u_val;

    /*
     * Extract and bias the quantization bits. Then
     * shift up by the segment number and subtract out the bias.
     */
    t = ((u_val & QUANT_MASK) << 3) + BIAS;
    t <<= ((unsigned)u_val & SEG_MASK) >> SEG_SHIFT;

    /* sign bit set means negative in u-law */
    return ((u_val & SIGN_BIT) ? (BIAS - t) : (t - BIAS));
}
+
/* A-law to u-law conversion (direct table lookup via _a2u; the XOR masks
 * undo/reapply the per-format bit inversions and the sign convention) */
unsigned char alaw2ulaw(unsigned char aval)
{
    aval &= 0xff;
    return (unsigned char) ((aval & 0x80) ? (0xFF ^ _a2u[aval ^ 0xD5]) :
        (0x7F ^ _a2u[aval ^ 0x55]));
}

/* u-law to A-law conversion (direct table lookup via _u2a) */
unsigned char ulaw2alaw(unsigned char uval)
{
    uval &= 0xff;
    return (unsigned char) ((uval & 0x80) ? (0xD5 ^ (_u2a[0xFF ^ uval] - 1)) :
        (unsigned char) (0x55 ^ (_u2a[0x7F ^ uval] - 1)));
}
diff --git a/tinyDAV/src/codecs/g711/tdav_codec_g711.c b/tinyDAV/src/codecs/g711/tdav_codec_g711.c
new file mode 100644
index 0000000..fa970e1
--- /dev/null
+++ b/tinyDAV/src/codecs/g711/tdav_codec_g711.c
@@ -0,0 +1,326 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_g711.c
+ * @brief G.711u and G.711a (a.k.a PCMU and PCMA) codec plugins.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/g711/tdav_codec_g711.h"
+
+#include "tinydav/codecs/g711/g711.h" /* algorithms */
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+/* ============ G.711u Plugin interface ================= */
+
+#define tdav_codec_g711u_open tsk_null
+#define tdav_codec_g711u_close tsk_null
+#define tdav_codec_g711u_sdp_att_get tsk_null
+
+static tsk_size_t tdav_codec_g711u_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ register tsk_size_t i;
+ register uint8_t* pout_data;
+ register int16_t* pin_data;
+ tsk_size_t out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size >> 1);
+
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ pout_data = *out_data;
+ pin_data = (int16_t*)in_data;
+ for(i = 0; i<out_size; i++){
+ pout_data[i] = linear2ulaw(pin_data[i]);
+ }
+
+ return out_size;
+}
+
+static tsk_size_t tdav_codec_g711u_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t i;
+ tsk_size_t out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size << 1);
+
+ /* allocate new buffer */
+ if(*out_max_size<out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ for(i = 0; i<in_size; i++){
+ ((short*)*out_data)[i] = ulaw2linear(((uint8_t*)in_data)[i]);
+ }
+
+ return out_size;
+}
+
/* SDP negotiation: PCMU accepts any remote attribute value */
static tsk_bool_t tdav_codec_g711u_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
{ /* always match */
    return tsk_true;
}
+
+
+//
+// G.711u Plugin definition
+//
+
/* constructor: base init done by tmedia_codec_create(); no private state */
static tsk_object_t* tdav_codec_g711u_ctor(tsk_object_t * self, va_list * app)
{
    tdav_codec_g711u_t *g711u = self;
    if(g711u){
        /* init base: called by tmedia_codec_create() */
        /* init self */
    }
    return self;
}
/* destructor: releases the audio-codec base; no private state to free */
static tsk_object_t* tdav_codec_g711u_dtor(tsk_object_t * self)
{
    tdav_codec_g711u_t *g711u = self;
    if(g711u){
        /* deinit base */
        tmedia_codec_audio_deinit(g711u);
        /* deinit self */
    }

    return self;
}
/* object definition (size + ctor/dtor/comparator, positional) */
static const tsk_object_def_t tdav_codec_g711u_def_s =
{
    sizeof(tdav_codec_g711u_t),
    tdav_codec_g711u_ctor,
    tdav_codec_g711u_dtor,
    tmedia_codec_cmp,
};
/* plugin definition: positional initialization of tmedia_codec_plugin_def_t */
static const tmedia_codec_plugin_def_t tdav_codec_g711u_plugin_def_s =
{
    &tdav_codec_g711u_def_s,

    tmedia_audio,
    tmedia_codec_id_pcmu,
    "PCMU",
    "G.711u codec (native)",
    TMEDIA_CODEC_FORMAT_G711u,
    tsk_false,
    8000, // rate

    { /* audio */
        1, // channels
        0 // ptime @deprecated
    },

    /* video */
    {0},

    tsk_null, // set()
    tdav_codec_g711u_open,  // tsk_null alias: nothing to open
    tdav_codec_g711u_close, // tsk_null alias: nothing to close
    tdav_codec_g711u_encode,
    tdav_codec_g711u_decode,
    tdav_codec_g711u_sdp_att_match,
    tdav_codec_g711u_sdp_att_get // tsk_null alias: no SDP attributes advertised
};
const tmedia_codec_plugin_def_t *tdav_codec_g711u_plugin_def_t = &tdav_codec_g711u_plugin_def_s;
+
+
+/* ============ G.711a Plugin interface ================= */
+
+#define tdav_codec_g711a_open tsk_null
+#define tdav_codec_g711a_close tsk_null
+#define tdav_codec_g711a_sdp_att_get tsk_null
+
+static tsk_size_t tdav_codec_g711a_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ register tsk_size_t i;
+ register uint8_t* pout_data;
+ register int16_t* pin_data;
+ tsk_size_t out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size >> 1);
+
+ if(*out_max_size < out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ pout_data = *out_data;
+ pin_data = (int16_t*)in_data;
+ for(i = 0; i<out_size; i++){
+ pout_data[i] = linear2alaw(pin_data[i]);
+ }
+
+ return out_size;
+}
+
+#if 0
+FILE* file = tsk_null;
+int count = 0;
+#endif
+static tsk_size_t tdav_codec_g711a_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t i, out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ out_size = (in_size << 1);
+#if 0
+ if(!file && count<=1000){
+ file = fopen("./g711a.pcm", "wb");
+ }
+#endif
+ /* allocate new buffer */
+ if(*out_max_size < out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ for(i = 0; i<in_size; i++){
+ ((short*)*out_data)[i] = alaw2linear(((uint8_t*)in_data)[i]);
+ }
+#if 0
+ if(++count<=1000){
+ fwrite(*out_data, sizeof(short), in_size, file);
+ }
+ else if(file){
+ fclose(file);
+ file = tsk_null;
+ }
+#endif
+ return out_size;
+}
+
/* SDP negotiation: PCMA accepts any remote attribute value */
static tsk_bool_t tdav_codec_g711a_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
{ /* always match */
    return tsk_true;
}
+
+
+//
+// G.711a Plugin definition
+//
+
/* constructor: base init done by tmedia_codec_create(); no private state */
static tsk_object_t* tdav_codec_g711a_ctor(tsk_object_t * self, va_list * app)
{
    tdav_codec_g711a_t *g711a = self;
    if(g711a){
        /* init base: called by tmedia_codec_create() */
        /* init self */
    }
    return self;
}
/* destructor: releases the audio-codec base; no private state to free */
static tsk_object_t* tdav_codec_g711a_dtor(tsk_object_t * self)
{
    tdav_codec_g711a_t *g711a = self;
    if(g711a){
        /* deinit base */
        tmedia_codec_audio_deinit(g711a);
        /* deinit self */
    }

    return self;
}
/* object definition (size + ctor/dtor/comparator, positional) */
static const tsk_object_def_t tdav_codec_g711a_def_s =
{
    sizeof(tdav_codec_g711a_t),
    tdav_codec_g711a_ctor,
    tdav_codec_g711a_dtor,
    tmedia_codec_cmp,
};
/* plugin definition: positional initialization of tmedia_codec_plugin_def_t */
static const tmedia_codec_plugin_def_t tdav_codec_g711a_plugin_def_s =
{
    &tdav_codec_g711a_def_s,

    tmedia_audio,
    tmedia_codec_id_pcma,
    "PCMA",
    "G.711a codec (native)",
    TMEDIA_CODEC_FORMAT_G711a,
    tsk_false,
    8000, // rate

    { /* audio */
        1, // channels
        0 // ptime @deprecated
    },

    /* video */
    {0},

    tsk_null, // set()
    tdav_codec_g711a_open,  // tsk_null alias: nothing to open
    tdav_codec_g711a_close, // tsk_null alias: nothing to close
    tdav_codec_g711a_encode,
    tdav_codec_g711a_decode,
    tdav_codec_g711a_sdp_att_match,
    tdav_codec_g711a_sdp_att_get // tsk_null alias: no SDP attributes advertised
};
const tmedia_codec_plugin_def_t *tdav_codec_g711a_plugin_def_t = &tdav_codec_g711a_plugin_def_s;
diff --git a/tinyDAV/src/codecs/g722/g722_decode.c b/tinyDAV/src/codecs/g722/g722_decode.c
new file mode 100644
index 0000000..b6b7830
--- /dev/null
+++ b/tinyDAV/src/codecs/g722/g722_decode.c
@@ -0,0 +1,400 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_decode.c - The ITU G.722 codec, decode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based in part on a single channel G.722 codec which is:
+ *
+ * Copyright (c) CMU 1993
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_decode.c,v 1.15 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Changed __inline__ to __inline
+ * -Added saturation check on output
+ */
+
+/*! \file */
+
+#include <stdio.h>
+#include <memory.h>
+#include <stdlib.h>
+
+#include "tinydav/codecs/g722/g722_enc_dec.h"
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+/* Clamp a 32-bit value to the int16_t range [TDAV_INT16_MIN, TDAV_INT16_MAX].
+ * The fast path (no clipping) is a single truncate-and-compare. */
+static __inline int16_t saturate(int32_t amp)
+{
+ int16_t amp16;
+
+ /* Hopefully this is optimised for the common case - not clipping */
+ amp16 = (int16_t) amp;
+ if (amp == amp16)
+ return amp16;
+ if (amp > TDAV_INT16_MAX)
+ return TDAV_INT16_MAX;
+ return TDAV_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* ITU-T G.722 "Block 4": adaptive predictor update for one sub-band
+ * (band 0 = low, band 1 = high) of the decoder state. Updates the
+ * two-pole (a[]) and six-zero (b[]) predictor coefficients and the
+ * reconstructed/partially-reconstructed signal histories from the
+ * quantized difference signal d. All arithmetic is the fixed-point
+ * sequence mandated by the recommendation; do not reorder. */
+static void block4(g722_decode_state_t *s, int band, int d);
+
+static void block4(g722_decode_state_t *s, int band, int d)
+{
+ int wd1;
+ int wd2;
+ int wd3;
+ int i;
+
+ /* Block 4, RECONS */
+ s->band[band].d[0] = d;
+ s->band[band].r[0] = saturate(s->band[band].s + d);
+
+ /* Block 4, PARREC */
+ s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+ /* Block 4, UPPOL2 */
+ for (i = 0; i < 3; i++)
+ s->band[band].sg[i] = s->band[band].p[i] >> 15;
+ wd1 = saturate(s->band[band].a[1] << 2);
+
+ wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
+ if (wd2 > 32767)
+ wd2 = 32767;
+ wd3 = (s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128;
+ wd3 += (wd2 >> 7);
+ wd3 += (s->band[band].a[2]*32512) >> 15;
+ if (wd3 > 12288)
+ wd3 = 12288;
+ else if (wd3 < -12288)
+ wd3 = -12288;
+ s->band[band].ap[2] = wd3;
+
+ /* Block 4, UPPOL1 */
+ s->band[band].sg[0] = s->band[band].p[0] >> 15;
+ s->band[band].sg[1] = s->band[band].p[1] >> 15;
+ wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
+ wd2 = (s->band[band].a[1]*32640) >> 15;
+
+ s->band[band].ap[1] = saturate(wd1 + wd2);
+ wd3 = saturate(15360 - s->band[band].ap[2]);
+ if (s->band[band].ap[1] > wd3)
+ s->band[band].ap[1] = wd3;
+ else if (s->band[band].ap[1] < -wd3)
+ s->band[band].ap[1] = -wd3;
+
+ /* Block 4, UPZERO */
+ wd1 = (d == 0) ? 0 : 128;
+ s->band[band].sg[0] = d >> 15;
+ for (i = 1; i < 7; i++)
+ {
+ s->band[band].sg[i] = s->band[band].d[i] >> 15;
+ wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
+ wd3 = (s->band[band].b[i]*32640) >> 15;
+ s->band[band].bp[i] = saturate(wd2 + wd3);
+ }
+
+ /* Block 4, DELAYA: shift difference-signal and zero-coefficient history */
+ for (i = 6; i > 0; i--)
+ {
+ s->band[band].d[i] = s->band[band].d[i - 1];
+ s->band[band].b[i] = s->band[band].bp[i];
+ }
+
+ for (i = 2; i > 0; i--)
+ {
+ s->band[band].r[i] = s->band[band].r[i - 1];
+ s->band[band].p[i] = s->band[band].p[i - 1];
+ s->band[band].a[i] = s->band[band].ap[i];
+ }
+
+ /* Block 4, FILTEP: pole-predictor output sp */
+ wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+ wd1 = (s->band[band].a[1]*wd1) >> 15;
+ wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+ wd2 = (s->band[band].a[2]*wd2) >> 15;
+ s->band[band].sp = saturate(wd1 + wd2);
+
+ /* Block 4, FILTEZ: zero-predictor output sz */
+ s->band[band].sz = 0;
+ for (i = 6; i > 0; i--)
+ {
+ wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+ s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+ }
+ s->band[band].sz = saturate(s->band[band].sz);
+
+ /* Block 4, PREDIC: combined predictor output s */
+ s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+/* Initialize (or allocate and initialize) a G.722 decoder state.
+ * rate selects the code size: 48000 -> 6 bits/code, 56000 -> 7, else 8
+ * (64 kbps). options is a bit-mask: G722_SAMPLE_RATE_8000 selects 8 kHz
+ * output (low band only); G722_PACKED enables bit-packing for the
+ * sub-8-bit modes. Returns s, or NULL if allocation fails when s==NULL. */
+g722_decode_state_t *g722_decode_init(g722_decode_state_t *s, int rate, int options)
+{
+ if (s == NULL)
+ {
+ if ((s = (g722_decode_state_t *) malloc(sizeof(*s))) == NULL)
+ return NULL;
+ }
+ memset(s, 0, sizeof(*s));
+ if (rate == 48000)
+ s->bits_per_sample = 6;
+ else if (rate == 56000)
+ s->bits_per_sample = 7;
+ else
+ s->bits_per_sample = 8;
+ if ((options & G722_SAMPLE_RATE_8000))
+ s->eight_k = TRUE;
+ if ((options & G722_PACKED) && s->bits_per_sample != 8)
+ s->packed = TRUE;
+ else
+ s->packed = FALSE;
+ /* Initial quantizer scale factors per the recommendation */
+ s->band[0].det = 32;
+ s->band[1].det = 8;
+ return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* Free a decoder state previously obtained from g722_decode_init().
+ * NOTE(review): uses free(), so the state must have been allocated with
+ * a malloc-compatible allocator — confirm at call sites. */
+int g722_decode_release(g722_decode_state_t *s)
+{
+ free(s);
+ return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* Decode a buffer of G.722 codewords into 16-bit linear PCM.
+ * s: decoder state; g722_data: len input octets; amp: output samples.
+ * Returns the number of samples written to amp[]. In the default
+ * (non-eight_k) mode each codeword yields two output samples through the
+ * receive QMF, so amp[] must hold at least 2*len samples. The static
+ * tables are the quantizer/scale tables from ITU-T G.722. */
+int g722_decode(g722_decode_state_t *s, int16_t amp[],
+ const uint8_t g722_data[], int len)
+{
+ static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
+ static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0 };
+ static const int ilb[32] =
+ {
+ 2048, 2093, 2139, 2186, 2233, 2282, 2332,
+ 2383, 2435, 2489, 2543, 2599, 2656, 2714,
+ 2774, 2834, 2896, 2960, 3025, 3091, 3158,
+ 3228, 3298, 3371, 3444, 3520, 3597, 3676,
+ 3756, 3838, 3922, 4008
+ };
+ static const int wh[3] = {0, -214, 798};
+ static const int rh2[4] = {2, 1, 2, 1};
+ static const int qm2[4] = {-7408, -1616, 7408, 1616};
+ static const int qm4[16] =
+ {
+ 0, -20456, -12896, -8968,
+ -6288, -4240, -2584, -1200,
+ 20456, 12896, 8968, 6288,
+ 4240, 2584, 1200, 0
+ };
+ static const int qm5[32] =
+ {
+ -280, -280, -23352, -17560,
+ -14120, -11664, -9752, -8184,
+ -6864, -5712, -4696, -3784,
+ -2960, -2208, -1520, -880,
+ 23352, 17560, 14120, 11664,
+ 9752, 8184, 6864, 5712,
+ 4696, 3784, 2960, 2208,
+ 1520, 880, 280, -280
+ };
+ static const int qm6[64] =
+ {
+ -136, -136, -136, -136,
+ -24808, -21904, -19008, -16704,
+ -14984, -13512, -12280, -11192,
+ -10232, -9360, -8576, -7856,
+ -7192, -6576, -6000, -5456,
+ -4944, -4464, -4008, -3576,
+ -3168, -2776, -2400, -2032,
+ -1688, -1360, -1040, -728,
+ 24808, 21904, 19008, 16704,
+ 14984, 13512, 12280, 11192,
+ 10232, 9360, 8576, 7856,
+ 7192, 6576, 6000, 5456,
+ 4944, 4464, 4008, 3576,
+ 3168, 2776, 2400, 2032,
+ 1688, 1360, 1040, 728,
+ 432, 136, -432, -136
+ };
+ static const int qmf_coeffs[12] =
+ {
+ 3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
+ };
+
+ int dlowt;
+ int rlow;
+ int ihigh;
+ int dhigh;
+ int rhigh;
+ int xout1;
+ int xout2;
+ int wd1;
+ int wd2;
+ int wd3;
+ int code;
+ int outlen;
+ int i;
+ int j;
+
+ outlen = 0;
+ rhigh = 0;
+ for (j = 0; j < len; )
+ {
+ if (s->packed)
+ {
+ /* Unpack the code bits */
+ if (s->in_bits < s->bits_per_sample)
+ {
+ s->in_buffer |= (g722_data[j++] << s->in_bits);
+ s->in_bits += 8;
+ }
+ code = s->in_buffer & ((1 << s->bits_per_sample) - 1);
+ s->in_buffer >>= s->bits_per_sample;
+ s->in_bits -= s->bits_per_sample;
+ }
+ else
+ {
+ code = g722_data[j++];
+ }
+
+ /* Split the codeword into low-band index (wd1) and the 2-bit
+ high-band index (ihigh); wd2 is the inverse-quantized value. */
+ switch (s->bits_per_sample)
+ {
+ default:
+ case 8:
+ wd1 = code & 0x3F;
+ ihigh = (code >> 6) & 0x03;
+ wd2 = qm6[wd1];
+ wd1 >>= 2;
+ break;
+ case 7:
+ wd1 = code & 0x1F;
+ ihigh = (code >> 5) & 0x03;
+ wd2 = qm5[wd1];
+ wd1 >>= 1;
+ break;
+ case 6:
+ wd1 = code & 0x0F;
+ ihigh = (code >> 4) & 0x03;
+ wd2 = qm4[wd1];
+ break;
+ }
+ /* Block 5L, LOW BAND INVQBL */
+ wd2 = (s->band[0].det*wd2) >> 15;
+ /* Block 5L, RECONS */
+ rlow = s->band[0].s + wd2;
+ /* Block 6L, LIMIT: clamp to 15-bit range */
+ if (rlow > 16383)
+ rlow = 16383;
+ else if (rlow < -16384)
+ rlow = -16384;
+
+ /* Block 2L, INVQAL */
+ wd2 = qm4[wd1];
+ dlowt = (s->band[0].det*wd2) >> 15;
+
+ /* Block 3L, LOGSCL: update the log scale factor with leakage */
+ wd2 = rl42[wd1];
+ wd1 = (s->band[0].nb*127) >> 7;
+ wd1 += wl[wd2];
+ if (wd1 < 0)
+ wd1 = 0;
+ else if (wd1 > 18432)
+ wd1 = 18432;
+ s->band[0].nb = wd1;
+
+ /* Block 3L, SCALEL: convert log scale factor to linear det */
+ wd1 = (s->band[0].nb >> 6) & 31;
+ wd2 = 8 - (s->band[0].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[0].det = wd3 << 2;
+
+ block4(s, 0, dlowt);
+
+ if (!s->eight_k)
+ {
+ /* Block 2H, INVQAH */
+ wd2 = qm2[ihigh];
+ dhigh = (s->band[1].det*wd2) >> 15;
+ /* Block 5H, RECONS */
+ rhigh = dhigh + s->band[1].s;
+ /* Block 6H, LIMIT */
+ if (rhigh > 16383)
+ rhigh = 16383;
+ else if (rhigh < -16384)
+ rhigh = -16384;
+
+ /* Block 2H, INVQAH */
+ wd2 = rh2[ihigh];
+ wd1 = (s->band[1].nb*127) >> 7;
+ wd1 += wh[wd2];
+ if (wd1 < 0)
+ wd1 = 0;
+ else if (wd1 > 22528)
+ wd1 = 22528;
+ s->band[1].nb = wd1;
+
+ /* Block 3H, SCALEH */
+ wd1 = (s->band[1].nb >> 6) & 31;
+ wd2 = 10 - (s->band[1].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[1].det = wd3 << 2;
+
+ block4(s, 1, dhigh);
+ }
+
+ if (s->itu_test_mode)
+ {
+ amp[outlen++] = (int16_t) (rlow << 1);
+ amp[outlen++] = (int16_t) (rhigh << 1);
+ }
+ else
+ {
+ if (s->eight_k)
+ {
+ amp[outlen++] = (int16_t) (rlow << 1);
+ }
+ else
+ {
+ /* Apply the receive QMF */
+ for (i = 0; i < 22; i++)
+ s->x[i] = s->x[i + 2];
+ s->x[22] = rlow + rhigh;
+ s->x[23] = rlow - rhigh;
+
+ xout1 = 0;
+ xout2 = 0;
+ for (i = 0; i < 12; i++)
+ {
+ xout2 += s->x[2*i]*qmf_coeffs[i];
+ xout1 += s->x[2*i + 1]*qmf_coeffs[11 - i];
+ }
+ /* We shift by 12 to allow for the QMF filters (DC gain = 4096), less 1
+ to allow for the 15 bit input to the G.722 algorithm. */
+ /* WebRtc, tlegrand: added saturation */
+ amp[outlen++] = saturate(xout1 >> 11);
+ amp[outlen++] = saturate(xout2 >> 11);
+ }
+ }
+ }
+ return outlen;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
diff --git a/tinyDAV/src/codecs/g722/g722_encode.c b/tinyDAV/src/codecs/g722/g722_encode.c
new file mode 100644
index 0000000..68758eb
--- /dev/null
+++ b/tinyDAV/src/codecs/g722/g722_encode.c
@@ -0,0 +1,426 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_encode.c - The ITU G.722 codec, encode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * All rights reserved.
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based on a single channel 64kbps only G.722 codec which is:
+ *
+ ***** Copyright (c) CMU 1993 *****
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_encode.c,v 1.14 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Added option to run encoder bitexact with ITU-T reference implementation
+ */
+
+#include <stdio.h>
+#include <memory.h>
+#include <stdlib.h>
+
+#include "tinydav/codecs/g722/g722_enc_dec.h"
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+/* Clamp a 32-bit value to the int16_t range [TDAV_INT16_MIN, TDAV_INT16_MAX].
+ * Identical to the helper in g722_decode.c (both are static per file). */
+static __inline int16_t saturate(int32_t amp)
+{
+ int16_t amp16;
+
+ /* Hopefully this is optimised for the common case - not clipping */
+ amp16 = (int16_t) amp;
+ if (amp == amp16)
+ return amp16;
+ if (amp > TDAV_INT16_MAX)
+ return TDAV_INT16_MAX;
+ return TDAV_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* ITU-T G.722 "Block 4": adaptive predictor update for one sub-band of
+ * the encoder state (mirror of the decoder's block4, operating on
+ * g722_encode_state_t). Updates the pole (a[]) and zero (b[]) predictor
+ * coefficients and the signal histories from the quantized difference
+ * signal d. Fixed-point sequence per the recommendation; do not reorder. */
+static void block4(g722_encode_state_t *s, int band, int d)
+{
+ int wd1;
+ int wd2;
+ int wd3;
+ int i;
+
+ /* Block 4, RECONS */
+ s->band[band].d[0] = d;
+ s->band[band].r[0] = saturate(s->band[band].s + d);
+
+ /* Block 4, PARREC */
+ s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+ /* Block 4, UPPOL2 */
+ for (i = 0; i < 3; i++)
+ s->band[band].sg[i] = s->band[band].p[i] >> 15;
+ wd1 = saturate(s->band[band].a[1] << 2);
+
+ wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
+ if (wd2 > 32767)
+ wd2 = 32767;
+ wd3 = (wd2 >> 7) + ((s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128);
+ wd3 += (s->band[band].a[2]*32512) >> 15;
+ if (wd3 > 12288)
+ wd3 = 12288;
+ else if (wd3 < -12288)
+ wd3 = -12288;
+ s->band[band].ap[2] = wd3;
+
+ /* Block 4, UPPOL1 */
+ s->band[band].sg[0] = s->band[band].p[0] >> 15;
+ s->band[band].sg[1] = s->band[band].p[1] >> 15;
+ wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
+ wd2 = (s->band[band].a[1]*32640) >> 15;
+
+ s->band[band].ap[1] = saturate(wd1 + wd2);
+ wd3 = saturate(15360 - s->band[band].ap[2]);
+ if (s->band[band].ap[1] > wd3)
+ s->band[band].ap[1] = wd3;
+ else if (s->band[band].ap[1] < -wd3)
+ s->band[band].ap[1] = -wd3;
+
+ /* Block 4, UPZERO */
+ wd1 = (d == 0) ? 0 : 128;
+ s->band[band].sg[0] = d >> 15;
+ for (i = 1; i < 7; i++)
+ {
+ s->band[band].sg[i] = s->band[band].d[i] >> 15;
+ wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
+ wd3 = (s->band[band].b[i]*32640) >> 15;
+ s->band[band].bp[i] = saturate(wd2 + wd3);
+ }
+
+ /* Block 4, DELAYA: shift difference-signal and zero-coefficient history */
+ for (i = 6; i > 0; i--)
+ {
+ s->band[band].d[i] = s->band[band].d[i - 1];
+ s->band[band].b[i] = s->band[band].bp[i];
+ }
+
+ for (i = 2; i > 0; i--)
+ {
+ s->band[band].r[i] = s->band[band].r[i - 1];
+ s->band[band].p[i] = s->band[band].p[i - 1];
+ s->band[band].a[i] = s->band[band].ap[i];
+ }
+
+ /* Block 4, FILTEP: pole-predictor output sp */
+ wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+ wd1 = (s->band[band].a[1]*wd1) >> 15;
+ wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+ wd2 = (s->band[band].a[2]*wd2) >> 15;
+ s->band[band].sp = saturate(wd1 + wd2);
+
+ /* Block 4, FILTEZ: zero-predictor output sz */
+ s->band[band].sz = 0;
+ for (i = 6; i > 0; i--)
+ {
+ wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+ s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+ }
+ s->band[band].sz = saturate(s->band[band].sz);
+
+ /* Block 4, PREDIC: combined predictor output s */
+ s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+/* Initialize (or allocate and initialize) a G.722 encoder state.
+ * rate selects the code size: 48000 -> 6 bits/code, 56000 -> 7, else 8
+ * (64 kbps). options is a bit-mask: G722_SAMPLE_RATE_8000 selects 8 kHz
+ * input (low band only); G722_PACKED enables bit-packing for the
+ * sub-8-bit modes. Returns s, or NULL if allocation fails when s==NULL. */
+g722_encode_state_t *g722_encode_init(g722_encode_state_t *s, int rate, int options)
+{
+ if (s == NULL)
+ {
+ if ((s = (g722_encode_state_t *) malloc(sizeof(*s))) == NULL)
+ return NULL;
+ }
+ memset(s, 0, sizeof(*s));
+ if (rate == 48000)
+ s->bits_per_sample = 6;
+ else if (rate == 56000)
+ s->bits_per_sample = 7;
+ else
+ s->bits_per_sample = 8;
+ if ((options & G722_SAMPLE_RATE_8000))
+ s->eight_k = TRUE;
+ if ((options & G722_PACKED) && s->bits_per_sample != 8)
+ s->packed = TRUE;
+ else
+ s->packed = FALSE;
+ /* Initial quantizer scale factors per the recommendation */
+ s->band[0].det = 32;
+ s->band[1].det = 8;
+ return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* Free an encoder state previously obtained from g722_encode_init().
+ * NOTE(review): uses free(), so the state must have been allocated with
+ * a malloc-compatible allocator — confirm at call sites. */
+int g722_encode_release(g722_encode_state_t *s)
+{
+ free(s);
+ return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* WebRtc, tlegrand:
+ * Only define the following if bit-exactness with reference implementation
+ * is needed. Will only have any effect if input signal is saturated.
+ */
+//#define RUN_LIKE_REFERENCE_G722
+#ifdef RUN_LIKE_REFERENCE_G722
+/* Clamp a sample to the 15-bit range [-16384, 16383] used by the ITU-T
+ * reference implementation's QMF output. */
+int16_t limitValues (int16_t rl)
+{
+
+ int16_t yl;
+
+ yl = (rl > 16383) ? 16383 : ((rl < -16384) ? -16384 : rl);
+
+ return (yl);
+}
+#endif
+
+/* Encode 16-bit linear PCM to G.722 codewords.
+ * s: encoder state; amp: len input samples; g722_data: output octets.
+ * Returns the number of octets written. In the default (non-eight_k)
+ * mode the transmit QMF consumes two samples per codeword, so len/2
+ * octets are produced. The static tables are the quantizer/scale tables
+ * from ITU-T G.722. */
+int g722_encode(g722_encode_state_t *s, uint8_t g722_data[],
+ const int16_t amp[], int len)
+{
+ static const int q6[32] =
+ {
+ 0, 35, 72, 110, 150, 190, 233, 276,
+ 323, 370, 422, 473, 530, 587, 650, 714,
+ 786, 858, 940, 1023, 1121, 1219, 1339, 1458,
+ 1612, 1765, 1980, 2195, 2557, 2919, 0, 0
+ };
+ static const int iln[32] =
+ {
+ 0, 63, 62, 31, 30, 29, 28, 27,
+ 26, 25, 24, 23, 22, 21, 20, 19,
+ 18, 17, 16, 15, 14, 13, 12, 11,
+ 10, 9, 8, 7, 6, 5, 4, 0
+ };
+ static const int ilp[32] =
+ {
+ 0, 61, 60, 59, 58, 57, 56, 55,
+ 54, 53, 52, 51, 50, 49, 48, 47,
+ 46, 45, 44, 43, 42, 41, 40, 39,
+ 38, 37, 36, 35, 34, 33, 32, 0
+ };
+ static const int wl[8] =
+ {
+ -60, -30, 58, 172, 334, 538, 1198, 3042
+ };
+ static const int rl42[16] =
+ {
+ 0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0
+ };
+ static const int ilb[32] =
+ {
+ 2048, 2093, 2139, 2186, 2233, 2282, 2332,
+ 2383, 2435, 2489, 2543, 2599, 2656, 2714,
+ 2774, 2834, 2896, 2960, 3025, 3091, 3158,
+ 3228, 3298, 3371, 3444, 3520, 3597, 3676,
+ 3756, 3838, 3922, 4008
+ };
+ static const int qm4[16] =
+ {
+ 0, -20456, -12896, -8968,
+ -6288, -4240, -2584, -1200,
+ 20456, 12896, 8968, 6288,
+ 4240, 2584, 1200, 0
+ };
+ static const int qm2[4] =
+ {
+ -7408, -1616, 7408, 1616
+ };
+ static const int qmf_coeffs[12] =
+ {
+ 3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
+ };
+ static const int ihn[3] = {0, 1, 0};
+ static const int ihp[3] = {0, 3, 2};
+ static const int wh[3] = {0, -214, 798};
+ static const int rh2[4] = {2, 1, 2, 1};
+
+ int dlow;
+ int dhigh;
+ int el;
+ int wd;
+ int wd1;
+ int ril;
+ int wd2;
+ int il4;
+ int ih2;
+ int wd3;
+ int eh;
+ int mih;
+ int i;
+ int j;
+ /* Low and high band PCM from the QMF */
+ int xlow;
+ int xhigh;
+ int g722_bytes;
+ /* Even and odd tap accumulators */
+ int sumeven;
+ int sumodd;
+ int ihigh;
+ int ilow;
+ int code;
+
+ g722_bytes = 0;
+ xhigh = 0;
+ for (j = 0; j < len; )
+ {
+ if (s->itu_test_mode)
+ {
+ xlow =
+ xhigh = amp[j++] >> 1;
+ }
+ else
+ {
+ if (s->eight_k)
+ {
+ /* We shift by 1 to allow for the 15 bit input to the G.722 algorithm. */
+ xlow = amp[j++] >> 1;
+ }
+ else
+ {
+ /* Apply the transmit QMF */
+ /* Shuffle the buffer down */
+ for (i = 0; i < 22; i++)
+ s->x[i] = s->x[i + 2];
+ s->x[22] = amp[j++];
+ s->x[23] = amp[j++];
+
+ /* Discard every other QMF output */
+ sumeven = 0;
+ sumodd = 0;
+ for (i = 0; i < 12; i++)
+ {
+ sumodd += s->x[2*i]*qmf_coeffs[i];
+ sumeven += s->x[2*i + 1]*qmf_coeffs[11 - i];
+ }
+ /* We shift by 12 to allow for the QMF filters (DC gain = 4096), plus 1
+ to allow for us summing two filters, plus 1 to allow for the 15 bit
+ input to the G.722 algorithm. */
+ xlow = (sumeven + sumodd) >> 14;
+ xhigh = (sumeven - sumodd) >> 14;
+
+#ifdef RUN_LIKE_REFERENCE_G722
+ /* The following lines are only used to verify bit-exactness
+ * with reference implementation of G.722. Higher precision
+ * is achieved without limiting the values.
+ */
+ xlow = limitValues(xlow);
+ xhigh = limitValues(xhigh);
+#endif
+ }
+ }
+ /* Block 1L, SUBTRA: low-band prediction error */
+ el = saturate(xlow - s->band[0].s);
+
+ /* Block 1L, QUANTL: 6-bit low-band quantization by table search */
+ wd = (el >= 0) ? el : -(el + 1);
+
+ for (i = 1; i < 30; i++)
+ {
+ wd1 = (q6[i]*s->band[0].det) >> 12;
+ if (wd < wd1)
+ break;
+ }
+ ilow = (el < 0) ? iln[i] : ilp[i];
+
+ /* Block 2L, INVQAL */
+ ril = ilow >> 2;
+ wd2 = qm4[ril];
+ dlow = (s->band[0].det*wd2) >> 15;
+
+ /* Block 3L, LOGSCL: update log scale factor with leakage */
+ il4 = rl42[ril];
+ wd = (s->band[0].nb*127) >> 7;
+ s->band[0].nb = wd + wl[il4];
+ if (s->band[0].nb < 0)
+ s->band[0].nb = 0;
+ else if (s->band[0].nb > 18432)
+ s->band[0].nb = 18432;
+
+ /* Block 3L, SCALEL: convert log scale factor to linear det */
+ wd1 = (s->band[0].nb >> 6) & 31;
+ wd2 = 8 - (s->band[0].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[0].det = wd3 << 2;
+
+ block4(s, 0, dlow);
+
+ if (s->eight_k)
+ {
+ /* Just leave the high bits as zero */
+ code = (0xC0 | ilow) >> (8 - s->bits_per_sample);
+ }
+ else
+ {
+ /* Block 1H, SUBTRA: high-band prediction error */
+ eh = saturate(xhigh - s->band[1].s);
+
+ /* Block 1H, QUANTH: 2-bit high-band quantization */
+ wd = (eh >= 0) ? eh : -(eh + 1);
+ wd1 = (564*s->band[1].det) >> 12;
+ mih = (wd >= wd1) ? 2 : 1;
+ ihigh = (eh < 0) ? ihn[mih] : ihp[mih];
+
+ /* Block 2H, INVQAH */
+ wd2 = qm2[ihigh];
+ dhigh = (s->band[1].det*wd2) >> 15;
+
+ /* Block 3H, LOGSCH */
+ ih2 = rh2[ihigh];
+ wd = (s->band[1].nb*127) >> 7;
+ s->band[1].nb = wd + wh[ih2];
+ if (s->band[1].nb < 0)
+ s->band[1].nb = 0;
+ else if (s->band[1].nb > 22528)
+ s->band[1].nb = 22528;
+
+ /* Block 3H, SCALEH */
+ wd1 = (s->band[1].nb >> 6) & 31;
+ wd2 = 10 - (s->band[1].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[1].det = wd3 << 2;
+
+ block4(s, 1, dhigh);
+ code = ((ihigh << 6) | ilow) >> (8 - s->bits_per_sample);
+ }
+
+ if (s->packed)
+ {
+ /* Pack the code bits */
+ s->out_buffer |= (code << s->out_bits);
+ s->out_bits += s->bits_per_sample;
+ if (s->out_bits >= 8)
+ {
+ g722_data[g722_bytes++] = (uint8_t) (s->out_buffer & 0xFF);
+ s->out_bits -= 8;
+ s->out_buffer >>= 8;
+ }
+ }
+ else
+ {
+ g722_data[g722_bytes++] = (uint8_t) code;
+ }
+ }
+ return g722_bytes;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
diff --git a/tinyDAV/src/codecs/g722/tdav_codec_g722.c b/tinyDAV/src/codecs/g722/tdav_codec_g722.c
new file mode 100644
index 0000000..749fa04
--- /dev/null
+++ b/tinyDAV/src/codecs/g722/tdav_codec_g722.c
@@ -0,0 +1,219 @@
+/*
+* Copyright (C) 2011-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_g722.c
+ * @brief G.722 codec plugins.
+ */
+#include "tinydav/codecs/g722/tdav_codec_g722.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+/* G.722 codec context: base tinyMEDIA audio codec plus the SpanDSP
+ * encoder/decoder states (allocated lazily in open(), freed in dtor). */
+typedef struct tdav_codec_g722_s
+{
+ TMEDIA_DECLARE_CODEC_AUDIO;
+
+ g722_encode_state_t *enc_state; // encoder state, tsk_null until open()
+ g722_decode_state_t *dec_state; // decoder state, tsk_null until open()
+}
+tdav_codec_g722_t;
+
+/* Open the codec: lazily allocate and initialize the G.722 encoder and
+ * decoder states. Returns 0 on success, negative on error.
+ * NOTE(review): states are allocated with tsk_calloc() here but released
+ * with free() via g722_*_release() in the dtor — assumes tsk_calloc is
+ * malloc-compatible; confirm.
+ * NOTE(review): g722_*_init(s, 64000, 2) — 64000 selects 8 bits/code;
+ * the meaning of option value 2 depends on the G722_* flag values in
+ * g722_enc_dec.h (comment says "wideband mode") — verify against header. */
+static int tdav_codec_g722_open(tmedia_codec_t* self)
+{
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ if (!g722){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // Initialize the decoder
+ if (!g722->dec_state){
+ if (!(g722->dec_state = tsk_calloc(1, sizeof(g722_decode_state_t)))){
+ TSK_DEBUG_ERROR("Failed to create G.722 decoder state");
+ return -2;
+ }
+ // Create and/or reset the G.722 decoder
+ // Bitrate 64 kbps and wideband mode (2)
+ if (!(g722->dec_state = g722_decode_init(g722->dec_state, 64000, 2))){
+ TSK_DEBUG_ERROR("g722_decode_init failed");
+ return -3;
+ }
+ }
+
+ // Initialize the encoder
+ if (!g722->enc_state){
+ if (!(g722->enc_state = tsk_calloc(1, sizeof(g722_encode_state_t)))){
+ TSK_DEBUG_ERROR("Failed to create G.722 encoder state");
+ return -4;
+ }
+ // Create and/or reset the G.722 encoder
+ // Bitrate 64 kbps and wideband mode (2)
+ if (!(g722->enc_state = g722_encode_init(g722->enc_state, 64000, 2))){
+ TSK_DEBUG_ERROR("g722_encode_init failed");
+ return -5;
+ }
+ }
+
+ return 0;
+}
+
+/* Close the codec. Intentionally a no-op: the encoder/decoder states are
+ * kept so the codec can be re-opened cheaply, and are freed by the dtor. */
+static int tdav_codec_g722_close(tmedia_codec_t* self)
+{
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ (void)(g722);
+
+ /* resources will be freed by the dctor() */
+
+ return 0;
+}
+
+/* Encode 16-bit linear PCM (in_data, in_size bytes) to G.722.
+ * 4:1 byte compression: two 16-bit samples produce one output octet, so
+ * the output is in_size/4 bytes. *out_data is grown with tsk_realloc as
+ * needed and *out_max_size updated. Returns the encoded size in bytes,
+ * 0 on error. */
+static tsk_size_t tdav_codec_g722_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t out_g722_size;
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ if (!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* bytes -> samples is /2, samples -> octets is /2 again */
+ out_g722_size = in_size >> 2;
+
+ if (*out_max_size < out_g722_size){
+ if (!(*out_data = tsk_realloc(*out_data, out_g722_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_g722_size;
+ }
+
+ g722_encode(g722->enc_state, (uint8_t*)*out_data, (int16_t*)in_data, (int)in_size / sizeof(int16_t));
+
+ return out_g722_size;
+}
+
+/* Decode G.722 octets (in_data, in_size bytes) to 16-bit linear PCM.
+ * 1:4 byte expansion: each octet yields two 16-bit samples, so the output
+ * is in_size*4 bytes. *out_data is grown with tsk_realloc as needed and
+ * *out_max_size updated. Returns the decoded size in bytes, 0 on error. */
+static tsk_size_t tdav_codec_g722_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ if (!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* allocate new buffer */
+ if (*out_max_size < (in_size << 2)){
+ if (!(*out_data = tsk_realloc(*out_data, in_size << 2))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = in_size << 2;
+ }
+
+ g722_decode(g722->dec_state, (int16_t*)*out_data, (uint8_t*)in_data, (int)in_size);
+
+ return (in_size << 2);
+}
+
+/* SDP attribute matcher for G.722: no negotiable fmtp parameters, so any
+ * offered attribute is accepted. */
+static tsk_bool_t tdav_codec_g722_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ return tsk_true;
+}
+
+/* SDP attribute getter for G.722: no attributes to advertise. */
+static char* tdav_codec_g722_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ return tsk_null;
+}
+
+//
+// g722 Plugin definition
+//
+
+/* constructor: invoked by the tsk object framework. The encoder/decoder
+ * states stay tsk_null (zero-initialized) until open() allocates them. */
+static tsk_object_t* tdav_codec_g722_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_g722_t *g722 = self;
+ if (g722){
+ /* init base: called by tmedia_codec_create() */
+ /* init self: enc_state/dec_state created lazily in open() */
+
+ }
+ return self;
+}
+/* destructor: releases the base audio-codec resources and frees the
+ * encoder/decoder states (see allocator note on open()). */
+static tsk_object_t* tdav_codec_g722_dtor(tsk_object_t * self)
+{
+ tdav_codec_g722_t *g722 = self;
+ if (g722){
+ /* deinit base */
+ tmedia_codec_audio_deinit(g722);
+ /* deinit self */
+ if (g722->enc_state){
+ g722_encode_release(g722->enc_state), g722->enc_state = tsk_null;
+ }
+ if (g722->dec_state){
+ g722_decode_release(g722->dec_state), g722->dec_state = tsk_null;
+ }
+ }
+
+ return self;
+}
+/* object definition: lifecycle hooks for the tsk object framework */
+static const tsk_object_def_t tdav_codec_g722_def_s =
+{
+ sizeof(tdav_codec_g722_t),
+ tdav_codec_g722_ctor,
+ tdav_codec_g722_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition: registers G722 with the tinyMEDIA codec framework.
+ * The advertised rate is 16000 (the true sampling rate; RTP clock for
+ * G722 is historically 8000 per RFC 3551 — handled elsewhere). */
+static const tmedia_codec_plugin_def_t tdav_codec_g722_plugin_def_s =
+{
+ &tdav_codec_g722_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_g722,
+ "G722",
+ "g722 Codec (native)",
+ TMEDIA_CODEC_FORMAT_G722,
+ tsk_false,
+ 16000,
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ { 0 },
+
+ tsk_null, // set()
+ tdav_codec_g722_open,
+ tdav_codec_g722_close,
+ tdav_codec_g722_encode,
+ tdav_codec_g722_decode,
+ tdav_codec_g722_sdp_att_match,
+ tdav_codec_g722_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_g722_plugin_def_t = &tdav_codec_g722_plugin_def_s;
diff --git a/tinyDAV/src/codecs/g729/tdav_codec_g729.c b/tinyDAV/src/codecs/g729/tdav_codec_g729.c
new file mode 100644
index 0000000..8981687
--- /dev/null
+++ b/tinyDAV/src/codecs/g729/tdav_codec_g729.c
@@ -0,0 +1,466 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_g729.c
+ * @brief G729ab codec.
+ * Source from: http://www.itu.int/rec/T-REC-G.729-199610-S!AnnB/en
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ */
+#include "tinydav/codecs/g729/tdav_codec_g729.h"
+
+#if HAVE_G729
+
+#include "g729b/dtx.h"
+#include "g729b/octet.h"
+
+#include "tsk_string.h"
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "../thirdparties/win32/lib/g729b/g729b.a")
+#endif
+
+int16_t bad_lsf; /* bad LSF indicator */
+
+#ifndef G729_ENABLE_VAD
+# define G729_ENABLE_VAD 0 // FIXME: speexJB not prepared for such feature
+#endif
+
+static int16_t bin2int(int16_t no_of_bits, const int16_t *bitstream);
+static void int2bin(int16_t value, int16_t no_of_bits, int16_t *bitstream);
+
+static void unpack_G729(const uint8_t bitstream[], int16_t bits[], int len);
+static void unpack_SID(const uint8_t bitstream[], int16_t bits[]);
+
+static void pack_G729(const int16_t ituBits[], uint8_t bitstream[]);
+static void pack_SID(const int16_t ituBits[], uint8_t bitstream[]);
+
+/* ============ G.729ab Plugin interface ================= */
+
+#define tdav_codec_g729ab_set tsk_null
+
/* Opens the codec: (re)initializes the ITU-T G.729 reference encoder and
 * decoder state. Always returns 0.
 * NOTE(review): the ITU reference code keeps most state in globals
 * (Init_* functions, bad_lsf, new_speech), so concurrent G.729 codec
 * instances would share/clobber state -- TODO confirm single-instance use. */
static int tdav_codec_g729ab_open(tmedia_codec_t* self)
{
    tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;

    // Initialize the decoder
    bad_lsf = 0; /* clear the global bad-LSF (frame erasure) indicator */
    /* synth points past the M-sample LPC history kept at the head of synth_buf */
    g729a->decoder.synth = (g729a->decoder.synth_buf + M);

    Init_Decod_ld8a();
    Init_Post_Filter();
    Init_Post_Process();
    /* for G.729B */
    Init_Dec_cng();

    // Initialize the encoder
    Init_Pre_Process();
    Init_Coder_ld8a();
    Set_zero(g729a->encoder.prm, PRM_SIZE + 1);
    /* for G.729B */
    Init_Cod_cng();


    return 0;
}
+
+static int tdav_codec_g729ab_close(tmedia_codec_t* self)
+{
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;
+
+ (void)(g729a);
+
+ /* resources will be freed by the dctor() */
+
+ return 0;
+}
+
/* Encodes linear PCM into G.729AB frames.
 * @param in_data 16-bit/8kHz PCM; in_size must be a multiple of 160 bytes
 *        (80 samples = one 10ms frame)
 * @param out_data reallocated as needed; caller owns the buffer
 * @retval number of bytes written (10 per voiced frame, 2 per SID frame,
 *         0 bytes for untransmitted frames when VAD is on) */
static tsk_size_t tdav_codec_g729ab_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
{
    tsk_size_t ex_size, out_size = 0;
    tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;
    int i, frame_count = (in_size / 160);


    if(!self || !in_data || !in_size || !out_data || (in_size % 160)){
        TSK_DEBUG_ERROR("Invalid parameter");
        return 0;
    }

    /* worst case: every frame is a full-rate 10-byte frame */
    ex_size = (frame_count * 10);

    // allocate new buffer if needed
    if(*out_max_size <ex_size){
        if(!(*out_data = tsk_realloc(*out_data, ex_size))){
            TSK_DEBUG_ERROR("Failed to allocate new buffer");
            *out_max_size = 0;
            return 0;
        }
        *out_max_size = ex_size;
    }

    for(i=0; i<frame_count; i++){
        /* global PCM input buffer owned by the ITU reference coder */
        extern int16_t *new_speech;

        /* frame counter wraps 32767 -> 256 as in the ITU reference code */
        if(g729a->encoder.frame == 32767){
            g729a->encoder.frame = 256;
        }
        else{
            g729a->encoder.frame++;
        }

        memcpy(new_speech, &((uint8_t*)in_data)[i*L_FRAME*sizeof(int16_t)], sizeof(int16_t)*L_FRAME);

        Pre_Process(new_speech, L_FRAME);
        Coder_ld8a(g729a->encoder.prm, g729a->encoder.frame, g729a->encoder.vad_enable);
        prm2bits_ld8k(g729a->encoder.prm, g729a->encoder.serial);

        /* serial[1] carries the rate word chosen by the coder/VAD */
        if(g729a->encoder.serial[1] == RATE_8000){
            /* full-rate voiced frame: 80 ITU soft bits -> 10 RTP bytes */
            pack_G729(&g729a->encoder.serial[2], &((uint8_t*)(*out_data))[out_size]);
            out_size += 10;
        }
        else if(g729a->encoder.serial[1] == RATE_SID_OCTET){
            /* comfort-noise (SID) frame: 2 RTP bytes */
            pack_SID(&g729a->encoder.serial[2], &((uint8_t*)(*out_data))[out_size]);
            out_size += 2;
        }
        else{ // RATE_0
            //TSK_DEBUG_INFO("G729_RATE_0 - Not transmitted");
            if (!g729a->encoder.vad_enable) {
                // silence: VAD is off so keep the output stream continuous
                memset(&((uint8_t*)(*out_data))[out_size], 0, 10);
                out_size += 10;
            }
        }
    }

    return out_size;
}
+
/* Decodes G.729AB frames into 16-bit/8kHz PCM.
 * @param in_data sequence of 10-byte voiced frames, optionally followed by
 *        one trailing 2-byte SID frame (hence the (in_size % 10 == 2) case)
 * @retval bytes written (160 per frame = 80 samples) or 0 on error */
static tsk_size_t tdav_codec_g729ab_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
    tsk_size_t out_size = 0;
    int i, frame_count;
    const uint8_t* data_start = (const uint8_t*)in_data;
    const uint8_t* data_end;
    tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;

    if(!self || !in_data || !in_size || !out_data || ((in_size % 10) && (in_size % 10 != 2))){
        TSK_DEBUG_ERROR("Invalid parameter");
        return 0;
    }

    data_end = (data_start + in_size);

    /* the 2-byte remainder (SID) counts as one extra frame */
    frame_count = (in_size/10) + ((in_size % 10) ? 1 : 0);

    out_size = 160*frame_count;

    /* allocate new buffer if needed */
    if(*out_max_size <out_size){
        if(!(*out_data = tsk_realloc(*out_data, out_size))){
            TSK_DEBUG_ERROR("Failed to allocate new buffer");
            *out_max_size = 0;
            return 0;
        }
        *out_max_size = out_size;
    }

    for(i=0; i<frame_count; i++){
        /* NOTE(review): this zeroes M *bytes*, i.e. only half of the M int16_t
           history slots (the ITU reference code preserves the history across
           frames instead of clearing it) -- TODO confirm intent */
        memset(g729a->decoder.synth_buf, 0, M);
        g729a->decoder.synth = g729a->decoder.synth_buf + M;

        /* exactly 2 bytes left => trailing SID frame */
        if((data_end - data_start) == 2){
            unpack_SID(data_start, g729a->decoder.serial);
            data_start += 2;
        }
        else{
            unpack_G729(data_start, g729a->decoder.serial, 10);
            data_start += 10;
        }

        bits2prm_ld8k(&g729a->decoder.serial[1], g729a->decoder.parm);

        /* This part was modified for version V1.3 */
        /* for speech and SID frames, the hardware detects frame erasures
        by checking if all bits are set to zero */
        /* for untransmitted frames, the hardware detects frame erasures
        by testing serial[0] */

        g729a->decoder.parm[0] = 0; /* No frame erasure */
        if(g729a->decoder.serial[1] != 0) {
            int j;
            /* any zero soft bit => corrupted frame */
            for (j=0; j < g729a->decoder.serial[1]; j++){
                if (g729a->decoder.serial[j+2] == 0){
                    g729a->decoder.parm[0] = 1; /* frame erased */
                    break;
                }
            }
        }
        else if(g729a->decoder.serial[0] != SYNC_WORD){
            g729a->decoder.parm[0] = 1;
        }
        if(g729a->decoder.parm[1] == 1) {
            /* check parity and put 1 in parm[5] if parity error */
            g729a->decoder.parm[5] = Check_Parity_Pitch(g729a->decoder.parm[4], g729a->decoder.parm[5]);
        }

        Decod_ld8a(g729a->decoder.parm, g729a->decoder.synth, g729a->decoder.Az_dec, g729a->decoder.T2, &g729a->decoder.Vad);
        Post_Filter(g729a->decoder.synth, g729a->decoder.Az_dec, g729a->decoder.T2, g729a->decoder.Vad); /* Post-filter */
        Post_Process(g729a->decoder.synth, L_FRAME);

        /* 160 bytes = L_FRAME (80) int16_t samples */
        memcpy(&((uint8_t*)*out_data)[160*i], g729a->decoder.synth, 160);
    }


    return out_size;
}
+
/* Matches remote SDP attributes; always accepts, but uses the "annexb"
 * fmtp parameter to (possibly) disable VAD/DTX. */
static tsk_bool_t tdav_codec_g729ab_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
{
    tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)codec;

    if(tsk_striequals(att_name, "fmtp")){
        tsk_params_L_t* params = tsk_null;
        const char* val_str;
        if((params = tsk_params_fromstring(att_value, ";", tsk_true))){
            if((val_str = tsk_params_get_param_value(params, "annexb"))){
                /* "&=" only narrows: a remote "annexb=yes" keeps VAD on only
                   if it was locally enabled (G729_ENABLE_VAD); "annexb=no"
                   always turns it off */
                g729a->encoder.vad_enable &= tsk_strequals(val_str, "yes") ? 1 : 0;
            }
            TSK_OBJECT_SAFE_FREE(params);
        }
    }
    return tsk_true;
}
+
+static char* tdav_codec_g729ab_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)codec;
+
+ if(tsk_striequals(att_name, "fmtp")){
+ if(g729a->encoder.vad_enable){
+ return tsk_strdup("annexb=yes");
+ }
+ else{
+ return tsk_strdup("annexb=no");
+ }
+ }
+ return tsk_null;
+}
+
+
+
+
+
+
+/* ============ Internal functions ================= */
+
+
+/**
+* Converts from bitstream (ITU bits) to int16_t value
+* @param no_of_bits number of bits to read
+* @param bitstream array containing bits
+* @retval decimal value of bit pattern
+*/
+static int16_t bin2int(int16_t no_of_bits, const int16_t *bitstream)
+{
+ int16_t value, i;
+ int16_t bit;
+
+ value = 0;
+ for(i = 0; i < no_of_bits; i++){
+ value <<= 1;
+ bit = *bitstream++;
+ if (bit == BIT_1){
+ value += 1;
+ }
+ }
+ return(value);
+}
+
+/*----------------------------------------------------------------------------
+ * int2bin convert integer to binary and write the bits bitstream array
+ *----------------------------------------------------------------------------
+ */
+
+/**
+* Writes int16_t value to bitstream
+* @param value decimal value to write
+* @param no_of_bits number of bits from value to write
+* @param bitstream pointer to the destination stream (ITU bits)
+*/
+static void int2bin(int16_t value, int16_t no_of_bits, int16_t *bitstream)
+{
+ int16_t *pt_bitstream;
+ int16_t i, bit;
+
+ pt_bitstream = bitstream + no_of_bits;
+
+ for (i = 0; i < no_of_bits; i++){
+ bit = value & (int16_t)0x0001; /* get lsb */
+ if (bit == 0){
+ *--pt_bitstream = BIT_0;
+ }
+ else{
+ *--pt_bitstream = BIT_1;
+ }
+ value >>= 1;
+ }
+}
+
+/**
+* UnPack RTP bitstream as unpacked ITU stream
+* @param bitstream RTP bitstream to unpack
+* @param bits ITU bitstream used as destination (0 - BIT_0, 1 - BIT_1)
+* @param len length of the RTP bitstream
+*/
+static void unpack_G729(const uint8_t bitstream[], int16_t bits[], int len)
+{
+ int16_t i;
+ *bits++ = SYNC_WORD; /* bit[0], at receiver this bits indicates BFI */
+ switch(len){
+ case 10:
+ *bits++ = SIZE_WORD;
+ break;
+ case 8: // RATE_6400
+ case 15: //RATE_11800
+ default:
+ TSK_DEBUG_ERROR("%d is an invalid lenght value", len);
+ return;
+ }
+
+ for(i=0; i<len; i++){
+ int2bin(bitstream[i], 8, &bits[i*8]);
+ }
+}
+
+/**
+* UnPack RTP bitstream containing SID frame as unpacked ITU stream
+* @param bitstream RTP bitstream to unpack
+* @param bits ITU bitstream used as destination (0 - BIT_0, 1 - BIT_1)
+*/
+static void unpack_SID(const uint8_t bitstream[], int16_t bits[])
+{
+ *bits++ = SYNC_WORD;
+ *bits++ = RATE_SID_OCTET;
+ int2bin((int16_t)bitstream[0], 8, &bits[0]);
+ int2bin((int16_t)bitstream[1], 8, &bits[8]);
+}
+
+/**
+* Pack ITU bits into RTP stream
+* @param ituBits ITU stream to pack (80 shorts)
+* @param bitstream RTP bitstream (80 bits, 5 shorts, 10 bytes)
+*/
+static void pack_G729(const int16_t ituBits[], uint8_t bitstream[])
+{
+ int16_t word16, i;
+ for(i=0; i<5; i++){
+ word16 = bin2int(16, (int16_t*)&ituBits[i*16]);
+ bitstream[i*2] = word16>>8, bitstream[(i*2)+1] = (word16 & 0xFF);
+ }
+}
+
+/**
+* Pack ITU bits containing SID frame as RTP stream
+* @param ituBits ITU stream to pack
+* @param bitstream RTP bitstream (15 bits, 1 short, 2 bytes)
+*/
+static void pack_SID(const int16_t ituBits[], uint8_t bitstream[])
+{
+ int16_t word16 = bin2int(16, ituBits);
+ bitstream[0] = word16>>8, bitstream[1] = (word16 & 0xFF);
+}
+
+
+//
+// g729ab Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_g729ab_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_g729ab_t *g729a = self;
+ if(g729a){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ g729a->encoder.vad_enable = G729_ENABLE_VAD; // AnnexB
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_g729ab_dtor(tsk_object_t * self)
+{
+ tdav_codec_g729ab_t *g729a = self;
+ if(g729a){
+ /* deinit base */
+ tmedia_codec_audio_deinit(g729a);
+ /* deinit self */
+
+ }
+
+ return self;
+}
/* object definition: tsk_object runtime type descriptor for the G.729AB codec */
static const tsk_object_def_t tdav_codec_g729ab_def_s =
{
    sizeof(tdav_codec_g729ab_t), /* instance size */
    tdav_codec_g729ab_ctor,      /* constructor */
    tdav_codec_g729ab_dtor,      /* destructor */
    tmedia_codec_cmp,            /* comparator shared by all codec plugins */
};
/* plugin definition: registers G.729AB with the media framework */
static const tmedia_codec_plugin_def_t tdav_codec_g729ab_plugin_def_s =
{
    &tdav_codec_g729ab_def_s,

    tmedia_audio,
    tmedia_codec_id_g729ab,
    "g729",                   /* SDP encoding name */
    "g729ab Codec (libg729)", /* human-readable description */
    TMEDIA_CODEC_FORMAT_G729,
    tsk_false,                /* static (non-dynamic) payload type */
    8000, // rate

    { /* audio */
        1, // channels
        0 // ptime @deprecated
    },

    /* video */
    {0},

    tdav_codec_g729ab_set, /* tsk_null (see #define above) */
    tdav_codec_g729ab_open,
    tdav_codec_g729ab_close,
    tdav_codec_g729ab_encode,
    tdav_codec_g729ab_decode,
    tdav_codec_g729ab_sdp_att_match,
    tdav_codec_g729ab_sdp_att_get
};
/* exported plugin handle */
const tmedia_codec_plugin_def_t *tdav_codec_g729ab_plugin_def_t = &tdav_codec_g729ab_plugin_def_s;
+
+#endif /* HAVE_G729 */
diff --git a/tinyDAV/src/codecs/gsm/tdav_codec_gsm.c b/tinyDAV/src/codecs/gsm/tdav_codec_gsm.c
new file mode 100644
index 0000000..8b5f1bc
--- /dev/null
+++ b/tinyDAV/src/codecs/gsm/tdav_codec_gsm.c
@@ -0,0 +1,209 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_gsm.c
+ * @brief GSM Full Rate Codec (Based on libgsm)
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/gsm/tdav_codec_gsm.h"
+
+#if HAVE_LIBGSM
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_GSM_FRAME_SIZE 33
+
+/* ============ GSM Plugin interface ================= */
+
+#define tdav_codec_gsm_sdp_att_get tsk_null
+
+int tdav_codec_gsm_open(tmedia_codec_t* self)
+{
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(!gsm->encoder && !(gsm->encoder = gsm_create())){
+ TSK_DEBUG_ERROR("Failed to create GSM encoder");
+ return -2;
+ }
+ if(!gsm->decoder && !(gsm->decoder = gsm_create())){
+ TSK_DEBUG_ERROR("Failed to create GSM decoder");
+ return -3;
+ }
+
+ return 0;
+}
+
+int tdav_codec_gsm_close(tmedia_codec_t* self)
+{
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(gsm->encoder){
+ gsm_destroy(gsm->encoder);
+ gsm->encoder = tsk_null;
+ }
+ if(gsm->decoder){
+ gsm_destroy(gsm->decoder);
+ gsm->decoder = tsk_null;
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_gsm_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t out_size;
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = ((in_size / (TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(self) * sizeof(short))) * TDAV_GSM_FRAME_SIZE);
+
+ /* allocate new buffer if needed */
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ gsm_encode(gsm->encoder, (gsm_signal*)in_data, (gsm_byte*)*out_data);
+
+ return out_size;
+}
+
+tsk_size_t tdav_codec_gsm_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size;
+ int ret;
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data || (in_size % TDAV_GSM_FRAME_SIZE)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size / TDAV_GSM_FRAME_SIZE) * (TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_DECODING(self) * sizeof(short));
+
+ /* allocate new buffer if needed */
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ ret = gsm_decode(gsm->decoder, (gsm_byte*)in_data, (gsm_signal*)*out_data);
+
+ return out_size;
+}
+
+tsk_bool_t tdav_codec_gsm_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// GSM Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_gsm_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_gsm_t *gsm = self;
+ if(gsm){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_gsm_dtor(tsk_object_t * self)
+{
+ tdav_codec_gsm_t *gsm = self;
+ if(gsm){
+ /* deinit base */
+ tmedia_codec_audio_deinit(gsm);
+ /* deinit self */
+ if(gsm->encoder){
+ gsm_destroy(gsm->encoder);
+ }
+ if(gsm->decoder){
+ gsm_destroy(gsm->decoder);
+ }
+ }
+
+ return self;
+}
/* object definition: tsk_object runtime type descriptor for the GSM codec */
static const tsk_object_def_t tdav_codec_gsm_def_s =
{
    sizeof(tdav_codec_gsm_t), /* instance size */
    tdav_codec_gsm_ctor,      /* constructor */
    tdav_codec_gsm_dtor,      /* destructor */
    tmedia_codec_cmp,         /* comparator shared by all codec plugins */
};
/* plugin definition: registers GSM-FR with the media framework */
static const tmedia_codec_plugin_def_t tdav_codec_gsm_plugin_def_s =
{
    &tdav_codec_gsm_def_s,

    tmedia_audio,
    tmedia_codec_id_gsm,
    "GSM",                     /* SDP encoding name */
    "GSM Full Rate (libgsm)",  /* human-readable description */
    TMEDIA_CODEC_FORMAT_GSM,
    tsk_false,                 /* static (non-dynamic) payload type */
    8000, // rate

    { /* audio */
        1, // channels
        0 // ptime @deprecated
    },

    /* video */
    {0},

    tsk_null, // set()
    tdav_codec_gsm_open,
    tdav_codec_gsm_close,
    tdav_codec_gsm_encode,
    tdav_codec_gsm_decode,
    tdav_codec_gsm_sdp_att_match,
    tdav_codec_gsm_sdp_att_get /* tsk_null (see #define above) */
};
/* exported plugin handle */
const tmedia_codec_plugin_def_t *tdav_codec_gsm_plugin_def_t = &tdav_codec_gsm_plugin_def_s;
+
+
+#endif /* HAVE_LIBGSM */
diff --git a/tinyDAV/src/codecs/h261/tdav_codec_h261.c b/tinyDAV/src/codecs/h261/tdav_codec_h261.c
new file mode 100644
index 0000000..27aaab7
--- /dev/null
+++ b/tinyDAV/src/codecs/h261/tdav_codec_h261.c
@@ -0,0 +1,536 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h261.c
+ * @brief H.261 codec plugin.
+ * RTP payloader follows RFC 4587
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/h261/tdav_codec_h261.h"
+
+#if HAVE_FFMPEG
+
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tnet_endianness.h"
+
+#include "tsk_string.h"
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define RTP_PAYLOAD_SIZE 700
+#define H261_HEADER_SIZE 4
+
+static void *run(void* self);
+static void tdav_codec_h261_rtp_callback(tdav_codec_h261_t *self, const void *data, tsk_size_t size, tsk_bool_t marker);
+static void tdav_codec_h261_encap(const tdav_codec_h261_t* h261, const uint8_t* pdata, tsk_size_t size);
+
+/* ============ H.261 Plugin interface ================= */
+
+//
+// H.261 object definition
+//
+int tdav_codec_h261_open(tmedia_codec_t* self)
+{
+ int ret;
+ int size;
+
+ tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;
+
+ if(!h261){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is not opened */
+
+ //
+ // Encoder
+ //
+ if(!(h261->encoder.codec = avcodec_find_encoder(CODEC_ID_H261))){
+ TSK_DEBUG_ERROR("Failed to find H.261 encoder");
+ return -2;
+ }
+ h261->encoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(h261->encoder.context);
+
+ h261->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+ h261->encoder.context->time_base.num = 1;
+ h261->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(h261)->out.fps;
+ h261->encoder.context->width = TMEDIA_CODEC_VIDEO(h261)->out.width;
+ h261->encoder.context->height = TMEDIA_CODEC_VIDEO(h261)->out.height;
+
+ /*h261->encoder.context->mb_qmin =*/ h261->encoder.context->qmin = 4;
+ /*h261->encoder.context->mb_qmax =*/ h261->encoder.context->qmax = 31;
+ h261->encoder.context->mb_decision = FF_MB_DECISION_SIMPLE;
+
+ h261->encoder.context->thread_count = 1;
+ h261->encoder.context->rtp_payload_size = RTP_PAYLOAD_SIZE;
+ h261->encoder.context->opaque = tsk_null;
+ h261->encoder.context->bit_rate = (float) (500000) * 0.80f;
+ h261->encoder.context->bit_rate_tolerance = (int) (500000 * 0.20f);
+ h261->encoder.context->gop_size = TMEDIA_CODEC_VIDEO(h261)->out.fps*4; /* each 4 seconds */
+
+ // Picture (YUV 420)
+ if(!(h261->encoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create encoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(h261->encoder.picture);
+ //if((ret = avpicture_alloc((AVPicture*)h261->encoder.picture, PIX_FMT_YUV420P, h261->encoder.context->width, h261->encoder.context->height))){
+ // TSK_DEBUG_ERROR("Failed to allocate encoder picture");
+ // return ret;
+ //}
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, h261->encoder.context->width, h261->encoder.context->height);
+ if(!(h261->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
+ return -2;
+ }
+
+ // Open encoder
+ if((ret = avcodec_open(h261->encoder.context, h261->encoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open H.261 encoder");
+ return ret;
+ }
+
+ //
+ // Decoder
+ //
+ if(!(h261->decoder.codec = avcodec_find_decoder(CODEC_ID_H261))){
+ TSK_DEBUG_ERROR("Failed to find H.261 decoder");
+ }
+ h261->decoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(h261->decoder.context);
+
+ h261->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+ h261->decoder.context->width = TMEDIA_CODEC_VIDEO(h261)->in.width;
+ h261->decoder.context->height = TMEDIA_CODEC_VIDEO(h261)->in.height;
+
+ // Picture (YUV 420)
+ if(!(h261->decoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create decoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(h261->decoder.picture);
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, h261->decoder.context->width, h261->decoder.context->height);
+ if(!(h261->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
+ return -2;
+ }
+
+ // Open decoder
+ if((ret = avcodec_open(h261->decoder.context, h261->decoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open H.261 decoder");
+ return ret;
+ }
+
+ return 0;
+}
+
+int tdav_codec_h261_close(tmedia_codec_t* self)
+{
+ tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;
+
+ if(!h261){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is opened */
+
+ //
+ // Encoder
+ //
+ if(h261->encoder.context){
+ avcodec_close(h261->encoder.context);
+ av_free(h261->encoder.context);
+ h261->encoder.context = tsk_null;
+ }
+ if(h261->encoder.picture){
+ av_free(h261->encoder.picture);
+ }
+ if(h261->encoder.buffer){
+ TSK_FREE(h261->encoder.buffer);
+ }
+
+ //
+ // Decoder
+ //
+ if(h261->decoder.context){
+ avcodec_close(h261->decoder.context);
+ av_free(h261->decoder.context);
+ h261->decoder.context = tsk_null;
+ }
+ if(h261->decoder.picture){
+ av_free(h261->decoder.picture);
+ h261->decoder.picture = tsk_null;
+ }
+ if(h261->decoder.accumulator){
+ TSK_FREE(h261->decoder.accumulator);
+ h261->decoder.accumulator_pos = 0;
+ }
+
+ return 0;
+}
+
/* Encodes one YUV420 frame with FFmpeg.
 * Always returns 0: the coded bitstream is NOT returned through out_data but
 * split into RTP-sized chunks and delivered via tdav_codec_h261_encap() ->
 * tdav_codec_h261_rtp_callback().
 * NOTE(review): *out_data is freed on entry -- presumably so the session
 * layer never sees stale data; callers must not rely on it afterwards. */
tsk_size_t tdav_codec_h261_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
{
    int ret;
    int size;

    tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;

    if(!self || !in_data || !in_size || !out_data){
        TSK_DEBUG_ERROR("Invalid parameter");
        return 0;
    }

    // delete old buffer
    if(*out_data){
        TSK_FREE(*out_data);
    }

    // wrap yuv420 buffer (no copy: picture planes point into in_data)
    size = avpicture_fill((AVPicture *)h261->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h261->encoder.context->width, h261->encoder.context->height);
    if(size != in_size){
        /* guard: caller's buffer does not match the configured resolution */
        TSK_DEBUG_ERROR("Invalid size");
        return 0;
    }

    // Encode data
    h261->encoder.picture->pts = AV_NOPTS_VALUE;
    //h261->encoder.picture->pict_type = FF_I_TYPE;
    ret = avcodec_encode_video(h261->encoder.context, h261->encoder.buffer, size, h261->encoder.picture);
    if(ret > 0){
        /* ret = number of coded bytes; packetize and send via callback */
        tdav_codec_h261_encap(h261, h261->encoder.buffer, (tsk_size_t)ret);
    }

    return 0;
}
+
/* Reassembles H.261 RTP payloads (RFC 4587) into a complete coded frame and
 * decodes it when the RTP marker bit signals the last packet of the frame.
 * @param proto_hdr the trtp_rtp_header_t of the packet (seq num + marker)
 * @retval decoded YUV420 frame size in bytes, or 0 (frame not yet complete,
 *         or error) */
tsk_size_t tdav_codec_h261_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
    uint8_t sbit, ebit;
    const uint8_t* pdata = in_data;
    const uint8_t* pay_ptr;
    tsk_size_t pay_size;
    tsk_size_t xsize, retsize = 0;
    int got_picture_ptr;
    int ret;

    tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;
    const trtp_rtp_header_t* rtp_hdr = proto_hdr;

    if(!self || !in_data || !in_size || !out_data || !h261->decoder.context){
        TSK_DEBUG_ERROR("Invalid parameter");
        return 0;
    }

    /* RFC 4587
    0                   1                   2                   3
    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |SBIT |EBIT |I|V| GOBN | MBAP | QUANT | HMVD | VMVD |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    */
    /* NOTE(review): the first byte is read before the H261_HEADER_SIZE length
       check below; safe because in_size > 0 was verified, but the check would
       read better first */
    sbit = *pdata >> 5;            /* bits to skip at the start of the payload */
    ebit = (*pdata >> 2) & 0x07;   /* bits to ignore at the end of the payload */

    /* Check size */
    if(in_size < H261_HEADER_SIZE){
        TSK_DEBUG_ERROR("Too short");
        return 0;
    }

    pay_ptr = (pdata + H261_HEADER_SIZE);
    pay_size = (in_size - H261_HEADER_SIZE);
    xsize = avpicture_get_size(h261->decoder.context->pix_fmt, h261->decoder.context->width, h261->decoder.context->height);

    /* Packet lost? (detected, not concealed) */
    if(h261->decoder.last_seq != (rtp_hdr->seq_num - 1) && h261->decoder.last_seq){
        TSK_DEBUG_INFO("Packet lost");
    }
    h261->decoder.last_seq = rtp_hdr->seq_num;

    if((int)(h261->decoder.accumulator_pos + pay_size) <= xsize){

        /* merge the partial byte at the packet boundary: previous packet's
           trailing ebit bits + this packet's leading sbit bits form one byte */
        if((h261->decoder.ebit + sbit) == 8){ /* Perfect one Byte to clean up */
            if(h261->decoder.accumulator_pos){
                ((uint8_t*)h261->decoder.accumulator)[h261->decoder.accumulator_pos-1] =
                    (((uint8_t*)h261->decoder.accumulator)[h261->decoder.accumulator_pos-1] & (0xFF << h261->decoder.ebit)) |
                    (*pay_ptr << sbit);
            }
            pay_ptr++, pay_size--;
        }
        h261->decoder.ebit = ebit;

        memcpy(&((uint8_t*)h261->decoder.accumulator)[h261->decoder.accumulator_pos], pay_ptr, pay_size);
        h261->decoder.accumulator_pos += pay_size;
    }
    else{
        TSK_DEBUG_WARN("Buffer overflow");
        h261->decoder.accumulator_pos = 0;
        return 0;
    }

    /* marker bit == last packet of the coded frame: decode now */
    if(rtp_hdr->marker){
        AVPacket packet;
        /* allocate destination buffer */
        if(*out_max_size <xsize){
            if(!(*out_data = tsk_realloc(*out_data, xsize))){
                TSK_DEBUG_ERROR("Failed to allocate new buffer");
                *out_max_size = 0;
                h261->decoder.accumulator_pos = 0;
                return 0;
            }
            *out_max_size = xsize;
        }

        /* decode the picture */
        av_init_packet(&packet);
        packet.size = (int)h261->decoder.accumulator_pos;
        packet.data = h261->decoder.accumulator;
        ret = avcodec_decode_video2(h261->decoder.context, h261->decoder.picture, &got_picture_ptr, &packet);

        if(ret <0 || !got_picture_ptr){
            TSK_DEBUG_WARN("Failed to decode the buffer");
        }
        else{
            retsize = xsize;
            /* propagate the (possibly changed) coded resolution */
            TMEDIA_CODEC_VIDEO(h261)->in.width = h261->decoder.context->width;
            TMEDIA_CODEC_VIDEO(h261)->in.height = h261->decoder.context->height;
            /* copy picture into a linear buffer */
            avpicture_layout((AVPicture *)h261->decoder.picture, h261->decoder.context->pix_fmt, (int)h261->decoder.context->width, (int)h261->decoder.context->height,
                             *out_data, (int)retsize);
        }
        /* in all cases: reset accumulator */
        h261->decoder.accumulator_pos = 0;
    }

    return retsize;
}
+
+tsk_bool_t tdav_codec_h261_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ int ret;
+ unsigned maxbr, fps, width, height;
+ tmedia_codec_video_t* h261 = (tmedia_codec_video_t*)codec;
+
+ if(tsk_striequals(att_value, "fmtp")){
+ if(!(ret = tmedia_codec_parse_fmtp(att_value, &maxbr, &fps, &width, &height))){
+ h261->in.max_br = h261->out.max_br = maxbr * 1000;
+ h261->in.fps = h261->out.fps = fps;
+ h261->in.width = h261->out.width = width;
+ h261->in.height = h261->out.height = height;
+ return tsk_true;
+ }
+ else{
+ TSK_DEBUG_WARN("Failed to match fmtp [%s]", att_value);
+ }
+ }
+ return tsk_false;
+}
+
+char* tdav_codec_h261_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+#if 0
+ return tsk_strdup("CIF=2/MaxBR=3840;QCIF=2/MaxBR=1920");
+#else
+ return tsk_strdup("QCIF=2");
+#endif
+}
+
+/* constructor */
+static tsk_object_t* tdav_codec_h261_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_h261_t *h261 = self;
+ if(h261){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h261_dtor(tsk_object_t * self)
+{
+ tdav_codec_h261_t *h261 = self;
+ if(h261){
+ /* deinit base */
+ tmedia_codec_video_deinit(h261); // will call close()
+ /* deinit self */
+ TSK_FREE(h261->rtp.ptr);
+ h261->rtp.size = 0;
+ }
+
+ return self;
+}
/* object definition: tsk_object runtime type descriptor for the H.261 codec */
static const tsk_object_def_t tdav_codec_h261_def_s =
{
    sizeof(tdav_codec_h261_t), /* instance size */
    tdav_codec_h261_ctor,      /* constructor */
    tdav_codec_h261_dtor,      /* destructor */
    tmedia_codec_cmp,          /* comparator shared by all codec plugins */
};
/* plugin definition: registers H.261 with the media framework */
static const tmedia_codec_plugin_def_t tdav_codec_h261_plugin_def_s =
{
    &tdav_codec_h261_def_s,

    tmedia_video,
    tmedia_codec_id_h261,
    "H261",                /* SDP encoding name */
    "H261 codec (FFmpeg)", /* human-readable description */
    TMEDIA_CODEC_FORMAT_H261,
    tsk_false,             /* static (non-dynamic) payload type */
    90000, // rate

    /* audio */
    { 0 },

    /* video: default QCIF resolution and frame rate */
    {176, 144, 15},

    tsk_null, // set()
    tdav_codec_h261_open,
    tdav_codec_h261_close,
    tdav_codec_h261_encode,
    tdav_codec_h261_decode,
    tdav_codec_h261_sdp_att_match,
    tdav_codec_h261_sdp_att_get
};
/* exported plugin handle */
const tmedia_codec_plugin_def_t *tdav_codec_h261_plugin_def_t = &tdav_codec_h261_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ Callbacks ================= */
+
/* Splits one coded H.261 frame into RTP-sized chunks on PSC/GOB start-code
 * boundaries and hands each chunk to tdav_codec_h261_rtp_callback().
 * Assumes size >= 3 ('size - 3' below is computed on the unsigned tsk_size_t).
 * NOTE(review): the marker argument '(last_index == size)' is always false at
 * that call site (i < size there) -- presumably intended as "not last chunk";
 * the final chunk is sent with marker tsk_true. TODO confirm. */
static void tdav_codec_h261_encap(const tdav_codec_h261_t* h261, const uint8_t* pdata, tsk_size_t size)
{
    uint32_t i, last_index = 0;

    /* frame fits in a single payload: send as-is */
    if(size < RTP_PAYLOAD_SIZE){
        goto last;
    }

    for(i = 4; i<(size - 4); i++){
        if(pdata[i] == 0x00 && pdata[i+1] == 0x00 && pdata[i+2]>=0x80){ /* PSC or (GBSC) found */
            if((i - last_index) >= RTP_PAYLOAD_SIZE){
                tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata+last_index,
                                             (i - last_index), (last_index == size));
            }
            last_index = i;
        }
    }
last:
    /* flush whatever remains after the final start code */
    if(last_index < size - 3/*PSC/GBSC size*/){
        tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata + last_index,
                                     (size - last_index), tsk_true);
    }
}
+
+//static void *run(void* self)
+//{
+// uint32_t i, last_index;
+// tsk_list_item_t *curr;
+//
+// const uint8_t* pdata;
+// tsk_size_t size;
+//
+// const tdav_codec_h261_t* h261 = ((tdav_runnable_video_t*)self)->userdata;
+//
+// TSK_DEBUG_INFO("H261 thread === START");
+//
+// TSK_RUNNABLE_RUN_BEGIN(self);
+//
+// if((curr = TSK_RUNNABLE_POP_FIRST(self))){
+// /* 4 is sizeof(uint32_t) */
+// pdata = ((const tsk_buffer_t*)curr->data)->data;
+// size = ((const tsk_buffer_t*)curr->data)->size;
+// last_index = 0;
+//
+// if(size < RTP_PAYLOAD_SIZE){
+// goto last;
+// }
+//
+// for(i = 4; i<(size - 4); i++){
+// if(pdata[i] == 0x00 && pdata[i+1] == 0x00 && pdata[i+2]>=0x80){ /* PSC or (GBSC) found */
+// if((i - last_index) >= RTP_PAYLOAD_SIZE){
+// tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata+last_index,
+// (i - last_index), (last_index == size));
+// }
+// last_index = i;
+// }
+// }
+//last:
+// if(last_index < size - 3/*PSC/GBSC size*/){
+// tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata + last_index,
+// (size - last_index), tsk_true);
+// }
+//
+// tsk_object_unref(curr);
+// }
+//
+// TSK_RUNNABLE_RUN_END(self);
+//
+// TSK_DEBUG_INFO("H261 thread === STOP");
+//
+// return tsk_null;
+//}
+
/* Delivers one RTP payload chunk produced by the encoder.
 * NOTE(review): empty stub in this version -- chunks are silently dropped;
 * presumably meant to prepend the RFC 4587 H.261 header and forward the
 * packet to the session layer (this plugin is @deprecated, see
 * tdav_codec_ffmpeg_h261_is_supported()). */
static void tdav_codec_h261_rtp_callback(tdav_codec_h261_t *self, const void *data, tsk_size_t size, tsk_bool_t marker)
{

}
+
/* Reports whether the FFmpeg H.261 plugin may be used.
 * Deliberately hard-wired to tsk_false because the plugin is @deprecated;
 * the real capability probe is kept in the comment for reference. */
tsk_bool_t tdav_codec_ffmpeg_h261_is_supported()
{
    return /*(avcodec_find_encoder(CODEC_ID_H261) && avcodec_find_decoder(CODEC_ID_H261))*/tsk_false /* @deprecated */;
}
+
+
+#endif /* HAVE_FFMPEG */ \ No newline at end of file
diff --git a/tinyDAV/src/codecs/h263/tdav_codec_h263.c b/tinyDAV/src/codecs/h263/tdav_codec_h263.c
new file mode 100644
index 0000000..ed5d77f
--- /dev/null
+++ b/tinyDAV/src/codecs/h263/tdav_codec_h263.c
@@ -0,0 +1,1373 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h263.c
+ * @brief H.263-1996 and H.263-1998 codec plugins.
+ * RTP payloader follows RFC 4629 for H263+ and RFC 2190 for H263.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/h263/tdav_codec_h263.h"
+
+#if HAVE_FFMPEG
+
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tnet_endianness.h"
+
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_string.h"
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <libavcodec/avcodec.h>
+
+#define TDAV_H263_GOP_SIZE_IN_SECONDS 25
+#define RTP_PAYLOAD_SIZE 750
+
+#define H263P_HEADER_SIZE 2
+#define H263_HEADER_MODE_A_SIZE 4
+#define H263_HEADER_MODE_B_SIZE 8
+#define H263_HEADER_MODE_C_SIZE 12
+
+#define tdav_codec_h263p_set tdav_codec_h263_set
+#define tdav_codec_h263p_open tdav_codec_h263_open
+#define tdav_codec_h263p_close tdav_codec_h263_close
+#define tdav_codec_h263p_encode tdav_codec_h263_encode
+#define tdav_codec_h263p_sdp_att_match tdav_codec_h263_sdp_att_match
+#define tdav_codec_h263p_sdp_att_get tdav_codec_h263_sdp_att_get
+
+#define tdav_codec_h263pp_set tdav_codec_h263_set
+#define tdav_codec_h263pp_open tdav_codec_h263_open
+#define tdav_codec_h263pp_close tdav_codec_h263_close
+#define tdav_codec_h263pp_encode tdav_codec_h263_encode
+#define tdav_codec_h263pp_decode tdav_codec_h263_decode
+#define tdav_codec_h263pp_sdp_att_match tdav_codec_h263_sdp_att_match
+#define tdav_codec_h263pp_sdp_att_get tdav_codec_h263_sdp_att_get
+
+#define TDAV_CODEC_H263(self) ((tdav_codec_h263_t*)(self))
+
+/* Discriminates the three H.263 profiles implemented by this file.
+ * The value selects the RTP packetization mode (RFC 2190 for 1996,
+ * RFC 4629 for 1998/2000) and the extra encoder flags set in open(). */
+typedef enum tdav_codec_h263_type_e
+{
+    tdav_codec_h263_1996,
+    tdav_codec_h263_1998,
+    tdav_codec_h263_2000,
+}
+tdav_codec_h263_type_t;
+
+/** H.263-1996 codec context; also embedded (via TDAV_DECLARE_CODEC_H263)
+ *  by the H.263-1998 and H.263-2000 variants. */
+typedef struct tdav_codec_h263_s
+{
+    TMEDIA_DECLARE_CODEC_VIDEO;
+
+    tdav_codec_h263_type_t type;    // which H.263 profile this instance is
+
+    struct{
+        uint8_t* ptr;               // scratch buffer used to prepend RTP payload headers
+        tsk_size_t size;            // allocated size of 'ptr'
+    } rtp;
+
+    // Encoder
+    struct{
+        AVCodec* codec;
+        AVCodecContext* context;
+        AVFrame* picture;           // wraps the caller's YUV420 input (filled per-frame)
+        void* buffer;               // output bitstream buffer (sized for one raw frame)
+        tsk_bool_t force_idr;       // request an intra frame on the next encode()
+        int32_t quality;            // quantizer in [1-31]; lower = better quality
+        int32_t max_bw_kpbs;        // upload bandwidth cap (kbps)
+    } encoder;
+
+    // decoder
+    struct{
+        AVCodec* codec;
+        AVCodecContext* context;
+        AVFrame* picture;
+
+        void* accumulator;          // reassembly buffer for fragmented frames
+        uint8_t ebit;               // EBIT of the previous fragment (RFC 2190 bit merge)
+        tsk_size_t accumulator_pos; // bytes currently accumulated
+        uint16_t last_seq;          // last RTP sequence number, for loss detection
+    } decoder;
+}
+tdav_codec_h263_t;
+
+#define TDAV_DECLARE_CODEC_H263 tdav_codec_h263_t __codec_h263__
+
+static int tdav_codec_h263_init(tdav_codec_h263_t* self, tdav_codec_h263_type_t type, enum CodecID encoder, enum CodecID decoder);
+static int tdav_codec_h263_deinit(tdav_codec_h263_t* self);
+static int tdav_codec_h263_open_encoder(tdav_codec_h263_t* self);
+static int tdav_codec_h263_open_decoder(tdav_codec_h263_t* self);
+static int tdav_codec_h263_close_encoder(tdav_codec_h263_t* self);
+static int tdav_codec_h263_close_decoder(tdav_codec_h263_t* self);
+
+/** H.263-1998 codec */
+typedef struct tdav_codec_h263p_s
+{
+ TDAV_DECLARE_CODEC_H263;
+}
+tdav_codec_h263p_t;
+
+/** H.263-2000 codec */
+typedef struct tdav_codec_h263pp_s
+{
+ TDAV_DECLARE_CODEC_H263;
+}
+tdav_codec_h263pp_t;
+
+
+static void tdav_codec_h263_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t marker);
+static void tdav_codec_h263p_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t frag, tsk_bool_t marker);
+
+static void tdav_codec_h263_encap(const tdav_codec_h263_t* h263, const uint8_t* pdata, tsk_size_t size);
+
+
+/* ============ Common To all H263 codecs ================= */
+
+/* Runtime parameter setter shared by all three H.263 plugins.
+ * Handles only the int32 "action" parameter:
+ *  - encode_idr: force an intra frame on the next encode() call;
+ *  - bw_down/bw_up: step the quantizer (clamped to [1..31]) and refresh
+ *    the ffmpeg global_quality accordingly.
+ * Returns 0 when the parameter was handled, -1 otherwise.
+ * NOTE(review): the switch has no default case — unknown actions are
+ * silently accepted (still returns 0); presumably intentional. */
+static int tdav_codec_h263_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+    tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+    if(!self->opened){
+        TSK_DEBUG_ERROR("Codec not opened");
+        return -1;
+    }
+    if(param->value_type == tmedia_pvt_int32){
+        if(tsk_striequals(param->key, "action")){
+            tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+            switch(action){
+                case tmedia_codec_action_encode_idr:
+                {
+                    h263->encoder.force_idr = tsk_true;
+                    break;
+                }
+                case tmedia_codec_action_bw_down:
+                {
+                    // lower bandwidth => larger quantizer value
+                    h263->encoder.quality = TSK_CLAMP(1, (h263->encoder.quality + 1), 31);
+                    h263->encoder.context->global_quality = FF_QP2LAMBDA * h263->encoder.quality;
+                    break;
+                }
+                case tmedia_codec_action_bw_up:
+                {
+                    // higher bandwidth => smaller quantizer value
+                    h263->encoder.quality = TSK_CLAMP(1, (h263->encoder.quality - 1), 31);
+                    h263->encoder.context->global_quality = FF_QP2LAMBDA * h263->encoder.quality;
+                    break;
+                }
+            }
+            return 0;
+        }
+    }
+    return -1;
+}
+
+/* Common initializer shared by the H.263/H.263+/H.263++ constructors.
+ * Stores the profile type, looks up the ffmpeg encoder/decoder by id and
+ * records the configured upload bandwidth cap. Heavy allocations are
+ * deliberately deferred to open().
+ * Returns 0 on success, -1 on bad args, -2/-3 when the ffmpeg encoder or
+ * decoder is unavailable (both lookups are attempted regardless). */
+int tdav_codec_h263_init(tdav_codec_h263_t* self, tdav_codec_h263_type_t type, enum CodecID encoder, enum CodecID decoder)
+{
+    int ret = 0;
+
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    self->type = type;
+    self->encoder.quality = 1;  // best quality by default
+
+    if(!(self->encoder.codec = avcodec_find_encoder(encoder))){
+        TSK_DEBUG_ERROR("Failed to find [%d]encoder", encoder);
+        ret = -2;
+    }
+
+    if(!(self->decoder.codec = avcodec_find_decoder(decoder))){
+        TSK_DEBUG_ERROR("Failed to find [%d] decoder", decoder);
+        ret = -3;
+    }
+
+    self->encoder.max_bw_kpbs = tmedia_defaults_get_bandwidth_video_upload_max();
+
+    /* allocations MUST be done by open() */
+    return ret;
+}
+
+/* Common deinitializer: clears the cached codec pointers (owned by
+ * ffmpeg's registry, not freed here) and releases the RTP scratch
+ * buffer. FFmpeg contexts/frames are destroyed by close(), which the
+ * base class guarantees has run before the destructor. */
+int tdav_codec_h263_deinit(tdav_codec_h263_t* self)
+{
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    self->encoder.codec = tsk_null;
+    self->decoder.codec = tsk_null;
+
+    // FFMpeg resources are destroyed by close()
+
+
+
+    TSK_FREE(self->rtp.ptr);
+    self->rtp.size = 0;
+
+    return 0;
+}
+
+
+
+/* ============ H.263-1996 Plugin interface ================= */
+
+//
+// H.263-1996 object definition
+//
+/* tmedia open() hook: opens the ffmpeg encoder then the decoder.
+ * Returns 0 on success, or the failing sub-open's error code.
+ * NOTE(review): if the decoder open fails the already-opened encoder is
+ * left open here — presumably the base class close() path reclaims it;
+ * confirm before relying on repeated open() after failure. */
+static int tdav_codec_h263_open(tmedia_codec_t* self)
+{
+    int ret;
+
+    tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+
+    if(!h263){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* the caller (base class) already checked that the codec is not opened */
+
+    // Encoder
+    if((ret = tdav_codec_h263_open_encoder(h263))){
+        return ret;
+    }
+
+    // Decoder
+    if((ret = tdav_codec_h263_open_decoder(h263))){
+        return ret;
+    }
+
+    return ret;
+}
+
+/* tmedia close() hook: tears down both the encoder and the decoder.
+ * NOTE(review): the encoder-close return value is overwritten by the
+ * decoder-close one, so only the latter is reported (both currently
+ * always return 0). */
+static int tdav_codec_h263_close(tmedia_codec_t* self)
+{
+    tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+    int ret;
+
+    if(!h263){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* the caller (base class) already checked that the codec is opened */
+
+    // Encoder
+    ret = tdav_codec_h263_close_encoder(h263);
+    // Decoder
+    ret = tdav_codec_h263_close_decoder(h263);
+
+    return ret;
+}
+
+/* Encodes one raw YUV420 frame.
+ * The input buffer is wrapped (not copied) into the encoder AVFrame,
+ * encoded, and the resulting bitstream is pushed to the network through
+ * tdav_codec_h263_encap() -> RTP callbacks.
+ * Always returns 0: output is delivered via callbacks, never through
+ * *out_data. */
+static tsk_size_t tdav_codec_h263_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+    int ret;
+    int size;
+
+    tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+
+    if(!self || !in_data || !in_size || !out_data){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    // wrap yuv420 buffer
+    size = avpicture_fill((AVPicture *)h263->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h263->encoder.context->width, h263->encoder.context->height);
+    if(size != in_size){
+        /* guard: caller's buffer must match the negotiated frame geometry */
+        TSK_DEBUG_ERROR("Invalid size");
+        return 0;
+    }
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+    h263->encoder.picture->pict_type = h263->encoder.force_idr ? FF_I_TYPE : 0;
+#else
+    h263->encoder.picture->pict_type = h263->encoder.force_idr ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
+#endif
+    h263->encoder.picture->pts = AV_NOPTS_VALUE;
+    h263->encoder.picture->quality = h263->encoder.context->global_quality;
+    // encoder.buffer was sized to one raw frame in open_encoder(), hence 'size'
+    ret = avcodec_encode_video(h263->encoder.context, h263->encoder.buffer, size, h263->encoder.picture);
+    if(ret > 0){
+        tdav_codec_h263_encap(h263, h263->encoder.buffer, (tsk_size_t)ret);
+    }
+    h263->encoder.force_idr = tsk_false;  // one-shot flag
+
+    return 0;
+}
+
+/* Decodes RFC 2190 (H.263-1996) RTP payloads.
+ * Parses the mode A/B/C payload header (selected by the F and P bits),
+ * reassembles fragments into decoder.accumulator — merging the byte
+ * split across fragments when previous EBIT + current SBIT == 8 — and,
+ * on the RTP marker bit, decodes the accumulated picture with ffmpeg.
+ * Returns the size of the decoded YUV420 frame written into *out_data
+ * (reallocated as needed), or 0 when no complete frame is available or
+ * on error. */
+static tsk_size_t tdav_codec_h263_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+    uint8_t F, P, sbit, ebit;
+    const uint8_t* pdata = in_data;
+    const uint8_t* pay_ptr;
+    tsk_size_t pay_size;
+    tsk_size_t hdr_size;
+    tsk_size_t xsize, retsize = 0;
+    int got_picture_ptr;
+    int ret;
+
+    tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+    const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+    tsk_bool_t is_idr = tsk_false;
+
+    if(!self || !in_data || !in_size || !out_data || !h263->decoder.context){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    /* RFC 2190
+    get F and P bits, used to determine the header Mode (A, B or C)
+    F: 1 bit
+    The flag bit indicates the mode of the payload header. F=0, mode A;
+    F=1, mode B or mode C depending on P bit defined below.
+    P: 1 bit
+    Optional PB-frames mode as defined by the H.263 [4]. "0" implies
+    normal I or P frame, "1" PB-frames. When F=1, P also indicates modes:
+    mode B if P=0, mode C if P=1.
+
+    I: 1 bit.
+    Picture coding type, bit 9 in PTYPE defined by H.263[4], "0" is
+    intra-coded, "1" is inter-coded.
+    */
+    F = *pdata >> 7;
+    P = (*pdata >> 6) & 0x01;
+
+    /* SBIT and EBIT */
+    sbit = (*pdata >> 3) & 0x0F;
+    ebit = (*pdata & 0x07);
+
+    if(F == 0){
+        /* MODE A
+        0 1 2 3
+        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        |F|P|SBIT |EBIT | SRC |I|U|S|A|R |DBQ| TRB | TR |
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        */
+        hdr_size = H263_HEADER_MODE_A_SIZE;
+        is_idr = (in_size >= 2) && !(pdata[1] & 0x10) /* I bit == 0 => intra-coded */;
+    }
+    else if(P == 0){ // F=1 and P=0
+        /* MODE B
+        0 1 2 3
+        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        |F|P|SBIT |EBIT | SRC | QUANT | GOBN | MBA |R |
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        |I|U|S|A| HMV1 | VMV1 | HMV2 | VMV2 |
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        */
+        hdr_size = H263_HEADER_MODE_B_SIZE;
+        is_idr = (in_size >= 5) && !(pdata[4] & 0x80) /* I bit == 0 => intra-coded */;
+    }
+    else{ // F=1 and P=1
+        /* MODE C
+        0 1 2 3
+        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        |F|P|SBIT |EBIT | SRC | QUANT | GOBN | MBA |R |
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        |I|U|S|A| HMV1 | VMV1 | HMV2 | VMV2 |
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        | RR |DBQ| TRB | TR |
+        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+        */
+        hdr_size = H263_HEADER_MODE_C_SIZE;
+        is_idr = (in_size >= 5) && !(pdata[4] & 0x80) /* I bit == 0 => intra-coded */;
+    }
+
+    /* Check size */
+    if(in_size < hdr_size){
+        TSK_DEBUG_ERROR("Too short");
+        return 0;
+    }
+
+    pay_ptr = (pdata + hdr_size);
+    pay_size = (in_size - hdr_size);
+    xsize = avpicture_get_size(h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height);
+
+    /* Packet lost? */
+    if(h263->decoder.last_seq != (rtp_hdr->seq_num - 1) && h263->decoder.last_seq){
+        if(h263->decoder.last_seq == rtp_hdr->seq_num){
+            // Could happen on some stupid emulators
+            //TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+            return 0;
+        }
+        TSK_DEBUG_INFO("[H.263] Packet loss, seq_num=%d", rtp_hdr->seq_num);
+    }
+    h263->decoder.last_seq = rtp_hdr->seq_num;
+
+    if((int)(h263->decoder.accumulator_pos + pay_size) <= xsize){
+        if((h263->decoder.ebit + sbit) == 8){ /* Perfect one Byte to clean up */
+            if(h263->decoder.accumulator_pos){
+                // merge the byte split across the fragment boundary:
+                // keep the high (8-ebit) bits already stored, OR in the
+                // low (8-sbit) bits of this fragment's first byte
+                ((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos-1] = (((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos-1] & (0xFF << h263->decoder.ebit)) |
+                    (*pay_ptr & (0xFF >> sbit));
+            }
+            pay_ptr++, pay_size--;
+        }
+        h263->decoder.ebit = ebit;
+
+        memcpy(&((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos], pay_ptr, pay_size);
+        h263->decoder.accumulator_pos += pay_size;
+    }
+    else{
+        TSK_DEBUG_WARN("Buffer overflow");
+        h263->decoder.accumulator_pos = 0;
+        return 0;
+    }
+
+    if(rtp_hdr->marker){
+        AVPacket packet;
+        /* allocate destination buffer */
+        if(*out_max_size <xsize){
+            if(!(*out_data = tsk_realloc(*out_data, xsize))){
+                TSK_DEBUG_ERROR("Failed to allocate new buffer");
+                h263->decoder.accumulator_pos = 0;
+                *out_max_size = 0;
+                return 0;
+            }
+            *out_max_size = xsize;
+        }
+
+        av_init_packet(&packet);
+        packet.size = (int)h263->decoder.accumulator_pos;
+        packet.data = h263->decoder.accumulator;
+        ret = avcodec_decode_video2(h263->decoder.context, h263->decoder.picture, &got_picture_ptr, &packet);
+
+        if(ret < 0){
+            TSK_DEBUG_WARN("Failed to decode the buffer with error code = %d", ret);
+            // notify the session so it can request a new IDR from the peer
+            if(TMEDIA_CODEC_VIDEO(self)->in.callback){
+                TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+                TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+                TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+            }
+        }
+        else if(got_picture_ptr){
+            retsize = xsize;
+            // Is it IDR frame?
+            if(is_idr && TMEDIA_CODEC_VIDEO(self)->in.callback){
+                TSK_DEBUG_INFO("Decoded H.263 IDR");
+                TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr;
+                TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+                TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+            }
+            TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width;
+            TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height;
+            /* copy picture into a linear buffer */
+            avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, (int)h263->decoder.context->width, (int)h263->decoder.context->height,
+                *out_data, (int)retsize);
+        }
+        /* in all cases: reset accumulator */
+        h263->decoder.accumulator_pos = 0;
+    }
+
+    return retsize;
+}
+
+/* SDP negotiation: matches the remote "fmtp" attribute against our
+ * preferred video size and, on success, stores the negotiated
+ * width/height/fps on both the in and out directions.
+ * Unknown attributes are accepted (returns tsk_true). */
+static tsk_bool_t tdav_codec_h263_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+    if(tsk_striequals(att_name, "fmtp")){
+        unsigned width, height, fps;
+        if(tmedia_parse_video_fmtp(att_value, TMEDIA_CODEC_VIDEO(codec)->pref_size, &width, &height, &fps)){
+            TSK_DEBUG_ERROR("Failed to match fmtp=%s", att_value);
+            return tsk_false;
+        }
+        TMEDIA_CODEC_VIDEO(codec)->in.width = TMEDIA_CODEC_VIDEO(codec)->out.width = width;
+        TMEDIA_CODEC_VIDEO(codec)->in.height = TMEDIA_CODEC_VIDEO(codec)->out.height = height;
+        TMEDIA_CODEC_VIDEO(codec)->in.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps = fps;
+    }
+#if 0
+    else if(tsk_striequals(att_name, "imageattr")){
+        unsigned in_width, in_height, out_width, out_height;
+        if(tmedia_parse_video_imageattr(att_value, TMEDIA_CODEC_VIDEO(codec)->pref_size, &in_width, &in_height, &out_width, &out_height) != 0){
+            return tsk_false;
+        }
+        TMEDIA_CODEC_VIDEO(codec)->in.width = in_width;
+        TMEDIA_CODEC_VIDEO(codec)->in.height = in_height;
+        TMEDIA_CODEC_VIDEO(codec)->out.width = out_width;
+        TMEDIA_CODEC_VIDEO(codec)->out.height = out_height;
+    }
+#endif
+
+    return tsk_true;
+}
+
+/* SDP negotiation: produces the local "fmtp" attribute value based on
+ * the closest CIF-family size to our preferred video size.
+ * Returns a newly allocated string (caller frees) or tsk_null for
+ * unsupported attributes / on error. */
+static char* tdav_codec_h263_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+    if(tsk_striequals(att_name, "fmtp")){
+        tmedia_pref_video_size_t cif_vs;
+        if(tmedia_video_get_closest_cif_size(TMEDIA_CODEC_VIDEO(codec)->pref_size, &cif_vs)){
+            TSK_DEBUG_ERROR("Failed to get closest CIF family size");
+            return tsk_null;
+        }
+        return tmedia_get_video_fmtp(cif_vs);
+    }
+#if 0
+    else if(tsk_striequals(att_name, "imageattr")){
+        return tmedia_get_video_imageattr(TMEDIA_CODEC_VIDEO(codec)->pref_size,
+            TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height, TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height);
+    }
+#endif
+    return tsk_null;
+}
+
+/* constructor: init H.263-1996 with the ffmpeg H263 encoder/decoder */
+static tsk_object_t* tdav_codec_h263_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_codec_h263_t *h263 = self;
+    if(h263){
+        /* init base: called by tmedia_codec_create() */
+        /* init self */
+        tdav_codec_h263_init(TDAV_CODEC_H263(self), tdav_codec_h263_1996, CODEC_ID_H263, CODEC_ID_H263);
+    }
+    return self;
+}
+/* destructor: deinit base video codec, then the shared H.263 state */
+static tsk_object_t* tdav_codec_h263_dtor(tsk_object_t * self)
+{
+    tdav_codec_h263_t *h263 = self;
+    if(h263){
+        /* deinit base */
+        tmedia_codec_video_deinit(h263);
+        /* deinit self */
+        tdav_codec_h263_deinit(TDAV_CODEC_H263(self));
+
+    }
+
+    return self;
+}
+/* object definition (size + life-cycle hooks for the tsk object system) */
+static const tsk_object_def_t tdav_codec_h263_def_s =
+{
+    sizeof(tdav_codec_h263_t),
+    tdav_codec_h263_ctor,
+    tdav_codec_h263_dtor,
+    tmedia_codec_cmp,
+};
+/* plugin definition: registers H.263-1996 with the tmedia codec layer */
+static const tmedia_codec_plugin_def_t tdav_codec_h263_plugin_def_s =
+{
+    &tdav_codec_h263_def_s,
+
+    tmedia_video,
+    tmedia_codec_id_h263,
+    "H263",
+    "H263-1996 codec (FFmpeg)",
+    TMEDIA_CODEC_FORMAT_H263,
+    tsk_false,   // static payload type (no dynamic PT negotiation)
+    90000, // rate
+
+    /* audio */
+    { 0 },
+
+    /* video */
+    {176, 144, 15},
+
+    tdav_codec_h263_set,
+    tdav_codec_h263_open,
+    tdav_codec_h263_close,
+    tdav_codec_h263_encode,
+    tdav_codec_h263_decode,
+    tdav_codec_h263_sdp_att_match,
+    tdav_codec_h263_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h263_plugin_def_t = &tdav_codec_h263_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ H.263-1998 Plugin interface ================= */
+
+//
+// H.263-1998 object definition
+//
+
+/* Decodes RFC 4629 (H.263-1998/2000) RTP payloads.
+ * Parses the 2-byte general payload header (P, V, PLEN, PEBIT), skips
+ * any extra picture header, restores the two zero bytes elided by the
+ * P bit, accumulates fragments, and decodes with ffmpeg on the RTP
+ * marker. Returns the decoded YUV420 frame size written to *out_data,
+ * or 0 when no complete frame is available or on error.
+ * NOTE(review): unlike the RFC 2190 path, PEBIT is ignored here (see
+ * warning below) and the input buffer is written to when P==1. */
+static tsk_size_t tdav_codec_h263p_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+    uint8_t P, V, PLEN, PEBIT;
+    uint8_t* pdata = (uint8_t*)in_data;
+    const uint8_t* pay_ptr;
+    tsk_size_t pay_size;
+    int hdr_size = H263P_HEADER_SIZE;
+    tsk_size_t xsize, retsize = 0;
+    int got_picture_ptr;
+    int ret;
+
+    tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+    const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+
+    if(!self || !in_data || !in_size || ((int)in_size <= hdr_size) || !out_data || !h263->decoder.context){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+/*
+    rfc4629 - 5.1. General H.263+ Payload Header
+
+    0 1
+    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    | RR |P|V| PLEN |PEBIT|
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+    P = (pdata[0] & 0x04)>>2;
+    V = (pdata[0] & 0x02)>>1;
+    PLEN = (((pdata[0] & 0x01)<<5) | pdata[1]>>3);
+    PEBIT = pdata[1] & 0x07;
+
+    if(V){
+        /*
+        Indicates the presence of an 8-bit field containing information
+        for Video Redundancy Coding (VRC), which follows immediately after
+        the initial 16 bits of the payload header, if present. For syntax
+        and semantics of that 8-bit VRC field, see Section 5.2.
+        */
+    }
+    if(PLEN){
+        /*
+        Length, in bytes, of the extra picture header. If no extra
+        picture header is attached, PLEN is 0. If PLEN>0, the extra
+        picture header is attached immediately following the rest of the
+        payload header. Note that the length reflects the omission of the
+        first two bytes of the picture start code (PSC). See Section 6.1.
+        */
+        hdr_size += PLEN;
+        if(PEBIT){
+            /*
+            Indicates the number of bits that shall be ignored in the last
+            byte of the picture header. If PLEN is not zero, the ignored bits
+            shall be the least significant bits of the byte. If PLEN is zero,
+            then PEBIT shall also be zero.
+            */
+            TSK_DEBUG_WARN("PEBIT ignored");
+        }
+    }
+    if(P){ /* MUST be done after PLEN and PEBIT */
+        /*
+        Indicates the picture start or a picture segment (GOB/Slice) start
+        or a video sequence end (EOS or EOSBS). Two bytes of zero bits
+        then have to be prefixed to the payload of such a packet to
+        compose a complete picture/GOB/slice/EOS/EOSBS start code. This
+        bit allows the omission of the two first bytes of the start codes,
+        thus improving the compression ratio.
+        */
+        // rewind 2 bytes and rewrite them as the elided 0x00 0x00 prefix
+        hdr_size -= 2;
+        pdata[hdr_size] = 0x00, pdata[hdr_size + 1] = 0x00;
+    }
+
+    pay_ptr = (pdata + hdr_size);
+    pay_size = (in_size - hdr_size);
+    xsize = avpicture_get_size(h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height);
+
+    /* Packet lost? */
+    if(h263->decoder.last_seq != (rtp_hdr->seq_num - 1) && h263->decoder.last_seq){
+        if(h263->decoder.last_seq == rtp_hdr->seq_num){
+            // Could happen on some stupid emulators
+            //TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+            return 0;
+        }
+        TSK_DEBUG_INFO("[H.263+] Packet loss, seq_num=%d", rtp_hdr->seq_num);
+    }
+    h263->decoder.last_seq = rtp_hdr->seq_num;
+
+    if((int)(h263->decoder.accumulator_pos + pay_size) <= xsize){
+        /* PEBIT is ignored */
+        memcpy(&((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos], pay_ptr, pay_size);
+        h263->decoder.accumulator_pos += pay_size;
+    }
+    else{
+        TSK_DEBUG_WARN("Buffer overflow");
+        h263->decoder.accumulator_pos = 0;
+        return 0;
+    }
+
+    if(rtp_hdr->marker){
+        AVPacket packet;
+        /* allocate destination buffer */
+        if(*out_max_size < xsize){
+            if(!(*out_data = tsk_realloc(*out_data, xsize))){
+                TSK_DEBUG_ERROR("Failed to allocate new buffer");
+                *out_max_size = 0;
+                h263->decoder.accumulator_pos = 0;
+                return 0;
+            }
+            *out_max_size = xsize;
+        }
+
+        /* decode the picture */
+        av_init_packet(&packet);
+        packet.size = (int)h263->decoder.accumulator_pos;
+        packet.data = h263->decoder.accumulator;
+        ret = avcodec_decode_video2(h263->decoder.context, h263->decoder.picture, &got_picture_ptr, &packet);
+
+        if(ret <0 || !got_picture_ptr){
+            TSK_DEBUG_WARN("Failed to decode the buffer");
+        }
+        else{
+            retsize = xsize;
+            TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width;
+            TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height;
+            /* copy picture into a linear buffer */
+            avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, (int)h263->decoder.context->width, (int)h263->decoder.context->height,
+                *out_data, (int)retsize);
+        }
+        /* in all cases: reset accumulator */
+        h263->decoder.accumulator_pos = 0;
+    }
+
+    return retsize;
+}
+
+/* constructor: init H.263-1998 (H263P encoder, H263 decoder) */
+static tsk_object_t* tdav_codec_h263p_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_codec_h263p_t *h263p = self;
+    if(h263p){
+        /* init base: called by tmedia_codec_create() */
+        /* init self */
+        tdav_codec_h263_init(TDAV_CODEC_H263(self), tdav_codec_h263_1998, CODEC_ID_H263P, CODEC_ID_H263);
+    }
+    return self;
+}
+/* destructor: deinit base video codec, then the shared H.263 state */
+static tsk_object_t* tdav_codec_h263p_dtor(tsk_object_t * self)
+{
+    tdav_codec_h263p_t *h263p = self;
+    if(h263p){
+        /* deinit base */
+        tmedia_codec_video_deinit(h263p);
+        /* deinit self */
+        tdav_codec_h263_deinit(TDAV_CODEC_H263(self));
+    }
+
+    return self;
+}
+/* object definition (size + life-cycle hooks for the tsk object system) */
+static const tsk_object_def_t tdav_codec_h263p_def_s =
+{
+    sizeof(tdav_codec_h263p_t),
+    tdav_codec_h263p_ctor,
+    tdav_codec_h263p_dtor,
+    tmedia_codec_cmp,
+};
+/* plugin definition: registers H.263-1998; hooks are the shared H.263
+ * implementations (see the tdav_codec_h263p_* macro aliases at the top
+ * of the file) except for the RFC 4629 decode */
+static const tmedia_codec_plugin_def_t tdav_codec_h263p_plugin_def_s =
+{
+    &tdav_codec_h263p_def_s,
+
+    tmedia_video,
+    tmedia_codec_id_h263p,
+    "H263-1998",
+    "H263-1998 codec (FFmpeg)",
+    TMEDIA_CODEC_FORMAT_H263_1998,
+    tsk_true,    // dynamic payload type
+    90000, // rate
+
+    /* audio */
+    { 0 },
+
+    /* video (width, height, fps) */
+    {176, 144, 0},// fps is @deprecated
+
+    tdav_codec_h263p_set,
+    tdav_codec_h263p_open,
+    tdav_codec_h263p_close,
+    tdav_codec_h263p_encode,
+    tdav_codec_h263p_decode,
+    tdav_codec_h263p_sdp_att_match,
+    tdav_codec_h263p_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h263p_plugin_def_t = &tdav_codec_h263p_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ H.263-2000 Plugin interface ================= */
+
+//
+// H.263-2000 object definition
+//
+
+/* constructor: init H.263-2000 (same ffmpeg ids as H.263-1998) */
+static tsk_object_t* tdav_codec_h263pp_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_codec_h263pp_t *h263pp = self;
+    if(h263pp){
+        /* init base: called by tmedia_codec_create() */
+        /* init self */
+        tdav_codec_h263_init(TDAV_CODEC_H263(self), tdav_codec_h263_2000, CODEC_ID_H263P, CODEC_ID_H263);
+    }
+    return self;
+}
+/* destructor: deinit base video codec, then the shared H.263 state */
+static tsk_object_t* tdav_codec_h263pp_dtor(tsk_object_t * self)
+{
+    tdav_codec_h263pp_t *h263pp = self;
+    if(h263pp){
+        /* deinit base */
+        tmedia_codec_video_deinit(h263pp);
+        /* deinit self */
+        tdav_codec_h263_deinit(TDAV_CODEC_H263(self));
+    }
+
+    return self;
+}
+/* object definition (size + life-cycle hooks for the tsk object system) */
+static const tsk_object_def_t tdav_codec_h263pp_def_s =
+{
+    sizeof(tdav_codec_h263pp_t),
+    tdav_codec_h263pp_ctor,
+    tdav_codec_h263pp_dtor,
+    tmedia_codec_cmp,
+};
+/* plugin definition: registers H.263-2000; all hooks are macro aliases
+ * of the shared H.263 implementations (see top of file) */
+static const tmedia_codec_plugin_def_t tdav_codec_h263pp_plugin_def_s =
+{
+    &tdav_codec_h263pp_def_s,
+
+    tmedia_video,
+    tmedia_codec_id_h263pp,
+    "H263-2000",
+    "H263-2000 codec (FFmpeg)",
+    TMEDIA_CODEC_FORMAT_H263_2000,
+    tsk_true,    // dynamic payload type
+    90000, // rate
+
+    /* audio */
+    { 0 },
+
+    /* video (width, height, fps)*/
+    {176, 144, 0},// fps is @deprecated
+
+    tdav_codec_h263pp_set,
+    tdav_codec_h263pp_open,
+    tdav_codec_h263pp_close,
+    tdav_codec_h263pp_encode,
+    tdav_codec_h263pp_decode,
+    tdav_codec_h263pp_sdp_att_match,
+    tdav_codec_h263pp_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h263pp_plugin_def_t = &tdav_codec_h263pp_plugin_def_s;
+
+
+
+/* Allocates and opens the ffmpeg encoder context.
+ * Configures geometry/fps from the negotiated out.* values, caps the
+ * bitrate by the configured upload bandwidth, enables the per-profile
+ * H.263+ annex flags, and allocates the wrapping AVFrame plus an output
+ * buffer sized to one raw YUV420 frame.
+ * Returns 0 on success, negative on failure.
+ * NOTE(review): error paths after a successful avcodec_alloc_context()
+ * do not free the context; close_encoder() is presumably relied upon. */
+int tdav_codec_h263_open_encoder(tdav_codec_h263_t* self)
+{
+    int ret;
+    int size;
+    int32_t max_bw_kpbs;
+    if(self->encoder.context){
+        TSK_DEBUG_ERROR("Encoder already opened");
+        return -1;
+    }
+
+    self->encoder.context = avcodec_alloc_context();
+    avcodec_get_context_defaults(self->encoder.context);
+
+    self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+    self->encoder.context->time_base.num = 1;
+    self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
+    self->encoder.context->width = TMEDIA_CODEC_VIDEO(self)->out.width;
+    self->encoder.context->height = TMEDIA_CODEC_VIDEO(self)->out.height;
+
+    self->encoder.context->qmin = 10;
+    self->encoder.context->qmax = 51;
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+    self->encoder.context->mb_qmin = self->encoder.context->qmin;
+    self->encoder.context->mb_qmax = self->encoder.context->qmax;
+#endif
+    self->encoder.context->mb_decision = FF_MB_DECISION_RD;
+    // cap the theoretical bandwidth for this geometry/fps by the user limit
+    max_bw_kpbs = TSK_CLAMP(
+        0,
+        tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
+        self->encoder.max_bw_kpbs
+    );
+    self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps
+    //self->encoder.context->rc_lookahead = 0;
+    self->encoder.context->rtp_payload_size = RTP_PAYLOAD_SIZE;
+    self->encoder.context->opaque = tsk_null;
+    // gop_size is in frames: fps x seconds-per-GOP
+    self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H263_GOP_SIZE_IN_SECONDS);
+    self->encoder.context->flags |= CODEC_FLAG_QSCALE;
+    self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
+    self->encoder.context->max_b_frames = 0;
+
+    // Picture (YUV 420)
+    if(!(self->encoder.picture = avcodec_alloc_frame())){
+        TSK_DEBUG_ERROR("Failed to create encoder picture");
+        return -2;
+    }
+    avcodec_get_frame_defaults(self->encoder.picture);
+    //if((ret = avpicture_alloc((AVPicture*)self->encoder.picture, PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height))){
+    //    TSK_DEBUG_ERROR("Failed to allocate encoder picture");
+    //    return ret;
+    //}
+
+    // output buffer large enough for one uncompressed frame (worst case)
+    size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
+    if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+        TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
+        return -2;
+    }
+
+
+    // RTP Callback
+    switch(self->type){
+        case tdav_codec_h263_1996:
+        { // H263 - 1996: baseline, no optional annexes
+            break;
+        }
+        case tdav_codec_h263_1998:
+        { // H263 - 1998
+#if defined(CODEC_FLAG_H263P_UMV)
+            self->encoder.context->flags |= CODEC_FLAG_H263P_UMV;       // Annex D+
+#endif
+            self->encoder.context->flags |= CODEC_FLAG_AC_PRED;         // Annex I and T
+            self->encoder.context->flags |= CODEC_FLAG_LOOP_FILTER;     // Annex J
+#if defined(CODEC_FLAG_H263P_SLICE_STRUCT)
+            self->encoder.context->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;  // Annex K
+#endif
+#if defined(CODEC_FLAG_H263P_AIV)
+            self->encoder.context->flags |= CODEC_FLAG_H263P_AIV;       // Annex S
+#endif
+            break;
+        }
+        case tdav_codec_h263_2000:
+        { // H263 - 2000: same annex set as 1998
+#if defined(CODEC_FLAG_H263P_UMV)
+            self->encoder.context->flags |= CODEC_FLAG_H263P_UMV;       // Annex D+
+#endif
+            self->encoder.context->flags |= CODEC_FLAG_AC_PRED;         // Annex I and T
+            self->encoder.context->flags |= CODEC_FLAG_LOOP_FILTER;     // Annex J
+#if defined(CODEC_FLAG_H263P_SLICE_STRUCT)
+            self->encoder.context->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;  // Annex K
+#endif
+#if defined(CODEC_FLAG_H263P_AIV)
+            self->encoder.context->flags |= CODEC_FLAG_H263P_AIV;       // Annex S
+#endif
+            break;
+        }
+    }
+    // Open encoder
+    if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
+        TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
+        return ret;
+    }
+
+    TSK_DEBUG_INFO("[H.263] bitrate=%d bps", self->encoder.context->bit_rate);
+
+    return ret;
+}
+
+/* Allocates and opens the ffmpeg decoder context.
+ * Geometry comes from the negotiated in.* values; the reassembly
+ * accumulator is sized to one raw YUV420 frame plus ffmpeg's required
+ * input padding. Returns 0 on success, negative on failure.
+ * NOTE(review): as with open_encoder(), error paths rely on
+ * close_decoder() to free partially allocated resources. */
+int tdav_codec_h263_open_decoder(tdav_codec_h263_t* self)
+{
+    int ret, size;
+
+    if(self->decoder.context){
+        TSK_DEBUG_ERROR("Decoder already opened");
+        return -1;
+    }
+
+    self->decoder.context = avcodec_alloc_context();
+    avcodec_get_context_defaults(self->decoder.context);
+
+    self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+    self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->in.width;
+    self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->in.height;
+
+    // Picture (YUV 420)
+    if(!(self->decoder.picture = avcodec_alloc_frame())){
+        TSK_DEBUG_ERROR("Failed to create decoder picture");
+        return -2;
+    }
+    avcodec_get_frame_defaults(self->decoder.picture);
+
+    // FF_INPUT_BUFFER_PADDING_SIZE: ffmpeg reads past the end of its input
+    size = avpicture_get_size(PIX_FMT_YUV420P, self->decoder.context->width, self->decoder.context->height);
+    if(!(self->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
+        TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
+        return -2;
+    }
+
+    // Open decoder
+    if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
+        TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
+        return ret;
+    }
+
+    self->decoder.last_seq = 0;
+
+    return ret;
+}
+
+/* Releases all encoder-side ffmpeg resources; idempotent (each member
+ * is nulled after being freed). Always returns 0. */
+int tdav_codec_h263_close_encoder(tdav_codec_h263_t* self)
+{
+    if(self->encoder.context){
+        avcodec_close(self->encoder.context);
+        av_free(self->encoder.context);
+        self->encoder.context = tsk_null;
+    }
+    if(self->encoder.picture){
+        av_free(self->encoder.picture);
+        self->encoder.picture = tsk_null;
+    }
+    if(self->encoder.buffer){
+        TSK_FREE(self->encoder.buffer);
+    }
+    return 0;
+}
+
+/* Releases all decoder-side ffmpeg resources and resets the fragment
+ * accumulator; idempotent. Always returns 0. */
+int tdav_codec_h263_close_decoder(tdav_codec_h263_t* self)
+{
+    if(self->decoder.context){
+        avcodec_close(self->decoder.context);
+        av_free(self->decoder.context);
+        self->decoder.context = tsk_null;
+    }
+    if(self->decoder.picture){
+        av_free(self->decoder.picture);
+        self->decoder.picture = tsk_null;
+    }
+    if(self->decoder.accumulator){
+        TSK_FREE(self->decoder.accumulator);
+        self->decoder.accumulator_pos = 0;
+    }
+    return 0;
+}
+
+/* ============ Callbacks ================= */
+
+/* Splits an encoded H.263 bitstream on PSC/GBSC start-code boundaries
+ * (0x00 0x00 followed by a byte >= 0x80) and forwards each chunk to the
+ * RFC 2190 (H.263-1996) or RFC 4629 (H.263+/++) packetizer. The final
+ * chunk always carries the RTP marker. */
+static void tdav_codec_h263_encap(const tdav_codec_h263_t* h263, const uint8_t* pdata, tsk_size_t size)
+{
+    tsk_bool_t frag = tsk_false;
+    register uint32_t i, last_index = 0;
+
+    if(size < RTP_PAYLOAD_SIZE){
+        goto last; // small frame: single packet
+    }
+
+    for(i = 4; i<(size - 4); i++){
+        if(pdata[i] == 0x00 && pdata[i+1] == 0x00 && pdata[i+2]>=0x80){ /* PSC or (GBSC) found */
+            // NOTE(review): "|| tsk_true" defeats the minimum-size test,
+            // so every start code yields a packet (see original FIXME).
+            if((i - last_index) >= RTP_PAYLOAD_SIZE || tsk_true/* FIXME */){
+                switch(h263->type){
+                    case tdav_codec_h263_1996:
+                        // NOTE(review): (last_index == size) is always false
+                        // inside the loop, so non-final chunks never carry
+                        // the marker — presumably intended; confirm.
+                        tdav_codec_h263_rtp_callback((tdav_codec_h263_t*) h263, pdata+last_index,
+                            (i - last_index), (last_index == size));
+                        break;
+                    default:
+                        tdav_codec_h263p_rtp_callback((tdav_codec_h263_t*) h263, pdata + last_index,
+                            (i - last_index), frag, (last_index == size));
+                        frag = tsk_true;  // subsequent chunks are follow-up fragments
+                        break;
+                }
+                last_index = i;
+            }
+        }
+    }
+last:
+    if(last_index < size){
+        switch(h263->type){
+            case tdav_codec_h263_1996:
+                tdav_codec_h263_rtp_callback((tdav_codec_h263_t*) h263, pdata + last_index,
+                    (size - last_index), tsk_true);
+                break;
+            default:
+                tdav_codec_h263p_rtp_callback((tdav_codec_h263_t*) h263, pdata + last_index,
+                    (size - last_index), frag, tsk_true);
+                break;
+        }
+    }
+}
+
+
+// RFC 2190 (H.263-1996) payloader: prepends a 4-byte Mode A header to one
+// bitstream chunk and pushes the result to the registered network callback.
+// 'marker' becomes the RTP marker bit (end of frame).
+static void tdav_codec_h263_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t marker)
+{
+	uint8_t* pdata = (uint8_t*)data;
+
+	// Grow the scratch RTP buffer if the chunk + Mode A header does not fit.
+	if(self->rtp.size < (size + H263_HEADER_MODE_A_SIZE)){
+		if(!(self->rtp.ptr = tsk_realloc(self->rtp.ptr, (size + H263_HEADER_MODE_A_SIZE)))){
+			TSK_DEBUG_ERROR("Failed to allocate new buffer");
+			return;
+		}
+		self->rtp.size = (size + H263_HEADER_MODE_A_SIZE);
+	}
+	memcpy((self->rtp.ptr + H263_HEADER_MODE_A_SIZE), data, size);
+
+	/* http://eu.sabotage.org/www/ITU/H/H0263e.pdf section 5.1
+	 * 5.1.1 Picture Start Code (PSC) (22 bits) - PSC is a word of 22 bits. Its value is 0000 0000 0000 0000 1 00000.
+
+	 *
+	 * 5.1.1 Picture Start Code (PSC) (22 bits)
+	 * 5.1.2 Temporal Reference (TR) (8 bits)
+	 * 5.1.3 Type Information (PTYPE) (Variable Length)
+	 *	– Bit 1: Always "1", in order to avoid start code emulation.
+	 *	– Bit 2: Always "0", for distinction with Recommendation H.261.
+
+	 *	– Bit 3: Split screen indicator, "0" off, "1" on.
+	 *	– Bit 4: Document camera indicator, "0" off, "1" on.
+	 *	– Bit 5: Full Picture Freeze Release, "0" off, "1" on.
+	 *	– Bits 6-8: Source Format, "000" forbidden, "001" sub-QCIF, "010" QCIF, "011" CIF,
+	 		"100" 4CIF, "101" 16CIF, "110" reserved, "111" extended PTYPE.
+	 	If bits 6-8 are not equal to "111", which indicates an extended PTYPE (PLUSPTYPE), the following
+	 	five bits are also present in PTYPE:
+	 	– Bit 9: Picture Coding Type, "0" INTRA (I-picture), "1" INTER (P-picture).
+	 	– Bit 10: Optional Unrestricted Motion Vector mode (see Annex D), "0" off, "1" on.
+	 	– Bit 11: Optional Syntax-based Arithmetic Coding mode (see Annex E), "0" off, "1" on.
+	 	– Bit 12: Optional Advanced Prediction mode (see Annex F), "0" off, "1" on.
+	 	– Bit 13: Optional PB-frames mode (see Annex G), "0" normal I- or P-picture, "1" PB-frame.
+	*/
+	if(pdata[0] == 0x00 && pdata[1] == 0x00 && (pdata[2] & 0xfc)==0x80){ /* PSC */
+		/* RFC 2190 -5.1 Mode A
+			0                   1                   2                   3
+			0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+			+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+			|F|P|SBIT |EBIT | SRC |I|U|S|A|R      |DBQ| TRB |    TR         |
+			+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+		SRC : 3 bits
+		Source format, bit 6,7 and 8 in PTYPE defined by H.263 [4], specifies
+		the resolution of the current picture.
+
+		I:  1 bit.
+		Picture coding type, bit 9 in PTYPE defined by H.263[4], "0" is
+		intra-coded, "1" is inter-coded.
+		*/
+
+		// PDATA[4] ======> Bits 3-10 of PTYPE
+		uint32_t rtp_hdr = 0;
+		uint8_t format, pict_type;
+
+		// Source Format = 4,5,6
+		format = (pdata[4] & 0x3C)>>2;
+		// Picture Coding Type = 7
+		pict_type = (pdata[4] & 0x02)>>1;
+		// RTP mode A header: pack SRC and I into the second header byte.
+		// NOTE(review): header is built in host byte order; the htonl call
+		// below is commented out — confirm on big-endian targets.
+		((uint8_t*)&rtp_hdr)[1] = (format <<5) | (pict_type << 4);
+		//rtp_hdr = tnet_htonl(rtp_hdr);
+		memcpy(self->rtp.ptr, &rtp_hdr, sizeof(rtp_hdr));
+	}
+
+	// Send data over the network
+	if(TMEDIA_CODEC_VIDEO(self)->out.callback){
+		TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = self->rtp.ptr;
+		TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = (size + H263_HEADER_MODE_A_SIZE);
+		// duration = one frame interval expressed in RTP clock-rate units
+		TMEDIA_CODEC_VIDEO(self)->out.result.duration = (uint32_t)((1./(double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate);
+		TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = marker;
+		TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+	}
+}
+
+// RFC 4629 (H.263+/H.263++) payloader: emits one bitstream chunk to the
+// registered network callback. The current (FIXME-marked) implementation
+// always overwrites the first payload byte with P=1 and sends the chunk
+// in place, without the header-building logic kept below in comments.
+static void tdav_codec_h263p_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t frag, tsk_bool_t marker)
+{
+	uint8_t* pdata = (uint8_t*)data;
+	//uint8_t rtp_hdr[2] = {0x00, 0x00};
+	//tsk_bool_t eos = tsk_false;
+
+	const void* _ptr = tsk_null;
+	tsk_size_t _size = 0;
+	//static tsk_bool_t frag = tsk_false;
+	//tsk_bool_t found_gob = tsk_false;
+
+	/* RFC 4629 - 5.1. General H.263+ Payload Header
+		The H.263+ payload header is structured as follows:
+		0                   1
+		0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|   RR    |P|V|   PLEN    |PEBIT|
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	/* http://eu.sabotage.org/www/ITU/H/H0263e.pdf
+	 *
+	 * 5.1.1 Picture Start Code (PSC) (22 bits)
+	 * 	->PSC is a word of 22 bits. Its value is 0000 0000 0000 0000 1 00000.
+	 * 5.1.27 End Of Sequence (EOS) (22 bits)
+	 * 	->A codeword of 22 bits. Its value is 0000 0000 0000 0000 1 11111
+	 * 5.2.2 Group of Block Start Code (GBSC) (17 bits)
+	 * 	->A word of 17 bits. Its value is 0000 0000 0000 0000 1
+	 * C.4.1 End Of Sub-Bitstream code (EOSBS) (23 bits)
+	 * 	->The EOSBS code is a codeword of 23 bits. Its value is 0000 0000 0000 0000 1 11110 0
+	 *
+	 *
+	 * 5.2.3 Group Number (GN) (5 bits)
+	 * 	-> last 5 bits
+	*/
+	//if(pdata[0] == 0x00 && pdata[1] == 0x00 && pdata[2] >= 0x80){ /* PSC or EOS or GBSC */
+	//	uint8_t GN = ((pdata[2]>>2) & 0x1F);
+	//	found_gob = tsk_true;
+	//	//TSK_DEBUG_INFO("GN=%u", pdata[2]);
+	//
+	//	/*	RFC 4629 - 6.1.1. Packets that begin with a Picture Start Code
+	//		A packet that begins at the location of a Picture, GOB, slice, EOS,
+	//		or EOSBS start code shall omit the first two (all zero) bytes from
+	//		the H.263+ bitstream and signify their presence by setting P=1 in the
+	//		payload header.
+	//	*/
+
+	//	if(GN == 0x00){ /* PSC 00000 */
+	//		/* Use the two first bytes as RTP header */
+	//		//pdata[0] |= 0x04; // P=1
+
+	//		/*
+	//		 0                   1                   2                   3
+	//		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		|   RR    |1|V|0|0|0|0|0|0|0|0|0| bitstream data without the    :
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		:  first two 0 bytes of the PSC
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		*/
+
+	//		//TSK_DEBUG_INFO("H263 - PSC");
+	//	}
+	//	else if(GN == 0x1F){ /* EOS 11111 */
+	//		/* Use the two first bytes as RTP header */
+	//		//pdata[0] |= 0x04; // P=1
+	//		eos = tsk_true;
+	//		/* RFC 4629 - 6.1.3. Packets that begin with an EOS or EOSBS Code
+	//		 0                   1                   2
+	//		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		|   RR    |1|V|0|0|0|0|0|0|0|0|0|1|1|1|1|1|1|0|0|
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		*/
+	//		//TSK_DEBUG_INFO("H263 - EOS");
+	//	}
+	//	else /*if((GN >> 4) == 0x01)*/{ /* GBSC  10000 */
+	//		/* Use the two first bytes as RTP header */
+	//		//pdata[0] |= 0x04; // P=1
+	//
+	//		/* RFC 4629 - 6.1.2. Packets that begin with GBSC or SSC
+	//		 0                   1                   2                   3
+	//		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		|   RR    |1|V|0 0 1 0 0 1|PEBIT|1 0 0 0 0 0| picture header    :
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		:  starting with TR, PTYPE ...                                  |
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		| ...                               | bitstream                 :
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		:  data starting with GBSC/SSC without its first two 0 bytes
+	//		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	//		*/
+	//		//TSK_DEBUG_INFO("H263 - GBSC");
+	//		found_gob = tsk_false;
+	//	}
+	//	//else if(EOSBS) -> Not Supported
+	//}
+	//else{
+	//	/* 6.2. Encapsulating Follow-on Packet (P=0) */
+	//	int i = 0;
+	//	i++;
+	//}
+
+	//if(/*eos*/!found_gob && frag){
+	//	if(self->rtp.size < (size + 2/* H263+ Header size */)){
+	//		if(!(self->rtp.ptr = tsk_realloc(self->rtp.ptr, (size + 2)))){
+	//			TSK_DEBUG_ERROR("Failed to allocate new buffer");
+	//			return;
+	//		}
+	//		self->rtp.size = (size + 2);
+	//	}
+	//	/* RFC 4629 - 6. Packetization Schemes */
+	//	//rtp_hdr[0] |= 0x00;
+	//	//memcpy(self->rtp.ptr, rtp_hdr/* zeros-> is it corretc? */, 2);
+	//	//memcpy((self->rtp.ptr + 2), pdata, size);
+	//	//_ptr = self->rtp.ptr;
+	//	//_size = (size + 2);
+
+	//	pdata[0] |= pdata[2] > 0x80 ? 0x04 : 0x04;
+	//	_ptr = pdata;
+	//	_size = size;
+	//}
+	//else{
+	//	pdata[0] |= pdata[2] > 0x80 ? 0x04 : 0x04;
+	//	_ptr = pdata;
+	//	_size = size;
+	//}
+
+// FIXME
+	// NOTE(review): both ternary arms are 0x04, so P=1 is set unconditionally
+	// regardless of pdata[2]; the comparison is dead code (author-marked FIXME).
+	pdata[0] |= pdata[2] > 0x80 ? 0x04 : 0x04;
+	_ptr = pdata;
+	_size = size;
+
+
+	// Send data over the network
+	if(TMEDIA_CODEC_VIDEO(self)->out.callback){
+		TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = _ptr;
+		TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = _size;
+		// duration = one frame interval expressed in RTP clock-rate units
+		TMEDIA_CODEC_VIDEO(self)->out.result.duration = (uint32_t)((1./(double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate);
+		TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = marker;
+		TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+	}
+}
+
+// True when the linked FFmpeg build provides both an H.263 encoder and decoder.
+tsk_bool_t tdav_codec_ffmpeg_h263_is_supported()
+{
+	return (avcodec_find_encoder(CODEC_ID_H263) && avcodec_find_decoder(CODEC_ID_H263));
+}
+
+// True when FFmpeg provides an H.263+ encoder and an H.263 decoder.
+// NOTE(review): the decoder lookup uses CODEC_ID_H263 (not H263P) —
+// presumably because FFmpeg's h263 decoder also handles H.263+; confirm.
+tsk_bool_t tdav_codec_ffmpeg_h263p_is_supported()
+{
+	return (avcodec_find_encoder(CODEC_ID_H263P) && avcodec_find_decoder(CODEC_ID_H263));
+}
+
+// H.263++ support mirrors H.263+ support (same FFmpeg codec ids).
+tsk_bool_t tdav_codec_ffmpeg_h263pp_is_supported()
+{
+	return tdav_codec_ffmpeg_h263p_is_supported();
+}
+
+
+#endif /* HAVE_FFMPEG */
diff --git a/tinyDAV/src/codecs/h264/tdav_codec_h264.c b/tinyDAV/src/codecs/h264/tdav_codec_h264.c
new file mode 100644
index 0000000..0ec3760
--- /dev/null
+++ b/tinyDAV/src/codecs/h264/tdav_codec_h264.c
@@ -0,0 +1,993 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h264.c
+ * @brief H.264 codec plugin using FFmpeg for decoding and x264 for encoding
+ * RTP payloader/depayloader follows RFC 3984
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/h264/tdav_codec_h264.h"
+
+#if HAVE_FFMPEG || HAVE_H264_PASSTHROUGH
+
+#include "tinydav/codecs/h264/tdav_codec_h264_rtp.h"
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#if HAVE_FFMPEG
+# include <libavcodec/avcodec.h>
+# if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
+# include <libavutil/opt.h>
+# endif
+#endif
+
+// H.264 codec instance: common H.264 base plus independent encoder
+// (x264 via FFmpeg) and decoder (FFmpeg) states. Either side can run in
+// "passthrough" mode where raw NAL units are forwarded without transcoding.
+typedef struct tdav_codec_h264_s
+{
+	TDAV_DECLARE_CODEC_H264_COMMON;
+
+	// Encoder
+	struct{
+#if HAVE_FFMPEG
+		AVCodec* codec;
+		AVCodecContext* context;
+		AVFrame* picture;       // reusable wrapper around the input YUV420 buffer
+#endif
+		void* buffer;           // output bitstream buffer
+		int64_t frame_count;    // frames encoded so far (first frame forces an IDR)
+		tsk_bool_t force_idr;   // set by "action"/decoder feedback to request an IDR
+		int32_t quality; // [1-31]
+		int rotation;           // 90/270 swap output width and height
+		int32_t max_bw_kpbs;    // upper bound on encoder bitrate (kbps)
+		tsk_bool_t passthrough; // whether to bypass encoding
+	} encoder;
+
+	// decoder
+	struct{
+#if HAVE_FFMPEG
+		AVCodec* codec;
+		AVCodecContext* context;
+		AVFrame* picture;       // last decoded picture
+#endif
+		void* accumulator;          // NAL-unit reassembly buffer (RFC 3984 depayloading)
+		tsk_size_t accumulator_pos; // current write offset into the accumulator
+		tsk_size_t accumulator_size;// allocated accumulator capacity
+		uint16_t last_seq;          // last RTP sequence number (packet-loss detection)
+		tsk_bool_t passthrough; // whether to bypass decoding
+	} decoder;
+}
+tdav_codec_h264_t;
+
+#if !defined(TDAV_H264_GOP_SIZE_IN_SECONDS)
+# define TDAV_H264_GOP_SIZE_IN_SECONDS 25
+#endif
+
+#define kResetRotationTrue tsk_true
+#define kResetRotationFalse tsk_false
+
+static int tdav_codec_h264_init(tdav_codec_h264_t* self, profile_idc_t profile);
+static int tdav_codec_h264_deinit(tdav_codec_h264_t* self);
+static int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self);
+static int tdav_codec_h264_close_encoder(tdav_codec_h264_t* self, tsk_bool_t reset_rotation);
+static int tdav_codec_h264_open_decoder(tdav_codec_h264_t* self);
+static int tdav_codec_h264_close_decoder(tdav_codec_h264_t* self);
+
+/* ============ H.264 Base/Main Profile X.X Plugin interface functions ================= */
+
+// Runtime parameter setter (tmedia_codec set() hook). Supported int32 keys:
+// "action" (force IDR / bandwidth up-down via quantizer), "bw_kbps"
+// (cap encoder bitrate), "bypass-encoding"/"bypass-decoding" (passthrough),
+// and "rotation" (reopens the encoder with swapped dimensions).
+// Returns 0 when the key was handled, -1 otherwise.
+static int tdav_codec_h264_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+	if (param->value_type == tmedia_pvt_int32) {
+		if(tsk_striequals(param->key, "action")){
+			tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+			switch(action){
+				case tmedia_codec_action_encode_idr:
+					{
+						h264->encoder.force_idr = tsk_true;
+						break;
+					}
+				case tmedia_codec_action_bw_down:
+					{
+						// lower bandwidth => higher quantizer (worse quality)
+						h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality + 1), 31);
+#if HAVE_FFMPEG
+						if (h264->encoder.context) {
+							h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
+						}
+#endif
+						break;
+					}
+				case tmedia_codec_action_bw_up:
+					{
+						h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality - 1), 31);
+#if HAVE_FFMPEG
+						if (h264->encoder.context) {
+							h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
+						}
+#endif
+						break;
+					}
+			}
+			return 0;
+		}
+		else if(tsk_striequals(param->key, "bw_kbps")){
+			int32_t max_bw_userdefine = self->bandwidth_max_upload;
+			int32_t max_bw_new = *((int32_t*)param->value);
+			if (max_bw_userdefine > 0) {
+				// do not use more than what the user defined in it's configuration
+				h264->encoder.max_bw_kpbs = TSK_MIN(max_bw_new, max_bw_userdefine);
+			}
+			else {
+				h264->encoder.max_bw_kpbs = max_bw_new;
+			}
+			return 0;
+		}
+		else if(tsk_striequals(param->key, "bypass-encoding")){
+			h264->encoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
+			TSK_DEBUG_INFO("[H.264] bypass-encoding = %d", h264->encoder.passthrough);
+			return 0;
+		}
+		else if(tsk_striequals(param->key, "bypass-decoding")){
+			h264->decoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
+			TSK_DEBUG_INFO("[H.264] bypass-decoding = %d", h264->decoder.passthrough);
+			return 0;
+		}
+		else if(tsk_striequals(param->key, "rotation")){
+			int32_t rotation = *((int32_t*)param->value);
+			if(h264->encoder.rotation != rotation){
+				h264->encoder.rotation = rotation;
+				if (self->opened) {
+					int ret;
+					// full close/reopen cycle: width/height are recomputed from
+					// the new rotation inside open_encoder()
+					if ((ret = tdav_codec_h264_close_encoder(h264, kResetRotationFalse))) {
+						return ret;
+					}
+					if ((ret = tdav_codec_h264_open_encoder(h264))) {
+						return ret;
+					}
+#if 0 // Not working
+					if((ret = avcodec_close(h264->encoder.context))){
+						TSK_DEBUG_ERROR("Failed to close [%s] codec", TMEDIA_CODEC(h264)->plugin->desc);
+						return ret;
+					}
+					h264->encoder.context->width = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(h264)->out.height : TMEDIA_CODEC_VIDEO(h264)->out.width;
+					h264->encoder.context->height = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(h264)->out.width : TMEDIA_CODEC_VIDEO(h264)->out.height;
+					if((ret = avcodec_open(h264->encoder.context, h264->encoder.codec)) < 0){
+						TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(h264)->plugin->desc);
+						return ret;
+					}
+					h264->encoder.force_idr = tsk_true;
+#endif
+				}
+			}
+			return 0;
+		}
+	}
+	return -1;
+}
+
+
+// tmedia_codec open() hook: opens the encoder then the decoder.
+// Returns 0 on success or the first failing sub-open's error code.
+// NOTE(review): if the decoder open fails the already-opened encoder is
+// not closed here — presumably the base class calls close() on failure.
+static int tdav_codec_h264_open(tmedia_codec_t* self)
+{
+	int ret;
+	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+
+	if(!h264){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* the caller (base class) already checked that the codec is not opened */
+
+	// Encoder
+	if((ret = tdav_codec_h264_open_encoder(h264))){
+		return ret;
+	}
+
+	// Decoder
+	if((ret = tdav_codec_h264_open_decoder(h264))){
+		return ret;
+	}
+
+	return 0;
+}
+
+// tmedia_codec close() hook: tears down both encoder and decoder.
+// Rotation state is reset (kResetRotationTrue) so a subsequent open starts
+// from the default orientation. Always returns 0 for a valid instance.
+static int tdav_codec_h264_close(tmedia_codec_t* self)
+{
+	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+
+	if(!h264){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* the caller (base class) already checked that the codec is opened */
+
+	// Encoder
+	tdav_codec_h264_close_encoder(h264, kResetRotationTrue);
+
+	// Decoder
+	tdav_codec_h264_close_decoder(h264);
+
+	return 0;
+}
+
+// tmedia_codec encode() hook. Takes one raw YUV420 frame (or, in passthrough
+// mode, an already-encoded H.264 bitstream), encodes it with x264/FFmpeg and
+// pushes the resulting NAL units through the RFC 3984 payloader
+// (tdav_codec_h264_rtp_encap), which delivers RTP-ready chunks via the
+// out.callback. Always returns 0: output goes through the callback, not
+// through out_data.
+static tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+	int ret = 0;
+
+#if HAVE_FFMPEG
+	int size;
+	tsk_bool_t send_idr, send_hdr;
+#endif
+
+	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+
+	if(!self || !in_data || !in_size){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return 0;
+	}
+
+	if(!self->opened){
+		TSK_DEBUG_ERROR("Codec not opened");
+		return 0;
+	}
+
+	if(h264->encoder.passthrough) {
+		// input is already an H.264 bitstream: payload it as-is
+		tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), (const uint8_t*)in_data, in_size);
+	}
+	else { // !h264->encoder.passthrough
+#if HAVE_FFMPEG		// wrap yuv420 buffer
+		size = avpicture_fill((AVPicture *)h264->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h264->encoder.context->width, h264->encoder.context->height);
+		if (size != in_size){
+			/* guard */
+			TSK_DEBUG_ERROR("Invalid size: %u<>%u", size, in_size);
+			return 0;
+		}
+
+		// send IDR for:
+		//	- the first frame
+		//  - remote peer requested an IDR
+		//	- every second within the first 4seconds
+		send_idr = (
+			h264->encoder.frame_count++ == 0
+			|| h264 ->encoder.force_idr
+			//|| ( (h264->encoder.frame_count < (int)TMEDIA_CODEC_VIDEO(h264)->out.fps * 4) && ((h264->encoder.frame_count % TMEDIA_CODEC_VIDEO(h264)->out.fps)==0) )
+			);
+
+		// send SPS and PPS headers for:
+		//  - IDR frames (not required but it's the easiest way to deal with pkt loss)
+		//  - every 5 seconds after the first 4seconds
+		send_hdr = (
+			send_idr
+			//|| ( (h264->encoder.frame_count % (TMEDIA_CODEC_VIDEO(h264)->out.fps * 5))==0 )
+			);
+		if(send_hdr){
+			// extradata holds the out-of-band SPS/PPS produced at encoder open
+			tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.context->extradata, (tsk_size_t)h264->encoder.context->extradata_size);
+		}
+
+		// Encode data
+	#if LIBAVCODEC_VERSION_MAJOR <= 53
+		h264->encoder.picture->pict_type = send_idr ? FF_I_TYPE : 0;
+	#else
+		h264->encoder.picture->pict_type = send_idr ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
+	#endif
+		h264->encoder.picture->key_frame = send_idr ? 1 : 0;
+		h264->encoder.picture->pts = AV_NOPTS_VALUE;
+		h264->encoder.picture->quality = h264->encoder.context->global_quality;
+		// h264->encoder.picture->pts = h264->encoder.frame_count; MUST NOT
+		ret = avcodec_encode_video(h264->encoder.context, h264->encoder.buffer, size, h264->encoder.picture);
+		if(ret > 0){
+			// ret = number of bitstream bytes produced
+			tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.buffer, (tsk_size_t)ret);
+		}
+		h264 ->encoder.force_idr = tsk_false;
+#endif
+	}// else(!h264->encoder.passthrough)
+
+	return 0;
+}
+
+// tmedia_codec decode() hook (RFC 3984 depayloader + FFmpeg decoder).
+// Accumulates NAL-unit payloads from successive RTP packets (re-inserting
+// start-code prefixes as needed), and when the RTP marker bit signals the
+// end of the access unit either copies the raw bitstream out (passthrough)
+// or decodes it to a YUV buffer written into *out_data.
+// Returns the number of bytes written to *out_data (0 when nothing complete
+// was produced or on error).
+static tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+	const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+
+	const uint8_t* pay_ptr = tsk_null;
+	tsk_size_t pay_size = 0;
+	int ret;
+	tsk_bool_t sps_or_pps, append_scp, end_of_unit;
+	tsk_size_t retsize = 0, size_to_copy = 0;
+	// sanity cap for the accumulator (~3.1MB, sized from 4K YUV)
+	static const tsk_size_t xmax_size = (3840 * 2160 * 3) >> 3; // >>3 instead of >>1 (not an error)
+	static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX);
+#if HAVE_FFMPEG
+	int got_picture_ptr = 0;
+#endif
+
+	if(!h264 || !in_data || !in_size || !out_data
+#if HAVE_FFMPEG
+	   || !h264->decoder.context
+#endif
+	   )
+	{
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return 0;
+	}
+
+	//TSK_DEBUG_INFO("SeqNo=%hu", rtp_hdr->seq_num);
+
+	/* Packet lost? */
+	if((h264->decoder.last_seq + 1) != rtp_hdr->seq_num && h264->decoder.last_seq){
+		TSK_DEBUG_INFO("[H.264] Packet loss, seq_num=%d", (h264->decoder.last_seq + 1));
+	}
+	h264->decoder.last_seq = rtp_hdr->seq_num;
+
+
+	/* 5.3. NAL Unit Octet Usage
+	  +---------------+
+      |0|1|2|3|4|5|6|7|
+      +-+-+-+-+-+-+-+-+
+      |F|NRI|  Type   |
+      +---------------+
+	*/
+	// F=1 means the NAL unit is known to be corrupted: drop the whole unit.
+	if(*((uint8_t*)in_data) & 0x80){
+		TSK_DEBUG_WARN("F=1");
+		/* reset accumulator */
+		h264->decoder.accumulator_pos = 0;
+		return 0;
+	}
+
+	/* get payload */
+	if((ret = tdav_codec_h264_get_pay(in_data, in_size, (const void**)&pay_ptr, &pay_size, &append_scp, &end_of_unit)) || !pay_ptr || !pay_size){
+		TSK_DEBUG_ERROR("Depayloader failed to get H.264 content");
+		return 0;
+	}
+	//append_scp = tsk_true;
+	size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0);
+	// whether it's SPS or PPS (append_scp is false for subsequent FUA chuncks)
+	sps_or_pps = append_scp && pay_ptr && ((pay_ptr[0] & 0x1F) == 7 || (pay_ptr[0] & 0x1F) == 8);
+
+	// start-accumulator
+	if(!h264->decoder.accumulator){
+		if(size_to_copy > xmax_size){
+			TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size);
+			return 0;
+		}
+		if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){
+			TSK_DEBUG_ERROR("Failed to allocated new buffer");
+			return 0;
+		}
+		h264->decoder.accumulator_size = size_to_copy;
+	}
+	if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){
+		TSK_DEBUG_ERROR("BufferOverflow");
+		h264->decoder.accumulator_pos = 0;
+		return 0;
+	}
+	if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){
+		// grow the accumulator to fit the new chunk
+		if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){
+			TSK_DEBUG_ERROR("Failed to reallocated new buffer");
+			h264->decoder.accumulator_pos = 0;
+			h264->decoder.accumulator_size = 0;
+			return 0;
+		}
+		h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy);
+	}
+
+	if(append_scp){
+		// restore the Annex B start-code prefix stripped by RTP packetization
+		memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size);
+		h264->decoder.accumulator_pos += start_code_prefix_size;
+	}
+	memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size);
+	h264->decoder.accumulator_pos += pay_size;
+	// end-accumulator
+
+	if(sps_or_pps){
+		// http://libav-users.943685.n4.nabble.com/Decode-H264-streams-how-to-fill-AVCodecContext-from-SPS-PPS-td2484472.html
+		// SPS and PPS should be bundled with IDR
+		TSK_DEBUG_INFO("Receiving SPS or PPS ...to be tied to an IDR");
+	}
+	else if(rtp_hdr->marker){
+		if(h264->decoder.passthrough){
+			// hand out the raw Annex B bitstream without decoding
+			if(*out_max_size < h264->decoder.accumulator_pos){
+				if((*out_data = tsk_realloc(*out_data, h264->decoder.accumulator_pos))){
+					*out_max_size = h264->decoder.accumulator_pos;
+				}
+				else{
+					*out_max_size = 0;
+					return 0;
+				}
+			}
+			memcpy(*out_data, h264->decoder.accumulator, h264->decoder.accumulator_pos);
+			retsize = h264->decoder.accumulator_pos;
+		}
+		else { // !h264->decoder.passthrough
+#if HAVE_FFMPEG
+			AVPacket packet;
+
+			/* decode the picture */
+			av_init_packet(&packet);
+			packet.dts = packet.pts = AV_NOPTS_VALUE;
+			packet.size = (int)h264->decoder.accumulator_pos;
+			packet.data = h264->decoder.accumulator;
+			ret = avcodec_decode_video2(h264->decoder.context, h264->decoder.picture, &got_picture_ptr, &packet);
+
+			if(ret <0){
+				// decode failed: notify the session so it can request an IDR
+				TSK_DEBUG_INFO("Failed to decode the buffer with error code =%d, size=%u, append=%s", ret, h264->decoder.accumulator_pos, append_scp ? "yes" : "no");
+				if(TMEDIA_CODEC_VIDEO(self)->in.callback){
+					TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+					TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+					TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+				}
+			}
+			else if(got_picture_ptr){
+				tsk_size_t xsize;
+
+				/* IDR ? */
+				if(((pay_ptr[0] & 0x1F) == 0x05) && TMEDIA_CODEC_VIDEO(self)->in.callback){
+					TSK_DEBUG_INFO("Decoded H.264 IDR");
+					TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr;
+					TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+					TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+				}
+				/* fill out */
+				xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height);
+				if(*out_max_size<xsize){
+					if((*out_data = tsk_realloc(*out_data, (xsize + FF_INPUT_BUFFER_PADDING_SIZE)))){
+						*out_max_size = xsize;
+					}
+					else{
+						*out_max_size = 0;
+						return 0;
+					}
+				}
+				retsize = xsize;
+				TMEDIA_CODEC_VIDEO(h264)->in.width = h264->decoder.context->width;
+				TMEDIA_CODEC_VIDEO(h264)->in.height = h264->decoder.context->height;
+				avpicture_layout((AVPicture *)h264->decoder.picture, h264->decoder.context->pix_fmt, (int)h264->decoder.context->width, (int)h264->decoder.context->height,
+						*out_data, (int)retsize);
+			}
+#endif /* HAVE_FFMPEG */
+		} // else(h264->decoder.passthrough)
+
+		h264->decoder.accumulator_pos = 0;
+	} // else if(rtp_hdr->marker)
+
+	return retsize;
+}
+
+// SDP attribute negotiation hook: delegates entirely to the shared H.264
+// common implementation (fmtp/imageattr matching).
+static tsk_bool_t tdav_codec_h264_sdp_att_match(const tmedia_codec_t* self, const char* att_name, const char* att_value)
+{
+	return tdav_codec_h264_common_sdp_att_match((tdav_codec_h264_common_t*)self, att_name, att_value);
+}
+
+// SDP attribute getter hook: returns the common H.264 attribute value and,
+// for "fmtp", appends an "impl=" tag identifying the backing implementation.
+// Caller owns the returned string.
+static char* tdav_codec_h264_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+	char* att = tdav_codec_h264_common_sdp_att_get((const tdav_codec_h264_common_t*)self, att_name);
+	if(att && tsk_striequals(att_name, "fmtp")) {
+		tsk_strcat_2(&att, "; impl=%s",
+#if HAVE_FFMPEG
+			"FFMPEG"
+#elif HAVE_H264_PASSTHROUGH
+			"PASSTHROUGH"
+#endif
+			);
+	}
+	return att;
+}
+
+
+
+
+/* ============ H.264 Base Profile Plugin interface ================= */
+
+/* constructor */
+/* constructor: initializes the instance for the Baseline profile;
+   returns tsk_null on init failure so object creation aborts */
+static tsk_object_t* tdav_codec_h264_base_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+	if(h264){
+		/* init base: called by tmedia_codec_create() */
+		/* init self */
+		if(tdav_codec_h264_init(h264, profile_idc_baseline) != 0){
+			return tsk_null;
+		}
+	}
+	return self;
+}
+/* destructor */
+/* destructor: deinitializes the shared H.264 base then the instance state */
+static tsk_object_t* tdav_codec_h264_base_dtor(tsk_object_t * self)
+{
+	tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+	if(h264){
+		/* deinit base */
+		tdav_codec_h264_common_deinit((tdav_codec_h264_common_t*)self);
+		/* deinit self */
+		tdav_codec_h264_deinit(h264);
+
+	}
+
+	return self;
+}
+/* object definition */
+/* object definition (tsk_object vtable for the Baseline-profile codec) */
+static const tsk_object_def_t tdav_codec_h264_base_def_s =
+{
+	sizeof(tdav_codec_h264_t),
+	tdav_codec_h264_base_ctor,
+	tdav_codec_h264_base_dtor,
+	tmedia_codec_cmp,
+};
+/* plugin definition: registers the Baseline-profile H.264 codec with the
+   media layer (90kHz RTP clock; fps field deprecated) */
+static const tmedia_codec_plugin_def_t tdav_codec_h264_base_plugin_def_s =
+{
+	&tdav_codec_h264_base_def_s,
+
+	tmedia_video,
+	tmedia_codec_id_h264_bp,
+	"H264",
+	"H264 Base Profile (FFmpeg, x264)",
+	TMEDIA_CODEC_FORMAT_H264_BP,
+	tsk_true,
+	90000, // rate
+
+	/* audio */
+	{ 0 },
+
+	/* video (width, height, fps) */
+	{176, 144, 0}, // fps is @deprecated
+
+	tdav_codec_h264_set,
+	tdav_codec_h264_open,
+	tdav_codec_h264_close,
+	tdav_codec_h264_encode,
+	tdav_codec_h264_decode,
+	tdav_codec_h264_sdp_att_match,
+	tdav_codec_h264_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h264_base_plugin_def_t = &tdav_codec_h264_base_plugin_def_s;
+
+/* ============ H.264 Main Profile Plugin interface ================= */
+
+/* constructor */
+/* constructor: initializes the instance for the Main profile;
+   returns tsk_null on init failure so object creation aborts */
+static tsk_object_t* tdav_codec_h264_main_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+	if(h264){
+		/* init base: called by tmedia_codec_create() */
+		/* init self */
+		if(tdav_codec_h264_init(h264, profile_idc_main) != 0){
+			return tsk_null;
+		}
+	}
+	return self;
+}
+/* destructor */
+/* destructor: deinitializes the shared H.264 base then the instance state */
+static tsk_object_t* tdav_codec_h264_main_dtor(tsk_object_t * self)
+{
+	tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+	if(h264){
+		/* deinit base */
+		tdav_codec_h264_common_deinit((tdav_codec_h264_common_t*)self);
+		/* deinit self */
+		tdav_codec_h264_deinit(h264);
+
+	}
+
+	return self;
+}
+/* object definition */
+/* object definition (tsk_object vtable for the Main-profile codec) */
+static const tsk_object_def_t tdav_codec_h264_main_def_s =
+{
+	sizeof(tdav_codec_h264_t),
+	tdav_codec_h264_main_ctor,
+	tdav_codec_h264_main_dtor,
+	tmedia_codec_cmp,
+};
+/* plugin definition: registers the Main-profile H.264 codec with the
+   media layer (90kHz RTP clock; fps field deprecated) */
+static const tmedia_codec_plugin_def_t tdav_codec_h264_main_plugin_def_s =
+{
+	&tdav_codec_h264_main_def_s,
+
+	tmedia_video,
+	tmedia_codec_id_h264_mp,
+	"H264",
+	"H264 Main Profile (FFmpeg, x264)",
+	TMEDIA_CODEC_FORMAT_H264_MP,
+	tsk_true,
+	90000, // rate
+
+	/* audio */
+	{ 0 },
+
+	/* video (width, height, fps)*/
+	{176, 144, 0},// fps is @deprecated
+
+	tdav_codec_h264_set,
+	tdav_codec_h264_open,
+	tdav_codec_h264_close,
+	tdav_codec_h264_encode,
+	tdav_codec_h264_decode,
+	tdav_codec_h264_sdp_att_match,
+	tdav_codec_h264_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h264_main_plugin_def_t = &tdav_codec_h264_main_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ Common To all H264 codecs ================= */
+
+int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self)
+{
+#if HAVE_FFMPEG
+ int ret;
+ tsk_size_t size;
+
+ if(self->encoder.context){
+ TSK_DEBUG_ERROR("Encoder already opened");
+ return -1;
+ }
+
+#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
+ if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){
+ avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec);
+ }
+#else
+ if((self->encoder.context = avcodec_alloc_context())){
+ avcodec_get_context_defaults(self->encoder.context);
+ }
+#endif
+
+ if(!self->encoder.context){
+ TSK_DEBUG_ERROR("Failed to allocate context");
+ return -1;
+ }
+
+#if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE);
+#endif
+
+ self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+ self->encoder.context->time_base.num = 1;
+ self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
+ self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
+ self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
+ self->encoder.max_bw_kpbs = TSK_CLAMP(
+ 0,
+ tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
+ TMEDIA_CODEC(self)->bandwidth_max_upload
+ );
+ self->encoder.context->bit_rate = (self->encoder.max_bw_kpbs * 1024);// bps
+
+ self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3);
+ self->encoder.context->rc_max_rate = self->encoder.context->bit_rate;
+
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->rc_lookahead = 0;
+#endif
+ self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
+
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8;
+#endif
+ self->encoder.context->me_method = ME_UMH;
+ self->encoder.context->me_range = 16;
+ self->encoder.context->qmin = 10;
+ self->encoder.context->qmax = 51;
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->mb_qmin = self->encoder.context->qmin;
+ self->encoder.context->mb_qmax = self->encoder.context->qmax;
+#endif
+ /* METROPOLIS = G2J.COM TelePresence client. Check Issue 378: No video when calling "TANDBERG/4129 (X8.1.1)" */
+#if !METROPOLIS && 0
+ self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER;
+#endif
+ self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY;
+ if (self->encoder.context->profile == FF_PROFILE_H264_BASELINE) {
+ self->encoder.context->max_b_frames = 0;
+ }
+
+ switch(TDAV_CODEC_H264_COMMON(self)->profile){
+ case profile_idc_baseline:
+ default:
+ self->encoder.context->profile = FF_PROFILE_H264_BASELINE;
+ self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
+ break;
+ case profile_idc_main:
+ self->encoder.context->profile = FF_PROFILE_H264_MAIN;
+ self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
+ break;
+ }
+
+ /* Comment from libavcodec/libx264.c:
+ * Allow x264 to be instructed through AVCodecContext about the maximum
+ * size of the RTP payload. For example, this enables the production of
+ * payload suitable for the H.264 RTP packetization-mode 0 i.e. single
+ * NAL unit per RTP packet.
+ */
+ self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE;
+ self->encoder.context->opaque = tsk_null;
+ self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS);
+
+#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
+ if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE);
+ }
+ if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? "baseline" : "main"), 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 profile");
+ }
+ if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast");
+ }
+ if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0");
+ }
+ if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency");
+ }
+#endif
+
+ // Picture (YUV 420)
+ if(!(self->encoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create encoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(self->encoder.picture);
+
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
+ if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
+ return -2;
+ }
+
+ // Open encoder
+ if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
+ return ret;
+ }
+
+ self->encoder.frame_count = 0;
+
+ TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate);
+
+ return ret;
+#elif HAVE_H264_PASSTHROUGH
+ self->encoder.frame_count = 0;
+ return 0;
+#endif
+
+ TSK_DEBUG_ERROR("Not expected code called");
+ return -1;
+}
+
+/* Closes the FFmpeg H.264 encoder and frees every resource allocated by the
+ * open() path. Safe to call when the encoder was never opened (each pointer
+ * is checked before release).
+ * @param reset_rotation if true, also reset the stored rotation to 0 (full
+ *        close); pass false when re-opening after a rotation change so the
+ *        new angle survives the close/open cycle.
+ * @return always 0. */
+int tdav_codec_h264_close_encoder(tdav_codec_h264_t* self, tsk_bool_t reset_rotation)
+{
+#if HAVE_FFMPEG
+    if(self->encoder.context){
+        avcodec_close(self->encoder.context);
+        av_free(self->encoder.context);
+        self->encoder.context = tsk_null;
+    }
+    if(self->encoder.picture){
+        av_free(self->encoder.picture);
+        self->encoder.picture = tsk_null;
+    }
+#endif
+    // encoder.buffer exists in both FFmpeg and passthrough builds
+    if(self->encoder.buffer){
+        TSK_FREE(self->encoder.buffer);
+    }
+    self->encoder.frame_count = 0;
+    if (reset_rotation) {
+        self->encoder.rotation = 0; // reset rotation
+    }
+
+    return 0;
+}
+
+/* Opens the FFmpeg H.264 decoder (or no-op in passthrough builds).
+ * Allocates the codec context and output picture, then opens the codec found
+ * by init(). Partial allocations are released later by close_decoder().
+ * @return 0 on success, negative error code otherwise. */
+int tdav_codec_h264_open_decoder(tdav_codec_h264_t* self)
+{
+#if HAVE_FFMPEG
+    int ret;
+
+    if(self->decoder.context){
+        TSK_DEBUG_ERROR("Decoder already opened");
+        return -1;
+    }
+
+    // FIX: check the allocation before dereferencing it in
+    // avcodec_get_context_defaults() (the encoder path already does this)
+    if(!(self->decoder.context = avcodec_alloc_context())){
+        TSK_DEBUG_ERROR("Failed to allocate context");
+        return -1;
+    }
+    avcodec_get_context_defaults(self->decoder.context);
+
+    self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+    self->decoder.context->flags2 |= CODEC_FLAG2_FAST; // allow speed tricks
+    // negotiated size; FFmpeg updates these from the bitstream while decoding
+    self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->in.width;
+    self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->in.height;
+
+    // Picture (YUV 420)
+    if(!(self->decoder.picture = avcodec_alloc_frame())){
+        TSK_DEBUG_ERROR("Failed to create decoder picture");
+        return -2;
+    }
+    avcodec_get_frame_defaults(self->decoder.picture);
+
+    // Open decoder
+    if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
+        TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
+        return ret;
+    }
+    self->decoder.last_seq = 0;
+
+    return ret;
+
+#elif HAVE_H264_PASSTHROUGH
+    return 0;
+#endif
+
+    TSK_DEBUG_ERROR("Unexpected code called");
+    return -1;
+
+}
+
+/* Closes the FFmpeg H.264 decoder and releases the RTP reassembly
+ * accumulator. Safe to call when the decoder was never opened.
+ * @return always 0. */
+int tdav_codec_h264_close_decoder(tdav_codec_h264_t* self)
+{
+#if HAVE_FFMPEG
+    if(self->decoder.context){
+        avcodec_close(self->decoder.context);
+        av_free(self->decoder.context);
+        self->decoder.context = tsk_null;
+    }
+    if(self->decoder.picture){
+        av_free(self->decoder.picture);
+        self->decoder.picture = tsk_null;
+    }
+#endif
+    // accumulator exists in all builds; TSK_FREE also nulls the pointer
+    TSK_FREE(self->decoder.accumulator);
+    self->decoder.accumulator_pos = 0;
+
+    return 0;
+}
+
+/* One-time initialization of the H.264 codec object (called from the ctor).
+ * Sets the negotiated profile/level, packetization mode and bandwidth limits,
+ * and locates the FFmpeg encoder/decoder. All heavy allocations are deferred
+ * to open().
+ * @param profile baseline or main profile identifier.
+ * @return 0 on success, negative error code otherwise. */
+int tdav_codec_h264_init(tdav_codec_h264_t* self, profile_idc_t profile)
+{
+    int ret = 0;
+    level_idc_t level;
+
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if((ret = tdav_codec_h264_common_init(TDAV_CODEC_H264_COMMON(self)))){
+        // FIX: error message typo ("faile with" -> "failed with")
+        TSK_DEBUG_ERROR("tdav_codec_h264_common_init() failed with error code=%d", ret);
+        return ret;
+    }
+
+    // derive the H.264 level from the negotiated output size
+    if((ret = tdav_codec_h264_common_level_from_size(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, &level))){
+        TSK_DEBUG_ERROR("Failed to find level for size=[%u, %u]", TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height);
+        return ret;
+    }
+
+    self->encoder.max_bw_kpbs = TMEDIA_CODEC(self)->bandwidth_max_upload;
+    TDAV_CODEC_H264_COMMON(self)->pack_mode_local = H264_PACKETIZATION_MODE;
+    TDAV_CODEC_H264_COMMON(self)->profile = profile;
+    TDAV_CODEC_H264_COMMON(self)->level = level;
+    TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS*1000;
+    TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR*1000;
+
+#if HAVE_FFMPEG
+    // locate codecs now; failure is reported but allocation happens in open()
+    if(!(self->encoder.codec = avcodec_find_encoder(CODEC_ID_H264))){
+        TSK_DEBUG_ERROR("Failed to find H.264 encoder");
+        ret = -2;
+    }
+
+    if(!(self->decoder.codec = avcodec_find_decoder(CODEC_ID_H264))){
+        TSK_DEBUG_ERROR("Failed to find H.264 decoder");
+        ret = -3;
+    }
+#endif
+#if HAVE_H264_PASSTHROUGH
+    TMEDIA_CODEC(self)->passthrough = tsk_true;
+    self->decoder.passthrough = tsk_true;
+    self->encoder.passthrough = tsk_true;
+#endif
+
+    self->encoder.quality = 1;
+
+    /* allocations MUST be done by open() */
+    return ret;
+}
+
+/* Counterpart of tdav_codec_h264_init(): clears the cached codec pointers.
+ * FFmpeg contexts/pictures are owned by open()/close(), not by this pair.
+ * @return 0 on success, -1 when self is null. */
+int tdav_codec_h264_deinit(tdav_codec_h264_t* self)
+{
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+#if HAVE_FFMPEG
+    // codec pointers come from avcodec_find_*() and are not owned by us
+    self->encoder.codec = tsk_null;
+    self->decoder.codec = tsk_null;
+
+    // FFMpeg resources are destroyed by close()
+#endif
+
+    return 0;
+}
+
+/* Returns tsk_true when the linked FFmpeg build provides both an H.264
+ * encoder and decoder; tsk_false otherwise (including non-FFmpeg builds). */
+tsk_bool_t tdav_codec_ffmpeg_h264_is_supported()
+{
+#if HAVE_FFMPEG
+    return (avcodec_find_encoder(CODEC_ID_H264) && avcodec_find_decoder(CODEC_ID_H264));
+#else
+    return tsk_false;
+#endif
+}
+
+/* Returns tsk_true when the build was compiled with H.264 passthrough
+ * support (no transcoding), tsk_false otherwise. */
+tsk_bool_t tdav_codec_passthrough_h264_is_supported()
+{
+#if HAVE_H264_PASSTHROUGH
+    return tsk_true;
+#else
+    return tsk_false;
+#endif
+}
+
+#endif /* HAVE_FFMPEG || HAVE_H264_PASSTHROUGH */
diff --git a/tinyDAV/src/codecs/h264/tdav_codec_h264_cisco.cxx b/tinyDAV/src/codecs/h264/tdav_codec_h264_cisco.cxx
new file mode 100644
index 0000000..a501221
--- /dev/null
+++ b/tinyDAV/src/codecs/h264/tdav_codec_h264_cisco.cxx
@@ -0,0 +1,882 @@
+/*
+* Copyright (C) 2014-2015 Mamadou DIOP.
+*
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h264_cisco.cxx
+ * @brief H.264 codec plugin using OpenH264 (https://github.com/cisco/openh264) v1.1 for encoding/decoding.
+ */
+#include "tinydav/codecs/h264/tdav_codec_h264_cisco.h"
+
+#if HAVE_OPENH264
+
+#include "tinydav/codecs/h264/tdav_codec_h264_common.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tinymedia/tmedia_codec.h"
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_mutex.h"
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+extern "C" {
+#include <wels/codec_api.h>
+#include <wels/codec_app_def.h>
+}
+
+#include <limits.h> /* INT_MAX */
+
+/* OpenH264 (Cisco) H.264 codec context: extends the common H.264 codec
+ * state with encoder/decoder instances from the OpenH264 library. */
+typedef struct tdav_codec_h264_cisco_s
+{
+    TDAV_DECLARE_CODEC_H264_COMMON;
+
+    // Encoder
+    struct{
+        ISVCEncoder *pInst;      // OpenH264 encoder instance
+        SEncParamExt sEncParam;  // extended params, kept for re-InitializeExt() on reconfigure
+        SSourcePicture sEncPic;  // source picture descriptor (plane pointers set per frame)
+        void* buffer;
+        int64_t frame_count;     // frames encoded since open()
+        tsk_bool_t force_idr;    // request an IDR on the next encode
+        int rotation;            // degrees; 90/270 swap negotiated width/height
+        int neg_width;           // negotiated (possibly rotated) width
+        int neg_height;          // negotiated (possibly rotated) height
+        int neg_fps;             // negotiated frame rate
+        tsk_mutex_handle_t* mutex; // serializes EncodeFrame/InitializeExt (issue 422)
+    } encoder;
+
+    // decoder
+    struct{
+        ISVCDecoder* pInst;      // OpenH264 decoder instance
+        void* accumulator;       // RTP NAL-unit reassembly buffer
+        tsk_size_t accumulator_pos;  // bytes currently accumulated
+        tsk_size_t accumulator_size; // allocated size of the accumulator
+        uint16_t last_seq;       // last RTP sequence number (packet-loss detection)
+    } decoder;
+}
+tdav_codec_h264_cisco_t;
+
+// GOP length expressed in seconds (multiplied by fps to get uiIntraPeriod)
+#if !defined(CISCO_H264_GOP_SIZE_IN_SECONDS)
+#	define CISCO_H264_GOP_SIZE_IN_SECONDS		25
+#endif
+
+// readable aliases for the close_encoder() reset_rotation argument
+#define kResetRotationTrue tsk_true
+#define kResetRotationFalse tsk_false
+
+// forward declarations (definitions below)
+static int tdav_codec_h264_cisco_init(tdav_codec_h264_cisco_t* self, profile_idc_t profile);
+static int tdav_codec_h264_cisco_deinit(tdav_codec_h264_cisco_t* self);
+static int tdav_codec_h264_cisco_open_encoder(tdav_codec_h264_cisco_t* self);
+static int tdav_codec_h264_cisco_close_encoder(tdav_codec_h264_cisco_t* self, tsk_bool_t reset_rotation);
+static int tdav_codec_h264_cisco_open_decoder(tdav_codec_h264_cisco_t* self);
+static int tdav_codec_h264_cisco_close_decoder(tdav_codec_h264_cisco_t* self);
+static ELevelIdc tdav_codec_h264_cisco_convert_level(enum level_idc_e level);
+static void tdav_codec_h264_cisco_debug_cb(void* context, int level, const char* message);
+
+// function pointer handed to OpenH264 as the trace callback
+static void (*__tdav_codec_h264_cisco_debug_cb)(void* context, int level, const char* message) = tdav_codec_h264_cisco_debug_cb;
+
+/* ============ H.264 Base/Main Profile X.X Plugin interface functions ================= */
+
+/* Runtime parameter setter for the OpenH264 codec.
+ * Handles "action" (force IDR, bandwidth up/down), "bw_kbps",
+ * "bandwidth-max-upload" and "rotation". Bandwidth changes are applied by
+ * re-initializing the encoder with the updated SEncParamExt.
+ * @return 0 on success, negative on error or unknown parameter. */
+static int tdav_codec_h264_cisco_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+    tdav_codec_h264_cisco_t* h264 = (tdav_codec_h264_cisco_t*)self;
+    tsk_bool_t reconf = tsk_false;
+    if (param->value_type == tmedia_pvt_int32) {
+        if (tsk_striequals(param->key, "action")) {
+            tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+            switch (action) {
+                case tmedia_codec_action_encode_idr:
+                {
+                    TSK_DEBUG_INFO("OpenH264 force_idr action");
+                    h264->encoder.force_idr = tsk_true;
+                    return 0;
+                }
+                case tmedia_codec_action_bw_up:
+                case tmedia_codec_action_bw_down:
+                {
+                    int32_t rc_target_bitrate;
+                    // bandwidth_max_upload is in kbps; INT_MAX means "unlimited" and must not be scaled
+                    int32_t bandwidth_max_upload_bps = TMEDIA_CODEC(h264)->bandwidth_max_upload == INT_MAX ? TMEDIA_CODEC(h264)->bandwidth_max_upload : (TMEDIA_CODEC(h264)->bandwidth_max_upload * 1024); // kbps -> bps
+                    if (action == tmedia_codec_action_bw_up) {
+                        // increase by 50%
+                        rc_target_bitrate = TSK_CLAMP(0, (int32_t)((h264->encoder.sEncParam.iTargetBitrate * 3) >> 1), bandwidth_max_upload_bps);
+                    }
+                    else {
+                        // decrease by a third
+                        rc_target_bitrate = TSK_CLAMP(0, (int32_t)((h264->encoder.sEncParam.iTargetBitrate << 1) / 3), bandwidth_max_upload_bps);
+                    }
+                    h264->encoder.sEncParam.iTargetBitrate = rc_target_bitrate;
+                    h264->encoder.sEncParam.iMaxBitrate = rc_target_bitrate;
+                    SSpatialLayerConfig* layer = &h264->encoder.sEncParam.sSpatialLayers[0];
+                    layer->iMaxSpatialBitrate = h264->encoder.sEncParam.iMaxBitrate;
+                    layer->iSpatialBitrate = h264->encoder.sEncParam.iTargetBitrate;
+                    reconf = tsk_true;
+                    TSK_DEBUG_INFO("OpenH264 new target bitrate = %d bps", rc_target_bitrate);
+                    break;
+                }
+                default:
+                {
+                    // FIX: explicit default so unhandled actions are ignored on
+                    // purpose (and the switch covers all enum values)
+                    break;
+                }
+            }
+        }
+        else if (tsk_striequals(param->key, "bw_kbps")) { // both up and down (from the SDP)
+            int32_t max_bw_userdefine_kbps = tmedia_defaults_get_bandwidth_video_upload_max();
+            int32_t max_bw_new_kbps = *((int32_t*)param->value);
+            if (max_bw_userdefine_kbps > 0) {
+                // do not use more than what the user defined in it's configuration
+                TMEDIA_CODEC(h264)->bandwidth_max_upload = TSK_MIN(max_bw_new_kbps, max_bw_userdefine_kbps);
+            }
+            else {
+                TMEDIA_CODEC(h264)->bandwidth_max_upload = max_bw_new_kbps;
+            }
+            TSK_DEBUG_INFO("OpenH264 codec: bandwidth-max-upload= %d kbps", TMEDIA_CODEC(h264)->bandwidth_max_upload);
+            reconf = tsk_true;
+        }
+        else if (tsk_striequals(param->key, "bandwidth-max-upload")) {
+            int32_t bw_max_upload_kbps = *((int32_t*)param->value);
+            TSK_DEBUG_INFO("OpenH264 codec: bandwidth-max-upload= %d kbps", bw_max_upload_kbps);
+            TMEDIA_CODEC(h264)->bandwidth_max_upload = bw_max_upload_kbps;
+            reconf = tsk_true;
+        }
+        else if (tsk_striequals(param->key, "rotation")) {
+            int rotation = *((int32_t*)param->value);
+            if (h264->encoder.rotation != rotation) {
+                h264->encoder.rotation = rotation;
+                if (self->opened) {
+                    // re-open the encoder with swapped dimensions, keeping the new rotation
+                    int ret;
+                    if ((ret = tdav_codec_h264_cisco_close_encoder(h264, kResetRotationFalse))) {
+                        return ret;
+                    }
+                    if ((ret = tdav_codec_h264_cisco_open_encoder(h264))) {
+                        return ret;
+                    }
+                }
+            }
+            return 0;
+        }
+    }
+
+    if (reconf) {
+        if (h264->encoder.pInst) {
+            long err;
+            // lock required because of https://code.google.com/p/doubango/issues/detail?id=422
+            tsk_mutex_lock(h264->encoder.mutex);
+            err = h264->encoder.pInst->InitializeExt(&h264->encoder.sEncParam);
+            tsk_mutex_unlock(h264->encoder.mutex);
+            if (err != cmResultSuccess) {
+                TSK_DEBUG_ERROR("InitializeExt failed: %ld", err);
+                return -1;
+            }
+        }
+        return 0;
+    }
+
+    return -1;
+}
+
+
+/* Opens the OpenH264 codec: brings up the encoder first, then the decoder.
+ * Fails fast on the first error and returns its code unchanged.
+ * @return 0 on success, non-zero error code otherwise. */
+static int tdav_codec_h264_cisco_open(tmedia_codec_t* self)
+{
+    tdav_codec_h264_cisco_t* ctx = (tdav_codec_h264_cisco_t*)self;
+    int rc;
+
+    if (!ctx) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* the caller (base class) already checked that the codec is not opened */
+
+    // Encoder
+    rc = tdav_codec_h264_cisco_open_encoder(ctx);
+    if (rc != 0) {
+        return rc;
+    }
+
+    // Decoder
+    rc = tdav_codec_h264_cisco_open_decoder(ctx);
+    if (rc != 0) {
+        return rc;
+    }
+
+    return 0;
+}
+
+/* Closes the OpenH264 codec. Both halves are always torn down, even if one
+ * fails internally; the encoder close also resets the stored rotation.
+ * @return 0 on success, -1 when self is null. */
+static int tdav_codec_h264_cisco_close(tmedia_codec_t* self)
+{
+    tdav_codec_h264_cisco_t* ctx = (tdav_codec_h264_cisco_t*)self;
+
+    if (!ctx) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* the caller (base class) already checked that the codec is opened */
+
+    // tear down encoder (resetting rotation) then decoder
+    tdav_codec_h264_cisco_close_encoder(ctx, kResetRotationTrue);
+    tdav_codec_h264_cisco_close_decoder(ctx);
+
+    return 0;
+}
+
+/* Encodes one raw I420 frame with OpenH264 and pushes the resulting NAL
+ * units onto the RTP layer via tdav_codec_h264_rtp_encap().
+ * NOTE: always returns 0 — output is delivered through the RTP callback,
+ * not through out_data.
+ * @param in_data raw I420 picture, size must be width*height*3/2.
+ * @return 0 (output goes through the RTP encapsulation path). */
+static tsk_size_t tdav_codec_h264_cisco_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+    long err;
+    tsk_bool_t send_idr, send_hdr;
+    tsk_size_t in_xsize;
+    SFrameBSInfo bsInfo;
+
+    tdav_codec_h264_cisco_t* h264 = (tdav_codec_h264_cisco_t*)self;
+    tdav_codec_h264_common_t* common = (tdav_codec_h264_common_t*)self;
+
+    if (!self || !in_data || !in_size) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    if (!self->opened || !h264->encoder.pInst) {
+        TSK_DEBUG_ERROR("Encoder not opened or not ready");
+        return 0;
+    }
+
+    // expected I420 size for the negotiated picture dimensions
+    in_xsize = (h264->encoder.sEncPic.iPicHeight * h264->encoder.sEncPic.iPicWidth * 3) >> 1;
+    if (in_xsize != in_size){
+        /* guard */
+        TSK_DEBUG_ERROR("Invalid size: %u<>%u", in_xsize, in_size);
+        return 0;
+    }
+
+    // send IDR for:
+    //	- the first frame
+    //  - remote peer requested an IDR
+    //	- every second within the first 4seconds
+    send_idr = (
+        h264->encoder.frame_count++ == 0
+        || h264->encoder.force_idr
+        //|| ( (h264->encoder.frame_count < (int)TMEDIA_CODEC_VIDEO(h264)->out.fps * 4) && ((h264->encoder.frame_count % TMEDIA_CODEC_VIDEO(h264)->out.fps)==0) )
+        );
+
+    // send SPS and PPS headers for:
+    //  - IDR frames (not required but it's the easiest way to deal with pkt loss)
+    //  - every 5 seconds after the first 4seconds
+    send_hdr = (
+        send_idr
+        //|| ( (h264->encoder.frame_count % (TMEDIA_CODEC_VIDEO(h264)->out.fps * 5))==0 )
+        );
+
+    if (send_idr) {
+        TSK_DEBUG_INFO("OpenH264 call ForceIntraFrame");
+        if ((err = h264->encoder.pInst->ForceIntraFrame(true)) != cmResultSuccess) {
+            TSK_DEBUG_WARN("OpenH264 ForceIntraFrame(%d) failed: %ld", send_idr, err);
+        }
+    }
+    if (send_hdr) {
+#if 0 // Not needed
+        memset(&bsInfo, 0, sizeof(bsInfo));
+        tsk_mutex_lock(h264->encoder.mutex);
+        if ((err = h264->encoder.pInst->EncodeParameterSets(&bsInfo)) != cmResultSuccess) {
+            TSK_DEBUG_WARN("OpenH264 EncodeParameterSets(%d) failed: %ld", send_idr, err);
+        }
+        else {
+            for (int iLayerNum = 0; iLayerNum < bsInfo.iLayerNum; ++iLayerNum) {
+                unsigned char* pBsBuf = bsInfo.sLayerInfo[iLayerNum].pBsBuf;
+                int iNalLengthInByte = 0, _iNalLengthInByte;
+                for (int iNalCount = 0; iNalCount < bsInfo.sLayerInfo[iLayerNum].iNalCount; ++iNalCount) {
+                    if ((_iNalLengthInByte = bsInfo.sLayerInfo[iLayerNum].pNalLengthInByte[iNalCount]) > 0) {
+                        iNalLengthInByte += _iNalLengthInByte;
+                    }
+                }
+                if (iNalLengthInByte > 0) {
+                    tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), pBsBuf, (tsk_size_t)iNalLengthInByte);
+                }
+            }
+        }
+        tsk_mutex_unlock(h264->encoder.mutex);
+#endif
+    }
+
+    // point the source-picture planes into the caller's contiguous I420 buffer
+    h264->encoder.sEncPic.pData[0] = ((unsigned char*)in_data);
+    h264->encoder.sEncPic.pData[1] = h264->encoder.sEncPic.pData[0] + (h264->encoder.sEncPic.iPicHeight * h264->encoder.sEncPic.iPicWidth);
+    h264->encoder.sEncPic.pData[2] = h264->encoder.sEncPic.pData[1] + ((h264->encoder.sEncPic.iPicHeight * h264->encoder.sEncPic.iPicWidth) >> 2);
+    // h264->encoder.sEncPic.uiTimeStamp = rand();
+
+    memset(&bsInfo, 0, sizeof(bsInfo));
+
+    tsk_mutex_lock(h264->encoder.mutex);
+    if ((err = h264->encoder.pInst->EncodeFrame(&h264->encoder.sEncPic, &bsInfo)) != cmResultSuccess) {
+        TSK_DEBUG_ERROR("OpenH264 setting EncodeFrame() failed: %ld", err);
+        tsk_mutex_unlock(h264->encoder.mutex);
+        return 0;
+    }
+
+    // Memory held by bsInfo is freed when "InitializeExt()" is called this is why the unlock is after reading the output stream
+    if (bsInfo.eFrameType != videoFrameTypeInvalid) {
+        for (int iLayerNum = 0; iLayerNum < bsInfo.iLayerNum; ++iLayerNum) {
+            unsigned char* pBsBuf = bsInfo.sLayerInfo[iLayerNum].pBsBuf;
+            int iNalLengthInByte = 0, _iNalLengthInByte;
+            // sum the NAL sizes for this layer, then hand the whole layer to the RTP packetizer
+            for (int iNalCount = 0; iNalCount < bsInfo.sLayerInfo[iLayerNum].iNalCount; ++iNalCount) {
+                if ((_iNalLengthInByte = bsInfo.sLayerInfo[iLayerNum].pNalLengthInByte[iNalCount]) > 0) {
+                    iNalLengthInByte += _iNalLengthInByte;
+                }
+            }
+            if (iNalLengthInByte > 0) {
+                tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), pBsBuf, (tsk_size_t)iNalLengthInByte);
+            }
+        }
+    }
+    tsk_mutex_unlock(h264->encoder.mutex);
+
+    h264->encoder.force_idr = tsk_false; // reset
+
+    return 0;
+}
+
+/* Depayloads one RTP packet, accumulates NAL-unit fragments and, on the end
+ * of a unit, decodes the accumulated access unit with OpenH264 into an I420
+ * picture written to *out_data.
+ * @param proto_hdr the trtp_rtp_header_t of the incoming packet.
+ * @return number of bytes written to *out_data (0 when no complete frame). */
+static tsk_size_t tdav_codec_h264_cisco_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+    tdav_codec_h264_cisco_t* h264 = (tdav_codec_h264_cisco_t*)self;
+    const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+
+    const uint8_t* pay_ptr = tsk_null;
+    tsk_size_t pay_size = 0;
+    int ret;
+    long err = cmResultSuccess;
+    tsk_bool_t append_scp, end_of_unit, got_picture_ptr = tsk_false;
+    tsk_bool_t sps_or_pps;
+    tsk_size_t retsize = 0, size_to_copy = 0;
+    static const tsk_size_t xmax_size = (3840 * 2160 * 3) >> 3; // >>3 instead of >>1 (not an error)
+    static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX);
+
+    if (!h264 || !in_data || !in_size || !out_data) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    // FIX: check the *decoder* instance (was wrongly testing encoder.pInst)
+    if (!self->opened || !h264->decoder.pInst) {
+        TSK_DEBUG_ERROR("Decoder not opened or not ready");
+        return 0;
+    }
+
+    /* Packet lost? */
+    if ((h264->decoder.last_seq + 1) != rtp_hdr->seq_num && h264->decoder.last_seq) {
+        TSK_DEBUG_INFO("[H.264] Packet loss, seq_num=%d", (h264->decoder.last_seq + 1));
+    }
+    h264->decoder.last_seq = rtp_hdr->seq_num;
+
+
+    /* 5.3. NAL Unit Octet Usage
+      +---------------+
+      |0|1|2|3|4|5|6|7|
+      +-+-+-+-+-+-+-+-+
+      |F|NRI|  Type   |
+      +---------------+
+    */
+    if (*((uint8_t*)in_data) & 0x80) {
+        TSK_DEBUG_WARN("F=1");
+        /* reset accumulator */
+        h264->decoder.accumulator_pos = 0;
+        return 0;
+    }
+
+    /* get payload */
+    if ((ret = tdav_codec_h264_get_pay(in_data, in_size, (const void**)&pay_ptr, &pay_size, &append_scp, &end_of_unit)) || !pay_ptr || !pay_size) {
+        TSK_DEBUG_ERROR("Depayloader failed to get H.264 content");
+        return 0;
+    }
+    //append_scp = tsk_true;
+    size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0);
+    // whether it's SPS or PPS (append_scp is false for subsequent FUA chuncks)
+    sps_or_pps = append_scp && pay_ptr && ((pay_ptr[0] & 0x1F) == 7 || (pay_ptr[0] & 0x1F) == 8);
+
+    // start-accumulator
+    if (!h264->decoder.accumulator) {
+        if (size_to_copy > xmax_size) {
+            TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size);
+            return 0;
+        }
+        if (!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))) {
+            TSK_DEBUG_ERROR("Failed to allocated new buffer");
+            return 0;
+        }
+        h264->decoder.accumulator_size = size_to_copy;
+    }
+    if ((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size) {
+        TSK_DEBUG_ERROR("BufferOverflow");
+        h264->decoder.accumulator_pos = 0;
+        return 0;
+    }
+    if ((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size) {
+        if (!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))) {
+            TSK_DEBUG_ERROR("Failed to reallocated new buffer");
+            h264->decoder.accumulator_pos = 0;
+            h264->decoder.accumulator_size = 0;
+            return 0;
+        }
+        h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy);
+    }
+
+    if (append_scp) {
+        memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size);
+        h264->decoder.accumulator_pos += start_code_prefix_size;
+    }
+    memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size);
+    h264->decoder.accumulator_pos += pay_size;
+    // end-accumulator
+
+    if (/*rtp_hdr->marker*/end_of_unit) {
+        /* decode the picture */
+        unsigned char* out_ptr[3] = { NULL };
+        int out_stride[2] = {0}, out_width = 0, out_height = 0;
+        tsk_size_t out_xsize;
+
+        // Decode a Unit
+        err = h264->decoder.pInst->DecodeFrame(
+            (const unsigned char*)h264->decoder.accumulator, h264->decoder.accumulator_pos,
+            out_ptr, out_stride, out_width, out_height);
+
+        if (err != cmResultSuccess) {
+            if (0 && err == dsDataErrorConcealed) {
+                TSK_DEBUG_INFO("OpenH264: Data error concealed");
+                err = cmResultSuccess;
+            }
+            else {
+                TSK_DEBUG_WARN("OpenH264: DecodeFrame failed: %ld", err);
+                goto bail;
+            }
+        }
+        // Do we have a complete frame?
+        if (!(got_picture_ptr = ((out_ptr[0] && out_ptr[1] && out_ptr[2]) && (out_stride[0] && out_stride[1]) && out_width && out_height))) {
+            goto bail;
+        }
+        out_xsize = (out_width * out_height * 3) >> 1; // I420
+        /* IDR ? */
+        if (((pay_ptr[0] & 0x1F) == 0x05) && TMEDIA_CODEC_VIDEO(self)->in.callback) {
+            TSK_DEBUG_INFO("Decoded H.264 IDR");
+            TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr;
+            TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+            TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+        }
+        /* fill out */
+        if (*out_max_size < out_xsize) {
+            if ((*out_data = tsk_realloc(*out_data, out_xsize))){
+                *out_max_size = out_xsize;
+            }
+            else {
+                *out_max_size = 0;
+                return 0;
+            }
+        }
+        TMEDIA_CODEC_VIDEO(h264)->in.width = out_width;
+        TMEDIA_CODEC_VIDEO(h264)->in.height = out_height;
+        /* layout picture */
+        {
+            int plane, y, stride;
+            retsize = 0;
+            for (plane=0; plane < 3; plane++) {
+                unsigned char *buf = out_ptr[plane];
+                stride = out_stride[plane ? 1 : 0];
+                for (y=0; y<out_height >> (plane ? 1 : 0); y++) {
+                    unsigned int w_count = out_width >> (plane ? 1 : 0);
+                    // FIX: guard against overflow using the bytes written so far
+                    // (retsize) — the original compared against the unrelated 'ret'
+                    // and reset the wrong variable on overflow
+                    if ((retsize + w_count) > *out_max_size) {
+                        TSK_DEBUG_ERROR("BufferOverflow");
+                        retsize = 0;
+                        goto bail;
+                    }
+                    memcpy(((uint8_t*)*out_data) + retsize, buf, w_count);
+                    retsize += w_count;
+                    buf += stride;
+                }
+            }
+        }
+    } // else if(rtp_hdr->marker)
+
+bail:
+    /* end of frame */
+    if (got_picture_ptr) {
+        int32_t endOfStream = 1;
+        err = h264->decoder.pInst->SetOption(DECODER_OPTION_END_OF_STREAM, (void*)&endOfStream);
+        if (err != cmResultSuccess) {
+            // FIX: do not 'goto bail' from inside the bail section (it could loop
+            // forever while SetOption keeps failing); log and fall through so the
+            // error-callback path below still runs.
+            TSK_DEBUG_WARN("OpenH264 setting DECODER_OPTION_END_OF_STREAM failed: %ld", err);
+        }
+    }
+    if (/*rtp_hdr->marker*/end_of_unit) {
+        /* reset accumulator */
+        h264->decoder.accumulator_pos = 0;
+    }
+    if (err != cmResultSuccess){
+        TSK_DEBUG_INFO("Failed to decode the buffer with error code =%ld, size=%u, append=%s", err, h264->decoder.accumulator_pos, append_scp ? "yes" : "no");
+        if (TMEDIA_CODEC_VIDEO(self)->in.callback) {
+            TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+            TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+            TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+        }
+    }
+    return retsize;
+}
+
+/* SDP attribute negotiation: delegates entirely to the common H.264 matcher. */
+static tsk_bool_t tdav_codec_h264_cisco_sdp_att_match(const tmedia_codec_t* self, const char* att_name, const char* att_value)
+{
+    return tdav_codec_h264_common_sdp_att_match((tdav_codec_h264_common_t*)self, att_name, att_value);
+}
+
+/* Returns the SDP attribute value from the common H.264 code, tagging the
+ * "fmtp" line with this implementation's name. Caller frees the result. */
+static char* tdav_codec_h264_cisco_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+    char* att = tdav_codec_h264_common_sdp_att_get((const tdav_codec_h264_common_t*)self, att_name);
+    if (att && tsk_striequals(att_name, "fmtp")) {
+        tsk_strcat(&att, "; impl=openh264");
+    }
+    return att;
+}
+
+/* ============ H.264 Base Profile Plugin interface ================= */
+
+/* constructor */
+/* constructor: returns tsk_null on init failure so object creation aborts */
+static tsk_object_t* tdav_codec_h264_cisco_base_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_codec_h264_cisco_t *h264 = (tdav_codec_h264_cisco_t*)self;
+    if (h264) {
+        /* init base: called by tmedia_codec_create() */
+        /* init self */
+        if (tdav_codec_h264_cisco_init(h264, profile_idc_baseline) != 0) {
+            return tsk_null;
+        }
+    }
+    return self;
+}
+/* destructor */
+/* destructor: releases common H.264 state first, then OpenH264 state */
+static tsk_object_t* tdav_codec_h264_cisco_base_dtor(tsk_object_t * self)
+{
+    tdav_codec_h264_cisco_t *h264 = (tdav_codec_h264_cisco_t*)self;
+    if (h264) {
+        /* deinit base */
+        tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self));
+        /* deinit self */
+        tdav_codec_h264_cisco_deinit(h264);
+    }
+
+    return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h264_cisco_base_def_s =
+{
+    sizeof(tdav_codec_h264_cisco_t),
+    tdav_codec_h264_cisco_base_ctor,
+    tdav_codec_h264_cisco_base_dtor,
+    tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h264_cisco_base_plugin_def_s =
+{
+    &tdav_codec_h264_cisco_base_def_s,
+
+    tmedia_video,
+    tmedia_codec_id_h264_bp,
+    "H264", // encoding name as it appears in the SDP
+    "H264 Base Profile (OpenH264)",
+    TMEDIA_CODEC_FORMAT_H264_BP,
+    tsk_true,
+    90000, // rate
+
+    /* audio */
+    { 0 },
+
+    /* video (width, height, fps) */
+    {176, 144, 0}, // fps is @deprecated
+
+    // codec callbacks (set/open/close/encode/decode/SDP matching)
+    tdav_codec_h264_cisco_set,
+    tdav_codec_h264_cisco_open,
+    tdav_codec_h264_cisco_close,
+    tdav_codec_h264_cisco_encode,
+    tdav_codec_h264_cisco_decode,
+    tdav_codec_h264_cisco_sdp_att_match,
+    tdav_codec_h264_cisco_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h264_cisco_base_plugin_def_t = &tdav_codec_h264_cisco_base_plugin_def_s;
+
+/* ============ Common To all H264 profiles ================= */
+
+/* Creates and configures the OpenH264 encoder: negotiated (rotation-aware)
+ * dimensions, single spatial/temporal layer, dynamic slicing sized to the
+ * RTP payload, and a bitrate clamped to the configured upload limit. Also
+ * creates the mutex that serializes EncodeFrame/InitializeExt.
+ * @return 0 on success, -1 otherwise (partially-created state is cleaned up
+ *         by the subsequent close_encoder() call). */
+static int tdav_codec_h264_cisco_open_encoder(tdav_codec_h264_cisco_t* self)
+{
+    int ret = -1, max_bitrate_bps;
+    long err;
+    SSpatialLayerConfig* layer;
+
+    int32_t max_bw_kpbs;
+    tdav_codec_h264_common_t* common = (tdav_codec_h264_common_t*)self;
+
+    if (self->encoder.pInst) {
+        TSK_DEBUG_ERROR("Encoder already initialized");
+        goto bail;
+    }
+
+    // create encoder
+    if ((err = WelsCreateSVCEncoder(&self->encoder.pInst)) != cmResultSuccess) {
+        // FIX: error message typo ("ancoder" -> "encoder")
+        TSK_DEBUG_ERROR("Failed to create encoder: %ld", err);
+        goto bail;
+    }
+
+    // route OpenH264 traces through our debug callback
+    self->encoder.pInst->SetOption(ENCODER_OPTION_TRACE_CALLBACK_CONTEXT, self);
+    self->encoder.pInst->SetOption(ENCODER_OPTION_TRACE_CALLBACK, &__tdav_codec_h264_cisco_debug_cb);
+
+    if ((err = self->encoder.pInst->GetDefaultParams(&self->encoder.sEncParam)) != cmResultSuccess) {
+        TSK_DEBUG_ERROR("GetDefaultParams failed: %ld", err);
+        goto bail;
+    }
+
+    // 90/270-degree rotation swaps the negotiated width and height
+    self->encoder.neg_width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
+    self->encoder.neg_height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
+    self->encoder.neg_fps = TMEDIA_CODEC_VIDEO(self)->out.fps;
+    max_bw_kpbs = TSK_CLAMP(
+        0,
+        tmedia_get_video_bandwidth_kbps_2(self->encoder.neg_width, self->encoder.neg_height, self->encoder.neg_fps),
+        TMEDIA_CODEC(self)->bandwidth_max_upload
+    );
+    max_bitrate_bps = (max_bw_kpbs * 1024);
+
+    TSK_DEBUG_INFO("[H.264 OpenH264 Encoder] neg_width=%d, neg_height=%d, neg_fps=%d, max_bitrate_bps=%d",
+        self->encoder.neg_width,
+        self->encoder.neg_height,
+        self->encoder.neg_fps,
+        max_bitrate_bps
+    );
+
+    self->encoder.sEncParam.iInputCsp = videoFormatI420;
+    self->encoder.sEncParam.iSpatialLayerNum = 1;
+    self->encoder.sEncParam.iTemporalLayerNum = 1;
+    self->encoder.sEncParam.uiIntraPeriod = (self->encoder.neg_fps * CISCO_H264_GOP_SIZE_IN_SECONDS);
+    self->encoder.sEncParam.iUsageType = CAMERA_VIDEO_REAL_TIME; // TODO: use "SCREEN_CONTENT_REAL_TIME" screencast
+    self->encoder.sEncParam.iPicWidth = self->encoder.neg_width;
+    self->encoder.sEncParam.iPicHeight = self->encoder.neg_height;
+    self->encoder.sEncParam.iTargetBitrate = max_bitrate_bps;
+    self->encoder.sEncParam.iMaxBitrate = max_bitrate_bps;
+    self->encoder.sEncParam.fMaxFrameRate = (float)self->encoder.neg_fps;
+    self->encoder.sEncParam.uiMaxNalSize = H264_RTP_PAYLOAD_SIZE;
+    self->encoder.sEncParam.bEnableSpsPpsIdAddition = true;
+    self->encoder.sEncParam.bEnableFrameCroppingFlag = true;
+
+    layer = &self->encoder.sEncParam.sSpatialLayers[0];
+    layer->uiProfileIdc = PRO_BASELINE;
+#if BUILD_TYPE_TCH
+    layer->uiLevelIdc = tdav_codec_h264_cisco_convert_level(common->level);
+#else
+    layer->uiLevelIdc = LEVEL_UNKNOWN; // auto-detect
+#endif
+    layer->fFrameRate = self->encoder.sEncParam.fMaxFrameRate;
+    layer->iMaxSpatialBitrate = self->encoder.sEncParam.iMaxBitrate;
+    layer->iSpatialBitrate = self->encoder.sEncParam.iTargetBitrate;
+    layer->iVideoWidth = self->encoder.sEncParam.iPicWidth;
+    layer->iVideoHeight = self->encoder.sEncParam.iPicHeight;
+    // dynamic slicing: slices bounded by the RTP payload size
+    layer->sSliceCfg.uiSliceMode = SM_DYN_SLICE;
+    layer->sSliceCfg.sSliceArgument.uiSliceSizeConstraint = H264_RTP_PAYLOAD_SIZE;
+    layer->sSliceCfg.sSliceArgument.uiSliceNum = 1;
+    //layer->sSliceCfg.sSliceArgument.uiSliceMbNum[0] = 960;
+
+    if ((err = self->encoder.pInst->InitializeExt(&self->encoder.sEncParam)) != cmResultSuccess) {
+        TSK_DEBUG_ERROR("InitializeExt failed: %ld", err);
+        goto bail;
+    }
+
+    // source picture descriptor: contiguous I420, plane pointers set per frame
+    self->encoder.sEncPic.iColorFormat = videoFormatI420;
+    self->encoder.sEncPic.iPicWidth = self->encoder.sEncParam.iPicWidth;
+    self->encoder.sEncPic.iPicHeight = self->encoder.sEncParam.iPicHeight;
+    self->encoder.sEncPic.iStride[0] = self->encoder.sEncPic.iPicWidth;
+    self->encoder.sEncPic.iStride[1] = self->encoder.sEncPic.iStride[0] >> 1;
+    self->encoder.sEncPic.iStride[2] = self->encoder.sEncPic.iStride[1];
+
+    // Create encoder mutex
+    if (!self->encoder.mutex && !(self->encoder.mutex = tsk_mutex_create())) {
+        TSK_DEBUG_ERROR("Failed to create mutex for the encoder");
+        goto bail;
+    }
+
+    self->encoder.frame_count = 0;
+
+    ret = 0;
+
+bail:
+    return ret;
+}
+
+static int tdav_codec_h264_cisco_close_encoder(tdav_codec_h264_cisco_t* self, tsk_bool_t reset_rotation)
+{
+    // Releases all encoder-side resources: the OpenH264 encoder instance, the
+    // output bitstream buffer and the encoder mutex.
+    // @param reset_rotation when true, also forget the last applied video
+    //        rotation so a subsequent re-open starts from 0 degrees.
+    // Always returns 0; a NULL self is a no-op, and calling this on an
+    // already-closed encoder is safe (each member is guarded).
+    if (self) {
+        if (self->encoder.pInst) {
+            self->encoder.pInst->Uninitialize();
+            WelsDestroySVCEncoder(self->encoder.pInst);
+            self->encoder.pInst = NULL;
+        }
+        if (self->encoder.buffer) {
+            TSK_FREE(self->encoder.buffer);
+        }
+        if (self->encoder.mutex) {
+            tsk_mutex_destroy(&self->encoder.mutex);
+        }
+        self->encoder.frame_count = 0;
+        if (reset_rotation) {
+            self->encoder.rotation = 0; // reset rotation
+        }
+    }
+    return 0;
+}
+
+int tdav_codec_h264_cisco_open_decoder(tdav_codec_h264_cisco_t* self)
+{
+    // Creates and initializes the OpenH264 decoder (AVC bitstream in, I420 out).
+    // Returns 0 on success, -1 on failure. Must not be called twice without an
+    // intervening close (guarded below).
+    int ret = -1;
+    long err;
+    SDecodingParam sDecParam = { 0 }; // zero-init: every unset option keeps its default
+
+    if (self->decoder.pInst) {
+        TSK_DEBUG_ERROR("Decoder already initialized");
+        goto bail;
+    }
+
+    // create decoder
+    if ((err = WelsCreateDecoder(&self->decoder.pInst)) != cmResultSuccess) {
+        TSK_DEBUG_ERROR("Failed to create decoder: %ld", err);
+        goto bail;
+    }
+    // route OpenH264 trace output through our logger callback
+    self->decoder.pInst->SetOption(DECODER_OPTION_TRACE_CALLBACK_CONTEXT, self);
+    self->decoder.pInst->SetOption(DECODER_OPTION_TRACE_CALLBACK, &__tdav_codec_h264_cisco_debug_cb);
+
+    // initialize decoder
+    sDecParam.iOutputColorFormat = videoFormatI420;
+    sDecParam.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_AVC;
+    if ((err = self->decoder.pInst->Initialize(&sDecParam)) != cmResultSuccess) {
+        TSK_DEBUG_ERROR("Failed to initialize decoder: %ld", err);
+        goto bail;
+    }
+    self->decoder.last_seq = 0; // reset RTP loss detection
+    // NOTE(review): the labels say neg_* but the values logged are the
+    // negotiated "in" dimensions — confirm labels against the encoder's log.
+    TSK_DEBUG_INFO("[OpenH264 Decoder] neg_width=%d, neg_height=%d, neg_fps=%d",
+                   TMEDIA_CODEC_VIDEO(self)->in.width,
+                   TMEDIA_CODEC_VIDEO(self)->in.height,
+                   TMEDIA_CODEC_VIDEO(self)->in.fps
+                  );
+    ret = 0;
+
+bail:
+    return ret;
+}
+
+static int tdav_codec_h264_cisco_close_decoder(tdav_codec_h264_cisco_t* self)
+{
+    /* Tears down the OpenH264 decoder instance and drops the NAL accumulator.
+     * NULL self is tolerated; always returns 0. */
+    if (!self) {
+        return 0;
+    }
+
+    if (self->decoder.pInst) {
+        self->decoder.pInst->Uninitialize();
+        WelsDestroyDecoder(self->decoder.pInst);
+        self->decoder.pInst = NULL;
+    }
+    TSK_FREE(self->decoder.accumulator);
+    self->decoder.accumulator_pos = 0;
+
+    return 0;
+}
+
+static ELevelIdc tdav_codec_h264_cisco_convert_level(enum level_idc_e level)
+{
+    // Maps the project-wide H.264 level enum onto OpenH264's ELevelIdc.
+    // Unknown values map to LEVEL_UNKNOWN so the encoder auto-detects the level.
+    switch (level) {
+    case level_idc_1_0: return LEVEL_1_0;
+    case level_idc_1_b: return LEVEL_1_B;
+    case level_idc_1_1: return LEVEL_1_1;
+    case level_idc_1_2: return LEVEL_1_2;
+    case level_idc_1_3: return LEVEL_1_3;
+    case level_idc_2_0: return LEVEL_2_0;
+    case level_idc_2_1: return LEVEL_2_1;
+    case level_idc_2_2: return LEVEL_2_2;
+    case level_idc_3_0: return LEVEL_3_0;
+    case level_idc_3_1: return LEVEL_3_1;
+    case level_idc_3_2: return LEVEL_3_2;
+    case level_idc_4_0: return LEVEL_4_0;
+    case level_idc_4_1: return LEVEL_4_1;
+    case level_idc_4_2: return LEVEL_4_2;
+    case level_idc_5_0: return LEVEL_5_0;
+    case level_idc_5_1: return LEVEL_5_1;
+    case level_idc_5_2: return LEVEL_5_2; // FIX: was LEVEL_2_2 (copy/paste bug)
+    default: return LEVEL_UNKNOWN;
+    }
+}
+
+static void tdav_codec_h264_cisco_debug_cb(void* context, int level, const char* message)
+{
+    /* Routes OpenH264 trace output into the tinySAK logger, mapping the
+     * OpenH264 severity onto ERROR/WARN/INFO. 'context' is unused here. */
+    if (level == WELS_LOG_ERROR || level == WELS_LOG_QUIET) {
+        TSK_DEBUG_ERROR("OpenH264: level=%d, message=%s", level, message);
+    }
+    else if (level == WELS_LOG_WARNING) {
+        TSK_DEBUG_WARN("OpenH264: level=%d, message=%s", level, message);
+    }
+    else {
+        TSK_DEBUG_INFO("OpenH264: level=%d, message=%s", level, message);
+    }
+}
+
+static int tdav_codec_h264_cisco_init(tdav_codec_h264_cisco_t* self, profile_idc_t profile)
+{
+    // One-time setup for the OpenH264 (Cisco) plugin: validates the profile
+    // (Constrained Baseline only), derives the H.264 level from the negotiated
+    // output size and fills the common caps (bitrate, macroblock rate, chroma).
+    // Returns 0 on success, non-zero otherwise.
+    int ret = -1;
+    level_idc_t level;
+    tdav_codec_h264_common_t* common = (tdav_codec_h264_common_t*)self;
+
+    if (!self || profile != profile_idc_baseline) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        goto bail;
+    }
+
+    if ((ret = tdav_codec_h264_common_init(common))) {
+        // FIX: log named the wrong function ("cisco_common_init") and had a typo ("faile")
+        TSK_DEBUG_ERROR("tdav_codec_h264_common_init() failed with error code=%d", ret);
+        goto bail;
+    }
+
+    if ((ret = tdav_codec_h264_common_level_from_size(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, &level))) {
+        TSK_DEBUG_ERROR("Failed to find level for size=[%u, %u]", TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height);
+        goto bail;
+    }
+
+    common->pack_mode_local = H264_PACKETIZATION_MODE;
+    common->profile = profile;
+    common->level = level;
+    // A.2.1.1 Constrained Baseline profile
+    // Conformance of a bitstream to the Constrained Baseline profile is indicated by profile_idc being equal to 66 with
+    // constraint_set1_flag being equal to 1.
+    common->profile_iop = 0xe0; // "constraint_set0_flag=1 and constraint_set1_flag=1" -> Constrained Baseline profile
+    TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS*1000;
+    TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR*1000;
+
+    // OpenH264 consumes/produces planar YUV 4:2:0 only
+    TMEDIA_CODEC_VIDEO(self)->in.chroma = tmedia_chroma_yuv420p;
+    TMEDIA_CODEC_VIDEO(self)->out.chroma = tmedia_chroma_yuv420p;
+
+    ret = 0;
+
+bail:
+    return ret;
+}
+
+static int tdav_codec_h264_cisco_deinit(tdav_codec_h264_cisco_t* self)
+{
+    /* Inverse of tdav_codec_h264_cisco_init(): closes encoder/decoder via the
+     * generic close entry point. Returns 0 on success, -1 on NULL input. */
+    if (self) {
+        tdav_codec_h264_cisco_close((tmedia_codec_t*)self);
+        return 0;
+    }
+    TSK_DEBUG_ERROR("Invalid parameter");
+    return -1;
+}
+
+#endif /* HAVE_OPENH264 */
diff --git a/tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx b/tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx
new file mode 100644
index 0000000..f9fbc11
--- /dev/null
+++ b/tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx
@@ -0,0 +1,1130 @@
+/*
+* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h264_cuda.cxx
+ * @brief H.264 codec plugin using NVIDIA CUDA for encoding/decoding
+ * Env: gpucomputingsdk_4.0.17_win_32, cudatoolkit_4.0.17_win_32 and 280.26-notebook-win7-winvista-32bit-international-whql.
+ * http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoDecoder_Library.pdf
+ * http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoEncoder_Library.pdf
+ * 2.0: https://developer.nvidia.com/sites/default/files/akamai/cuda/files/CUDADownloads/NVENC_VideoEncoder_API_ProgGuide.pdf
+ *
+ * RTP payloader/depayloader follows RFC 3984.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
+ *
+ */
+#include "tinydav/codecs/h264/tdav_codec_h264_cuda.h"
+
+#if HAVE_CUDA
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "nvcuvenc.lib")
+# pragma comment(lib, "nvcuvid.lib")
+# pragma comment(lib, "cuda.lib")
+
+# pragma comment(lib, "d3d9.lib")
+# pragma comment(lib, "d3dx9.lib")
+#endif
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <cutil_inline.h>
+#include <cuda.h>
+
+#define tdav_codec_h264_cuda_fmtp_set tsk_null
+
+#if !defined(CUDA_MAX_FRM_CNT)
+# define CUDA_MAX_FRM_CNT 10
+#endif
+
+#include "tsk_semaphore.h"
+tsk_semaphore_handle_t *sem = tsk_null;
+
+#define InitH264DecoderInfo(_self) \
+ memset(&_self->decoder.info, 0, sizeof(_self->decoder.info)); \
+ _self->decoder.info.ulCreationFlags = cudaVideoCreate_PreferCUDA; \
+ _self->decoder.info.CodecType = cudaVideoCodec_H264; \
+ _self->decoder.info.ulWidth = TMEDIA_CODEC_VIDEO(_self)->in.width; \
+ _self->decoder.info.ulTargetWidth = TMEDIA_CODEC_VIDEO(_self)->in.width; \
+ _self->decoder.info.ulHeight = TMEDIA_CODEC_VIDEO(_self)->in.height; \
+ _self->decoder.info.ulTargetHeight = TMEDIA_CODEC_VIDEO(_self)->in.height; \
+ _self->decoder.info.ulNumDecodeSurfaces = CUDA_MAX_FRM_CNT; \
+ _self->decoder.info.ulNumOutputSurfaces = 1; \
+ _self->decoder.info.ChromaFormat = cudaVideoChromaFormat_420; \
+ _self->decoder.info.OutputFormat = cudaVideoSurfaceFormat_NV12; \
+ _self->decoder.info.DeinterlaceMode = cudaVideoDeinterlaceMode_Adaptive;
+
+static int CUDAAPI _NVCallback_HandleVideoSequence(void *pvUserData, CUVIDEOFORMAT *pFormat);
+static int CUDAAPI _NVCallback_HandlePictureDecode(void *pvUserData, CUVIDPICPARAMS *pPicParams);
+static int CUDAAPI _NVCallback_HandlePictureDisplay(void *pvUserData, CUVIDPARSERDISPINFO *pPicParams);
+static unsigned char* CUDAAPI _NVCallback_HandleAcquireBitStream(int *pBufferSize, void *pUserdata);
+static void CUDAAPI _NVCallback_HandleReleaseBitStream(int nBytesInBuffer, unsigned char *cb,void *pUserdata);
+static void CUDAAPI _NVCallback_HandleOnBeginFrame(const NVVE_BeginFrameInfo *pbfi, void *pUserdata);
+static void CUDAAPI _NVCallback_HandleOnEndFrame(const NVVE_EndFrameInfo *pefi, void *pUserdata);
+
+static inline void _tdav_codec_h264_cuda_encap(const tdav_codec_h264_cuda_t* h264, const uint8_t* pdata, tsk_size_t size);
+static inline tsk_size_t _tdav_codec_h264_cuda_pict_layout(tdav_codec_h264_cuda_t* self, void**output, tsk_size_t *output_size);
+
+static int tdav_codec_h264_cuda_open(tmedia_codec_t* self)
+{
+    // Opens both halves of the CUDA H.264 codec:
+    //  - encoder: NVCUVENC context configured from the negotiated out.* caps,
+    //    with bitstream delivery through the registered callbacks;
+    //  - decoder: CUDA + Direct3D9 device, NVCUVID parser and decoder built
+    //    from the InitH264DecoderInfo() settings (in.* caps).
+    // Returns 0 on success; -1/-2/-3 for bad arg / encoder / decoder failures.
+    int ret = 0, i;
+    int bestGPU = 0, gpuPerf = 0, adapterCount;
+    static int low_latency = 1; // passed by address to NVVE_LOW_LATENCY below
+    HRESULT hr;
+    CUresult cuResult;
+    D3DPRESENT_PARAMETERS d3dpp;
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self;
+
+    if (!h264) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    //
+    // encoder
+    //
+    memset(&h264->encoder.clb_params, 0, sizeof(h264->encoder.clb_params));
+    memset(&h264->encoder.ctx_params, 0, sizeof(h264->encoder.ctx_params));
+
+    // Frame geometry and rate come from the negotiated output caps.
+    h264->encoder.ctx_params.iInputSize[0] = TMEDIA_CODEC_VIDEO(h264)->out.width;
+    h264->encoder.ctx_params.iInputSize[1] = TMEDIA_CODEC_VIDEO(h264)->out.height;
+    h264->encoder.ctx_params.iOutputSize[0] = TMEDIA_CODEC_VIDEO(h264)->out.width;
+    h264->encoder.ctx_params.iOutputSize[1] = TMEDIA_CODEC_VIDEO(h264)->out.height;
+    h264->encoder.ctx_params.GPUOffloadLevel = NVVE_GPU_OFFLOAD_DEFAULT;
+    h264->encoder.ctx_params.iSurfaceFormat = (int)IYUV;
+    h264->encoder.ctx_params.iPictureType = (int)FRAME_PICTURE;
+    h264->encoder.ctx_params.Fieldmode = MODE_FRAME;
+    h264->encoder.ctx_params.Presets = (NVVE_PRESETS_TARGET)-1; //Should be iPod, Zune ...
+    h264->encoder.ctx_params.iP_Interval = 1; // P-frame every frame (no B-frames)
+    h264->encoder.ctx_params.iAspectRatio[0] = 4;
+    h264->encoder.ctx_params.iAspectRatio[1] = 3;
+    h264->encoder.ctx_params.iAspectRatio[2] = 0;
+    h264->encoder.ctx_params.iIDR_Period = TMEDIA_CODEC_VIDEO(h264)->out.fps * 3; // IDR every 3 seconds
+    h264->encoder.ctx_params.iUseDeviceMem = 0; // input frames live in host memory
+    h264->encoder.ctx_params.iDynamicGOP = 0;
+    h264->encoder.ctx_params.RCType = RC_VBR;
+    h264->encoder.ctx_params.iAvgBitrate = 400000;
+    h264->encoder.ctx_params.iPeakBitrate = 800000;
+    h264->encoder.ctx_params.iQP_Level_Intra = 25;
+    h264->encoder.ctx_params.iQP_Level_InterP = 28;
+    h264->encoder.ctx_params.iQP_Level_InterB = 31;
+    h264->encoder.ctx_params.iFrameRate[0] = TMEDIA_CODEC_VIDEO(h264)->out.fps * 1000; // rate expressed as num/den
+    h264->encoder.ctx_params.iFrameRate[1] = 1000;
+    h264->encoder.ctx_params.iDeblockMode = 1;
+    h264->encoder.ctx_params.iForceIntra = 0;
+    h264->encoder.ctx_params.iForceIDR = 0;
+    h264->encoder.ctx_params.iClearStat = 0;
+    h264->encoder.ctx_params.DIMode = DI_MEDIAN;
+    h264->encoder.ctx_params.iDisableSPSPPS = 1; // Do not include SPS/PPS frames
+    h264->encoder.ctx_params.iNaluFramingType = 0; // StartCodes
+    h264->encoder.ctx_params.iMultiGPU = 1;
+    // All supported profiles use Baseline (0x..42), hence iDisableCabac=1.
+    switch(TDAV_CODEC_H264_COMMON(h264)->profile){
+    case tdav_codec_h264_bp10:
+        h264->encoder.ctx_params.iDisableCabac = 1;
+        h264->encoder.ctx_params.iProfileLevel = 0xff42;
+        break;
+    case tdav_codec_h264_bp20:
+        h264->encoder.ctx_params.iDisableCabac = 1;
+        h264->encoder.ctx_params.iProfileLevel = 0xff42;
+        break;
+    case tdav_codec_h264_bp30:
+        h264->encoder.ctx_params.iDisableCabac = 1;
+        h264->encoder.ctx_params.iProfileLevel = 0xff42;
+        break;
+    default:
+        break;
+    }
+
+    hr = NVCreateEncoder(&h264->encoder.context);
+    if(FAILED(hr)){
+        TSK_DEBUG_ERROR("NVCreateEncoder failed with error code = %08x", hr);
+        return -2;
+    }
+
+    hr = NVSetCodec(h264->encoder.context, NV_CODEC_TYPE_H264);
+    if(FAILED(hr)){
+        TSK_DEBUG_ERROR("NVSetCodec failed with error code = %08x", hr);
+        return -2;
+    }
+
+    hr = NVSetDefaultParam(h264->encoder.context);
+    if(FAILED(hr)){
+        TSK_DEBUG_ERROR("NVSetDefaultParam() failed with error code = %08x", hr);
+        return -2;
+    }
+
+    // Pick the best GPU: score = clock * SM count * cores-per-SM.
+    hr = NVGetParamValue(h264->encoder.context, NVVE_GET_GPU_COUNT, &h264->encoder.ctx_params.GPU_count);
+    if(SUCCEEDED(hr)){
+        int temp = 0, deviceCount;
+        for (deviceCount=0; deviceCount < h264->encoder.ctx_params.GPU_count; deviceCount++) {
+            NVVE_GPUAttributes GPUAttributes = {0};
+
+            GPUAttributes.iGpuOrdinal = deviceCount;
+            hr = NVGetParamValue(h264->encoder.context, NVVE_GET_GPU_ATTRIBUTES, &GPUAttributes);
+            if(FAILED(hr)){
+                TSK_DEBUG_ERROR("NVGetParamValue(NVVE_GET_GPU_ATTRIBUTES) failed with error code = %08x", hr);
+                continue;
+            }
+
+            temp = GPUAttributes.iClockRate * GPUAttributes.iMultiProcessorCount;
+            temp = temp * _ConvertSMVer2Cores(GPUAttributes.iMajor, GPUAttributes.iMinor);
+
+            if(temp > gpuPerf){
+                gpuPerf = temp;
+                bestGPU = deviceCount;
+            }
+        }
+    }
+    else{
+        TSK_DEBUG_ERROR("NVGetParamValue(NVVE_GET_GPU_COUNT) failed with error code = %08x", hr);
+        return -2;
+    }
+
+    h264->encoder.ctx_params.iForcedGPU = bestGPU;
+    hr = NVSetParamValue(h264->encoder.context, NVVE_FORCE_GPU_SELECTION, &h264->encoder.ctx_params.iForcedGPU);
+    if(FAILED(hr)){
+        TSK_DEBUG_WARN("NVSetParamValue(NVVE_FORCE_GPU_SELECTION) failed with error code = %08x", hr);
+    }
+
+    hr = NVSetParamValue(h264->encoder.context, NVVE_DEVICE_MEMORY_INPUT, &(h264->encoder.ctx_params.iUseDeviceMem));
+    if(FAILED(hr)){
+        TSK_DEBUG_ERROR("NVSetParamValue(NVVE_OUT_SIZE) failed with error code = %08x", hr);
+        return -2;
+    }
+
+    // NOTE(review): (w*h*3)>>4 is only 3/16 of a raw frame — unusually small for a
+    // worst-case bitstream buffer; confirm intended (>>1 would be one I420 frame).
+    // NOTE(review): the realloc runs only when buffer is NULL, so the buffer is
+    // never resized on re-open with larger dimensions — TODO confirm.
+    h264->encoder.buffer_size = (h264->encoder.ctx_params.iOutputSize[1] * h264->encoder.ctx_params.iOutputSize[0] * 3) >> 4;
+    if(!h264->encoder.buffer && !(h264->encoder.buffer = tsk_realloc(h264->encoder.buffer, h264->encoder.buffer_size))){
+        TSK_DEBUG_ERROR("Failed to allocate buffer with size=%u", h264->encoder.buffer_size);
+        h264->encoder.buffer_size = 0;
+        return -2;
+    }
+
+    // Push every configured parameter to the encoder; individual failures are
+    // non-fatal (warn only) because defaults remain in effect.
+    hr = NVSetParamValue(h264->encoder.context,NVVE_OUT_SIZE, &(h264->encoder.ctx_params.iOutputSize)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_OUT_SIZE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_IN_SIZE, &(h264->encoder.ctx_params.iInputSize)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_IN_SIZE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_MULTI_GPU, &(h264->encoder.ctx_params.iMultiGPU)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_MULTI_GPU) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_ASPECT_RATIO, &(h264->encoder.ctx_params.iAspectRatio));if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_ASPECT_RATIO) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_FIELD_ENC_MODE, &(h264->encoder.ctx_params.Fieldmode)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_FIELD_ENC_MODE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_P_INTERVAL, &(h264->encoder.ctx_params.iP_Interval)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_P_INTERVAL) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_IDR_PERIOD, &(h264->encoder.ctx_params.iIDR_Period)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_IDR_PERIOD) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_DYNAMIC_GOP, &(h264->encoder.ctx_params.iDynamicGOP)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DYNAMIC_GOP) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_RC_TYPE, &(h264->encoder.ctx_params.RCType)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_RC_TYPE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_AVG_BITRATE, &(h264->encoder.ctx_params.iAvgBitrate)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_AVG_BITRATE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_PEAK_BITRATE, &(h264->encoder.ctx_params.iPeakBitrate)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_PEAK_BITRATE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_QP_LEVEL_INTRA, &(h264->encoder.ctx_params.iQP_Level_Intra)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_OUT_SIZE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_QP_LEVEL_INTER_P,&(h264->encoder.ctx_params.iQP_Level_InterP)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_QP_LEVEL_INTER_P) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_QP_LEVEL_INTER_B,&(h264->encoder.ctx_params.iQP_Level_InterB)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_QP_LEVEL_INTER_B) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_FRAME_RATE, &(h264->encoder.ctx_params.iFrameRate)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_FRAME_RATE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_DEBLOCK_MODE, &(h264->encoder.ctx_params.iDeblockMode)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DEBLOCK_MODE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_PROFILE_LEVEL, &(h264->encoder.ctx_params.iProfileLevel)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_PROFILE_LEVEL) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_FORCE_INTRA, &(h264->encoder.ctx_params.iForceIntra)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_FORCE_INTRA) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_FORCE_IDR, &(h264->encoder.ctx_params.iForceIDR)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_FORCE_IDR) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_CLEAR_STAT, &(h264->encoder.ctx_params.iClearStat)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_CLEAR_STAT) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_SET_DEINTERLACE,&(h264->encoder.ctx_params.DIMode)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_SET_DEINTERLACE) failed with error code = %08x", hr); }
+    if (h264->encoder.ctx_params.Presets != -1) {
+        hr = NVSetParamValue(h264->encoder.context,NVVE_PRESETS, &(h264->encoder.ctx_params.Presets)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_PRESETS) failed with error code = %08x", hr); }
+    }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_DISABLE_CABAC, &(h264->encoder.ctx_params.iDisableCabac)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DISABLE_CABAC) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_CONFIGURE_NALU_FRAMING_TYPE, &(h264->encoder.ctx_params.iNaluFramingType)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_CONFIGURE_NALU_FRAMING_TYPE) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_DISABLE_SPS_PPS,&(h264->encoder.ctx_params.iDisableSPSPPS)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DISABLE_SPS_PPS) failed with error code = %08x", hr); }
+    hr = NVSetParamValue(h264->encoder.context,NVVE_LOW_LATENCY,&low_latency); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_LOW_LATENCY) failed with error code = %08x", hr); }
+
+    // Register bitstream-delivery callbacks; 'h264' is the user-data cookie.
+    h264->encoder.clb_params.pfnacquirebitstream = _NVCallback_HandleAcquireBitStream;
+    h264->encoder.clb_params.pfnonbeginframe = _NVCallback_HandleOnBeginFrame;
+    h264->encoder.clb_params.pfnonendframe = _NVCallback_HandleOnEndFrame;
+    h264->encoder.clb_params.pfnreleasebitstream = _NVCallback_HandleReleaseBitStream;
+    NVRegisterCB(h264->encoder.context, h264->encoder.clb_params, h264);
+
+
+    hr = NVCreateHWEncoder(h264->encoder.context);
+    if(FAILED(hr)){
+        TSK_DEBUG_ERROR("NVCreateHWEncoder failed with error code = %08x", hr);
+        return -2;
+    }
+
+
+
+    //
+    // decoder
+    //
+    if((cuResult = cuInit(0)) != CUDA_SUCCESS){
+        TSK_DEBUG_ERROR("cuInit(0) failed with error code = %d", (int)cuResult);
+        return -3;
+    }
+
+    InitH264DecoderInfo(h264);
+
+    h264->decoder.cu_device = cutilDrvGetMaxGflopsGraphicsDeviceId();
+
+#if _DEBUG || DEBUG
+    {
+        int major, minor;
+        size_t totalGlobalMem;
+        char deviceName[256];
+        cuDeviceComputeCapability(&major, &minor, h264->decoder.cu_device);
+        cuDeviceGetName(deviceName, sizeof(deviceName), h264->decoder.cu_device);
+        TSK_DEBUG_INFO("Using GPU Device %d: %s has SM %d.%d compute capability", h264->decoder.cu_device, deviceName, major, minor);
+
+        cutilDrvSafeCallNoSync(cuDeviceTotalMem(&totalGlobalMem, h264->decoder.cu_device) );
+        TSK_DEBUG_INFO("Total amount of global memory in GPU device: %4.4f MB", (float)totalGlobalMem/(1024*1024));
+    }
+#endif
+
+    // create Direct3D instance
+    h264->decoder.dx_d3d = Direct3DCreate9(D3D_SDK_VERSION);
+    if(!h264->decoder.dx_d3d){
+        TSK_DEBUG_ERROR("Direct3DCreate9 failed");
+        return -3;
+    }
+    // Try each adapter until one yields a D3D device on which a CUDA context
+    // can be created (required for D3D9 interop decoding).
+    // NOTE(review): if no adapter succeeds, cu_context stays unset yet no
+    // error is returned here — TODO confirm downstream behavior.
+    adapterCount = h264->decoder.dx_d3d->GetAdapterCount();
+    for(i=0; i<adapterCount; ++i){
+        ZeroMemory(&d3dpp, sizeof(d3dpp));
+        d3dpp.Windowed = TRUE;
+        d3dpp.BackBufferFormat = D3DFMT_X8R8G8B8;
+        d3dpp.BackBufferWidth = h264->decoder.info.ulTargetWidth;
+        d3dpp.BackBufferHeight = h264->decoder.info.ulTargetHeight;
+        d3dpp.BackBufferCount = 1;
+        d3dpp.SwapEffect = D3DSWAPEFFECT_COPY;
+        d3dpp.PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE;
+        d3dpp.Flags = D3DPRESENTFLAG_VIDEO;//D3DPRESENTFLAG_LOCKABLE_BACKBUFFER;
+        hr = h264->decoder.dx_d3d->CreateDevice(i,
+                                                D3DDEVTYPE_HAL,
+                                                GetDesktopWindow(),
+                                                D3DCREATE_FPU_PRESERVE | D3DCREATE_MULTITHREADED | D3DCREATE_HARDWARE_VERTEXPROCESSING,
+                                                &d3dpp,
+                                                &h264->decoder.dx_d3ddevice);
+        if(hr == S_OK){
+            cuResult = cuD3D9CtxCreate(&h264->decoder.cu_context, &h264->decoder.cu_device, 0, h264->decoder.dx_d3ddevice);
+            if(cuResult == CUDA_SUCCESS){
+                break;
+            }
+            if(h264->decoder.dx_d3ddevice){
+                h264->decoder.dx_d3ddevice->Release();
+                h264->decoder.dx_d3ddevice = NULL;
+            }
+        }
+    }
+
+    // Bitstream parser: feeds the sequence/decode/display callbacks below.
+    memset(&h264->decoder.cu_paser_params, 0, sizeof(h264->decoder.cu_paser_params));
+    h264->decoder.cu_paser_params.CodecType = cudaVideoCodec_H264;
+    h264->decoder.cu_paser_params.ulMaxNumDecodeSurfaces = CUDA_MAX_FRM_CNT;
+    h264->decoder.cu_paser_params.pUserData = h264;
+    h264->decoder.cu_paser_params.pfnSequenceCallback = _NVCallback_HandleVideoSequence;
+    h264->decoder.cu_paser_params.pfnDecodePicture = _NVCallback_HandlePictureDecode;
+    h264->decoder.cu_paser_params.pfnDisplayPicture = _NVCallback_HandlePictureDisplay;
+    cuResult = cuvidCreateVideoParser(&h264->decoder.cu_parser, &h264->decoder.cu_paser_params);
+    if(cuResult != CUDA_SUCCESS){
+        TSK_DEBUG_ERROR("cuvidCreateVideoParser(0) failed with error code = %d", (int)cuResult);
+        return -3;
+    }
+
+    cuResult = cuvidCreateDecoder(&h264->decoder.context, &h264->decoder.info);
+    if(CUDA_SUCCESS != cuResult){
+        TSK_DEBUG_ERROR("cuvidCreateDecoder failed with error code=%d", (int)cuResult);
+        return -3;
+    }
+
+    return ret;
+}
+
+static int tdav_codec_h264_cuda_close(tmedia_codec_t* self)
+{
+    // Releases the resources acquired in tdav_codec_h264_cuda_open().
+    // Returns 0 on success, -1 on NULL input.
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self;
+
+    if(!h264){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if(h264->encoder.context){
+        NVDestroyEncoder(h264->encoder.context);
+        h264->encoder.context = NULL;
+    }
+    // FIX: the decoder-side objects created in open() were previously never
+    // released, leaking the cuvid parser/decoder, the CUDA context and the
+    // Direct3D device on every close. Teardown order is the reverse of creation.
+    if(h264->decoder.cu_parser){
+        cuvidDestroyVideoParser(h264->decoder.cu_parser);
+        h264->decoder.cu_parser = NULL;
+    }
+    if(h264->decoder.context){
+        cuvidDestroyDecoder(h264->decoder.context);
+        h264->decoder.context = NULL;
+    }
+    if(h264->decoder.cu_context){
+        cuCtxDestroy(h264->decoder.cu_context);
+        h264->decoder.cu_context = NULL;
+    }
+    if(h264->decoder.dx_d3ddevice){
+        h264->decoder.dx_d3ddevice->Release();
+        h264->decoder.dx_d3ddevice = NULL;
+    }
+    if(h264->decoder.dx_d3d){
+        h264->decoder.dx_d3d->Release();
+        h264->decoder.dx_d3d = NULL;
+    }
+    return 0;
+}
+
+static tsk_size_t tdav_codec_h264_cuda_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+    // Encodes one I420 frame with the CUDA HW encoder. The bitstream is
+    // delivered asynchronously through the acquire/release-bitstream callbacks
+    // (which RTP-encapsulate it), so this function always returns 0 and never
+    // writes *out_data directly.
+    NVVE_EncodeFrameParams efparams;
+    unsigned long flags = 0;
+    HRESULT hr;
+
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self;
+
+    if(!self || !in_data || !in_size || !out_data){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    if(!self->opened){
+        TSK_DEBUG_ERROR("Codec not opened");
+        return 0;
+    }
+
+    if((h264->encoder.ctx_params.iOutputSize[1] * h264->encoder.ctx_params.iOutputSize[0] * 3)>>1 != in_size){
+        /* guard: input must be exactly one I420 frame (w*h*3/2 bytes) */
+        TSK_DEBUG_ERROR("Invalid size");
+        return 0;
+    }
+
+    efparams.Width = h264->encoder.ctx_params.iOutputSize[0];
+    efparams.Height = h264->encoder.ctx_params.iOutputSize[1];
+    efparams.Pitch = (h264->encoder.ctx_params.nDeviceMemPitch ? h264->encoder.ctx_params.nDeviceMemPitch : h264->encoder.ctx_params.iOutputSize[0]);
+    efparams.PictureStruc = (NVVE_PicStruct)h264->encoder.ctx_params.iPictureType;
+    efparams.SurfFmt = (NVVE_SurfaceFormat)h264->encoder.ctx_params.iSurfaceFormat;
+    efparams.progressiveFrame = (h264->encoder.ctx_params.iSurfaceFormat == 3) ? 1 : 0;
+    efparams.repeatFirstField = 0;
+    efparams.topfieldfirst = (h264->encoder.ctx_params.iSurfaceFormat == 1) ? 1 : 0;
+    efparams.picBuf = (unsigned char *)in_data;
+    efparams.bLast = 0;
+
+    // send keyframe for:
+    // - the first frame
+    // - every second within the first 4seconds
+    // - every 7 seconds after the first 4seconds
+    if(h264->encoder.frame_count++ == 0
+            ||
+            ( (h264->encoder.frame_count < (int)TMEDIA_CODEC_VIDEO(h264)->out.fps * 4) && ((h264->encoder.frame_count % TMEDIA_CODEC_VIDEO(h264)->out.fps)==0) )
+            ||
+            ( (h264->encoder.frame_count % (TMEDIA_CODEC_VIDEO(h264)->out.fps * 5))==0 )
+      )
+    {
+        flags |= 0x04; // FORCE IDR
+        if(h264->encoder.ctx_params.iDisableSPSPPS){
+            // SPS/PPS insertion is disabled in the encoder, so fetch them and
+            // RTP-encapsulate them manually before each IDR.
+            unsigned char SPSPPSBuff[1024];
+            int SPSPPSBuffSize = sizeof(SPSPPSBuff);
+            hr = NVGetSPSPPS(h264->encoder.context, SPSPPSBuff, SPSPPSBuffSize, &SPSPPSBuffSize);
+            if(SUCCEEDED(hr)){
+                int size = 0;
+                while(size < SPSPPSBuffSize - 2){
+                    // Each NAL unit is prefixed by a 16-bit big-endian size.
+                    // FIX: the high byte must be shifted by 8 bits (was <<1).
+                    int16_t next_size = ((int16_t)SPSPPSBuff[size])<<8 | ((int16_t)SPSPPSBuff[size + 1]);
+                    _tdav_codec_h264_cuda_encap(h264, &SPSPPSBuff[size + 2], next_size);
+                    size+=next_size + 2;
+                }
+            }
+            else{
+                TSK_DEBUG_ERROR("NVGetSPSPPS failed with error code = %08x", hr); // FIX: missing ';'
+            }
+        }
+    }
+
+    hr = NVEncodeFrame(h264->encoder.context, &efparams, flags, NULL);
+
+    return 0;
+}
+
+static tsk_size_t tdav_codec_h264_cuda_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+    // RFC 3984 depayloader + CUDA decode. Each RTP payload is depacketized and
+    // appended to an internal accumulator (with Annex-B start codes re-inserted
+    // as needed); on the RTP marker bit the accumulated access unit is handed
+    // to the cuvid parser, and the decoded picture (if one became available via
+    // the display callback) is laid out into *out_data.
+    // Returns the number of bytes written to *out_data, or 0 when no complete
+    // frame was produced.
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self;
+    const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+    const uint8_t* pay_ptr = tsk_null;
+    tsk_size_t pay_size = 0, retsize = 0, size_to_copy = 0;
+    int ret = 0;
+    tsk_bool_t append_scp = tsk_false;
+    // hard cap on the accumulator: 3/8 of a raw 1080p frame
+    static tsk_size_t xmax_size = (1920 * 1080 * 3) >> 3;
+    static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX);
+
+    // Packet lost?
+    if(h264->decoder.last_seq != (rtp_hdr->seq_num - 1) && h264->decoder.last_seq){
+        if(h264->decoder.last_seq == rtp_hdr->seq_num){
+            // Could happen on some stupid emulators
+            TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+            return 0;
+        }
+        TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num);
+    }
+    h264->decoder.last_seq = rtp_hdr->seq_num;
+
+    /* 5.3. NAL Unit Octet Usage
+      +---------------+
+      |0|1|2|3|4|5|6|7|
+      +-+-+-+-+-+-+-+-+
+      |F|NRI| Type |
+      +---------------+
+    */
+    // F (forbidden_zero_bit) set means the NAL unit is known-corrupt: drop
+    // everything accumulated so far.
+    if(*((uint8_t*)in_data) >> 7){
+        TSK_DEBUG_WARN("F=1");
+        /* reset accumulator */
+        h264->decoder.accumulator_pos = 0;
+        goto bail;
+    }
+
+    // get payload (handles single-NAL / STAP / FU-A; append_scp tells us
+    // whether a start-code prefix must be re-inserted before this payload)
+    if((ret = tdav_codec_h264_get_pay(in_data, in_size, (const void**)&pay_ptr, &pay_size, &append_scp)) || !pay_ptr || !pay_size){
+        TSK_DEBUG_ERROR("Depayloader failed to get H.264 content");
+        goto bail;
+    }
+    //append_scp = tsk_true;
+    size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0);
+
+    // start-accumulator: lazily allocated, grown below as needed
+    if(!h264->decoder.accumulator){
+        if(size_to_copy > xmax_size){
+            TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size);
+            goto bail;
+        }
+        if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){
+            TSK_DEBUG_ERROR("Failed to allocated new buffer");
+            goto bail;
+        }
+        h264->decoder.accumulator_size = size_to_copy;
+    }
+    if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){
+        TSK_DEBUG_ERROR("BufferOverflow");
+        h264->decoder.accumulator_pos = 0;
+        goto bail;
+    }
+    if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){
+        if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){
+            TSK_DEBUG_ERROR("Failed to reallocated new buffer");
+            h264->decoder.accumulator_pos = 0;
+            h264->decoder.accumulator_size = 0;
+            goto bail;
+        }
+        h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy);
+    }
+
+    if(append_scp){
+        memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size);
+        h264->decoder.accumulator_pos += start_code_prefix_size;
+    }
+    memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size);
+    h264->decoder.accumulator_pos += pay_size;
+    // end-accumulator
+
+    // Marker bit = end of the access unit: feed the whole unit to the parser.
+    if(rtp_hdr->marker){
+        CUVIDSOURCEDATAPACKET pkt;
+        CUresult cuResult;
+        pkt.flags = 0;
+        pkt.payload_size = (unsigned long) h264->decoder.accumulator_pos;
+        pkt.payload = (unsigned char *)h264->decoder.accumulator;
+        pkt.timestamp = 0;
+
+        // reset accumulator
+        h264->decoder.accumulator_pos = 0;
+        cuResult = cuvidParseVideoData(h264->decoder.cu_parser, &pkt);
+        if(cuResult != CUDA_SUCCESS){
+            TSK_DEBUG_ERROR("cuvidParseVideoData() failed with error code = %d", (int)cuResult);
+            goto bail;
+        }
+
+        // cu_buffer_avail is set by the display callback when a decoded
+        // picture is ready; copy it out in the negotiated layout.
+        if(h264->decoder.cu_buffer_avail){
+            h264->decoder.cu_buffer_avail = tsk_false;
+            if((retsize = _tdav_codec_h264_cuda_pict_layout(h264, out_data, out_max_size)) == 0){
+                TSK_DEBUG_ERROR("_tdav_codec_h264_cuda_pict_layout failed");
+                goto bail;
+            }
+        }
+    }
+
+bail:
+    return retsize;
+}
+
+static tsk_bool_t tdav_codec_h264_cuda_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+    // Matches a remote SDP attribute against this codec. Only the H.264
+    // profile carried in the attribute value is checked; a missing profile
+    // is accepted. On match, hard-codes the negotiated picture size (800x640).
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)codec;
+    profile_idc_t profile;
+
+    if(!h264){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return tsk_false;
+    }
+
+    /* Check whether the profile match (If the profile is missing, then we consider that it's ok) */
+    // FIX: the original referenced an undeclared 'fmtp' variable; after the
+    // fmtp_match -> sdp_att_match refactoring, the fmtp string is 'att_value'.
+    if(((profile = tdav_codec_h264_common_get_profile(att_value)) != tdav_codec_h264_bp99) && (profile != TDAV_CODEC_H264_COMMON(h264)->profile)){
+        TSK_DEBUG_INFO("Profile not matching");
+        return tsk_false;
+    }
+
+    TMEDIA_CODEC_VIDEO(h264)->in.width = 800, TMEDIA_CODEC_VIDEO(h264)->in.height = 640;
+    TMEDIA_CODEC_VIDEO(h264)->out.width = 800, TMEDIA_CODEC_VIDEO(h264)->out.height = 640;
+    //TMEDIA_CODEC_VIDEO(h264)->in.width = 352, TMEDIA_CODEC_VIDEO(h264)->in.height = 288;
+    //TMEDIA_CODEC_VIDEO(h264)->out.width = 352, TMEDIA_CODEC_VIDEO(h264)->out.height = 288;
+
+    return tsk_true;
+}
+
+static char* tdav_codec_h264_cuda_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+    // Builds the fmtp string advertising this codec's profile-level-id.
+    // Returns a newly-allocated string (caller frees with TSK_FREE) or
+    // tsk_null for an unhandled profile.
+    char* fmtp = tsk_null;
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self;
+
+    switch(TDAV_CODEC_H264_COMMON(h264)->profile){
+    case tdav_codec_h264_bp10:
+        fmtp = tsk_strdup("profile-level-id=42e00a");
+        break;
+    case tdav_codec_h264_bp20:
+        fmtp = tsk_strdup("profile-level-id=42e014");
+        break;
+    case tdav_codec_h264_bp30:
+        fmtp = tsk_strdup("profile-level-id=42e01e");
+        break;
+    default:
+        // FIX: explicit default (returns tsk_null) — silences -Wswitch and
+        // documents that other profiles are intentionally unadvertised.
+        break;
+    }
+
+    //1080p(1920 x 1080), 720p(1280 x 720), SVGA(800 x 600), VGA(640 x 480), 4CIF(704 x 576), CIF(352 x 288), QCIF(176 x 144), SQCIF(128 x 96)
+    return fmtp;
+}
+
+tsk_bool_t tdav_codec_h264_cuda_is_supported()
+{
+    /* Probes the NVIDIA hardware H.264 encoder once and caches the answer.
+     * NOTE(review): the static flags are not synchronized -- assumed to be
+     * first called from a single (initialization) thread; confirm. */
+    static tsk_bool_t __already_checked = tsk_false;
+    static tsk_bool_t __is_supported = tsk_false;
+    if(!__already_checked){
+        HRESULT hr;
+        __already_checked = tsk_true;
+        hr = NVGetHWEncodeCaps();
+        if(SUCCEEDED(hr)){
+            // FIX: initialize the handle -- if NVCreateEncoder() fails, the
+            // original read an indeterminate value (undefined behavior) and
+            // could call NVDestroyEncoder() on garbage.
+            NVEncoder encoder = NULL;
+            hr = NVCreateEncoder(&encoder);
+            if(SUCCEEDED(hr)){
+                hr = NVIsSupportedCodec(encoder, NV_CODEC_TYPE_H264);
+                __is_supported = SUCCEEDED(hr);
+            }
+            else{
+                TSK_DEBUG_ERROR("NVCreateEncoder() failed with error code = %08x", hr);
+            }
+            if(encoder){
+                NVDestroyEncoder(encoder);
+                encoder = NULL;
+            }
+        }
+    }
+    return __is_supported;
+}
+
+static int tdav_codec_h264_cuda_init(tdav_codec_h264_cuda_t* self, profile_idc_t profile)
+{
+    /* Initializes one codec instance: common H.264 state, decoder mutex, the
+     * shared picture-layout semaphore and default negotiation values.
+     * Returns 0 on success, a negative error code otherwise. */
+    int ret = 0;
+
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if((ret = tdav_codec_h264_common_init(TDAV_CODEC_H264_COMMON(self)))){
+        TSK_DEBUG_ERROR("tdav_codec_h264_common_init() failed with error code=%d", ret);
+        return ret;
+    }
+
+    if(!self->decoder.mutex && !(self->decoder.mutex = tsk_mutex_create_2(tsk_false))){
+        TSK_DEBUG_ERROR("Failed to create mutex");
+        return -2;
+    }
+
+    // FIX: create the module-level semaphore only once and check the result.
+    // The original unconditionally overwrote it, leaking the previous handle
+    // when init() runs for more than one instance (bp10/bp20/bp30).
+    if(!sem && !(sem = tsk_semaphore_create_2(1))){
+        TSK_DEBUG_ERROR("Failed to create semaphore");
+        return -3;
+    }
+
+    TDAV_CODEC_H264_COMMON(self)->pack_mode_local = H264_PACKETIZATION_MODE;
+    TDAV_CODEC_H264_COMMON(self)->profile = profile;
+    TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS*1000;
+    TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR*1000;
+
+    // At this time self->plugin is Null
+    TMEDIA_CODEC_VIDEO(self)->in.width = TMEDIA_CODEC_VIDEO(self)->out.width = 176;
+    TMEDIA_CODEC_VIDEO(self)->in.height = TMEDIA_CODEC_VIDEO(self)->out.height = 144;
+    TMEDIA_CODEC_VIDEO(self)->in.fps = TMEDIA_CODEC_VIDEO(self)->out.fps = 15;
+    TMEDIA_CODEC_VIDEO(self)->in.chroma = tmedia_chroma_yuv420p;// no choice
+
+    return 0;
+}
+
+static int tdav_codec_h264_cuda_deinit(tdav_codec_h264_cuda_t* self)
+{
+    /* Releases every encoder/decoder resource owned by this codec instance.
+     * Each handle is NULLed after release, so repeated calls are safe.
+     * NOTE(review): the module-level 'sem' created by tdav_codec_h264_cuda_init()
+     * is never destroyed here -- presumably shared across instances; confirm,
+     * otherwise it leaks. */
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self;
+
+    if(!h264){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    // NVIDIA hardware encoder context and its output buffer
+    if(h264->encoder.context){
+        NVDestroyEncoder(h264->encoder.context);
+        h264->encoder.context = NULL;
+    }
+    TSK_FREE(h264->encoder.buffer);
+    h264->encoder.buffer_size = 0;
+
+    // CUVID decoder, CUDA context, D3D device/parser used for decoding
+    if(h264->decoder.context){
+        cuvidDestroyDecoder(h264->decoder.context);
+        h264->decoder.context = NULL;
+    }
+    if(h264->decoder.cu_context){
+        cuCtxDestroy(h264->decoder.cu_context);
+        h264->decoder.cu_context = NULL;
+    }
+    if (h264->decoder.dx_d3ddevice){
+        h264->decoder.dx_d3ddevice->Release();
+        h264->decoder.dx_d3ddevice = NULL;
+    }
+    if (h264->decoder.dx_d3d){
+        h264->decoder.dx_d3d->Release();
+        h264->decoder.dx_d3d = NULL;
+    }
+    if(h264->decoder.cu_parser){
+        cuvidDestroyVideoParser(h264->decoder.cu_parser);
+        h264->decoder.cu_parser = NULL;
+    }
+    // page-locked host frame buffer allocated with cuMemAllocHost()
+    if(h264->decoder.cu_buffer){
+        cuMemFreeHost(h264->decoder.cu_buffer);
+        h264->decoder.cu_buffer = NULL;
+    }
+    h264->decoder.cu_buffer_size = 0;
+    if(self->decoder.mutex){
+        tsk_mutex_destroy(&self->decoder.mutex);
+    }
+
+    // RTP reassembly accumulator
+    TSK_FREE(h264->decoder.accumulator);
+    h264->decoder.accumulator_pos = 0;
+    h264->decoder.accumulator_size = 0;
+
+    return 0;
+}
+
+static inline void _tdav_codec_h264_cuda_encap(const tdav_codec_h264_cuda_t* h264, const uint8_t* pdata, tsk_size_t size)
+{
+    /* Splits an Annex-B bitstream (NAL units separated by the 4-byte start
+     * code 00 00 00 01) and forwards each unit to the RTP packetizer. */
+    register int32_t i;
+    int32_t last_scp, prev_scp;
+    static int32_t size_of_scp = sizeof(H264_START_CODE_PREFIX); /* we know it's equal to 4 ..but */
+
+    if(!pdata || !size){
+        return;
+    }
+
+    last_scp = 0, prev_scp = 0;
+
+    // scan for start-code prefixes; a NAL ends where the next prefix begins
+    for(i = size_of_scp; i<(int32_t)(size - size_of_scp); i++){
+        if(pdata[i] == H264_START_CODE_PREFIX[0] && pdata[i+1] == H264_START_CODE_PREFIX[1] && pdata[i+2] == H264_START_CODE_PREFIX[2] && pdata[i+3] == H264_START_CODE_PREFIX[3]){ /* Found Start Code Prefix */
+            prev_scp = last_scp;
+            // NOTE(review): '|| 1' disables the payload-size test, so every NAL
+            // is emitted unconditionally -- looks like debug leftover; confirm.
+            if((i - last_scp) >= H264_RTP_PAYLOAD_SIZE || 1){
+                // NOTE(review): '(prev_scp == size)' can never hold here
+                // (prev_scp < i < size), so the "last unit" flag is always
+                // false on this path -- verify intended marker semantics.
+                tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + prev_scp,
+                    (i - prev_scp), (prev_scp == size));
+            }
+            last_scp = i;
+        }
+    }
+    // trailing NAL (last start code to end of buffer), flagged as final
+    if(last_scp < (int32_t)size){
+        tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + last_scp,
+            (size - last_scp), tsk_true);
+    }
+}
+
+static inline tsk_size_t _tdav_codec_h264_cuda_pict_layout(tdav_codec_h264_cuda_t* self, void**output, tsk_size_t *output_size)
+{
+    /* Converts the decoded NV12 frame held in decoder.cu_buffer into planar
+     * YUV420 (I420) in *output, growing *output when it is too small.
+     * Returns the I420 byte count (w*h*3/2) on success, 0 on failure. */
+    if(self && self->decoder.cu_buffer && self->decoder.cu_buffer_size){
+        unsigned int w = TMEDIA_CODEC_VIDEO(self)->in.width;
+        unsigned int h = TMEDIA_CODEC_VIDEO(self)->in.height;
+        unsigned int pitch = self->decoder.cu_buffer_pitch;  // device row stride, may exceed w
+        tsk_size_t xsize = (w * h * 3) >> 1;  // 12 bits per pixel
+        // resize if too short
+        if(*output_size<xsize){
+            if((*output = tsk_realloc(*output, xsize))){
+                *output_size = xsize;
+            }
+            else{
+                *output_size = 0;
+                return 0;
+            }
+        }
+
+        // serialize against _NVCallback_HandlePictureDisplay(), which refills cu_buffer
+        tsk_semaphore_decrement(sem);
+
+        register unsigned int y;
+        register unsigned int x, x2;
+        const unsigned char *p = (const unsigned char *)self->decoder.cu_buffer;
+        register unsigned char *iyuv = (unsigned char *)*output, *i = iyuv, *j;
+        // copy luma
+        for (y=0; y<h; y++){
+            memcpy(i+y*w, p+y*pitch, w);
+        }
+        // de-interleave chroma (NV12 stored as U,V,U,V,...)
+        p += h * pitch;
+        i += h * w;
+        j = i + (h/2)*(w/2);  // j = start of the V plane
+        for (y=0; y<h/2; y++){
+            for (x=0, x2=0; x<w/2; x++, x2+=2) i[x] = p[x2], j[x] = p[x2+1];
+            p += pitch, i += w/2, j += w/2;
+        }
+
+        tsk_semaphore_increment(sem);
+
+        return xsize;
+    }
+    return 0;
+}
+
+static int CUDAAPI _NVCallback_HandleVideoSequence(void *pvUserData, CUVIDEOFORMAT *pFormat)
+{
+    /* CUVID parser callback fired when a new sequence header is parsed.
+     * Re-creates the CUVID decoder when the coded frame size changed.
+     * Returns 1 on success, 0 on error (parser convention). */
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)pvUserData;
+    if(!h264 || !pFormat){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;//error
+    }
+
+    int ret = 1;
+
+    if(pFormat->coded_width != TMEDIA_CODEC_VIDEO(h264)->in.width || pFormat->coded_height != TMEDIA_CODEC_VIDEO(h264)->in.height){
+        // protect decoder.context against the concurrent decode callback
+        tsk_mutex_lock(h264->decoder.mutex);
+
+        TMEDIA_CODEC_VIDEO(h264)->in.width = pFormat->coded_width;
+        TMEDIA_CODEC_VIDEO(h264)->in.height = pFormat->coded_height;
+
+        // rebuild decoder.info from the new dimensions
+        // NOTE(review): result is ignored -- confirm InitH264DecoderInfo() cannot fail
+        InitH264DecoderInfo(h264);
+        CUresult cuResult;
+        if(h264->decoder.context){
+            cuResult = cuvidDestroyDecoder(h264->decoder.context);
+            if(CUDA_SUCCESS != cuResult){
+                TSK_DEBUG_ERROR("cuvidDestroyDecoder failed with error code=%d", (int)cuResult);
+                ret = 0;
+            }
+            h264->decoder.context = NULL;
+        }
+        cuResult = cuvidCreateDecoder(&h264->decoder.context, &h264->decoder.info);
+        if(CUDA_SUCCESS != cuResult){
+            TSK_DEBUG_ERROR("cuvidCreateDecoder failed with error code=%d", (int)cuResult);
+            ret = 0;
+        }
+
+        tsk_mutex_unlock(h264->decoder.mutex);
+    }
+
+    return ret;//success
+}
+
+static int CUDAAPI _NVCallback_HandlePictureDecode(void *pvUserData, CUVIDPICPARAMS *pPicParams)
+{
+    /* CUVID parser callback: submits one picture to the hardware decoder.
+     * Returns 1 on success, 0 on error (parser convention). */
+    tdav_codec_h264_cuda_t* codec = (tdav_codec_h264_cuda_t*)pvUserData;
+    CUresult status;
+
+    if (!codec || !pPicParams) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    // serialize access to decoder.context (recreated by the sequence callback)
+    tsk_mutex_lock(codec->decoder.mutex);
+    status = cuvidDecodePicture(codec->decoder.context, pPicParams);
+    tsk_mutex_unlock(codec->decoder.mutex);
+
+    if (status == CUDA_SUCCESS) {
+        return 1;
+    }
+    TSK_DEBUG_ERROR("cuvidDecodePicture failed with error code= %d", status);
+    return 0;
+}
+
+static int CUDAAPI _NVCallback_HandlePictureDisplay(void *pvUserData, CUVIDPARSERDISPINFO *pPicParams)
+{
+    /* CUVID parser callback fired when a decoded frame is ready for display.
+     * Maps the device frame, copies the NV12 data into the page-locked host
+     * buffer 'cu_buffer' and marks it available for the pict-layout routine.
+     * Returns 1 on success, 0 on error (parser convention). */
+    tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)pvUserData;
+    CUVIDPROCPARAMS vpp;
+    CUdeviceptr devPtr;
+    CUresult cuResult;
+    tsk_size_t nv12_size;
+    tsk_bool_t mapped = tsk_false;
+    int ret = 1;//success
+
+    if(!h264 || !pPicParams){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;//error
+    }
+    // keep cu_buffer stable while _tdav_codec_h264_cuda_pict_layout() reads it
+    tsk_semaphore_decrement(sem);
+    cuResult = cuCtxPushCurrent(h264->decoder.cu_context);
+    if(cuResult != CUDA_SUCCESS){
+        TSK_DEBUG_ERROR("cuCtxPushCurrent failed with error code = %d", (int)cuResult);
+        ret = 0;//error
+        // NOTE(review): this path still reaches cuCtxPopCurrent() below even
+        // though nothing was pushed -- verify against the CUDA driver API.
+        goto bail;
+    }
+
+    memset(&vpp, 0, sizeof(vpp));
+    vpp.progressive_frame = pPicParams->progressive_frame;
+    vpp.top_field_first = pPicParams->top_field_first;
+    cuResult = cuvidMapVideoFrame(h264->decoder.context, pPicParams->picture_index, &devPtr, &h264->decoder.cu_buffer_pitch, &vpp);
+
+    if(cuResult != CUDA_SUCCESS){
+        TSK_DEBUG_ERROR("cuvidMapVideoFrame failed with error code = %d", (int)cuResult);
+        ret = 0;//error
+        goto bail;
+    }
+    mapped = tsk_true;
+    nv12_size = h264->decoder.cu_buffer_pitch * (h264->decoder.info.ulTargetHeight + h264->decoder.info.ulTargetHeight/2); // 12bpp
+    //nv12_size = (w * h * 3) >> 1;
+    // (re)allocate the host buffer when the frame grew
+    if ((!h264->decoder.cu_buffer) || (nv12_size > h264->decoder.cu_buffer_size)){
+        h264->decoder.cu_buffer_size = 0;
+        if (h264->decoder.cu_buffer){
+            cuResult = cuMemFreeHost(h264->decoder.cu_buffer);
+            h264->decoder.cu_buffer = NULL;
+        }
+        cuResult = cuMemAllocHost((void**)&h264->decoder.cu_buffer, nv12_size);
+        if (cuResult != CUDA_SUCCESS){
+            TSK_DEBUG_ERROR("cuMemAllocHost failed to allocate %d bytes (error code=%d)", nv12_size, (int)cuResult);
+            h264->decoder.cu_buffer = 0;
+            h264->decoder.cu_buffer_size = 0;
+            ret = 0;//error
+        }
+        else{
+            h264->decoder.cu_buffer_size = nv12_size;
+        }
+    }
+    if(h264->decoder.cu_buffer){
+        // NOTE(review): copy result is not checked -- 'ret' stays 1 (and the
+        // frame is flagged available) even if cuMemcpyDtoH() fails; confirm.
+        cuResult = cuMemcpyDtoH(h264->decoder.cu_buffer, devPtr, nv12_size);
+    }
+
+bail:
+    if(mapped){
+        cuResult = cuvidUnmapVideoFrame(h264->decoder.context, devPtr);
+    }
+    cuResult = cuCtxPopCurrent(NULL);
+    tsk_semaphore_increment(sem);
+    h264->decoder.cu_buffer_avail = (ret == 1);
+    return ret;
+}
+
+static unsigned char* CUDAAPI _NVCallback_HandleAcquireBitStream(int *pBufferSize, void *pUserdata)
+{
+    /* Encoder callback: hands the SDK our pre-allocated bitstream buffer. */
+    tdav_codec_h264_cuda_t* codec = (tdav_codec_h264_cuda_t*)pUserdata;
+    if (!codec || !pBufferSize) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return tsk_null;
+    }
+    *pBufferSize = (int)codec->encoder.buffer_size;
+    return (unsigned char*)codec->encoder.buffer;
+}
+
+static void CUDAAPI _NVCallback_HandleReleaseBitStream(int nBytesInBuffer, unsigned char *cb,void *pUserdata)
+{
+    /* Encoder callback: the SDK finished writing; packetize the produced NALs. */
+    tdav_codec_h264_cuda_t* codec = (tdav_codec_h264_cuda_t*)pUserdata;
+    if (!codec || !cb || !nBytesInBuffer) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return;
+    }
+    _tdav_codec_h264_cuda_encap(codec, cb, (tsk_size_t)nBytesInBuffer);
+}
+
+static void CUDAAPI _NVCallback_HandleOnBeginFrame(const NVVE_BeginFrameInfo *pbfi, void *pUserdata)
+{
+    /* Encoder begin-frame notification: intentionally a no-op. */
+    (void)pbfi;
+    (void)pUserdata;
+}
+
+static void CUDAAPI _NVCallback_HandleOnEndFrame(const NVVE_EndFrameInfo *pefi, void *pUserdata)
+{
+    /* Encoder end-frame notification: intentionally a no-op. */
+    (void)pefi;
+    (void)pUserdata;
+}
+
+/* ============ H.264 Base Profile 1.0 Plugin interface ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_h264_cuda_bp10_ctor(tsk_object_t * self, va_list * app)
+{
+    /* Constructor (base init is performed by tmedia_codec_create()). */
+    tdav_codec_h264_cuda_t *codec = (tdav_codec_h264_cuda_t *)self;
+    if (codec) {
+        tdav_codec_h264_cuda_init(codec, tdav_codec_h264_bp10);
+    }
+    return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h264_cuda_bp10_dtor(tsk_object_t * self)
+{
+    /* Destructor: tear down the common base, then the CUDA-specific state. */
+    tdav_codec_h264_cuda_t *codec = (tdav_codec_h264_cuda_t *)self;
+    if (codec) {
+        tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self));
+        tdav_codec_h264_cuda_deinit(codec);
+    }
+    return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h264_cuda_bp10_def_s =
+{
+    sizeof(tdav_codec_h264_cuda_t),   // object size
+    tdav_codec_h264_cuda_bp10_ctor,   // constructor
+    tdav_codec_h264_cuda_bp10_dtor,   // destructor
+    tmedia_codec_cmp,                 // comparator
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h264_cuda_bp10_plugin_def_s =
+{
+    &tdav_codec_h264_cuda_bp10_def_s,   // base object definition
+
+    tmedia_video,                        // media type
+    "H264",                              // SDP encoding name
+    "H264 Base Profile 1.0 using CUDA",  // description
+    TMEDIA_CODEC_FORMAT_H264_BP10,       // payload format
+    tsk_true,                            // dyn
+    90000, // rate
+
+    /* audio */
+    { 0 },
+
+    /* video: default width, height, fps */
+    {176, 144, 15},
+
+    tdav_codec_h264_cuda_open,
+    tdav_codec_h264_cuda_close,
+    tdav_codec_h264_cuda_encode,
+    tdav_codec_h264_cuda_decode,
+    tdav_codec_h264_cuda_sdp_att_match,
+    tdav_codec_h264_cuda_sdp_att_get,
+    tdav_codec_h264_cuda_fmtp_set
+};
+// exported pointer used by the plugin registry ('extern' with initializer: definition with external linkage)
+extern const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp10_plugin_def_t = &tdav_codec_h264_cuda_bp10_plugin_def_s;
+
+
+/* ============ H.264 Base Profile 2.0 Plugin interface ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_h264_cuda_bp20_ctor(tsk_object_t * self, va_list * app)
+{
+    /* Constructor (base init is performed by tmedia_codec_create()). */
+    tdav_codec_h264_cuda_t *codec = (tdav_codec_h264_cuda_t *)self;
+    if (codec) {
+        tdav_codec_h264_cuda_init(codec, tdav_codec_h264_bp20);
+    }
+    return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h264_cuda_bp20_dtor(tsk_object_t * self)
+{
+    /* Destructor: tear down the common base, then the CUDA-specific state. */
+    tdav_codec_h264_cuda_t *codec = (tdav_codec_h264_cuda_t *)self;
+    if (codec) {
+        tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self));
+        tdav_codec_h264_cuda_deinit(codec);
+    }
+    return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h264_cuda_bp20_def_s =
+{
+    sizeof(tdav_codec_h264_cuda_t),   // object size
+    tdav_codec_h264_cuda_bp20_ctor,   // constructor
+    tdav_codec_h264_cuda_bp20_dtor,   // destructor
+    tmedia_codec_cmp,                 // comparator
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h264_cuda_bp20_plugin_def_s =
+{
+    &tdav_codec_h264_cuda_bp20_def_s,   // base object definition
+
+    tmedia_video,                        // media type
+    "H264",                              // SDP encoding name
+    "H264 Base Profile 2.0 using CUDA",  // description
+    TMEDIA_CODEC_FORMAT_H264_BP20,       // payload format
+    tsk_true,                            // dyn
+    90000, // rate
+
+    /* audio */
+    { 0 },
+
+    /* video: default width, height, fps */
+    {352, 288, 15},
+
+    tdav_codec_h264_cuda_open,
+    tdav_codec_h264_cuda_close,
+    tdav_codec_h264_cuda_encode,
+    tdav_codec_h264_cuda_decode,
+    tdav_codec_h264_cuda_sdp_att_match,
+    tdav_codec_h264_cuda_sdp_att_get,
+    tdav_codec_h264_cuda_fmtp_set
+};
+// exported pointer used by the plugin registry ('extern' with initializer: definition with external linkage)
+extern const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp20_plugin_def_t = &tdav_codec_h264_cuda_bp20_plugin_def_s;
+
+
+/* ============ H.264 Base Profile 3.0 Plugin interface ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_h264_cuda_bp30_ctor(tsk_object_t * self, va_list * app)
+{
+    /* Constructor (base init is performed by tmedia_codec_create()). */
+    tdav_codec_h264_cuda_t *codec = (tdav_codec_h264_cuda_t *)self;
+    if (codec) {
+        tdav_codec_h264_cuda_init(codec, tdav_codec_h264_bp30);
+    }
+    return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h264_cuda_bp30_dtor(tsk_object_t * self)
+{
+    /* Destructor: tear down the common base, then the CUDA-specific state. */
+    tdav_codec_h264_cuda_t *codec = (tdav_codec_h264_cuda_t *)self;
+    if (codec) {
+        tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self));
+        tdav_codec_h264_cuda_deinit(codec);
+    }
+    return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h264_cuda_bp30_def_s =
+{
+    sizeof(tdav_codec_h264_cuda_t),   // object size
+    tdav_codec_h264_cuda_bp30_ctor,   // constructor
+    tdav_codec_h264_cuda_bp30_dtor,   // destructor
+    tmedia_codec_cmp,                 // comparator
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h264_cuda_bp30_plugin_def_s =
+{
+    &tdav_codec_h264_cuda_bp30_def_s,   // base object definition
+
+    tmedia_video,                        // media type
+    "H264",                              // SDP encoding name
+    "H264 Base Profile 3.0 using CUDA",  // description
+    TMEDIA_CODEC_FORMAT_H264_BP30,       // payload format
+    tsk_true,                            // dyn
+    90000, // rate
+
+    /* audio */
+    { 0 },
+
+    /* video: default width, height, fps */
+    {352, 288, 15},
+
+    tdav_codec_h264_cuda_open,
+    tdav_codec_h264_cuda_close,
+    tdav_codec_h264_cuda_encode,
+    tdav_codec_h264_cuda_decode,
+    tdav_codec_h264_cuda_sdp_att_match,
+    tdav_codec_h264_cuda_sdp_att_get,
+    tdav_codec_h264_cuda_fmtp_set
+};
+// exported pointer used by the plugin registry ('extern' with initializer: definition with external linkage)
+extern const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp30_plugin_def_t = &tdav_codec_h264_cuda_bp30_plugin_def_s;
+
+
+#endif /* HAVE_CUDA */
diff --git a/tinyDAV/src/codecs/h264/tdav_codec_h264_intel.cxx b/tinyDAV/src/codecs/h264/tdav_codec_h264_intel.cxx
new file mode 100644
index 0000000..49f9e1c
--- /dev/null
+++ b/tinyDAV/src/codecs/h264/tdav_codec_h264_intel.cxx
@@ -0,0 +1,2221 @@
+/*
+* Copyright (C) 2014-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h264_intel.cxx
+* @brief H.264 codec plugin using Intel Media SDK 2014 R2 for Clients (https://software.intel.com/en-us/media-solutions-portal) v1.1 for encoding/decoding.
+* Low latency encoding/decoding: https://software.intel.com/en-us/articles/video-conferencing-features-of-intel-media-software-development-kit
+*/
+#include "tinydav/codecs/h264/tdav_codec_h264_intel.h"
+
+#if HAVE_INTEL_MEDIA_SDK
+
+#include "tinydav/codecs/h264/tdav_codec_h264_common.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tinymedia/tmedia_codec.h"
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_thread.h"
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <mfxvideo++.h>
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "libmfx.lib")
+#endif /* _MSC_VER */
+
+#if !defined(INTEL_DX11_D3D)
+# define INTEL_DX11_D3D 1
+#endif /* INTEL_DX11_D3D */
+
+#if INTEL_DX11_D3D
+#include <d3d11.h>
+#include <dxgi1_2.h>
+#include <atlbase.h>
+# if defined(_MSC_VER)
+# pragma comment(lib, "d3d11.lib")
+# pragma comment(lib, "dxgi.lib")
+# endif /* _MSC_VER */
+
+#endif /* INTEL_DX11_D3D */
+
+#define INTEL_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[MSDK H264 Codec] " FMT, ##__VA_ARGS__)
+#define INTEL_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[MSDK H264 Codec] " FMT, ##__VA_ARGS__)
+#define INTEL_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[MSDK H264 Codec] " FMT, ##__VA_ARGS__)
+#define INTEL_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[MSDK H264 Codec] " FMT, ##__VA_ARGS__)
+
+#define INTEL_ALIGN16(i) ((((i) + 15) >> 4) << 4)               // round up to a multiple of 16
+#define INTEL_ALIGN32(i) (((mfxU32)((i)+31)) & (~ (mfxU32)31))  // round up to a multiple of 32
+#define INTEL_SAFE_RELEASE(X) {if ((X)) { (X)->Release(); (X) = NULL; }}  // COM-style release + NULL
+
+// Evaluate x; on any MFX error, log it and jump to the local 'bail' label.
+#define INTEL_CHECK_STATUS(x) { mfxStatus __status__ = (x); if (__status__ != MFX_ERR_NONE) { INTEL_DEBUG_ERROR("Operation Failed (%d)", __status__); goto bail; } }
+#define INTEL_BREAK(msg) { INTEL_DEBUG_ERROR("%s", msg); goto bail; }
+
+#define INTEL_ENABLE_REALTIME 1  // low-latency tuning (AsyncDepth=1, no B-frames) -- see encoder Open()
+
+// Default MFX implementation: auto-detect, routed through D3D11 when enabled.
+static mfxIMPL __IntelDefaultImpl = MFX_IMPL_AUTO_ANY
+#if INTEL_DX11_D3D
+| MFX_IMPL_VIA_D3D11
+#endif
+;
+static mfxVersion __IntelDefaultVer = { 0, 1 };  // request API version 1.0 (Minor=0, Major=1)
+
+// TODO: Test against FFmpeg, CUDA, OpenH264 and Microsoft implementations
+// TODO: When Bandwidth change (or any other event) Reset() fails
+
+class IntelCodecEncoder;
+class IntelCodecDecoder;
+
+/* Wrapper codec object: one MFX session shared by the encoder and decoder
+ * instances, plus (optionally) the D3D11 device/allocator backing the surfaces. */
+typedef struct tdav_codec_h264_intel_s
+{
+    TDAV_DECLARE_CODEC_H264_COMMON;   // common H.264 codec base (profile, level, packetization, ...)
+
+    MFXVideoSession* mfxSession;      // Media SDK session shared by encoder and decoder
+
+    // DX11_D3D
+#if INTEL_DX11_D3D
+    mfxFrameAllocResponse D3D11SavedAllocResponses[2/*Encode=0, Decode=1*/];
+    mfxFrameAllocator D3D11Allocator; // custom allocator backed by D3D11 textures
+
+    CComPtr<ID3D11Device> pD3D11Device;
+    CComPtr<ID3D11DeviceContext> pD3D11Ctx;
+    CComPtr<IDXGIAdapter>pAdapter;
+    CComPtr<IDXGIFactory1> pDXGIFactory;
+    CComPtr<IDXGIAdapter> hAdapter;
+#endif
+
+    // Encoder
+    struct{
+        IntelCodecEncoder *pInst;
+        int64_t frame_count;     // frames submitted so far
+        tsk_bool_t force_idr;    // request an IDR on the next encode
+        int rotation;            // source rotation in degrees (90/270 swap w/h -- see encoder Open())
+        int neg_width;           // negotiated encode width
+        int neg_height;          // negotiated encode height
+        int neg_fps;             // negotiated frame rate
+        int max_bitrate_bps;     // bandwidth-clamped target bitrate
+    } encoder;
+
+    // decoder
+    struct{
+        IntelCodecDecoder *pInst;
+    } decoder;
+}
+tdav_codec_h264_intel_t;
+
+#if !defined(INTEL_H264_GOP_SIZE_IN_SECONDS)
+# define INTEL_H264_GOP_SIZE_IN_SECONDS 25
+#endif
+
+static int tdav_codec_h264_intel_init(tdav_codec_h264_intel_t* self, profile_idc_t profile);
+static int tdav_codec_h264_intel_deinit(tdav_codec_h264_intel_t* self);
+static int tdav_codec_h264_intel_open_encoder(tdav_codec_h264_intel_t* self);
+static int tdav_codec_h264_intel_close_encoder(tdav_codec_h264_intel_t* self);
+static int tdav_codec_h264_intel_open_decoder(tdav_codec_h264_intel_t* self);
+static int tdav_codec_h264_intel_close_decoder(tdav_codec_h264_intel_t* self);
+
+#if INTEL_DX11_D3D
+#define D3D11_WILL_READ 0x1000   // allocator hint: CPU will read the surface
+#define D3D11_WILL_WRITE 0x2000  // allocator hint: CPU will write the surface
+
+// Per-surface bookkeeping the custom D3D11 allocator stores behind mfxMemId.
+typedef struct {
+    mfxMemId memId;       // the D3D11 texture
+    mfxMemId memIdStage;  // staging resource for CPU access -- presumably NULL when unused; verify in D3D11_SimpleAlloc
+    mfxU16 rw;            // D3D11_WILL_READ / D3D11_WILL_WRITE flags
+} CustomMemId;
+
+// Maps each hardware implementation enum to its adapter index.
+const struct {
+    mfxIMPL impl; // actual implementation
+    mfxU32 adapterID; // device adapter number
+} implTypes[] = {
+    { MFX_IMPL_HARDWARE, 0 },
+    { MFX_IMPL_HARDWARE2, 1 },
+    { MFX_IMPL_HARDWARE3, 2 },
+    { MFX_IMPL_HARDWARE4, 3 }
+};
+
+static mfxStatus D3D11_CreateHWDevice(mfxHDL pthis, mfxSession session, mfxHDL* deviceHandle, HWND hWnd);
+static void D3D11_CleanupHWDevice(mfxHDL pthis);
+static void D3D11_SetHWDeviceContext(mfxHDL pthis, CComPtr<ID3D11DeviceContext> devCtx);
+
+// Intel Media SDK memory allocator entrypoints....
+// - A slightly different allocation procedure is used for encode, decode and VPP
+static mfxStatus D3D11_SimpleAlloc(mfxHDL pthis, mfxFrameAllocRequest *request, mfxFrameAllocResponse *response);
+static mfxStatus D3D11_SimpleLock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr);
+static mfxStatus D3D11_SimpleUnlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr);
+static mfxStatus D3D11_SimpleGethdl(mfxHDL pthis, mfxMemId mid, mfxHDL *handle);
+static mfxStatus D3D11_SimpleFree(mfxHDL pthis, mfxFrameAllocResponse *response);
+#endif /* INTEL_DX11_D3D */
+
+//
+// IntelCodec
+//
+class IntelCodec
+{
+    /* Base class for the Media SDK encoder/decoder wrappers: owns the raw
+     * surface pool, the compressed bitstream buffer and the requested/selected
+     * mfxVideoParam pairs. Subclasses implement Open(). */
+protected:
+    // Protected ctor: instantiate via IntelCodecEncoder / IntelCodecDecoder.
+    IntelCodec(MFXVideoSession* pSession)
+        : m_bOpened(false)
+        , m_pSession(pSession)
+        , n_nNumSurfaces(0)
+        , m_nSurfaceWidth(0)
+        , m_nSurfaceHeight(0)
+        , m_nSurfaceBitsPerPixel(0)
+        , m_nSurfaceSize(0)
+        , m_pSurfaceBuffers(NULL)
+        , m_ppSurfacePtrs(NULL)
+    {
+        memset(&m_sBitstream, 0, sizeof(m_sBitstream));
+        memset(&m_sParamReq, 0, sizeof(m_sParamReq));
+        memset(&m_sParamSel, 0, sizeof(m_sParamSel));
+        memset(&m_sAllocRequest, 0, sizeof(m_sAllocRequest));
+    }
+public:
+    virtual ~IntelCodec()
+    {
+        Close();
+    }
+    // Configure and start the codec; implemented by the encoder/decoder subclass.
+    virtual mfxStatus Open(struct tdav_codec_h264_intel_s* pWrappedCodec) = 0;
+
+    // Release surfaces and the bitstream buffer; safe to call repeatedly.
+    virtual mfxStatus Close()
+    {
+        DeAllocSurfaces();
+        DeAllocateBitstream();
+
+        memset(&m_sAllocRequest, 0, sizeof(m_sAllocRequest));
+
+        m_bOpened = false;
+
+        return MFX_ERR_NONE;
+    }
+
+protected:
+    // Index of the first surface not locked by the SDK, or MFX_ERR_NOT_FOUND.
+    int GetFreeSurfaceIndex()
+    {
+        if (m_ppSurfacePtrs)
+        {
+            for (mfxU16 i = 0; i < n_nNumSurfaces; i++)
+            {
+                if (0 == m_ppSurfacePtrs[i]->Data.Locked)
+                {
+                    return i;
+                }
+            }
+        }
+        return MFX_ERR_NOT_FOUND;
+    }
+
+    // Copy one w x h planar chroma plane from 'src' into the interleaved NV12
+    // UV plane at 'ptr' (offset 0 = U samples, 1 = V samples); 'buf' is a
+    // caller-provided scratch row. Advances 'src' by w*h bytes.
+    mfxStatus ReadPlaneData(mfxU16 w, mfxU16 h, mfxU8 *buf, mfxU8 *ptr, mfxU16 pitch, mfxU16 offset, const mfxU8* &src)
+    {
+        for (mfxU16 i = 0; i < h; i++)
+        {
+            memcpy(buf, src, w);
+            src += w;
+
+            for (mfxU16 j = 0; j < w; j++)
+                ptr[i * pitch + j * 2 + offset] = buf[j];
+        }
+        return MFX_ERR_NONE;
+    }
+
+    // Load one planar I420 frame from 'src' into the NV12 surface at nSurfaceIndex.
+    mfxStatus LoadRawFrame(int nSurfaceIndex, const mfxU8* src)
+    {
+        mfxFrameSurface1* pSurface = (m_ppSurfacePtrs && nSurfaceIndex >= 0 && nSurfaceIndex < n_nNumSurfaces) ? m_ppSurfacePtrs[nSurfaceIndex] : NULL;
+        if (!pSurface)
+        {
+            INTEL_DEBUG_ERROR("Failed to find surface at index=%d", nSurfaceIndex);
+            return MFX_ERR_NOT_FOUND;
+        }
+
+        mfxStatus sts = MFX_ERR_NONE;
+        mfxU16 w, h, i, pitch;
+        mfxU8 *ptr;
+        mfxFrameInfo* pInfo = &pSurface->Info;
+        mfxFrameData* pData = &pSurface->Data;
+
+        // prefer the crop rectangle (actual picture) over the aligned surface size
+        if (pInfo->CropH > 0 && pInfo->CropW > 0) {
+            w = pInfo->CropW;
+            h = pInfo->CropH;
+        }
+        else {
+            w = pInfo->Width;
+            h = pInfo->Height;
+        }
+
+        pitch = pData->Pitch;
+        ptr = pData->Y + pInfo->CropX + pInfo->CropY * pData->Pitch;
+
+        // read luminance plane
+        for (i = 0; i < h; i++)
+        {
+            memcpy(ptr + i * pitch, src, w);
+            src += w;
+        }
+
+        mfxU8 buf[2048]; // maximum supported chroma width for nv12
+        w /= 2;
+        h /= 2;
+        ptr = pData->UV + pInfo->CropX + (pInfo->CropY / 2) * pitch;
+        if (w > 2048)
+            return MFX_ERR_UNSUPPORTED;
+
+        // load U
+        sts = ReadPlaneData(w, h, buf, ptr, pitch, 0, src);
+        if (MFX_ERR_NONE != sts) return sts;
+        // load V
+        sts = ReadPlaneData(w, h, buf, ptr, pitch, 1, src);
+        if (MFX_ERR_NONE != sts) return sts;
+
+        return MFX_ERR_NONE;
+    }
+
+    // Allocate the NV12 surface pool (system memory, or wrap the D3D11 mids
+    // already obtained by the subclass when INTEL_DX11_D3D is enabled).
+    virtual mfxStatus AllocSurfaces(mfxU16 nNumSurfaces, mfxU16 nSurfaceWidth, mfxU16 nSurfaceHeight, const mfxFrameInfo* pcFrameInfo)
+    {
+        // NOTE(review): the bare INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC) calls
+        // below do NOT update 'status', so the bail path reports MFX_ERR_UNKNOWN
+        // instead of MFX_ERR_MEMORY_ALLOC -- confirm this is acceptable.
+        mfxStatus status = MFX_ERR_UNKNOWN;
+
+        INTEL_DEBUG_INFO("Alloc surfaces: num=%u, width=%u, height=%u", nNumSurfaces, nSurfaceWidth, nSurfaceHeight);
+
+        DeAllocSurfaces();
+
+        n_nNumSurfaces = nNumSurfaces;
+        m_nSurfaceWidth = (mfxU16)INTEL_ALIGN32(nSurfaceWidth);
+        m_nSurfaceHeight = (mfxU16)INTEL_ALIGN32(nSurfaceHeight);
+        m_nSurfaceBitsPerPixel = 12; // NV12 format is a 12 bits per pixel format
+        m_nSurfaceSize = m_nSurfaceWidth * m_nSurfaceHeight * m_nSurfaceBitsPerPixel / 8;
+
+#if !INTEL_DX11_D3D
+        // system-memory path: one contiguous buffer backing all surfaces
+        if (!(m_pSurfaceBuffers = (mfxU8 *)new mfxU8[m_nSurfaceSize * n_nNumSurfaces]))
+        {
+            INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC);
+        }
+#endif
+
+        if (!(m_ppSurfacePtrs = new mfxFrameSurface1*[n_nNumSurfaces]))
+        {
+            INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC);
+        }
+        for (mfxU16 i = 0; i < n_nNumSurfaces; i++)
+        {
+            if (!(m_ppSurfacePtrs[i] = new mfxFrameSurface1))
+            {
+                INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC);
+            }
+            memset(m_ppSurfacePtrs[i], 0, sizeof(mfxFrameSurface1));
+            memcpy(&(m_ppSurfacePtrs[i]->Info), pcFrameInfo, sizeof(mfxFrameInfo));
+#if INTEL_DX11_D3D
+            m_ppSurfacePtrs[i]->Data.MemId = m_sD3D11Response.mids[i]; // MID (memory id) represent one D3D NV12 surface
+#else
+            // NV12 layout: luma plane followed by interleaved UV (V = U + 1)
+            m_ppSurfacePtrs[i]->Data.Y = &m_pSurfaceBuffers[m_nSurfaceSize * i];
+            m_ppSurfacePtrs[i]->Data.U = m_ppSurfacePtrs[i]->Data.Y + m_nSurfaceWidth * m_nSurfaceHeight;
+            m_ppSurfacePtrs[i]->Data.V = m_ppSurfacePtrs[i]->Data.U + 1;
+            m_ppSurfacePtrs[i]->Data.Pitch = m_nSurfaceWidth;
+#endif
+        }
+
+        return MFX_ERR_NONE;
+
+    bail:
+        DeAllocSurfaces();
+        return status;
+    }
+
+    // Allocate the compressed bitstream buffer (MaxLength bytes).
+    mfxStatus AllocateBitstream(mfxU32 nMaxLength)
+    {
+        DeAllocateBitstream();
+
+        m_sBitstream.MaxLength = nMaxLength;
+        if (!(m_sBitstream.Data = new mfxU8[nMaxLength]))
+        {
+            INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC);
+        }
+
+        return MFX_ERR_NONE;
+
+    bail:
+        return MFX_ERR_MEMORY_ALLOC;
+    }
+private:
+    // Free the surface descriptor array and (system-memory path) pixel storage.
+    mfxStatus DeAllocSurfaces()
+    {
+        if (m_ppSurfacePtrs)
+        {
+            for (mfxU16 i = 0; i < n_nNumSurfaces; i++)
+            {
+                if (m_ppSurfacePtrs[i])
+                {
+                    delete m_ppSurfacePtrs[i];
+                }
+            }
+            delete[] m_ppSurfacePtrs;
+            m_ppSurfacePtrs = NULL;
+        }
+        n_nNumSurfaces = 0;
+
+        if (m_pSurfaceBuffers)
+        {
+            delete[] m_pSurfaceBuffers;
+            m_pSurfaceBuffers = NULL;
+        }
+
+        m_nSurfaceWidth = 0;
+        m_nSurfaceHeight = 0;
+        m_nSurfaceBitsPerPixel = 0;
+        m_nSurfaceSize = 0;
+
+        return MFX_ERR_NONE;
+    }
+
+    // Free the compressed bitstream buffer and reset the descriptor.
+    mfxStatus DeAllocateBitstream()
+    {
+        if (m_sBitstream.Data)
+        {
+            delete[]m_sBitstream.Data;
+        }
+        memset(&m_sBitstream, 0, sizeof(m_sBitstream));
+
+        return MFX_ERR_NONE;
+    }
+
+
+protected:
+    bool m_bOpened;                      // set by the subclass once Init() succeeded
+    MFXVideoSession* m_pSession;         // not owned
+    mfxU16 n_nNumSurfaces;               // pool size (sic: 'n_' prefix is a typo kept for ABI/diff stability)
+    mfxU16 m_nSurfaceWidth;              // 32-aligned surface width
+    mfxU16 m_nSurfaceHeight;             // 32-aligned surface height
+    mfxU8 m_nSurfaceBitsPerPixel;        // 12 for NV12
+    mfxU32 m_nSurfaceSize;               // bytes per surface
+    mfxU8* m_pSurfaceBuffers; // mfxU8[];
+    mfxFrameSurface1** m_ppSurfacePtrs; // mfxFrameSurface1[]
+    mfxBitstream m_sBitstream;           // compressed input/output buffer
+    mfxVideoParam m_sParamReq; // requested params
+    mfxVideoParam m_sParamSel; // selected params
+    mfxFrameAllocRequest m_sAllocRequest;
+#if INTEL_DX11_D3D
+    mfxFrameAllocResponse m_sD3D11Response;  // mids filled by the D3D11 allocator
+#endif
+};
+
+
+//
+// IntelCodecEncoder
+//
+class IntelCodecEncoder : public IntelCodec
+{
+public:
+ IntelCodecEncoder(MFXVideoSession* pSession)
+ : IntelCodec(pSession)
+ , m_Inst(*pSession)
+ {
+ memset(&m_sFrameCtrl, 0, sizeof(m_sFrameCtrl));
+ }
+ virtual ~IntelCodecEncoder()
+ {
+ Close();
+ }
+
+ virtual mfxStatus Close()
+ {
+ m_Inst.Close();
+ memset(&m_sFrameCtrl, 0, sizeof(m_sFrameCtrl));
+ return IntelCodec::Close();
+ }
+
+ mfxStatus Reset()
+ {
+ if (m_bOpened)
+ {
+ return m_Inst.Reset(&m_sParamSel);
+ }
+ return MFX_ERR_NONE;
+ }
+
+ mfxStatus Open(struct tdav_codec_h264_intel_s* pWrappedCodec)
+ {
+ int32_t max_bw_kpbs;
+ tdav_codec_h264_common_t* pWrappedCodecCommon = (tdav_codec_h264_common_t*)pWrappedCodec;
+ mfxStatus status = MFX_ERR_UNKNOWN;
+ mfxU16 uSelWidth, uSelHeight;
+
+ pWrappedCodec->encoder.neg_width = (pWrappedCodec->encoder.rotation == 90 || pWrappedCodec->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(pWrappedCodec)->out.height : TMEDIA_CODEC_VIDEO(pWrappedCodec)->out.width;
+ pWrappedCodec->encoder.neg_height = (pWrappedCodec->encoder.rotation == 90 || pWrappedCodec->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(pWrappedCodec)->out.width : TMEDIA_CODEC_VIDEO(pWrappedCodec)->out.height;
+ pWrappedCodec->encoder.neg_fps = TMEDIA_CODEC_VIDEO(pWrappedCodec)->out.fps;
+ max_bw_kpbs = TSK_CLAMP(
+ 0,
+ tmedia_get_video_bandwidth_kbps_2(pWrappedCodec->encoder.neg_width, pWrappedCodec->encoder.neg_height, pWrappedCodec->encoder.neg_fps),
+ TMEDIA_CODEC(pWrappedCodec)->bandwidth_max_upload
+ );
+ pWrappedCodec->encoder.max_bitrate_bps = (max_bw_kpbs * 1024);
+
+ INTEL_DEBUG_INFO("neg_width=%d, neg_height=%d, neg_fps=%d, max_bitrate_bps=%d",
+ pWrappedCodec->encoder.neg_width,
+ pWrappedCodec->encoder.neg_height,
+ pWrappedCodec->encoder.neg_fps,
+ pWrappedCodec->encoder.max_bitrate_bps
+ );
+
+ // Initialize encoder parameters
+ memset(&m_sParamReq, 0, sizeof(m_sParamReq));
+ m_sParamReq.mfx.CodecId = MFX_CODEC_AVC;
+ m_sParamReq.mfx.CodecProfile = pWrappedCodecCommon->profile == profile_idc_main ? MFX_PROFILE_AVC_MAIN : MFX_PROFILE_AVC_BASELINE;
+ m_sParamReq.mfx.CodecLevel = (mfxU16)pWrappedCodecCommon->level;
+ // TODO: Update "CodecProfile" based on "common->profile_iop"
+ m_sParamReq.mfx.TargetUsage = MFX_TARGETUSAGE_BALANCED;
+ m_sParamReq.mfx.TargetKbps = max_bw_kpbs;
+ m_sParamReq.mfx.RateControlMethod = MFX_RATECONTROL_CBR;
+ m_sParamReq.mfx.IdrInterval = (pWrappedCodec->encoder.neg_fps * INTEL_H264_GOP_SIZE_IN_SECONDS);
+ m_sParamReq.mfx.FrameInfo.FrameRateExtN = pWrappedCodec->encoder.neg_fps;
+ m_sParamReq.mfx.FrameInfo.FrameRateExtD = 1;
+ m_sParamReq.mfx.FrameInfo.FourCC = MFX_FOURCC_NV12;
+ m_sParamReq.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
+ m_sParamReq.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
+ m_sParamReq.mfx.FrameInfo.CropX = 0;
+ m_sParamReq.mfx.FrameInfo.CropY = 0;
+ m_sParamReq.mfx.FrameInfo.CropW = pWrappedCodec->encoder.neg_width;
+ m_sParamReq.mfx.FrameInfo.CropH = pWrappedCodec->encoder.neg_height;
+ m_sParamReq.mfx.FrameInfo.Width = INTEL_ALIGN16(pWrappedCodec->encoder.neg_width); // must be a multiple of 16
+ m_sParamReq.mfx.FrameInfo.Height = INTEL_ALIGN16(pWrappedCodec->encoder.neg_height); // must be a multiple of 16
+#if INTEL_DX11_D3D
+ m_sParamReq.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY;
+#else
+ m_sParamReq.IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
+#endif
+
+ memset(&m_sOpt2MaxFrameSize, 0, sizeof(m_sOpt2MaxFrameSize));
+ m_sOpt2MaxFrameSize.Header.BufferId = MFX_EXTBUFF_CODING_OPTION2;
+ m_sOpt2MaxFrameSize.Header.BufferSz = sizeof(m_sOpt2MaxFrameSize);
+ m_sOpt2MaxFrameSize.MaxSliceSize = (H264_RTP_PAYLOAD_SIZE - 100);
+ m_sOpt2MaxFrameSize.RepeatPPS = MFX_CODINGOPTION_OFF;
+ m_pExtendedBuffers[0] = (mfxExtBuffer*)&m_sOpt2MaxFrameSize;
+#if INTEL_ENABLE_REALTIME
+ m_sParamReq.AsyncDepth = 1; // limits internal frame buffering
+ m_sParamReq.mfx.GopRefDist = 1; // No B-Frames
+ m_sParamReq.mfx.NumRefFrame = 1;
+ memset(&m_sOptLowLatency, 0, sizeof(m_sOptLowLatency));
+ m_sOptLowLatency.Header.BufferId = MFX_EXTBUFF_CODING_OPTION;
+ m_sOptLowLatency.Header.BufferSz = sizeof(m_sOptLowLatency);
+ m_sOptLowLatency.MaxDecFrameBuffering = 1;
+ m_pExtendedBuffers[1] = (mfxExtBuffer*)&m_sOptLowLatency;
+ m_sParamReq.NumExtParam = 2;
+#else
+ m_sParamReq.NumExtParam = 1;
+#endif
+ m_sParamReq.ExtParam = m_pExtendedBuffers;
+
+ // Check parameters
+ status = m_Inst.Query(&m_sParamReq, &m_sParamReq);
+ if (status != MFX_ERR_NONE && status != MFX_WRN_INCOMPATIBLE_VIDEO_PARAM /* Best one will be selected by the encoder */) {
+ INTEL_CHECK_STATUS(status);
+ }
+ if (m_sOpt2MaxFrameSize.MaxSliceSize == 0)
+ {
+ INTEL_DEBUG_INFO("The encoder doesn't support setting 'MaxSliceSize' :(");
+ }
+
+ // Query number required surfaces for encoder
+ memset(&m_sAllocRequest, 0, sizeof(m_sAllocRequest));
+ INTEL_CHECK_STATUS(status = m_Inst.QueryIOSurf(&m_sParamReq, &m_sAllocRequest));
+ INTEL_DEBUG_INFO("nEncSurfNum = %hu", m_sAllocRequest.NumFrameSuggested);
+#if INTEL_DX11_D3D
+ m_sAllocRequest.Type |= D3D11_WILL_WRITE; // Hint to DX11 memory handler that application will write data to input surfaces
+#endif
+
+ // Allocate surfaces for encoder
+#if INTEL_DX11_D3D
+ INTEL_CHECK_STATUS(status = pWrappedCodec->D3D11Allocator.Alloc(pWrappedCodec->D3D11Allocator.pthis, &m_sAllocRequest, &m_sD3D11Response));
+ if (m_sD3D11Response.NumFrameActual == 0)
+ {
+ INTEL_CHECK_STATUS(status = MFX_ERR_UNKNOWN);
+ }
+ INTEL_CHECK_STATUS(status = AllocSurfaces(m_sD3D11Response.NumFrameActual, m_sAllocRequest.Info.Width, m_sAllocRequest.Info.Height, &m_sParamReq.mfx.FrameInfo));
+#else
+ INTEL_CHECK_STATUS(status = AllocSurfaces(m_sAllocRequest.NumFrameSuggested, m_sAllocRequest.Info.Width, m_sAllocRequest.Info.Height, &m_sParamReq.mfx.FrameInfo));
+#endif
+
+ // Initialize the Media SDK encoder
+ status = m_Inst.Init(&m_sParamReq);
+ if (status != MFX_ERR_NONE && status != MFX_WRN_PARTIAL_ACCELERATION) {
+ INTEL_CHECK_STATUS(status);
+ }
+ INTEL_DEBUG_INFO("Encoder->Init() returned: %d", status);
+
+ // Retrieve video parameters selected by encoder.
+ memset(&m_sParamSel, 0, sizeof(m_sParamSel));
+ INTEL_CHECK_STATUS(status = m_Inst.GetVideoParam(&m_sParamSel));
+ INTEL_DEBUG_INFO("sel_width=%u.crop=%u, sel_height=%u.crop=%u, sel_fps=%u/%u",
+ m_sParamSel.mfx.FrameInfo.Width, m_sParamSel.mfx.FrameInfo.CropW,
+ m_sParamSel.mfx.FrameInfo.Height, m_sParamSel.mfx.FrameInfo.CropH,
+ m_sParamReq.mfx.FrameInfo.FrameRateExtN,
+ m_sParamReq.mfx.FrameInfo.FrameRateExtD
+ );
+ if (m_sParamSel.mfx.FrameInfo.CropW > 0 && m_sParamSel.mfx.FrameInfo.CropH > 0)
+ {
+ uSelWidth = m_sParamSel.mfx.FrameInfo.CropW;
+ uSelHeight = m_sParamSel.mfx.FrameInfo.CropH;
+ }
+ else
+ {
+ uSelWidth = m_sParamSel.mfx.FrameInfo.Width;
+ uSelHeight = m_sParamSel.mfx.FrameInfo.Height;
+ }
+ if (pWrappedCodec->encoder.neg_width != uSelWidth || pWrappedCodec->encoder.neg_height != uSelHeight) {
+ INTEL_DEBUG_INFO("Encoder neg size <> sel size: %dx%d<>%dx%d", pWrappedCodec->encoder.neg_width, pWrappedCodec->encoder.neg_height, uSelWidth, uSelHeight);
+ pWrappedCodec->encoder.neg_width = uSelWidth;
+ pWrappedCodec->encoder.neg_height = uSelHeight;
+ TMEDIA_CODEC_VIDEO(pWrappedCodec)->out.width = pWrappedCodec->encoder.neg_width;
+ TMEDIA_CODEC_VIDEO(pWrappedCodec)->out.height = pWrappedCodec->encoder.neg_height;
+ }
+
+ // Allocate BitStream
+ INTEL_CHECK_STATUS(status = AllocateBitstream(m_sParamSel.mfx.BufferSizeInKB * 1000));
+
+ m_bOpened = true;
+ return MFX_ERR_NONE;
+
+ bail:
+ Close();
+ return status;
+ }
+
+ // Scale the current target bitrate up (x3/2) or down (x2/3), clamp it to [0, max]
+ // and apply it to the running encoder via MFXVideoENCODE::Reset().
+ // @param bUp true to increase the bitrate, false to decrease it
+ // @param max upper bound in kbps (the negotiated bandwidth_max_upload)
+ // @return MFX_ERR_NONE on success, otherwise the Reset() error code
+ mfxStatus UpdateBandwidth(bool bUp, mfxU16 max)
+ {
+ if (bUp)
+ {
+ // grow by 50%: kbps * 3 / 2
+ m_sParamSel.mfx.TargetKbps = TSK_CLAMP(0, (mfxU16)((m_sParamSel.mfx.TargetKbps * 3) >> 1), max);
+ }
+ else
+ {
+ // shrink to two thirds: kbps * 2 / 3
+ m_sParamSel.mfx.TargetKbps = TSK_CLAMP(0, (mfxU16)((m_sParamSel.mfx.TargetKbps << 1) / 3), max);
+ }
+ // keep the requested parameter set in sync with the selected one
+ m_sParamReq.mfx.TargetKbps = m_sParamSel.mfx.TargetKbps;
+ INTEL_DEBUG_INFO("Setting new target bandwidth to %ukbps", m_sParamSel.mfx.TargetKbps);
+ return m_Inst.Reset(&m_sParamSel);
+ }
+
+ // Clamp the current target bitrate to [0, max] and apply it via Reset().
+ // Called when a new upper bound is learned (SDP "bw_kbps" or user configuration).
+ mfxStatus SetMaxBandwidth(mfxU16 max)
+ {
+ m_sParamSel.mfx.TargetKbps = TSK_CLAMP(0, m_sParamSel.mfx.TargetKbps, max);
+ // keep the requested parameter set in sync with the selected one
+ m_sParamReq.mfx.TargetKbps = m_sParamSel.mfx.TargetKbps;
+ INTEL_DEBUG_INFO("Setting new target bandwidth to %ukbps", m_sParamSel.mfx.TargetKbps);
+ return m_Inst.Reset(&m_sParamSel);
+ }
+
+ mfxStatus Encode(struct tmedia_codec_s* pWrappedCodec, const mfxU8* pcInDataPtr, mfxU32 nInDataSize)
+ {
+     // Encode one raw frame (must be neg_width*neg_height*3/2 bytes) and hand every
+     // produced access unit to the RTP packetizer (tdav_codec_h264_rtp_encap).
+     // FIX: the original always returned MFX_ERR_NONE from 'bail', masking encoder
+     // failures from the caller; the real status is now propagated (MFX_ERR_MORE_DATA
+     // after draining counts as success).
+     tdav_codec_h264_intel_t* pWrappedCodecH264 = (tdav_codec_h264_intel_t*)pWrappedCodec;
+     tdav_codec_h264_common_t* pWrappedCodecCommon = (tdav_codec_h264_common_t*)pWrappedCodec;
+     mfxU32 nInDataXSize;
+     tsk_bool_t bSendIDR;
+     int nEncSurfIdx = 0;
+     mfxSyncPoint syncp;
+     mfxStatus status = MFX_ERR_UNKNOWN;
+
+     if (!pWrappedCodec || !pcInDataPtr || !nInDataSize) {
+         INTEL_CHECK_STATUS(status = MFX_ERR_NULL_PTR);
+     }
+     if (!m_bOpened) {
+         INTEL_CHECK_STATUS(status = MFX_ERR_NOT_INITIALIZED);
+     }
+
+     // Guard: reject frames whose size does not match the negotiated dimensions (w*h*3/2).
+     nInDataXSize = (pWrappedCodecH264->encoder.neg_width * pWrappedCodecH264->encoder.neg_height * 3) >> 1;
+     if (nInDataXSize != nInDataSize)
+     {
+         INTEL_DEBUG_ERROR("Invalid size: %u<>%u", nInDataXSize, nInDataSize);
+         status = MFX_ERR_UNKNOWN; // make the failure visible to the caller
+         goto bail;
+     }
+
+     // IDR on the very first frame or when explicitly requested (e.g. receiver feedback).
+     bSendIDR = (pWrappedCodecH264->encoder.frame_count++ == 0 || pWrappedCodecH264->encoder.force_idr);
+
+     nEncSurfIdx = GetFreeSurfaceIndex();
+     if (MFX_ERR_NOT_FOUND == nEncSurfIdx)
+     {
+         INTEL_CHECK_STATUS(status = MFX_ERR_MEMORY_ALLOC);
+     }
+
+     // Surface locking required when read/write D3D surfaces
+#if INTEL_DX11_D3D
+     INTEL_CHECK_STATUS(status = pWrappedCodecH264->D3D11Allocator.Lock(pWrappedCodecH264->D3D11Allocator.pthis, m_ppSurfacePtrs[nEncSurfIdx]->Data.MemId, &(m_ppSurfacePtrs[nEncSurfIdx]->Data)));
+#endif
+
+     INTEL_CHECK_STATUS(status = LoadRawFrame(nEncSurfIdx, pcInDataPtr));
+
+#if INTEL_DX11_D3D
+     INTEL_CHECK_STATUS(status = pWrappedCodecH264->D3D11Allocator.Unlock(pWrappedCodecH264->D3D11Allocator.pthis, m_ppSurfacePtrs[nEncSurfIdx]->Data.MemId, &(m_ppSurfacePtrs[nEncSurfIdx]->Data)));
+#endif
+
+     m_sFrameCtrl.FrameType = bSendIDR ? (MFX_FRAMETYPE_I | MFX_FRAMETYPE_REF | MFX_FRAMETYPE_IDR) : MFX_FRAMETYPE_UNKNOWN;
+
+     //
+     // Stage 1: Main encoding loop
+     //
+     do
+     {
+         for (;;)
+         {
+             // Encode a frame asychronously (returns immediately)
+             status = m_Inst.EncodeFrameAsync(&m_sFrameCtrl, m_ppSurfacePtrs[nEncSurfIdx], &m_sBitstream, &syncp);
+
+             if (MFX_ERR_NONE < status && !syncp) // Repeat the call if warning and no output
+             {
+                 if (MFX_WRN_DEVICE_BUSY == status)
+                 {
+                     tsk_thread_sleep(1); // Wait if device is busy, then repeat the same call
+                 }
+             }
+             else if (MFX_ERR_NONE < status && syncp)
+             {
+                 status = MFX_ERR_NONE; // Ignore warnings if output is available
+                 break;
+             }
+             else if (MFX_ERR_NOT_ENOUGH_BUFFER == status)
+             {
+                 // Allocate more bitstream buffer memory here if needed...
+                 break;
+             }
+             else
+             {
+                 if (status != MFX_ERR_MORE_DATA)
+                 {
+                     INTEL_CHECK_STATUS(status);
+                 }
+                 break;
+             }
+         }
+         if (MFX_ERR_NONE == status)
+         {
+             // FIX: propagate SyncOperation failures through 'status' (was not assigned before).
+             INTEL_CHECK_STATUS(status = m_pSession->SyncOperation(syncp, 60000)); // Wait until encoded frame is ready
+             if (m_sBitstream.DataLength > 0)
+             {
+                 tdav_codec_h264_rtp_encap(pWrappedCodecCommon, (const uint8_t*)(m_sBitstream.Data + m_sBitstream.DataOffset), (tsk_size_t)m_sBitstream.DataLength);
+                 m_sBitstream.DataLength = 0;
+                 pWrappedCodecH264->encoder.force_idr = tsk_false; // reset
+             }
+         }
+     } while (0);
+
+     //
+     // Stage 2: Retrieve the buffered encoded frames
+     //
+     while (MFX_ERR_NONE <= status)
+     {
+         for (;;)
+         {
+             // Encode a frame asychronously (returns immediately)
+             status = m_Inst.EncodeFrameAsync(&m_sFrameCtrl, NULL, &m_sBitstream, &syncp);
+
+             if (MFX_ERR_NONE < status && !syncp) // Repeat the call if warning and no output
+             {
+                 if (MFX_WRN_DEVICE_BUSY == status)
+                 {
+                     tsk_thread_sleep(1); // Wait if device is busy, then repeat the same call
+                 }
+             }
+             else if (MFX_ERR_NONE < status && syncp)
+             {
+                 status = MFX_ERR_NONE; // Ignore warnings if output is available
+                 break;
+             }
+             else
+             {
+                 break;
+             }
+         }
+
+         if (MFX_ERR_NONE == status)
+         {
+             // FIX: propagate SyncOperation failures through 'status' (was not assigned before).
+             INTEL_CHECK_STATUS(status = m_pSession->SyncOperation(syncp, 60000)); // Wait until encoded frame is ready
+             if (m_sBitstream.DataLength > 0)
+             {
+                 tdav_codec_h264_rtp_encap(pWrappedCodecCommon, (const uint8_t*)(m_sBitstream.Data + m_sBitstream.DataOffset), (tsk_size_t)m_sBitstream.DataLength);
+                 m_sBitstream.DataLength = 0;
+                 pWrappedCodecH264->encoder.force_idr = tsk_false; // reset
+             }
+         }
+     }
+
+     if (MFX_ERR_MORE_DATA == status)
+     {
+         status = MFX_ERR_NONE; // encoder fully drained: not an error
+     }
+
+ bail:
+     return status;
+ }
+
+private:
+ MFXVideoENCODE m_Inst; // Media SDK encoder bound to the shared session
+ mfxEncodeCtrl m_sFrameCtrl; // per-frame control (used to force IDR frames in Encode())
+ mfxExtCodingOption m_sOptLowLatency; // low-latency options (only wired when INTEL_ENABLE_REALTIME)
+ mfxExtCodingOption2 m_sOpt2MaxFrameSize; // slice-size cap so NAL units fit in one RTP payload
+ mfxExtBuffer* m_pExtendedBuffers[2]; // Not allocated
+};
+
+
+//
+// IntelCodecDecoder
+// H.264 decoder wrapper around MFXVideoDECODE. Reassembles RTP payloads into access
+// units (see Decode()) and lazily (re)initializes the decoder when SPS/PPS is seen.
+//
+class IntelCodecDecoder : public IntelCodec
+{
+public:
+ IntelCodecDecoder(MFXVideoSession* pSession)
+     : IntelCodec(pSession)
+     , m_Inst(*pSession)
+     , m_pAccumulatorPtr(NULL)
+     , m_nAccumulatorSize(0)
+     , m_nAccumulatorPos(0)
+     , m_nLastRtpTimestamp(0) // FIX: was never initialized but is read in Decode() on the first packet
+     , m_bInit(false)
+ {
+ }
+ // Releases the decoder, the surfaces and the NAL accumulator via Close().
+ virtual ~IntelCodecDecoder()
+ {
+ Close();
+ }
+
+ virtual mfxStatus Close()
+ {
+     // Tear down the Media SDK decoder and drop the NAL accumulator, then let
+     // the base class release the shared resources (surfaces, bitstream).
+     m_Inst.Close();
+     TSK_FREE(m_pAccumulatorPtr);
+     m_nAccumulatorPos = 0;
+     m_nAccumulatorSize = 0;
+     m_bInit = false;
+     return IntelCodec::Close();
+ }
+
+ mfxStatus Open(struct tdav_codec_h264_intel_s* pWrappedCodec)
+ {
+     // Nothing to allocate up-front: the real initialization happens lazily each
+     // time an SPS:PPS header is decoded (see DecodeFrame()). Just log and mark open.
+     INTEL_DEBUG_INFO("Decoder.Open width=%d, height=%d, fps=%d",
+                      TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.width,
+                      TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.height,
+                      TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.fps
+                      );
+     m_bOpened = true;
+     return MFX_ERR_NONE;
+ }
+
+ mfxU32 Decode(struct tmedia_codec_s* pWrappedCodec, const mfxU8* pcInDataPtr, mfxU32 nInDataSize, void **ppOutDataPtr, tsk_size_t *pOutDataMaxSize, const trtp_rtp_header_t* pcRtpHdr)
+ {
+     // Depayload one RTP packet, accumulate the NAL unit(s) and, on end-of-unit,
+     // decode the access unit and copy the picture (planar I420) into *ppOutDataPtr
+     // (grown via tsk_realloc when too small).
+     // @return number of bytes written into *ppOutDataPtr; 0 when no frame is ready or on error.
+     // FIX: the accumulator realloc no longer overwrites m_pAccumulatorPtr directly
+     // (the old block leaked when tsk_realloc failed).
+     mfxU32 nRetSize = 0, nOutXSize;
+     mfxStatus status = MFX_ERR_NONE;
+     tsk_bool_t append_scp, end_of_unit;
+     tsk_bool_t sps_or_pps;
+     const uint8_t* pay_ptr = tsk_null;
+     tsk_size_t pay_size = 0, size_to_copy = 0;
+     bool bGotFrame = false;
+     mfxFrameSurface1* pmfxOutSurface = NULL;
+     // Hard cap on the accumulator; >>3 instead of >>1 (not an error): compressed data.
+     static const tsk_size_t xmax_size = (3840 * 2160 * 3) >> 3;
+     static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX);
+
+     tdav_codec_h264_intel_t* pWrappedCodecH264 = (tdav_codec_h264_intel_t*)pWrappedCodec;
+
+     if (!pWrappedCodec || !pcInDataPtr || !nInDataSize || !ppOutDataPtr)
+     {
+         INTEL_CHECK_STATUS(MFX_ERR_NULL_PTR);
+     }
+     if (!m_bOpened)
+     {
+         INTEL_CHECK_STATUS(MFX_ERR_NOT_INITIALIZED);
+     }
+
+     /* 5.3. NAL Unit Octet Usage
+       +---------------+
+       |0|1|2|3|4|5|6|7|
+       +-+-+-+-+-+-+-+-+
+       |F|NRI| Type |
+       +---------------+
+     */
+     if (pcInDataPtr[0] & 0x80) // F ?== 1 => corrupted NAL unit
+     {
+         /* reset accumulator */
+         m_nAccumulatorPos = 0;
+         INTEL_CHECK_STATUS(status = MFX_ERR_UNDEFINED_BEHAVIOR);
+     }
+
+     // New frame? (RTP timestamp changed) -> drop any partially reassembled frame.
+     if (m_nLastRtpTimestamp != pcRtpHdr->timestamp)
+     {
+         m_nAccumulatorPos = 0;
+         m_nLastRtpTimestamp = pcRtpHdr->timestamp;
+     }
+
+     /* get payload */
+     if ((tdav_codec_h264_get_pay(pcInDataPtr, nInDataSize, (const void**)&pay_ptr, &pay_size, &append_scp, &end_of_unit) != 0) || !pay_ptr || !pay_size)
+     {
+         INTEL_BREAK("Depayloader failed to get H.264 content");
+     }
+#if 1 // TODO: MSDK cannot decode slices
+     end_of_unit = pcRtpHdr->marker;
+#endif
+     size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0);
+     // whether it's SPS or PPS (append_scp is false for subsequent FUA chuncks)
+     sps_or_pps = append_scp && pay_ptr && ((pay_ptr[0] & 0x1F) == 7 || (pay_ptr[0] & 0x1F) == 8);
+
+     // start-accumulator
+     if (!m_pAccumulatorPtr)
+     {
+         if (size_to_copy > xmax_size)
+         {
+             INTEL_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size);
+             m_nAccumulatorPos = 0;
+             return 0;
+         }
+         if (!(m_pAccumulatorPtr = (mfxU8*)tsk_calloc(size_to_copy, sizeof(mfxU8))))
+         {
+             INTEL_DEBUG_ERROR("Failed to allocated new buffer");
+             m_nAccumulatorPos = 0;
+             return 0;
+         }
+         m_nAccumulatorSize = (mfxU32)size_to_copy;
+     }
+     if ((m_nAccumulatorPos + size_to_copy) >= xmax_size)
+     {
+         INTEL_DEBUG_ERROR("BufferOverflow");
+         m_nAccumulatorPos = 0;
+         return 0;
+     }
+     if ((m_nAccumulatorPos + size_to_copy) > m_nAccumulatorSize)
+     {
+         // FIX: use a temporary so the old buffer can be freed if realloc fails (was leaked).
+         mfxU8* pNewPtr = (mfxU8*)tsk_realloc(m_pAccumulatorPtr, (m_nAccumulatorPos + size_to_copy));
+         if (!pNewPtr)
+         {
+             INTEL_DEBUG_ERROR("Failed to reallocated new buffer");
+             TSK_FREE(m_pAccumulatorPtr);
+             m_nAccumulatorPos = 0;
+             m_nAccumulatorSize = 0;
+             return 0;
+         }
+         m_pAccumulatorPtr = pNewPtr;
+         m_nAccumulatorSize = (mfxU32)(m_nAccumulatorPos + size_to_copy);
+     }
+
+     if (append_scp)
+     {
+         memcpy(&m_pAccumulatorPtr[m_nAccumulatorPos], H264_START_CODE_PREFIX, start_code_prefix_size);
+         m_nAccumulatorPos += (mfxU32)start_code_prefix_size;
+     }
+     memcpy(&m_pAccumulatorPtr[m_nAccumulatorPos], pay_ptr, pay_size);
+     m_nAccumulatorPos += (mfxU32)pay_size;
+     // end-accumulator
+
+     if (/*rtp_hdr->marker*/end_of_unit)
+     {
+         /* decode the picture */
+         mfxU32 nOutWidth, nOutHeight;
+
+         // Decode a Unit
+         status = DecodeFrame(pWrappedCodecH264, m_pAccumulatorPtr, m_nAccumulatorPos, !!sps_or_pps, &pmfxOutSurface, bGotFrame);
+         if (status != MFX_ERR_NONE)
+         {
+             INTEL_DEBUG_WARN("DecodeFrame failed: %d", status);
+             goto bail;
+         }
+
+         // Do we have a complete frame?
+         if (!bGotFrame || !pmfxOutSurface)
+         {
+             goto bail;
+         }
+
+#if INTEL_DX11_D3D
+         INTEL_CHECK_STATUS(status = pWrappedCodecH264->D3D11Allocator.Lock(pWrappedCodecH264->D3D11Allocator.pthis, pmfxOutSurface->Data.MemId, &(pmfxOutSurface->Data)));
+#endif
+         if (!pmfxOutSurface->Data.Y || !pmfxOutSurface->Data.U || !pmfxOutSurface->Data.V)
+         {
+#if INTEL_DX11_D3D
+             INTEL_CHECK_STATUS(status = pWrappedCodecH264->D3D11Allocator.Unlock(pWrappedCodecH264->D3D11Allocator.pthis, pmfxOutSurface->Data.MemId, &(pmfxOutSurface->Data)));
+#endif
+             goto bail;
+         }
+
+         // Prefer the crop (visible) rectangle over the aligned surface size.
+         if (pmfxOutSurface->Info.CropW > 0 && pmfxOutSurface->Info.CropH > 0)
+         {
+             nOutWidth = pmfxOutSurface->Info.CropW;
+             nOutHeight = pmfxOutSurface->Info.CropH;
+         }
+         else
+         {
+             nOutWidth = pmfxOutSurface->Info.Width;
+             nOutHeight = pmfxOutSurface->Info.Height;
+         }
+
+         nOutXSize = (nOutWidth * nOutHeight * 3) >> 1; // I420
+         /* IDR ? -> notify the session through the decode callback */
+         if (((pay_ptr[0] & 0x1F) == 0x05) && TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.callback)
+         {
+             INTEL_DEBUG_INFO("Decoded H.264 IDR");
+             TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.result.type = tmedia_video_decode_result_type_idr;
+             TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.result.proto_hdr = pcRtpHdr;
+             TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.callback(&TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.result);
+         }
+         /* fill out: grow the caller's buffer when too small */
+         if (*pOutDataMaxSize < nOutXSize)
+         {
+             if ((*ppOutDataPtr = tsk_realloc(*ppOutDataPtr, nOutXSize)))
+             {
+                 *pOutDataMaxSize = nOutXSize;
+             }
+             else
+             {
+                 *pOutDataMaxSize = 0;
+                 return 0;
+             }
+         }
+         TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.width = nOutWidth;
+         TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.height = nOutHeight;
+
+         /* layout picture: NV12 surface -> planar I420 in the output buffer */
+         INTEL_CHECK_STATUS(status = IntelCodecDecoder::LayoutPicture(pmfxOutSurface, (mfxU8 *)*ppOutDataPtr));
+         nRetSize = nOutXSize;
+#if INTEL_DX11_D3D
+         INTEL_CHECK_STATUS(status = pWrappedCodecH264->D3D11Allocator.Unlock(pWrappedCodecH264->D3D11Allocator.pthis, pmfxOutSurface->Data.MemId, &(pmfxOutSurface->Data)));
+#endif
+     } // else if(rtp_hdr->marker)
+
+ bail:
+     if (end_of_unit)
+     {
+         /* reset accumulator */
+         m_nAccumulatorPos = 0;
+     }
+
+     if (status != MFX_ERR_NONE)
+     {
+         INTEL_DEBUG_INFO("Failed to decode the buffer with error code =%d, size=%u, append=%s", status, m_nAccumulatorPos, append_scp ? "yes" : "no");
+         if (TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.callback)
+         {
+             TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.result.type = tmedia_video_decode_result_type_error;
+             TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.result.proto_hdr = pcRtpHdr;
+             TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.callback(&TMEDIA_CODEC_VIDEO(pWrappedCodec)->in.result);
+         }
+     }
+     return nRetSize;
+ }
+
+private:
+#if 0
+ static mfxStatus WriteSection(mfxU8* plane, mfxU16 factor, mfxU16 chunksize, mfxFrameInfo *pInfo, mfxFrameData *pData, mfxU32 i, mfxU32 j, mfxU8 *pDstPtr)
+ {
+ memcpy(pDstPtr, plane + (pInfo->CropY * pData->Pitch / factor + pInfo->CropX) + i * pData->Pitch + j, chunksize);
+ return MFX_ERR_NONE;
+ }
+#else
+#define WriteSection(_plane, _factor, _chunksize, _pInfo, _pData, _i, _j, _pDstPtr) \
+ memcpy((_pDstPtr), (_plane) + ((_pInfo)->CropY * (_pData)->Pitch / (_factor) + (_pInfo)->CropX) + (_i) * (_pData)->Pitch + (_j), (_chunksize))
+#define WriteSection1(_plane, _factor, _pInfo, _pData, _i, _j, _pDstPtr) \
+ *(_pDstPtr) = *((_plane) + ((_pInfo)->CropY * (_pData)->Pitch / (_factor) + (_pInfo)->CropX) + (_i) * (_pData)->Pitch + (_j));
+#endif
+
+ // Copy a decoded NV12 surface into a contiguous destination buffer.
+ // The active (#if 1) branch writes planar I420: the full Y plane first, then all
+ // U samples, then all V samples, honoring the crop rectangle and the surface pitch.
+ // The disabled (#elif) branch would keep the NV12 interleaved UV layout instead.
+ static mfxStatus LayoutPicture(mfxFrameSurface1 *pSurface, mfxU8 *pDstPtr)
+ {
+#if 1 // ->YUV420
+ mfxFrameInfo *pInfo = &pSurface->Info;
+ mfxFrameData *pData = &pSurface->Data;
+ mfxU32 i, j, h, w;
+
+ // Prefer the crop (visible) size over the 16-aligned surface size.
+ if (pSurface->Info.CropW > 0 && pSurface->Info.CropH > 0)
+ {
+ w = pSurface->Info.CropW;
+ h = pSurface->Info.CropH;
+ }
+ else
+ {
+ w = pSurface->Info.Width;
+ h = pSurface->Info.Height;
+ }
+
+ // Luma plane: copy w bytes per row, pitch-aware.
+ for (i = 0; i < h; i++)
+ {
+ WriteSection(pData->Y, 1, w, pInfo, pData, i, 0, pDstPtr);
+ pDstPtr += w;
+ }
+
+ // Chroma is vertically subsampled; NV12 interleaves U and V (U0 V0 U1 V1 ...).
+ h >>= 1;
+ // U plane: even offsets of each interleaved UV row.
+ for (i = 0; i < h; i++)
+ {
+ for (j = 0; j < w; j += 2)
+ {
+ WriteSection1(pData->UV, 2, pInfo, pData, i, j, pDstPtr);
+ pDstPtr += 1;
+ }
+ }
+
+ // V plane: odd offsets of each interleaved UV row.
+ for (i = 0; i < h; i++)
+ {
+ for (j = 1; j < w; j += 2)
+ {
+ WriteSection1(pData->UV, 2, pInfo, pData, i, j, pDstPtr);
+ pDstPtr += 1;
+ }
+ }
+#elif 1 // ->NV12
+ mfxFrameInfo *pInfo = &pSurface->Info;
+ mfxFrameData *pData = &pSurface->Data;
+ mfxU32 i, j, h, w;
+
+ if (pSurface->Info.CropW > 0 && pSurface->Info.CropH > 0)
+ {
+ w = pSurface->Info.CropW;
+ h = pSurface->Info.CropH;
+ }
+ else
+ {
+ w = pSurface->Info.Width;
+ h = pSurface->Info.Height;
+ }
+
+ for (i = 0; i < h; i++)
+ {
+ WriteSection(pData->Y, 1, w, pInfo, pData, i, 0, pDstPtr);
+ pDstPtr += w;
+ }
+
+ h >>= 1;
+ for (i = 0; i < h; i++)
+ {
+ for (j = 0; j < w; j += 2)
+ {
+ WriteSection1(pData->UV, 2, pInfo, pData, i, j, &pDstPtr[0]);
+ WriteSection1(pData->UV, 2, pInfo, pData, i, j + 1, &pDstPtr[1]);
+ pDstPtr += 2;
+ }
+ }
+#endif
+
+ return MFX_ERR_NONE;
+ }
+
+ mfxStatus DecodeFrame(struct tdav_codec_h264_intel_s* pWrappedCodec, const mfxU8* pcInDataPtr, mfxU32 nInDataSize, bool bSpsOrPps, mfxFrameSurface1** ppmfxOutSurface, bool &bGotFrame)
+ {
+     // Feed one complete access unit to the Media SDK decoder. (Re)initializes the
+     // decoder on SPS/PPS or on first use (DecodeHeader + surface allocation + Init),
+     // then runs the main decoding loop and drains the buffered frames.
+     // On success *ppmfxOutSurface points to the decoded surface and bGotFrame is true.
+     mfxStatus status = MFX_ERR_NONE;
+     // FIX: was uninitialized; when the loop is entered with a warning status the
+     // DecodeFrameAsync() call below could read a garbage surface index (UB).
+     int nSurfaceIndex = 0;
+     mfxSyncPoint syncp;
+     bGotFrame = false;
+     *ppmfxOutSurface = NULL;
+     mfxFrameSurface1* pmfxOutSurface = NULL;
+
+     // NOTE(review): this test uses DataLength (bytes currently held), not the allocated
+     // capacity; AllocateBitstream() is assumed to guarantee enough room — confirm.
+     if (m_sBitstream.DataLength < nInDataSize)
+     {
+         INTEL_CHECK_STATUS(status = AllocateBitstream(nInDataSize));
+     }
+     memcpy(m_sBitstream.Data, pcInDataPtr, nInDataSize);
+     m_sBitstream.DataOffset = 0;
+     m_sBitstream.DataLength = nInDataSize;
+     m_sBitstream.DataFlag = MFX_BITSTREAM_COMPLETE_FRAME;
+
+     if (bSpsOrPps || !m_bInit)
+     {
+         // (Re)configure the decoder from the in-band SPS/PPS.
+         memset(&m_sParamReq, 0, sizeof(m_sParamReq));
+         m_sParamReq.mfx.CodecId = MFX_CODEC_AVC;
+         m_sParamReq.mfx.FrameInfo.FourCC = MFX_FOURCC_NV12;
+         m_sParamReq.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
+#if INTEL_DX11_D3D
+         m_sParamReq.IOPattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
+#else
+         m_sParamReq.IOPattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
+#endif
+         m_sParamReq.AsyncDepth = 1; // Low latency: limits internal frame buffering
+
+         status = m_Inst.DecodeHeader(&m_sBitstream, &m_sParamReq);
+         if (status == MFX_WRN_PARTIAL_ACCELERATION)
+         {
+             status = MFX_ERR_NONE;
+         }
+         INTEL_CHECK_STATUS(status);
+
+         memcpy(&m_sParamSel, &m_sParamReq, sizeof(m_sParamSel));
+
+         // Check parameters
+         status = m_Inst.Query(&m_sParamReq, &m_sParamReq);
+         if (status != MFX_ERR_NONE && status != MFX_WRN_INCOMPATIBLE_VIDEO_PARAM /* Best one will be selected by the decoder */)
+         {
+             INTEL_CHECK_STATUS(status);
+         }
+
+         // Query number required surfaces for decoder
+         memset(&m_sAllocRequest, 0, sizeof(m_sAllocRequest));
+         INTEL_CHECK_STATUS(status = m_Inst.QueryIOSurf(&m_sParamReq, &m_sAllocRequest));
+#if INTEL_DX11_D3D
+         m_sAllocRequest.Type |= D3D11_WILL_READ; // Hint to DX11 memory handler that application will read data from output surfaces
+#endif
+
+         // Allocate surfaces for decoder
+#if INTEL_DX11_D3D
+         INTEL_CHECK_STATUS(status = pWrappedCodec->D3D11Allocator.Alloc(pWrappedCodec->D3D11Allocator.pthis, &m_sAllocRequest, &m_sD3D11Response));
+         if (m_sD3D11Response.NumFrameActual == 0)
+         {
+             INTEL_CHECK_STATUS(status = MFX_ERR_UNKNOWN);
+         }
+         INTEL_DEBUG_INFO("nEncSurfNum = %hu", m_sD3D11Response.NumFrameActual);
+         INTEL_CHECK_STATUS(status = AllocSurfaces(m_sD3D11Response.NumFrameActual, m_sAllocRequest.Info.Width, m_sAllocRequest.Info.Height, &m_sParamReq.mfx.FrameInfo));
+#else
+         INTEL_DEBUG_INFO("nEncSurfNum = %hu", m_sAllocRequest.NumFrameSuggested);
+         INTEL_CHECK_STATUS(status = AllocSurfaces(m_sAllocRequest.NumFrameSuggested, m_sAllocRequest.Info.Width, m_sAllocRequest.Info.Height, &m_sParamReq.mfx.FrameInfo));
+#endif
+         // Initialize the Media SDK decoder
+         status = m_Inst.Init(&m_sParamReq);
+         if (status != MFX_ERR_NONE && status != MFX_WRN_PARTIAL_ACCELERATION)
+         {
+             INTEL_CHECK_STATUS(status);
+         }
+         INTEL_DEBUG_INFO("Decoder->Init() returned: %d", status);
+
+         m_bInit = true;
+     }
+
+     //
+     // Stage 1: Main decoding loop
+     //
+     while (MFX_ERR_NONE <= status || MFX_ERR_MORE_DATA == status || MFX_ERR_MORE_SURFACE == status)
+     {
+         if (MFX_WRN_DEVICE_BUSY == status)
+         {
+             tsk_thread_sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync
+         }
+
+         if (MFX_ERR_MORE_DATA == status)
+         {
+             return MFX_ERR_NONE; // decoder wants the next access unit: not an error
+         }
+
+         if (MFX_ERR_MORE_SURFACE == status || MFX_ERR_NONE == status)
+         {
+             nSurfaceIndex = GetFreeSurfaceIndex(); // Find free frame surface
+             if (MFX_ERR_NOT_FOUND == nSurfaceIndex)
+             {
+                 INTEL_CHECK_STATUS((status = MFX_ERR_MEMORY_ALLOC));
+             }
+         }
+
+         // Decode a frame asychronously (returns immediately)
+         // - If input bitstream contains multiple frames DecodeFrameAsync will start decoding multiple frames, and remove them from bitstream
+         status = m_Inst.DecodeFrameAsync(&m_sBitstream, m_ppSurfacePtrs[nSurfaceIndex], &pmfxOutSurface, &syncp);
+
+         // Ignore warnings if output is available,
+         // if no output and no action required just repeat the DecodeFrameAsync call
+         if (MFX_ERR_NONE < status && syncp)
+         {
+             status = MFX_ERR_NONE;
+         }
+
+         if (MFX_ERR_NONE == status)
+         {
+             status = m_pSession->SyncOperation(syncp, 60000); // Synchronize. Wait until decoded frame is ready
+         }
+
+         if (MFX_ERR_NONE == status)
+         {
+             bGotFrame = true;
+             if (pmfxOutSurface)
+             {
+                 *ppmfxOutSurface = pmfxOutSurface;
+             }
+         }
+     }
+
+     //
+     // Stage 2: Retrieve the buffered decoded frames
+     //
+     while (MFX_ERR_NONE <= status || MFX_ERR_MORE_SURFACE == status)
+     {
+         if (MFX_WRN_DEVICE_BUSY == status)
+         {
+             tsk_thread_sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync
+         }
+
+         nSurfaceIndex = GetFreeSurfaceIndex(); // Find free frame surface
+         if (MFX_ERR_NOT_FOUND == nSurfaceIndex)
+         {
+             INTEL_CHECK_STATUS((status = MFX_ERR_MEMORY_ALLOC));
+         }
+
+         // Decode a frame asychronously (returns immediately); NULL bitstream drains the decoder
+         status = m_Inst.DecodeFrameAsync(NULL, m_ppSurfacePtrs[nSurfaceIndex], ppmfxOutSurface, &syncp);
+
+         // Ignore warnings if output is available,
+         // if no output and no action required just repeat the DecodeFrameAsync call
+         if (MFX_ERR_NONE < status && syncp)
+         {
+             status = MFX_ERR_NONE;
+         }
+
+         if (MFX_ERR_NONE == status)
+         {
+             status = m_pSession->SyncOperation(syncp, 60000); // Synchronize. Waits until decoded frame is ready
+         }
+
+         if (MFX_ERR_NONE == status)
+         {
+             bGotFrame = true;
+             if (pmfxOutSurface)
+             {
+                 *ppmfxOutSurface = pmfxOutSurface;
+             }
+         }
+     }
+
+     status = MFX_ERR_NONE; // negative status here only means "no more frames"
+
+ bail:
+     return status;
+ }
+private:
+ mfxU8 *m_pAccumulatorPtr; // reassembly buffer for the current access unit
+ mfxU32 m_nAccumulatorSize; // allocated size of the accumulator, in bytes
+ mfxU32 m_nAccumulatorPos; // write position (== bytes accumulated so far)
+ mfxU32 m_nLastRtpTimestamp; // RTP timestamp of the frame being reassembled
+ MFXVideoDECODE m_Inst; // Media SDK decoder bound to the shared session
+ bool m_bInit; // true once DecodeHeader()+Init() succeeded
+};
+
+/* ============ H.264 Base/Main Profile X.X Plugin interface functions ================= */
+
+static int tdav_codec_h264_intel_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+    // Runtime parameter setter (codec actions, bandwidth limits, rotation).
+    // Returns 0 when the parameter was handled, -1 otherwise / on error.
+    tdav_codec_h264_intel_t* h264 = (tdav_codec_h264_intel_t*)self;
+    if (param->value_type == tmedia_pvt_int32)
+    {
+        if (tsk_striequals(param->key, "action"))
+        {
+            tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+            switch (action) {
+            case tmedia_codec_action_encode_idr:
+            {
+                // Force an IDR on the next encoded frame.
+                h264->encoder.force_idr = tsk_true;
+                return 0;
+            }
+            case tmedia_codec_action_bw_up:
+            case tmedia_codec_action_bw_down:
+            {
+                if (self->opened)
+                {
+                    INTEL_CHECK_STATUS(h264->encoder.pInst->UpdateBandwidth(action == tmedia_codec_action_bw_up, TMEDIA_CODEC(h264)->bandwidth_max_upload));
+                }
+                return 0; // FIX: was falling through to the -1 error path even on success
+            }
+            default:
+            {
+                break; // unknown action -> not handled
+            }
+            }
+        }
+        else if (tsk_striequals(param->key, "bw_kbps")) // both up and down (from the SDP)
+        {
+            int32_t max_bw_userdefine = tmedia_defaults_get_bandwidth_video_upload_max();
+            int32_t max_bw_new = *((int32_t*)param->value);
+            if (max_bw_userdefine > 0)
+            {
+                // do not use more than what the user defined in it's configuration
+                TMEDIA_CODEC(h264)->bandwidth_max_upload = TSK_MIN(max_bw_new, max_bw_userdefine);
+            }
+            else
+            {
+                TMEDIA_CODEC(h264)->bandwidth_max_upload = max_bw_new;
+            }
+            INTEL_DEBUG_INFO("bandwidth-max-upload=%d", TMEDIA_CODEC(h264)->bandwidth_max_upload);
+            if (self->opened)
+            {
+                INTEL_CHECK_STATUS(h264->encoder.pInst->SetMaxBandwidth(TMEDIA_CODEC(h264)->bandwidth_max_upload));
+            }
+            return 0;
+        }
+        else if (tsk_striequals(param->key, "bandwidth-max-upload"))
+        {
+            int32_t bw_max_upload = *((int32_t*)param->value);
+            // FIX: the log message wrongly said "OpenH264 codec" (copy/paste from the OpenH264 plugin)
+            TSK_DEBUG_INFO("Intel Media SDK codec: bandwidth-max-upload=%d", bw_max_upload);
+            TMEDIA_CODEC(h264)->bandwidth_max_upload = bw_max_upload;
+            if (self->opened)
+            {
+                INTEL_CHECK_STATUS(h264->encoder.pInst->SetMaxBandwidth(TMEDIA_CODEC(h264)->bandwidth_max_upload));
+            }
+            return 0;
+        }
+        else if (tsk_striequals(param->key, "rotation"))
+        {
+            int rotation = *((int32_t*)param->value);
+            if (h264->encoder.rotation != rotation)
+            {
+                if (self->opened)
+                {
+                    // The encoder must be torn down and re-opened to apply the new rotation.
+                    int ret;
+                    h264->encoder.rotation = rotation;
+                    if ((ret = tdav_codec_h264_intel_close_encoder(h264)))
+                    {
+                        return ret;
+                    }
+                    if ((ret = tdav_codec_h264_intel_open_encoder(h264)))
+                    {
+                        return ret;
+                    }
+                }
+            }
+            return 0;
+        }
+    }
+bail:
+
+    return -1;
+}
+
+
+static int tdav_codec_h264_intel_open(tmedia_codec_t* self)
+{
+    // Open both directions. The caller (base class) already checked that the
+    // codec is not opened, so no re-entrancy guard is needed here.
+    tdav_codec_h264_intel_t* pSelf = (tdav_codec_h264_intel_t*)self;
+    int ret;
+
+    if (!pSelf) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    // Encoder first, then decoder; stop at the first failure.
+    if ((ret = tdav_codec_h264_intel_open_encoder(pSelf)) != 0) {
+        return ret;
+    }
+    if ((ret = tdav_codec_h264_intel_open_decoder(pSelf)) != 0) {
+        return ret;
+    }
+    return 0;
+}
+
+static int tdav_codec_h264_intel_close(tmedia_codec_t* self)
+{
+    // Close both directions. The caller (base class) already checked that the
+    // codec is opened. Always succeeds.
+    tdav_codec_h264_intel_t* pSelf = (tdav_codec_h264_intel_t*)self;
+
+    if (!pSelf)
+    {
+        INTEL_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_codec_h264_intel_close_encoder(pSelf); // Encoder
+    tdav_codec_h264_intel_close_decoder(pSelf); // Decoder
+
+    return 0;
+}
+
+static tsk_size_t tdav_codec_h264_intel_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+    // Forward the raw frame to the encoder wrapper. Encoded output is pushed to the
+    // RTP packetizer by the wrapper itself, so the out-parameters are unused and the
+    // return value is always 0.
+    tdav_codec_h264_intel_t* pSelf = (tdav_codec_h264_intel_t*)self;
+    (void)(out_data);
+    (void)(out_max_size);
+
+    if (!pSelf || !pSelf->encoder.pInst)
+    {
+        INTEL_CHECK_STATUS(MFX_ERR_NULL_PTR);
+    }
+    INTEL_CHECK_STATUS(pSelf->encoder.pInst->Encode(self, (const mfxU8*)in_data, (mfxU32)in_size));
+bail:
+    return 0;
+}
+
+static tsk_size_t tdav_codec_h264_intel_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+    // Forward the RTP payload to the decoder wrapper.
+    // Returns the size in bytes of the decoded frame written to *out_data (0 if none).
+    tdav_codec_h264_intel_t* pSelf = (tdav_codec_h264_intel_t*)self;
+
+    if (!pSelf || !pSelf->decoder.pInst)
+    {
+        INTEL_CHECK_STATUS(MFX_ERR_NULL_PTR);
+    }
+    return (tsk_size_t)pSelf->decoder.pInst->Decode(self, (const mfxU8*)in_data, (mfxU32)in_size, out_data, out_max_size, (const trtp_rtp_header_t*)proto_hdr);
+bail:
+    return 0;
+}
+
+static tsk_bool_t tdav_codec_h264_intel_sdp_att_match(const tmedia_codec_t* self, const char* att_name, const char* att_value)
+{
+    // Delegate SDP attribute negotiation to the shared H.264 helper.
+    tdav_codec_h264_common_t* pCommon = (tdav_codec_h264_common_t*)self;
+    return tdav_codec_h264_common_sdp_att_match(pCommon, att_name, att_value);
+}
+
+static char* tdav_codec_h264_intel_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+    // Build the attribute via the shared helper; tag the fmtp line with our implementation name.
+    char* att = tdav_codec_h264_common_sdp_att_get((const tdav_codec_h264_common_t*)self, att_name);
+    if (att != tsk_null && tsk_striequals(att_name, "fmtp"))
+    {
+        tsk_strcat(&att, "; impl=intel");
+    }
+    return att;
+}
+
+/* ============ H.264 Base Profile Plugin interface ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_h264_intel_base_ctor(tsk_object_t * self, va_list * app)
+{
+    // Base-profile instance. tmedia_codec_create() has already initialized the base part.
+    tdav_codec_h264_intel_t *pH264 = (tdav_codec_h264_intel_t*)self;
+    if (!pH264) {
+        return self;
+    }
+    if (tdav_codec_h264_intel_init(pH264, profile_idc_baseline) != 0) {
+        return tsk_null; // init failed -> abort construction
+    }
+    return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h264_intel_base_dtor(tsk_object_t * self)
+{
+    // Tear down the common H.264 part first, then our own members.
+    tdav_codec_h264_intel_t *pH264 = (tdav_codec_h264_intel_t*)self;
+    if (pH264) {
+        tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self)); /* deinit base */
+        tdav_codec_h264_intel_deinit(pH264); /* deinit self */
+    }
+    return self;
+}
+/* object definition */
+// tsk_object definition for the base-profile codec (size, ctor, dtor, comparator).
+static const tsk_object_def_t tdav_codec_h264_intel_base_def_s =
+{
+ sizeof(tdav_codec_h264_intel_t),
+ tdav_codec_h264_intel_base_ctor,
+ tdav_codec_h264_intel_base_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+// Plugin definition registered with tinyMEDIA for the H.264 Base Profile codec.
+static const tmedia_codec_plugin_def_t tdav_codec_h264_intel_base_plugin_def_s =
+{
+ &tdav_codec_h264_intel_base_def_s,
+
+ tmedia_video, // media type
+ tmedia_codec_id_h264_bp, // codec id
+ "H264", // encoding name used in the SDP
+ "H264 Base Profile (Intel Media SDK)", // description
+ TMEDIA_CODEC_FORMAT_H264_BP,
+ tsk_true,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (width, height, fps) */
+ { 176, 144, 0 }, // fps is @deprecated
+
+ /* callbacks */
+ tdav_codec_h264_intel_set,
+ tdav_codec_h264_intel_open,
+ tdav_codec_h264_intel_close,
+ tdav_codec_h264_intel_encode,
+ tdav_codec_h264_intel_decode,
+ tdav_codec_h264_intel_sdp_att_match,
+ tdav_codec_h264_intel_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h264_intel_base_plugin_def_t = &tdav_codec_h264_intel_base_plugin_def_s;
+
+
+/* ============ H.264 Main Profile Plugin interface ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_h264_intel_main_ctor(tsk_object_t * self, va_list * app)
+{
+    // Main-profile instance. tmedia_codec_create() has already initialized the base part.
+    tdav_codec_h264_intel_t *pH264 = (tdav_codec_h264_intel_t*)self;
+    if (!pH264) {
+        return self;
+    }
+    if (tdav_codec_h264_intel_init(pH264, profile_idc_main) != 0) {
+        return tsk_null; // init failed -> abort construction
+    }
+    return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h264_intel_main_dtor(tsk_object_t * self)
+{
+    // Tear down the common H.264 part first, then our own members.
+    tdav_codec_h264_intel_t *pH264 = (tdav_codec_h264_intel_t*)self;
+    if (pH264) {
+        tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self)); /* deinit main */
+        tdav_codec_h264_intel_deinit(pH264); /* deinit self */
+    }
+    return self;
+}
+/* object definition */
+// tsk_object definition for the main-profile codec (size, ctor, dtor, comparator).
+static const tsk_object_def_t tdav_codec_h264_intel_main_def_s =
+{
+ sizeof(tdav_codec_h264_intel_t),
+ tdav_codec_h264_intel_main_ctor,
+ tdav_codec_h264_intel_main_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+// Plugin definition registered with tinyMEDIA for the H.264 Main Profile codec.
+static const tmedia_codec_plugin_def_t tdav_codec_h264_intel_main_plugin_def_s =
+{
+ &tdav_codec_h264_intel_main_def_s,
+
+ tmedia_video, // media type
+ tmedia_codec_id_h264_mp, // codec id
+ "H264", // encoding name used in the SDP
+ "H264 main Profile (Intel Media SDK)", // description
+ TMEDIA_CODEC_FORMAT_H264_MP,
+ tsk_true,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (width, height, fps) */
+ { 176, 144, 0 }, // fps is @deprecated
+
+ /* callbacks */
+ tdav_codec_h264_intel_set,
+ tdav_codec_h264_intel_open,
+ tdav_codec_h264_intel_close,
+ tdav_codec_h264_intel_encode,
+ tdav_codec_h264_intel_decode,
+ tdav_codec_h264_intel_sdp_att_match,
+ tdav_codec_h264_intel_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h264_intel_main_plugin_def_t = &tdav_codec_h264_intel_main_plugin_def_s;
+
+/* ============ Common To all H264 profiles ================= */
+
+static int tdav_codec_h264_intel_open_encoder(tdav_codec_h264_intel_t* self)
+{
+    // Create and open the encoder wrapper; on any failure the wrapper is destroyed again.
+    if (self->encoder.pInst)
+    {
+        INTEL_BREAK("Already initialized");
+    }
+
+    self->encoder.pInst = new IntelCodecEncoder(self->mfxSession);
+    if (!self->encoder.pInst)
+    {
+        INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC);
+    }
+    INTEL_CHECK_STATUS(self->encoder.pInst->Open(self));
+
+    return 0;
+
+bail:
+    if (self->encoder.pInst)
+    {
+        delete self->encoder.pInst;
+        self->encoder.pInst = NULL;
+    }
+    return -1;
+}
+
+/* Destroys the encoder instance (if any) and resets the frame counter.
+ * Safe to call multiple times; always returns 0. */
+static int tdav_codec_h264_intel_close_encoder(tdav_codec_h264_intel_t* self)
+{
+    if (!self) {
+        return 0;
+    }
+    if (self->encoder.pInst) {
+        delete self->encoder.pInst;
+        self->encoder.pInst = NULL;
+    }
+    self->encoder.frame_count = 0;
+    return 0;
+}
+
+/* Creates and opens the MFX decoder instance for this codec.
+ * Returns 0 on success, -1 on failure (instance is destroyed on the error path).
+ * NOTE(review): non-static, unlike its encoder twin — presumably declared in a
+ * header and called from elsewhere; confirm before changing linkage. */
+int tdav_codec_h264_intel_open_decoder(tdav_codec_h264_intel_t* self)
+{
+    if (self->decoder.pInst)
+    {
+        /* double-open is treated as an error */
+        INTEL_BREAK("Already initialized");
+    }
+
+    if (!(self->decoder.pInst = new IntelCodecDecoder(self->mfxSession)))
+    {
+        INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC);
+    }
+    INTEL_CHECK_STATUS(self->decoder.pInst->Open(self));
+
+    return 0;
+
+bail:
+    /* error path: tear down the half-constructed decoder */
+    if (self->decoder.pInst)
+    {
+        delete self->decoder.pInst, self->decoder.pInst = NULL;
+    }
+    return -1;
+}
+
+/* Destroys the decoder instance (if any). Safe to call multiple times; always returns 0. */
+static int tdav_codec_h264_intel_close_decoder(tdav_codec_h264_intel_t* self)
+{
+    if (self && self->decoder.pInst) {
+        delete self->decoder.pInst;
+        self->decoder.pInst = NULL;
+    }
+    return 0;
+}
+
+/* One-time init shared by all profile constructors:
+ *  - inits the common H.264 base part,
+ *  - derives the H.264 level from the negotiated output size,
+ *  - sets packetization mode, bitrate/MBPS caps and I/O chroma,
+ *  - creates and inits the MFX session (and, when INTEL_DX11_D3D,
+ *    the D3D11 device plus the external frame allocator).
+ * Returns 0 on success, negative on failure.
+ * Fix: the error log used to name a non-existent function
+ * ("tdav_codec_h264_intel_common_init() faile"); it now names the
+ * actual callee and spells "failed" correctly. */
+static int tdav_codec_h264_intel_init(tdav_codec_h264_intel_t* self, profile_idc_t profile)
+{
+    int ret = -1;
+    level_idc_t level;
+    tdav_codec_h264_common_t* common = (tdav_codec_h264_common_t*)self;
+#if INTEL_DX11_D3D
+    mfxHDL deviceHandle = NULL;
+#endif
+
+    if (!self) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        goto bail;
+    }
+
+    if ((ret = tdav_codec_h264_common_init(common))) {
+        TSK_DEBUG_ERROR("tdav_codec_h264_common_init() failed with error code=%d", ret);
+        goto bail;
+    }
+
+    if ((ret = tdav_codec_h264_common_level_from_size(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, &level))) {
+        TSK_DEBUG_ERROR("Failed to find level for size=[%u, %u]", TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height);
+        goto bail;
+    }
+
+    if (/*MFUtils::IsLowLatencyH264SupportsMaxSliceSize()*/0) { // TODO: MSDK doesn't support PM=0. Neg. PM=1 but try to do the best to produce SingleNalUnits
+        common->pack_mode_local = H264_PACKETIZATION_MODE;
+    }
+    else {
+        common->pack_mode_local = Non_Interleaved_Mode;
+    }
+    common->profile = profile;
+    common->level = level;
+#if 0
+    // A.2.1.1 Constrained Baseline profile
+    // Conformance of a bitstream to the Constrained Baseline profile is indicated by profile_idc being equal to 66 with
+    // constraint_set1_flag being equal to 1.
+    common->profile_iop = 0xe0; // "constraint_set0_flag=1 and constraint_set1_flag=1" -> Constrained Baseline profile
+#endif
+    TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS * 1000;
+    TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR * 1000;
+
+    TMEDIA_CODEC_VIDEO(self)->in.chroma = tmedia_chroma_yuv420p; // decoded
+    TMEDIA_CODEC_VIDEO(self)->out.chroma = tmedia_chroma_yuv420p; // encoded
+
+    /* lazily create the MFX session (kept across re-inits) */
+    if (!self->mfxSession && !(self->mfxSession = new MFXVideoSession()))
+    {
+        INTEL_CHECK_STATUS(MFX_ERR_MEMORY_ALLOC);
+    }
+    INTEL_CHECK_STATUS(self->mfxSession->Init(__IntelDefaultImpl, &__IntelDefaultVer));
+
+#if INTEL_DX11_D3D
+    // Create DirectX device context
+    INTEL_CHECK_STATUS(D3D11_CreateHWDevice(self, *self->mfxSession, &deviceHandle, NULL));
+
+    // Provide device manager to Media SDK
+    INTEL_CHECK_STATUS(self->mfxSession->SetHandle(MFX_HANDLE_D3D11_DEVICE, deviceHandle));
+
+    self->D3D11Allocator.Alloc = D3D11_SimpleAlloc;
+    self->D3D11Allocator.Free = D3D11_SimpleFree;
+    self->D3D11Allocator.Lock = D3D11_SimpleLock;
+    self->D3D11Allocator.Unlock = D3D11_SimpleUnlock;
+    self->D3D11Allocator.GetHDL = D3D11_SimpleGethdl;
+    self->D3D11Allocator.pthis = self;
+
+    // Since we are using video memory we must provide Media SDK with an external allocator
+    INTEL_CHECK_STATUS(self->mfxSession->SetFrameAllocator(&self->D3D11Allocator));
+#endif /* INTEL_DX11_D3D */
+
+    ret = 0;
+
+bail:
+    return ret;
+}
+
+/* Releases everything tdav_codec_h264_intel_init() created: closes the codec
+ * (encoder/decoder), deletes the MFX session and, when INTEL_DX11_D3D, drops
+ * the D3D11 device references. Returns 0 on success, -1 on invalid input. */
+static int tdav_codec_h264_intel_deinit(tdav_codec_h264_intel_t* self)
+{
+    if (!self)
+    {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* close encoder/decoder first: they hold references into the session */
+    tdav_codec_h264_intel_close((tmedia_codec_t*)self);
+
+    if (self->mfxSession)
+    {
+        delete self->mfxSession, self->mfxSession = NULL;
+    }
+
+#if INTEL_DX11_D3D
+    D3D11_CleanupHWDevice(self);
+#endif /* INTEL_DX11_D3D */
+
+    return 0;
+}
+
+
+#if INTEL_DX11_D3D
+
+/* Maps the MFX implementation type (hw/hw2/...) to a DXGI adapter index,
+ * creates a DXGI factory (stored on 'pthis') and enumerates the matching
+ * adapter. Returns the adapter on success, NULL on failure.
+ * NOTE(review): the returned adapter reference appears to be owned by the
+ * caller (stored in pthis->hAdapter) — confirm release responsibility. */
+static IDXGIAdapter* D3D11_GetIntelDeviceAdapterHandle(mfxHDL _pthis, mfxSession session)
+{
+    mfxU32 adapterNum = 0;
+    mfxIMPL impl;
+    tdav_codec_h264_intel_t* pthis = (tdav_codec_h264_intel_t*)_pthis;
+
+    MFXQueryIMPL(session, &impl);
+
+    mfxIMPL baseImpl = MFX_IMPL_BASETYPE(impl); // Extract Media SDK base implementation type
+
+    // get corresponding adapter number
+    for (mfxU8 i = 0; i < sizeof(implTypes) / sizeof(implTypes[0]); i++)
+    {
+        if (implTypes[i].impl == baseImpl)
+        {
+            adapterNum = implTypes[i].adapterID;
+            break;
+        }
+    }
+
+    HRESULT hres = CreateDXGIFactory(__uuidof(IDXGIFactory2), (void**)(&pthis->pDXGIFactory));
+    if (FAILED(hres))
+    {
+        INTEL_DEBUG_ERROR("CreateDXGIFactory failed: %ld", hres);
+        return NULL;
+    }
+
+    IDXGIAdapter* adapter = NULL;
+    hres = pthis->pDXGIFactory->EnumAdapters(adapterNum, &adapter);
+    if (FAILED(hres))
+    {
+        INTEL_DEBUG_ERROR("EnumAdapters failed: %ld", hres);
+        return NULL;
+    }
+
+    return adapter;
+}
+
+// Create DirectX 11 device context
+// - Required when using D3D surfaces.
+// - D3D Device created and handed to Intel Media SDK
+// - Intel graphics device adapter will be determined automatically (does not have to be primary),
+//   but with the following caveats:
+//     - Device must be active (but monitor does NOT have to be attached)
+//     - Device must be enabled in BIOS. Required for the case when used together with a discrete graphics card
+//     - For switchable graphics solutions (mobile) make sure that Intel device is the active device
+// Returns MFX_ERR_NONE on success and stores the device/context on 'pthis';
+// *deviceHandle receives the raw ID3D11Device pointer for SetHandle().
+static mfxStatus D3D11_CreateHWDevice(mfxHDL _pthis, mfxSession session, mfxHDL* deviceHandle, HWND hWnd)
+{
+    hWnd; // Window handle not required by DX11 since we do not showcase rendering.
+
+    HRESULT hres = S_OK;
+    tdav_codec_h264_intel_t* pthis = (tdav_codec_h264_intel_t*)_pthis;
+
+    /* highest-first: D3D11CreateDevice picks the best supported level */
+    static D3D_FEATURE_LEVEL FeatureLevels[] = {
+        D3D_FEATURE_LEVEL_11_1,
+        D3D_FEATURE_LEVEL_11_0,
+        D3D_FEATURE_LEVEL_10_1,
+        D3D_FEATURE_LEVEL_10_0
+    };
+    D3D_FEATURE_LEVEL pFeatureLevelsOut;
+
+    pthis->hAdapter = D3D11_GetIntelDeviceAdapterHandle(_pthis, session);
+    if (NULL == pthis->hAdapter)
+    {
+        INTEL_DEBUG_ERROR("D3D11_GetIntelDeviceAdapterHandle failed");
+        return MFX_ERR_DEVICE_FAILED;
+    }
+
+    hres = D3D11CreateDevice(pthis->hAdapter,
+        D3D_DRIVER_TYPE_UNKNOWN,
+        NULL,
+        0,
+        FeatureLevels,
+        (sizeof(FeatureLevels) / sizeof(FeatureLevels[0])),
+        D3D11_SDK_VERSION,
+        &pthis->pD3D11Device,
+        &pFeatureLevelsOut,
+        &pthis->pD3D11Ctx);
+    if (FAILED(hres))
+    {
+        INTEL_DEBUG_ERROR("D3D11CreateDevice failed: %ld", hres);
+        return MFX_ERR_DEVICE_FAILED;
+    }
+
+    // turn on multithreading for the DX11 context (MFX accesses it from worker threads)
+    CComQIPtr<ID3D10Multithread> p_mt(pthis->pD3D11Ctx);
+    if (p_mt)
+    {
+        p_mt->SetMultithreadProtected(true);
+    }
+    else
+    {
+        INTEL_DEBUG_ERROR("Failed to create ID3D10Multithread object");
+        return MFX_ERR_DEVICE_FAILED;
+    }
+
+    *deviceHandle = (mfxHDL)pthis->pD3D11Device;
+    return MFX_ERR_NONE;
+}
+
+/* Drops all D3D11/DXGI references held on the codec object by resetting the
+ * smart pointers / handles, in reverse order of creation. */
+static void D3D11_CleanupHWDevice(mfxHDL _pthis)
+{
+    tdav_codec_h264_intel_t* self = (tdav_codec_h264_intel_t*)_pthis;
+    self->pAdapter = NULL;
+    self->hAdapter = NULL;
+    self->pD3D11Ctx = NULL;
+    self->pDXGIFactory = NULL;
+    self->pD3D11Device = NULL;
+}
+
+/* Adopts an externally-created device context and fetches its owning device. */
+static void D3D11_SetHWDeviceContext(mfxHDL _pthis, CComPtr<ID3D11DeviceContext> devCtx)
+{
+    tdav_codec_h264_intel_t* self = (tdav_codec_h264_intel_t*)_pthis;
+    self->pD3D11Ctx = devCtx;
+    devCtx->GetDevice(&self->pD3D11Device);
+}
+
+// Intel Media SDK memory allocator entrypoints....
+// - A slightly different allocation procedure is used for encode, decode and VPP
+/* Allocates NumFrameSuggested D3D11 surfaces (plus CPU-readable staging
+ * textures) and returns them through 'response' as an array of CustomMemId.
+ * P8 (bitstream) requests are served with a single ID3D11Buffer instead.
+ * NOTE(review): on the failure paths below the 'mids' array and any
+ * already-created CustomMemId/texture objects are leaked — confirm whether
+ * the sample-code origin of this allocator makes that acceptable here. */
+static mfxStatus _D3D11_SimpleAlloc(mfxHDL _pthis, mfxFrameAllocRequest *request, mfxFrameAllocResponse *response)
+{
+    HRESULT hRes;
+    tdav_codec_h264_intel_t* pthis = (tdav_codec_h264_intel_t*)_pthis;
+
+    // Determine surface format (current simple implementation only supports NV12 and RGB4(32))
+    DXGI_FORMAT format;
+    if (MFX_FOURCC_NV12 == request->Info.FourCC)
+    {
+        format = DXGI_FORMAT_NV12;
+    }
+    else if (MFX_FOURCC_RGB4 == request->Info.FourCC)
+    {
+        format = DXGI_FORMAT_B8G8R8A8_UNORM;
+    }
+    else
+    {
+        format = DXGI_FORMAT_P8;
+    }
+
+    // Allocate custom container to keep texture and stage buffers for each surface
+    // Container also stores the intended read and/or write operation.
+    CustomMemId** mids = new CustomMemId *[request->NumFrameSuggested];
+    if (!mids) return MFX_ERR_MEMORY_ALLOC;
+    for (int i = 0; i < request->NumFrameSuggested; i++)
+    {
+        mids[i] = new CustomMemId;
+        memset(mids[i], 0, sizeof(CustomMemId));
+        mids[i]->rw = request->Type & 0xF000; // Set intended read/write operation
+    }
+
+    request->Type = request->Type & 0x0FFF;
+
+    // because P8 data (bitstream) for h264 encoder should be allocated by CreateBuffer()
+    // but P8 data (MBData) for MPEG2 encoder should be allocated by CreateTexture2D()
+    if (request->Info.FourCC == MFX_FOURCC_P8)
+    {
+        D3D11_BUFFER_DESC desc = { 0 };
+
+        desc.ByteWidth = request->Info.Width * request->Info.Height;
+        desc.Usage = D3D11_USAGE_STAGING;
+        desc.BindFlags = 0;
+        desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
+        desc.MiscFlags = 0;
+        desc.StructureByteStride = 0;
+
+        ID3D11Buffer * buffer = 0;
+        hRes = pthis->pD3D11Device->CreateBuffer(&desc, 0, &buffer);
+        if (FAILED(hRes))
+        {
+            INTEL_DEBUG_ERROR("CreateBuffer failed:%ld", hRes);
+            return MFX_ERR_MEMORY_ALLOC;
+        }
+
+        // only mids[0] carries the bitstream buffer; the pointer is stored
+        // through the generic memId slot (freed via Release() later)
+        mids[0]->memId = reinterpret_cast<ID3D11Texture2D *>(buffer);
+    }
+    else
+    {
+        D3D11_TEXTURE2D_DESC desc = { 0 };
+
+        desc.Width = request->Info.Width;
+        desc.Height = request->Info.Height;
+        desc.MipLevels = 1;
+        desc.ArraySize = 1; // number of subresources is 1 in this case
+        desc.Format = format;
+        desc.SampleDesc.Count = 1;
+        desc.Usage = D3D11_USAGE_DEFAULT;
+        desc.BindFlags = D3D11_BIND_DECODER;
+        desc.MiscFlags = 0;
+
+        if ((MFX_MEMTYPE_FROM_VPPIN & request->Type) &&
+            (DXGI_FORMAT_B8G8R8A8_UNORM == desc.Format))
+        {
+            desc.BindFlags = D3D11_BIND_RENDER_TARGET;
+            if (desc.ArraySize > 2)
+                return MFX_ERR_MEMORY_ALLOC;
+        }
+
+        if ((MFX_MEMTYPE_FROM_VPPOUT & request->Type) ||
+            (MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET & request->Type))
+        {
+            desc.BindFlags = D3D11_BIND_RENDER_TARGET;
+            if (desc.ArraySize > 2)
+            {
+                return MFX_ERR_MEMORY_ALLOC;
+            }
+        }
+
+        if (DXGI_FORMAT_P8 == desc.Format)
+            desc.BindFlags = 0;
+
+        ID3D11Texture2D* pTexture2D;
+
+        // Create surface textures
+        for (size_t i = 0; i < request->NumFrameSuggested / desc.ArraySize; i++)
+        {
+            hRes = pthis->pD3D11Device->CreateTexture2D(&desc, NULL, &pTexture2D);
+
+            if (FAILED(hRes))
+            {
+                INTEL_DEBUG_ERROR("CreateTexture2D failed:%ld", hRes);
+                return MFX_ERR_MEMORY_ALLOC;
+            }
+
+            mids[i]->memId = pTexture2D;
+        }
+
+        // re-use the same descriptor for the CPU-readable staging copies
+        desc.ArraySize = 1;
+        desc.Usage = D3D11_USAGE_STAGING;
+        desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ /*| D3D11_CPU_ACCESS_WRITE*/;
+        desc.BindFlags = 0;
+        desc.MiscFlags = 0;
+
+        // Create surface staging textures
+        for (size_t i = 0; i < request->NumFrameSuggested; i++)
+        {
+            hRes = pthis->pD3D11Device->CreateTexture2D(&desc, NULL, &pTexture2D);
+
+            if (FAILED(hRes))
+            {
+                INTEL_DEBUG_ERROR("CreateTexture2D failed:%ld", hRes);
+                return MFX_ERR_MEMORY_ALLOC;
+            }
+
+            mids[i]->memIdStage = pTexture2D;
+        }
+    }
+
+    response->mids = (mfxMemId*)mids;
+    response->NumFrameActual = request->NumFrameSuggested;
+
+    return MFX_ERR_NONE;
+}
+
+/* Allocator entrypoint handed to Media SDK: caches one alloc response per
+ * direction (index 1 = decode, 0 = everything else) so that the repeated
+ * request made at decoder Init() time returns the same surfaces. */
+static mfxStatus D3D11_SimpleAlloc(mfxHDL _pthis, mfxFrameAllocRequest *request, mfxFrameAllocResponse *response)
+{
+    tdav_codec_h264_intel_t* pthis = (tdav_codec_h264_intel_t*)_pthis;
+    mfxStatus sts = MFX_ERR_NONE;
+    int idx = (MFX_MEMTYPE_FROM_DECODE & request->Type) ? 1 : 0;
+
+    if (request->NumFrameSuggested <= pthis->D3D11SavedAllocResponses[idx].NumFrameActual &&
+        MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
+        MFX_MEMTYPE_FROM_DECODE & request->Type &&
+        pthis->D3D11SavedAllocResponses[idx].NumFrameActual != 0)
+    {
+        // Memory for this request was already allocated during manual allocation stage. Return saved response
+        // When decode acceleration device (DXVA) is created it requires a list of d3d surfaces to be passed.
+        // Therefore Media SDK will ask for the surface info/mids again at Init() stage, thus requiring us to return the saved response
+        // (No such restriction applies to Encode or VPP)
+
+        *response = pthis->D3D11SavedAllocResponses[idx];
+    }
+    else
+    {
+        sts = _D3D11_SimpleAlloc(_pthis, request, response);
+        pthis->D3D11SavedAllocResponses[idx] = *response;
+    }
+
+    return sts;
+}
+
+/* Maps a surface for CPU access and fills 'ptr' with plane pointers/pitch.
+ * P8 buffers (no staging texture) are mapped directly; textured surfaces are
+ * first copied into their staging twin, then the staging copy is mapped
+ * (spinning while the GPU is still drawing). Returns MFX_ERR_LOCK_MEMORY on
+ * any mapping failure or unsupported format. */
+static mfxStatus D3D11_SimpleLock(mfxHDL _pthis, mfxMemId mid, mfxFrameData *ptr)
+{
+    HRESULT hRes = S_OK;
+    tdav_codec_h264_intel_t* pthis = (tdav_codec_h264_intel_t*)_pthis;
+
+    D3D11_TEXTURE2D_DESC desc = { 0 };
+    D3D11_MAPPED_SUBRESOURCE lockedRect = { 0 };
+
+    CustomMemId* memId = (CustomMemId*)mid;
+    ID3D11Texture2D* pSurface = (ID3D11Texture2D *)memId->memId;
+    ID3D11Texture2D* pStage = (ID3D11Texture2D *)memId->memIdStage;
+
+    D3D11_MAP mapType = D3D11_MAP_READ;
+    UINT mapFlags = D3D11_MAP_FLAG_DO_NOT_WAIT;
+
+    if (NULL == pStage)
+    {
+        /* P8 bitstream buffer: map directly, no staging copy */
+        hRes = pthis->pD3D11Ctx->Map(pSurface, 0, mapType, mapFlags, &lockedRect);
+        desc.Format = DXGI_FORMAT_P8;
+    }
+    else
+    {
+        pSurface->GetDesc(&desc);
+
+        // copy data only in case of user wants o read from stored surface
+        if (memId->rw & D3D11_WILL_READ)
+        {
+            pthis->pD3D11Ctx->CopySubresourceRegion(pStage, 0, 0, 0, 0, pSurface, 0, NULL);
+        }
+
+        /* DO_NOT_WAIT + retry loop: spin until the GPU has finished with the staging texture */
+        do
+        {
+            hRes = pthis->pD3D11Ctx->Map(pStage, 0, mapType, mapFlags, &lockedRect);
+            if (S_OK != hRes && DXGI_ERROR_WAS_STILL_DRAWING != hRes)
+            {
+                return MFX_ERR_LOCK_MEMORY;
+            }
+        } while (DXGI_ERROR_WAS_STILL_DRAWING == hRes);
+    }
+
+    if (FAILED(hRes))
+    {
+        return MFX_ERR_LOCK_MEMORY;
+    }
+
+    switch (desc.Format)
+    {
+    case DXGI_FORMAT_NV12:
+        /* NV12: Y plane followed by interleaved UV; V starts one byte after U */
+        ptr->Pitch = (mfxU16)lockedRect.RowPitch;
+        ptr->Y = (mfxU8 *)lockedRect.pData;
+        ptr->U = (mfxU8 *)lockedRect.pData + desc.Height * lockedRect.RowPitch;
+        ptr->V = ptr->U + 1;
+        break;
+    case DXGI_FORMAT_B8G8R8A8_UNORM:
+        ptr->Pitch = (mfxU16)lockedRect.RowPitch;
+        ptr->B = (mfxU8 *)lockedRect.pData;
+        ptr->G = ptr->B + 1;
+        ptr->R = ptr->B + 2;
+        ptr->A = ptr->B + 3;
+        break;
+    case DXGI_FORMAT_P8:
+        ptr->Pitch = (mfxU16)lockedRect.RowPitch;
+        ptr->Y = (mfxU8 *)lockedRect.pData;
+        ptr->U = 0;
+        ptr->V = 0;
+        break;
+    default:
+        return MFX_ERR_LOCK_MEMORY;
+    }
+
+    return MFX_ERR_NONE;
+}
+
+/* Unmaps a surface previously mapped by D3D11_SimpleLock. For staged
+ * surfaces, writes are copied back from the staging texture when the
+ * allocation was flagged D3D11_WILL_WRITE. Clears the plane pointers in
+ * 'ptr' (if given). Always returns MFX_ERR_NONE. */
+static mfxStatus D3D11_SimpleUnlock(mfxHDL _pthis, mfxMemId mid, mfxFrameData *ptr)
+{
+    tdav_codec_h264_intel_t* pthis = (tdav_codec_h264_intel_t*)_pthis;
+
+    CustomMemId* memId = (CustomMemId*)mid;
+    ID3D11Texture2D* pSurface = (ID3D11Texture2D *)memId->memId;
+    ID3D11Texture2D* pStage = (ID3D11Texture2D *)memId->memIdStage;
+
+    if (NULL == pStage)
+    {
+        pthis->pD3D11Ctx->Unmap(pSurface, 0);
+    }
+    else
+    {
+        pthis->pD3D11Ctx->Unmap(pStage, 0);
+        // copy data only in case of user wants to write to stored surface
+        if (memId->rw & D3D11_WILL_WRITE)
+        {
+            pthis->pD3D11Ctx->CopySubresourceRegion(pSurface, 0, 0, 0, 0, pStage, 0, NULL);
+        }
+    }
+
+    if (ptr)
+    {
+        /* invalidate CPU pointers so stale access after unlock is caught */
+        ptr->Pitch = 0;
+        ptr->U = ptr->V = ptr->Y = 0;
+        ptr->A = ptr->R = ptr->G = ptr->B = 0;
+    }
+
+    return MFX_ERR_NONE;
+}
+
+/* Returns the D3D11 texture behind 'mid' as an mfxHDLPair (texture, 0). */
+static mfxStatus D3D11_SimpleGethdl(mfxHDL _pthis, mfxMemId mid, mfxHDL *handle)
+{
+    CustomMemId* memId = (CustomMemId*)mid;
+    mfxHDLPair* pPair;
+
+    _pthis; /* unused */
+
+    if (NULL == handle)
+    {
+        return MFX_ERR_INVALID_HANDLE;
+    }
+
+    pPair = (mfxHDLPair*)handle;
+    pPair->first = memId->memId; // surface texture
+    pPair->second = 0;
+
+    return MFX_ERR_NONE;
+}
+
+/* Releases every surface/staging texture recorded in 'response' and deletes
+ * the CustomMemId containers and the mids array itself.
+ * Returns MFX_ERR_NULL_PTR when 'response' is NULL, MFX_ERR_NONE otherwise. */
+static mfxStatus D3D11_SimpleFree(mfxHDL _pthis, mfxFrameAllocResponse *response)
+{
+    _pthis; /* unused */
+    if (NULL == response)
+    {
+        return MFX_ERR_NULL_PTR;
+    }
+
+    if (response->mids)
+    {
+        for (mfxU32 i = 0; i < response->NumFrameActual; i++)
+        {
+            if (response->mids[i])
+            {
+                CustomMemId* mid = (CustomMemId*)response->mids[i];
+                ID3D11Texture2D* pSurface = (ID3D11Texture2D *)mid->memId;
+                ID3D11Texture2D* pStage = (ID3D11Texture2D *)mid->memIdStage;
+                /* memId may be NULL for P8 responses where only mids[0] was filled */
+                if (pSurface)
+                {
+                    pSurface->Release();
+                }
+                if (pStage)
+                {
+                    pStage->Release();
+                }
+                delete mid;
+            }
+        }
+    }
+
+    /* delete[] on NULL is a no-op, so this is safe even when mids was never set */
+    delete[] response->mids;
+    response->mids = 0;
+
+    return MFX_ERR_NONE;
+}
+
+#endif /* INTEL_DX11_D3D */
+
+#endif /* HAVE_INTEL_MEDIA_SDK */
diff --git a/tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c b/tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c
new file mode 100644
index 0000000..5742f43
--- /dev/null
+++ b/tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c
@@ -0,0 +1,411 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h264_rtp.c
+ * @brief H.264 payloader/depayloder as per RFC 3984
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ */
+#include "tinydav/codecs/h264/tdav_codec_h264_rtp.h"
+
+#include "tinydav/codecs/h264/tdav_codec_h264_common.h"
+
+#include "tinymedia/tmedia_codec.h"
+
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#include "tsk_memory.h"
+#include <string.h> /* strlen() */
+#include <stdlib.h> /* strtol() */
+
+/*
+* ITU H.264 - http://www.itu.int/rec/T-REC-H.264-200903-S/en
+*/
+
+uint8_t H264_START_CODE_PREFIX[4] = { 0x00, 0x00, 0x00, 0x01 };
+
+#define H264_NAL_UNIT_TYPE_HEADER_SIZE 1
+#define H264_F_UNIT_TYPE_HEADER_SIZE 1
+#define H264_FUA_HEADER_SIZE 2
+#define H264_FUB_HEADER_SIZE 4
+#define H264_NAL_AGG_MAX_SIZE 65535
+
+static int tdav_codec_h264_get_fua_pay(const uint8_t* in_data, tsk_size_t in_size, const void** out_data, tsk_size_t *out_size, tsk_bool_t* append_scp, tsk_bool_t* end_of_unit);
+static int tdav_codec_h264_get_nalunit_pay(const uint8_t* in_data, tsk_size_t in_size, const void** out_data, tsk_size_t *out_size);
+
+// profile_level_id MUST be a "null-terminated" string
+/* Parses an RFC 6184 "profile-level-id" (6 hex chars encoding the 3 bytes
+ * profile_idc, profile_iop, level_idc) into its components. Any output
+ * pointer may be tsk_null to skip that component. Unknown profile/level
+ * values map to profile_idc_none / level_idc_none.
+ * Returns 0 on success, -1 when the string length is not 6. */
+int tdav_codec_h264_parse_profile(const char* profile_level_id, profile_idc_t *p_idc, profile_iop_t *p_iop, level_idc_t *l_idc)
+{
+    uint32_t value;
+
+    if (tsk_strlen(profile_level_id) != 6) {
+        TSK_DEBUG_ERROR("I say [%s] is an invalid profile-level-id", profile_level_id);
+        return -1;
+    }
+
+    value = strtol(profile_level_id, tsk_null, 16);
+
+    /* profile-idc: topmost byte */
+    if (p_idc) {
+        const uint32_t idc = (value >> 16);
+        if (idc == profile_idc_baseline || idc == profile_idc_extended ||
+            idc == profile_idc_main || idc == profile_idc_high) {
+            *p_idc = (profile_idc_t)idc;
+        }
+        else {
+            *p_idc = profile_idc_none;
+        }
+    }
+
+    /* profile-iop: middle byte, constraint-set bit flags */
+    if (p_iop) {
+        const uint32_t iop = (value >> 8);
+        p_iop->constraint_set0_flag = (iop & 0x80) >> 7;
+        p_iop->constraint_set1_flag = (iop & 0x40) >> 6;
+        p_iop->constraint_set2_flag = (iop & 0x20) >> 5;
+        p_iop->reserved_zero_5bits = (iop & 0x1F);
+    }
+
+    /* level-idc: lowest byte, matched against the known levels */
+    if (l_idc) {
+        static const level_idc_t s_known_levels[] = {
+            level_idc_1_0, level_idc_1_b, level_idc_1_1, level_idc_1_2, level_idc_1_3,
+            level_idc_2_0, level_idc_2_1, level_idc_2_2,
+            level_idc_3_0, level_idc_3_1, level_idc_3_2,
+            level_idc_4_0, level_idc_4_1, level_idc_4_2,
+            level_idc_5_0, level_idc_5_1, level_idc_5_2
+        };
+        const uint32_t idc = (value & 0xFF);
+        tsk_size_t i;
+        *l_idc = level_idc_none;
+        for (i = 0; i < sizeof(s_known_levels) / sizeof(s_known_levels[0]); ++i) {
+            if ((uint32_t)s_known_levels[i] == idc) {
+                *l_idc = s_known_levels[i];
+                break;
+            }
+        }
+    }
+
+    return 0;
+}
+
+/* Depayloads one RTP H.264 payload (RFC 3984): dispatches on the NAL unit
+ * type in the first octet. FU-A fragments go to the FU-A handler; plain NAL
+ * units (types 1-23) are returned as-is with a Start Code Prefix requested.
+ * Aggregation types (STAP/MTAP/FU-B) fall through to the warning and -1.
+ * Returns 0 on success, -1 on invalid input or unsupported NAL type. */
+int tdav_codec_h264_get_pay(const void* in_data, tsk_size_t in_size, const void** out_data, tsk_size_t *out_size, tsk_bool_t* append_scp, tsk_bool_t* end_of_unit)
+{
+    const uint8_t* pdata = (const uint8_t*)in_data;
+    uint8_t nal_type;
+    if (!in_data || !in_size || !out_data || !out_size || !append_scp || !end_of_unit) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    *out_data = tsk_null;
+    *out_size = 0;
+
+    /* 5.3. NAL Unit Octet Usage
+      +---------------+
+      |0|1|2|3|4|5|6|7|
+      +-+-+-+-+-+-+-+-+
+      |F|NRI|  Type   |
+      +---------------+
+    */
+    switch ((nal_type = (pdata[0] & 0x1F))) {
+    case undefined_0:
+    case undefined_30:
+    case undefined_31:
+    case stap_a:
+    case stap_b:
+    case mtap16:
+    case mtap24:
+    case fu_b:
+        /* aggregation/FU-B packets are not handled: fall through to the warning below */
+        break;
+    case fu_a:
+        return tdav_codec_h264_get_fua_pay(pdata, in_size, out_data, out_size, append_scp, end_of_unit);
+    default: /* NAL unit (1-23) */
+        *append_scp = tsk_true; //(nal_type != 7 && nal_type != 8); // SPS or PPS
+        *end_of_unit = tsk_true;
+        return tdav_codec_h264_get_nalunit_pay(pdata, in_size, out_data, out_size);
+    }
+
+    TSK_DEBUG_WARN("%d not supported as valid NAL Unit type", (*pdata & 0x1F));
+    return -1;
+}
+
+
+/* Depayloads one FU-A fragment (RFC 3984 §5.8). For the first fragment
+ * (S bit set), reconstructs the original NAL header by merging F/NRI from the
+ * FU indicator with the type from the FU header, and requests a Start Code
+ * Prefix; middle/last fragments are returned without header or SCP.
+ * 'end_of_unit' is set from the E bit. Returns 0 on success, -1 if too short.
+ * NOTE(review): the S-bit branch writes the merged header through a
+ * const-cast into the CALLER'S buffer (in_data) — intentional in-place
+ * rewrite in doubango, but callers must pass a writable buffer; confirm. */
+static int tdav_codec_h264_get_fua_pay(const uint8_t* in_data, tsk_size_t in_size, const void** out_data, tsk_size_t *out_size, tsk_bool_t* append_scp, tsk_bool_t* end_of_unit)
+{
+    if (in_size <=H264_FUA_HEADER_SIZE) {
+        TSK_DEBUG_ERROR("Too short");
+        return -1;
+    }
+    /* RFC 3984 - 5.8. Fragmentation Units (FUs)
+
+    0                   1                   2                   3
+    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    | FU indicator  |   FU header   |                               |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               |
+    |                                                               |
+    |                         FU payload                            |
+    |                                                               |
+    |                               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    |                               :...OPTIONAL RTP padding        |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+    The FU indicator octet has the following format:
+
+    +---------------+
+    |0|1|2|3|4|5|6|7|
+    +-+-+-+-+-+-+-+-+
+    |F|NRI|  Type   |
+    +---------------+
+
+    The FU header has the following format:
+
+    +---------------+
+    |0|1|2|3|4|5|6|7|
+    +-+-+-+-+-+-+-+-+
+    |S|E|R|  Type   |
+    +---------------+
+    */
+
+    if (((in_data[1] & 0x80) /*S*/)) {
+        /* discard "FU indicator" */
+        *out_data = (in_data + H264_NAL_UNIT_TYPE_HEADER_SIZE);
+        *out_size = (in_size - H264_NAL_UNIT_TYPE_HEADER_SIZE);
+
+        // Do need to append Start Code Prefix ?
+        /* S: 1 bit
+        When set to one, the Start bit indicates the start of a fragmented
+        NAL unit.  When the following FU payload is not the start of a
+        fragmented NAL unit payload, the Start bit is set to zero.*/
+        *append_scp = tsk_true;
+
+        // F, NRI and Type (const cast: rewrites the byte in the input buffer in place)
+        *((uint8_t*)*out_data) = (in_data[0] & 0xe0) /* F,NRI from "FU indicator"*/ | (in_data[1] & 0x1f) /* type from "FU header" */;
+    }
+    else {
+        *append_scp = tsk_false;
+        *out_data = (in_data + H264_FUA_HEADER_SIZE);
+        *out_size = (in_size - H264_FUA_HEADER_SIZE);
+    }
+    /*
+    E:     1 bit
+    When set to one, the End bit indicates the end of a fragmented
+    NAL unit, i.e., the last byte of the payload is also the last
+    byte of the fragmented NAL unit.  When the following FU
+    payload is not the last fragment of a fragmented NAL unit, the
+    End bit is set to zero.
+    */
+    *end_of_unit = (((in_data[1] & 0x40) /*E*/)) ? tsk_true : tsk_false;
+
+    return 0;
+}
+
+/* Single NAL Unit Packet (RFC 3984 §5.6): the RTP payload *is* the NAL unit,
+ * header included, so depayloading is a plain pass-through. Always returns 0. */
+static int tdav_codec_h264_get_nalunit_pay(const uint8_t* in_data, tsk_size_t in_size, const void** out_data, tsk_size_t *out_size)
+{
+    *out_data = in_data;
+    *out_size = in_size;
+    return 0;
+}
+
+/* Splits an Annex-B encoded H.264 buffer (Start-Code-Prefix delimited NAL
+ * units) into individual NAL units and hands each to
+ * tdav_codec_h264_rtp_callback() for RTP packetization. A leading 3- or
+ * 4-byte SCP is stripped before scanning. Silently ignores NULL/too-short
+ * input. */
+void tdav_codec_h264_rtp_encap(struct tdav_codec_h264_common_s* self, const uint8_t* pdata, tsk_size_t size)
+{
+    static const tsk_size_t size_of_scp = sizeof(H264_START_CODE_PREFIX); /* we know it's equal to 4 .. */
+    register tsk_size_t i;
+    tsk_size_t last_scp, prev_scp;
+    tsk_size_t _size;
+
+    if (!pdata || size < size_of_scp) {
+        return;
+    }
+
+    /* strip the leading start code (3-byte 00 00 01 or 4-byte 00 00 00 01) */
+    if (pdata[0] == 0 && pdata[1] == 0) {
+        if (pdata[2] == 1) {
+            pdata += 3, size -= 3;
+        }
+        else if (pdata[2] == 0 && pdata[3] == 1) {
+            pdata += 4, size -= 4;
+        }
+    }
+
+    _size = (size - size_of_scp);
+    last_scp = 0, prev_scp = 0;
+    for (i = size_of_scp; i<_size; i++) {
+        if (pdata[i] == 0 && pdata[i+1] == 0 && (pdata[i+2] == 1 || (pdata[i+2] == 0 && pdata[i+3] == 1))) { /* Find Start Code Prefix */
+            prev_scp = last_scp;
+            /* "|| 1" makes the size threshold moot: every complete unit is emitted */
+            if ((i - last_scp) >= H264_RTP_PAYLOAD_SIZE || 1) {
+                /* NOTE(review): the marker argument '(prev_scp == size)' can seemingly
+                 * never be true here (prev_scp < i < size) — presumably tsk_false was
+                 * intended since more units follow; confirm against upstream doubango */
+                tdav_codec_h264_rtp_callback(self, pdata + prev_scp,
+                    (i - prev_scp), (prev_scp == size));
+            }
+            last_scp = i;
+            i += (pdata[i+2] == 1) ? 3 : 4; /* skip over the SCP just found */
+        }
+    }
+
+    /* flush the final NAL unit with the marker set (end of access unit) */
+    if (last_scp < (int32_t)size) {
+        tdav_codec_h264_rtp_callback(self, pdata + last_scp,
+            (size - last_scp), tsk_true);
+    }
+}
+
+/* Packetizes one NAL unit for RTP: strips any leading Start Code Prefix,
+ * then either sends it as a Single NAL Unit packet (when small enough or
+ * when pack_mode_local forces it) or fragments it into FU-A packets
+ * (RFC 3984 §5.8), invoking the codec's out.callback for every packet.
+ * 'marker' flags the last chunk of the access unit. */
+void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_common_s *self, const void *data, tsk_size_t size, tsk_bool_t marker)
+{
+    uint8_t* pdata = (uint8_t*)data;
+
+    //TSK_DEBUG_INFO("%x %x %x %x -- %u", pdata[0], pdata[1], pdata[2], pdata[3], size);
+
+    /* strip a leading 3-byte (00 00 01) or 4-byte (00 00 00 01) start code */
+    if (size>4 && pdata[0] == H264_START_CODE_PREFIX[0] && pdata[1] == H264_START_CODE_PREFIX[1]) {
+        if(pdata[2] == H264_START_CODE_PREFIX[3]){
+            pdata += 3, size -= 3;
+        }
+        else if (pdata[2] == H264_START_CODE_PREFIX[2] && pdata[3] == H264_START_CODE_PREFIX[3]) {
+            pdata += 4, size -= 4;
+        }
+    }
+
+    //TSK_DEBUG_INFO("==> SCP %2x %2x %2x %2x", pdata[0], pdata[1], pdata[2], pdata[3]);
+
+    if (self->pack_mode_local == Single_NAL_Unit_Mode || size < H264_RTP_PAYLOAD_SIZE) {
+        if (self->pack_mode_local == Single_NAL_Unit_Mode && size > H264_RTP_PAYLOAD_SIZE) {
+            TSK_DEBUG_WARN("pack_mode=Single_NAL_Unit_Mode but size(%d) > H264_RTP_PAYLOAD_SIZE(%d). Did you forget to set \"avctx->rtp_payload_size\"?", size, H264_RTP_PAYLOAD_SIZE);
+        }
+        // Can be packet in a Single Nal Unit
+        // Send data over the network
+        if (TMEDIA_CODEC_VIDEO(self)->out.callback) {
+            TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = pdata;
+            TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = size;
+            TMEDIA_CODEC_VIDEO(self)->out.result.duration = (uint32_t)((1./(double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate);
+            TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = marker;
+            TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+        }
+    }
+    else if (size > H264_NAL_UNIT_TYPE_HEADER_SIZE) {
+        /* Should be Fragmented as FUA */
+        uint8_t fua_hdr[H264_FUA_HEADER_SIZE]; /* "FU indicator" and "FU header" - 2bytes */
+        fua_hdr[0] = pdata[0] & 0x60/* NRI */, fua_hdr[0] |= fu_a;
+        fua_hdr[1] = 0x80/* S=1,E=0,R=0 */, fua_hdr[1] |= pdata[0] & 0x1f; /* type */
+        // discard header (it is reconstructed from the FU indicator/header on the far side)
+        pdata += H264_NAL_UNIT_TYPE_HEADER_SIZE;
+        size -= H264_NAL_UNIT_TYPE_HEADER_SIZE;
+
+        while(size) {
+            tsk_size_t packet_size = TSK_MIN(H264_RTP_PAYLOAD_SIZE, size);
+
+            /* grow the scratch buffer on demand; it is reused across calls */
+            if (self->rtp.size < (packet_size + H264_FUA_HEADER_SIZE)){
+                if(!(self->rtp.ptr = (uint8_t*)tsk_realloc(self->rtp.ptr, (packet_size + H264_FUA_HEADER_SIZE)))){
+                    TSK_DEBUG_ERROR("Failed to allocate new buffer");
+                    return;
+                }
+                self->rtp.size = (packet_size + H264_FUA_HEADER_SIZE);
+            }
+            // set E bit
+            if((size - packet_size) == 0){
+                // Last packet
+                fua_hdr[1] |= 0x40;
+            }
+            // copy FUA header
+            memcpy(self->rtp.ptr, fua_hdr, H264_FUA_HEADER_SIZE);
+            // reset "S" bit (only the first fragment carries S=1)
+            fua_hdr[1] &= 0x7F;
+            // copy data
+            memcpy((self->rtp.ptr + H264_FUA_HEADER_SIZE), pdata, packet_size);
+            pdata += packet_size;
+            size -= packet_size;
+
+            // send data
+            if(TMEDIA_CODEC_VIDEO(self)->out.callback){
+                TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = self->rtp.ptr;
+                TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = (packet_size + H264_FUA_HEADER_SIZE);
+                TMEDIA_CODEC_VIDEO(self)->out.result.duration = (uint32_t)((1./(double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate);
+                TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = (size == 0);
+                TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+            }
+        }
+    }
+}
diff --git a/tinyDAV/src/codecs/ilbc/tdav_codec_ilbc.c b/tinyDAV/src/codecs/ilbc/tdav_codec_ilbc.c
new file mode 100644
index 0000000..65df6ad
--- /dev/null
+++ b/tinyDAV/src/codecs/ilbc/tdav_codec_ilbc.c
@@ -0,0 +1,265 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_ilbc.c
+ * @brief iLBC codec
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/ilbc/tdav_codec_ilbc.h"
+
+#if HAVE_ILBC
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_ILBC_MODE 20
+
+/* ============ iLBC Plugin interface ================= */
+
+#define tdav_codec_ilbc_set tsk_null
+
+/* Open the codec: initialize the iLBC encoder and decoder states.
+ * Always succeeds (the reference init functions report no errors). */
+static int tdav_codec_ilbc_open(tmedia_codec_t* self)
+{
+	tdav_codec_ilbc_t* codec = (tdav_codec_ilbc_t*)self;
+	/* Both directions start in the default frame mode (TDAV_ILBC_MODE ms);
+	   the decoder enables the enhancer as recommended by the reference code. */
+	initEncode(&codec->encoder, TDAV_ILBC_MODE);
+	initDecode(&codec->decoder, TDAV_ILBC_MODE, tsk_true/*enhancer*/);
+	return 0;
+}
+
+/* Close the codec. Nothing to release: the encoder/decoder states are inline
+ * structs that the next open() re-initializes from scratch. */
+static int tdav_codec_ilbc_close(tmedia_codec_t* self)
+{
+	(void)self;
+	return 0;
+}
+
+/* Encode one block of 16-bit PCM into an iLBC frame.
+ * @param in_data PCM samples; must hold at least encoder.blockl shorts.
+ * @param out_data (re)allocated on demand; *out_max_size tracks its capacity.
+ * @return number of encoded bytes (encoder.no_of_bytes) or 0 on error. */
+static tsk_size_t tdav_codec_ilbc_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+	tdav_codec_ilbc_t* ilbc = (tdav_codec_ilbc_t*)self;
+	int k;
+
+	if(!self || !in_data || !in_size || !out_data || !out_max_size){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return 0;
+	}
+
+	/* BUGFIX: guard against a short input buffer — the conversion loop below
+	   unconditionally reads one full block (encoder.blockl 16-bit samples). */
+	if(in_size < (tsk_size_t)(ilbc->encoder.blockl * sizeof(short))){
+		TSK_DEBUG_ERROR("Input buffer too short");
+		return 0;
+	}
+
+	/* convert signal to float (the reference encoder works on floats) */
+	for(k=0; k<ilbc->encoder.blockl; k++){
+		ilbc->encblock[k] = (float)((const short*)in_data)[k];
+	}
+
+	/* allocate new buffer if needed */
+	if((int)*out_max_size <ilbc->encoder.no_of_bytes){
+		if(!(*out_data = tsk_realloc(*out_data, ilbc->encoder.no_of_bytes))){
+			TSK_DEBUG_ERROR("Failed to allocate new buffer");
+			*out_max_size = 0;
+			return 0;
+		}
+		*out_max_size = ilbc->encoder.no_of_bytes;
+	}
+
+	/* do the actual encoding */
+	iLBC_encode(*out_data, ilbc->encblock, &ilbc->encoder);
+
+	return ilbc->encoder.no_of_bytes;
+}
+
+/* Decode one or more iLBC frames into 16-bit PCM.
+ * The frame mode is inferred from the payload size (RFC 3952: 38 bytes <=> 20ms,
+ * 50 bytes <=> 30ms) and the decoder is re-initialized when the sender switches.
+ * @return number of PCM bytes written to *out_data, or 0 on error. */
+static tsk_size_t tdav_codec_ilbc_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+	int blocks, i, k, in_block_size;
+	float dtmp;
+	tsk_size_t out_size;
+	tdav_codec_ilbc_t* ilbc = (tdav_codec_ilbc_t*)self;
+
+	if(!self || !in_data || !in_size || !out_data){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return 0;
+	}
+
+	if((in_size % NO_OF_BYTES_20MS) == 0){
+		/* Using 20ms mode */
+		blocks = (int)(in_size/NO_OF_BYTES_20MS);
+		in_block_size = NO_OF_BYTES_20MS;
+		out_size = (BLOCKL_20MS * blocks) * sizeof(short);
+		if(ilbc->decoder.mode != 20){
+			initDecode(&ilbc->decoder, 20, tsk_true/* Enhancer */);
+		}
+	}
+	else if((in_size % NO_OF_BYTES_30MS) == 0){
+		/* Using 30ms mode */
+		blocks = (int)(in_size/NO_OF_BYTES_30MS);
+		in_block_size = NO_OF_BYTES_30MS;
+		out_size = (BLOCKL_30MS * blocks) * sizeof(short);
+		if(ilbc->decoder.mode != 30){
+			initDecode(&ilbc->decoder, 30, tsk_true/* Enhancer */);
+		}
+	}
+	else{
+		TSK_DEBUG_ERROR("Invalid iLBC mode");
+		return 0;
+	}
+
+	/* allocate new buffer if needed */
+	if(*out_max_size<out_size){
+		if(!(*out_data = tsk_realloc(*out_data, out_size))){
+			TSK_DEBUG_ERROR("Failed to allocate new buffer");
+			*out_max_size = 0;
+			return 0;
+		}
+		*out_max_size = out_size;
+	}
+
+	for(i = 0; i<blocks; i++){
+		/* BUGFIX: advance through the *encoded* input in 38/50-byte steps; the old
+		   code used the decoded block size as the input stride and read past the
+		   payload whenever more than one frame was packed into it. */
+		iLBC_decode(ilbc->decblock, &((uint8_t*)in_data)[i*in_block_size], &ilbc->decoder, 1/* Normal */);
+
+		/* clip and convert each float sample to 16-bit PCM */
+		for(k=0; k<ilbc->decoder.blockl; k++){
+			dtmp=ilbc->decblock[k];
+
+			if(dtmp<MIN_SAMPLE){
+				dtmp = MIN_SAMPLE;
+			}
+			else if(dtmp>MAX_SAMPLE){
+				dtmp = MAX_SAMPLE;
+			}
+
+			/* BUGFIX: index the output in samples (blockl per frame), not bytes;
+			   the old byte-based index wrote out of bounds when blocks > 1. */
+			((short*)*out_data)[(i*ilbc->decoder.blockl) + k] = ((short) dtmp);
+		}
+	}
+
+	return out_size;
+}
+
+/* Build an outgoing SDP attribute value. Only "fmtp" is supported and it
+ * advertises our preferred frame mode; the returned string is heap-allocated
+ * (caller frees), tsk_null for any other attribute. */
+static char* tdav_codec_ilbc_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+	char* fmtp = tsk_null;
+	if(tsk_striequals(att_name, "fmtp")){
+		tsk_sprintf(&fmtp, "mode=%d", TDAV_ILBC_MODE);
+	}
+	return fmtp;
+}
+
+/* Accept any offered attribute.
+ * RFC 3952 §5: the resulting mode SHALL be the lower-bandwidth of the offered
+ * and answered modes — an offer of "mode=20" answered with "mode=30" yields
+ * 30ms frames for both participants, and vice versa. This matters on
+ * bandwidth-constrained links (28.8k modem or slower) where only the larger
+ * frame size works. FIXME: the "fmtp" mode value is currently not enforced. */
+static tsk_bool_t tdav_codec_ilbc_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+	(void)codec; (void)att_name; (void)att_value;
+	return tsk_true;
+}
+
+
+//
+// iLBC Plugin definition
+//
+
+/* constructor */
+/* constructor — the base part is initialized by tmedia_codec_create();
+ * this codec keeps no extra state to set up. */
+static tsk_object_t* tdav_codec_ilbc_ctor(tsk_object_t * self, va_list * app)
+{
+	return self;
+}
+/* destructor */
+/* destructor */
+static tsk_object_t* tdav_codec_ilbc_dtor(tsk_object_t * self)
+{
+	tdav_codec_ilbc_t *ilbc = self;
+	if(!ilbc){
+		return self;
+	}
+	/* deinit base; the inline encoder/decoder states need no teardown */
+	tmedia_codec_audio_deinit(ilbc);
+	return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_ilbc_def_s =
+{
+	sizeof(tdav_codec_ilbc_t), // object size
+	tdav_codec_ilbc_ctor, // constructor
+	tdav_codec_ilbc_dtor, // destructor
+	tmedia_codec_cmp, // comparator (shared by all codecs)
+};
+/* plugin definition*/
+/* Plugin descriptor registered with the media framework. */
+static const tmedia_codec_plugin_def_t tdav_codec_ilbc_plugin_def_s =
+{
+	&tdav_codec_ilbc_def_s,
+
+	tmedia_audio, // media type
+	tmedia_codec_id_ilbc,
+	"iLBC", // encoding name used in SDP
+	"iLBC codec (libILBc)",
+	TMEDIA_CODEC_FORMAT_ILBC,
+	tsk_true, // dynamic payload type
+	8000, // rate
+
+	{ /* audio */
+		1, // channels
+		20 // ptime
+	},
+
+	/* video */
+	{0},
+
+	tdav_codec_ilbc_set, // tsk_null: no runtime parameters
+	tdav_codec_ilbc_open,
+	tdav_codec_ilbc_close,
+	tdav_codec_ilbc_encode,
+	tdav_codec_ilbc_decode,
+	tdav_codec_ilbc_sdp_att_match,
+	tdav_codec_ilbc_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_ilbc_plugin_def_t = &tdav_codec_ilbc_plugin_def_s;
+
+
+#endif /* HAVE_ILBC */
diff --git a/tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c b/tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c
new file mode 100644
index 0000000..e48cb00
--- /dev/null
+++ b/tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c
@@ -0,0 +1,818 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_mp4ves.c
+ * @brief MP4V-ES codec plugin
+ * RTP payloader/depayloader follows RFC 3016.
+ * ISO-IEC-14496-2: http://www.csus.edu/indiv/p/pangj/aresearch/video_compression/presentation/ISO-IEC-14496-2_2001_MPEG4_Visual.pdf
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Th Dec 2 16:54:58 2010 mdiop
+ */
+#include "tinydav/codecs/mp4ves/tdav_codec_mp4ves.h"
+
+#if HAVE_FFMPEG
+
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tnet_endianness.h"
+
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_string.h"
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <libavcodec/avcodec.h>
+
+#define DEFAULT_PROFILE_LEVEL_ID Simple_Profile_Level_1
+
+#define MP4V_GOP_SIZE_IN_SECONDS 25
+#define MP4V_RTP_PAYLOAD_SIZE 900
+
+/* MP4V-ES codec context: RTP packetization buffer plus FFmpeg encoder/decoder state. */
+typedef struct tdav_codec_mp4ves_s
+{
+	TMEDIA_DECLARE_CODEC_VIDEO;
+
+	int profile; // negotiated "profile-level-id" (mp4v_profiles_t)
+
+	struct{
+		uint8_t* ptr; // scratch buffer for RTP packetization
+		tsk_size_t size;
+	} rtp;
+
+	// Encoder
+	struct{
+		AVCodec* codec;
+		AVCodecContext* context;
+		AVFrame* picture; // wraps the caller's YUV420 buffer (filled via avpicture_fill, no pixel copy)
+		void* buffer; // destination for avcodec_encode_video()
+		tsk_bool_t force_idr; // request an intra frame on the next encode()
+		int quality; // [1-31], 1 = best; mapped to global_quality via FF_QP2LAMBDA
+		int rotation; // 90/270 swap encoder width/height
+		int32_t max_bw_kpbs; // upload bandwidth cap used to derive bit_rate
+	} encoder;
+
+	// decoder
+	struct{
+		AVCodec* codec;
+		AVCodecContext* context;
+		AVFrame* picture;
+
+		void* accumulator; // reassembly buffer: payloads appended until the RTP marker bit
+		uint8_t ebit;
+		tsk_size_t accumulator_pos; // current fill level of the accumulator
+		uint16_t last_seq; // last RTP sequence number, for loss/duplicate detection
+	} decoder;
+}
+tdav_codec_mp4ves_t;
+
+// Start codes from ISO-IEC-14496-2 (compared against the first 32 bits of the bitstream)
+typedef enum mp4v_codes_e
+{
+	// To initiate a visual session (6.3.2)
+	visual_object_sequence_start_code = 0x000001B0,
+	// To terminate a visual session (6.3.2)
+	visual_object_sequence_end_code = 0x000001B1,
+	// To initiate a visual object (6.3.2)
+	visual_object_start_code = 0x000001B5,
+	// To identify the beginning of user data. The user data continues until receipt of another start code. (6.3.2.1)
+	user_data_start_code = 0x000001B2,
+	// The video_object_layer_start_code is a string of 32 bits. The first 28 bits are
+	// '0000 0000 0000 0000 0000 0001 0010' in binary and the last 4-bits represent one of the values in the range of
+	// '0000' to '1111' in binary. The video_object_layer_start_code marks a new video object layer. (6.3.3)
+	// NOTE(review): this constant holds only the 28-bit prefix (0x12); the on-wire VOL
+	// start codes are 0x00000120..0x0000012F, so a direct 32-bit equality test against
+	// this value can never match — confirm whether callers mask the low nibble first.
+	video_object_layer_start_code = 0x0000012,
+	// To identify the beginning of a GOV header (6.3.4)
+	group_of_vop_start_code = 0x000001B3,
+	// To mark the start of a video object plane (6.3.5 )
+	vop_start_code = 0x000001B6,
+}
+mp4v_start_code_t;
+
+// Profile/level indications from ISO-IEC-14496-2 Annex G
+// (the value is carried as SDP "profile-level-id"; high nibble = profile, low nibble = level)
+typedef enum mp4v_profiles_e
+{
+	/* Reserved = 0x00000000 */
+	Simple_Profile_Level_1 = 1,
+	Simple_Profile_Level_2 = 2,
+	Simple_Profile_Level_3 = 3,
+	/* Reserved 00000100 - 00010000 */
+	Simple_Scalable_Profile_Level_1 = 17,
+	Simple_Scalable_Profile_Level_2 = 18,
+	/* Reserved 00010011 - 00100000 */
+	Core_Profile_Level_1 = 33,
+	Core_Profile_Level_2 = 34,
+	/* Reserved 00100011 - 00110001 */
+	Main_Profile_Level_2 = 50,
+	Main_Profile_Level_3 = 51,
+	Main_Profile_Level_4 = 52,
+	/* Reserved 00110101 - 01000001 */
+	N_bit_Profile_Level_2 = 66,
+	/* Reserved 01000011 - 01010000 */
+	Scalable_Texture_Profile_Level_1 = 81,
+	/* Reserved 01010010 - 01100000 */
+	Simple_Face_Animation_Profile_Level_1 = 97,
+	Simple_Face_Animation_Profile_Level_2 = 98,
+	Simple_FBA_Profile_Level_1 = 99,
+	Simple_FBA_Profile_Level_2 = 100,
+	/* Reserved 01100101 - 01110000 */
+	Basic_Animated_Texture_Profile_Level_1 = 113,
+	Basic_Animated_Texture_Profile_Level_2 = 114,
+	/* Reserved 01110011 - 10000000 */
+	Hybrid_Profile_Level_1 = 129,
+	Hybrid_Profile_Level_2 = 130,
+	/* Reserved 10000011 - 10010000 */
+	Advanced_Real_Time_Simple_Profile_Level_1 = 145,
+	Advanced_Real_Time_Simple_Profile_Level_2 = 146,
+	Advanced_Real_Time_Simple_Profile_Level_3 = 147,
+	Advanced_Real_Time_Simple_Profile_Level_4 = 148,
+	/* Reserved 10010101 - 10100000 */
+}
+mp4v_profiles_t;
+
+static int tdav_codec_mp4ves_open_encoder(tdav_codec_mp4ves_t* self);
+static int tdav_codec_mp4ves_open_decoder(tdav_codec_mp4ves_t* self);
+static int tdav_codec_mp4ves_close_encoder(tdav_codec_mp4ves_t* self);
+static int tdav_codec_mp4ves_close_decoder(tdav_codec_mp4ves_t* self);
+
+static void tdav_codec_mp4ves_encap(tdav_codec_mp4ves_t* mp4v, const uint8_t* pdata, tsk_size_t size);
+static void tdav_codec_mp4ves_rtp_callback(tdav_codec_mp4ves_t *mp4v, const void *data, tsk_size_t size, tsk_bool_t marker);
+
+/* ============ MP4V-ES Plugin interface functions ================= */
+
+/* Runtime parameter setter ("action" and "rotation"); valid only once the codec
+ * is opened. Returns 0 on success, negative on failure or unknown parameter. */
+static int tdav_codec_mp4ves_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+	tdav_codec_mp4ves_t* mp4ves = (tdav_codec_mp4ves_t*)self;
+	if(!self->opened){
+		TSK_DEBUG_ERROR("Codec not opened");
+		return -1;
+	}
+	if(param->value_type == tmedia_pvt_int32){
+		if(tsk_striequals(param->key, "action")){
+			tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+			switch(action){
+				case tmedia_codec_action_encode_idr:
+				{
+					mp4ves->encoder.force_idr = tsk_true;
+					break;
+				}
+				case tmedia_codec_action_bw_down:
+				{
+					/* larger quantizer => lower quality => less bandwidth */
+					mp4ves->encoder.quality = TSK_CLAMP(1, (mp4ves->encoder.quality + 1), 31);
+					mp4ves->encoder.context->global_quality = FF_QP2LAMBDA * mp4ves->encoder.quality;
+					break;
+				}
+				case tmedia_codec_action_bw_up:
+				{
+					mp4ves->encoder.quality = TSK_CLAMP(1, (mp4ves->encoder.quality - 1), 31);
+					mp4ves->encoder.context->global_quality = FF_QP2LAMBDA * mp4ves->encoder.quality;
+					break;
+				}
+				default:
+				{
+					/* BUGFIX: unhandled enumerators no longer fall through silently */
+					TSK_DEBUG_WARN("Action %d not supported", (int)action);
+					break;
+				}
+			}
+			return 0; /* BUGFIX: a handled "action" used to fall through to "return -1" */
+		}
+		else if(tsk_striequals(param->key, "rotation")){
+			int rotation = *((int32_t*)param->value);
+			if(mp4ves->encoder.rotation != rotation){
+				/* restart the encoder so the swapped width/height take effect
+				   (the codec is guaranteed opened by the guard above) */
+				int ret;
+				mp4ves->encoder.rotation = rotation;
+				if((ret = tdav_codec_mp4ves_close_encoder(mp4ves))){
+					return ret;
+				}
+				if((ret = tdav_codec_mp4ves_open_encoder(mp4ves))){
+					return ret;
+				}
+			}
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/* Open the codec: bring up the FFmpeg encoder, then the decoder.
+ * Returns 0 on success, the failing sub-open's error code otherwise. */
+int tdav_codec_mp4ves_open(tmedia_codec_t* self)
+{
+	tdav_codec_mp4ves_t* mp4v = (tdav_codec_mp4ves_t*)self;
+	int ret;
+
+	if(!mp4v){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* the caller (base class) already checked that the codec is not opened */
+	if((ret = tdav_codec_mp4ves_open_encoder(mp4v)) == 0){
+		ret = tdav_codec_mp4ves_open_decoder(mp4v);
+	}
+	return ret;
+}
+
+/* Close the codec: tear down both the encoder and the decoder sides. */
+int tdav_codec_mp4ves_close(tmedia_codec_t* self)
+{
+	tdav_codec_mp4ves_t* mp4v = (tdav_codec_mp4ves_t*)self;
+
+	if(!mp4v){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* the caller (base class) already checked that the codec is opened */
+	tdav_codec_mp4ves_close_encoder(mp4v);
+	tdav_codec_mp4ves_close_decoder(mp4v);
+	return 0;
+}
+
+/* Encode one raw YUV420 frame and deliver the resulting RTP payloads through
+ * tdav_codec_mp4ves_encap() -> out.callback. Always returns 0 because the
+ * encoded data goes out via the callback, not through out_data. */
+tsk_size_t tdav_codec_mp4ves_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+	int ret;
+	int size;
+
+	tdav_codec_mp4ves_t* mp4v = (tdav_codec_mp4ves_t*)self;
+
+	if(!self || !in_data || !in_size || !out_data){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return 0;
+	}
+
+	// wrap yuv420 buffer (no pixel copy; returns the frame size for this resolution)
+	size = avpicture_fill((AVPicture *)mp4v->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, mp4v->encoder.context->width, mp4v->encoder.context->height);
+	if(size != in_size){
+		/* guard: the caller's buffer must exactly match the negotiated resolution */
+		TSK_DEBUG_ERROR("Invalid size");
+		return 0;
+	}
+
+	if(mp4v->encoder.force_idr){
+		/* one-shot request for an intra frame (set via the "action" parameter) */
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+		mp4v->encoder.picture->pict_type = FF_I_TYPE;
+#else
+		mp4v->encoder.picture->pict_type = AV_PICTURE_TYPE_I;
+#endif
+		mp4v->encoder.force_idr = tsk_false;
+	}
+	else{
+		mp4v->encoder.picture->pict_type = 0;// reset: let the encoder pick the picture type
+	}
+	mp4v->encoder.picture->pts = AV_NOPTS_VALUE;
+	mp4v->encoder.picture->quality = mp4v->encoder.context->global_quality;
+	ret = avcodec_encode_video(mp4v->encoder.context, mp4v->encoder.buffer, size, mp4v->encoder.picture);
+	if(ret > 0){
+		/* packetize per RFC 3016 and push to the transport callback */
+		tdav_codec_mp4ves_encap(mp4v, mp4v->encoder.buffer, (tsk_size_t)ret);
+	}
+
+	return 0;
+}
+
+/* Reassemble RTP payloads (RFC 3016) into a complete access unit and decode it
+ * with FFmpeg once the packet carrying the RTP marker bit arrives.
+ * @return size of the decoded YUV420 picture written to *out_data, or 0 when
+ * more packets are needed / on error. */
+tsk_size_t tdav_codec_mp4ves_decode(tmedia_codec_t* _self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+	tdav_codec_mp4ves_t* self = (tdav_codec_mp4ves_t*)_self;
+	const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+
+	tsk_size_t xsize, retsize = 0;
+	int got_picture_ptr;
+	int ret;
+
+	if(!self || !in_data || !in_size || !out_data || !self->decoder.context){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return 0;
+	}
+
+	// get expected size of one decoded picture (also bounds the accumulator)
+	xsize = avpicture_get_size(self->decoder.context->pix_fmt, self->decoder.context->width, self->decoder.context->height);
+
+	/* Packet lost? (detection is informational; decoding still proceeds)
+	   NOTE(review): the (seq_num - 1) comparison does not handle 16-bit
+	   sequence-number wrap-around — confirm this is acceptable here. */
+	if(self->decoder.last_seq != (rtp_hdr->seq_num - 1) && self->decoder.last_seq){
+		if(self->decoder.last_seq == rtp_hdr->seq_num){
+			// Could happen on some stupid emulators
+			TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+			return 0;
+		}
+		TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num);
+	}
+	self->decoder.last_seq = rtp_hdr->seq_num;
+
+	/* append this payload to the reassembly buffer */
+	if((self->decoder.accumulator_pos + in_size) <= xsize){
+		memcpy(&((uint8_t*)self->decoder.accumulator)[self->decoder.accumulator_pos], in_data, in_size);
+		self->decoder.accumulator_pos += in_size;
+	}
+	else{
+		TSK_DEBUG_WARN("Buffer overflow");
+		self->decoder.accumulator_pos = 0;
+		return 0;
+	}
+
+	if(rtp_hdr->marker){ /* marker bit: the access unit is complete -> decode */
+		AVPacket packet;
+		/* allocate destination buffer */
+		if(*out_max_size <xsize){
+			if(!(*out_data = tsk_realloc(*out_data, xsize))){
+				TSK_DEBUG_ERROR("Failed to allocate new buffer");
+				self->decoder.accumulator_pos = 0;
+				*out_max_size = 0;
+				return 0;
+			}
+			*out_max_size = xsize;
+		}
+
+		av_init_packet(&packet);
+		packet.size = (int)self->decoder.accumulator_pos;
+		packet.data = self->decoder.accumulator;
+		ret = avcodec_decode_video2(self->decoder.context, self->decoder.picture, &got_picture_ptr, &packet);
+
+		if(ret < 0){
+			/* report the decode error upstream so the session can request an IDR */
+			TSK_DEBUG_WARN("Failed to decode the buffer with error code = %d", ret);
+			if(TMEDIA_CODEC_VIDEO(self)->in.callback){
+				TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+				TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+				TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+			}
+		}
+		else if(got_picture_ptr){
+			retsize = xsize;
+			TMEDIA_CODEC_VIDEO(self)->in.width = self->decoder.context->width;
+			TMEDIA_CODEC_VIDEO(self)->in.height = self->decoder.context->height;
+
+			/* copy picture into a linear buffer */
+			avpicture_layout((AVPicture *)self->decoder.picture, self->decoder.context->pix_fmt, (int)self->decoder.context->width, (int)self->decoder.context->height,
+				*out_data, (int)retsize);
+		}
+		/* in all cases: reset accumulator */
+		self->decoder.accumulator_pos = 0;
+	}
+
+	return retsize;
+}
+
+/* Negotiate incoming SDP attributes: "fmtp" sets the profile-level-id and the
+ * matching default resolution; "imageattr" overrides the in/out resolutions. */
+tsk_bool_t tdav_codec_mp4ves_sdp_att_match(const tmedia_codec_t* _self, const char* att_name, const char* att_value)
+{
+	tdav_codec_mp4ves_t *self = (tdav_codec_mp4ves_t *)_self;
+
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return tsk_false;
+	}
+
+	if(tsk_striequals(att_name, "fmtp")){
+		tsk_params_L_t* params ;
+		/* e.g. profile-level-id=1; xx=yy */
+		if((params = tsk_params_fromstring(att_value, ";", tsk_true))){
+			int val_int;
+			if((val_int = tsk_params_get_param_value_as_int(params, "profile-level-id")) != -1){
+				TSK_DEBUG_INFO("Proposed profile-level-id=%d", val_int);
+				self->profile = val_int; // FIXME: Take the remote profile-level-id even if the bandwidth level doesn't match
+			}
+			TSK_OBJECT_SAFE_FREE(params);
+		}
+
+		/* derive default resolution from the profile (QCIF for SP@L1, CIF otherwise) */
+		switch (self->profile ) {
+			case Simple_Profile_Level_1:
+				TMEDIA_CODEC_VIDEO(self)->out.width = TMEDIA_CODEC_VIDEO(self)->in.width = 176; TMEDIA_CODEC_VIDEO(self)->in.height = TMEDIA_CODEC_VIDEO(self)->out.height = 144;
+				break;
+			case Simple_Profile_Level_2:
+			case Simple_Profile_Level_3:
+			default:
+				TMEDIA_CODEC_VIDEO(self)->out.width = TMEDIA_CODEC_VIDEO(self)->in.width = 352; TMEDIA_CODEC_VIDEO(self)->in.height = TMEDIA_CODEC_VIDEO(self)->out.height = 288;
+				break;
+		}
+	}
+	else if(tsk_striequals(att_name, "imageattr")){
+		unsigned in_width, in_height, out_width, out_height;
+		if(tmedia_parse_video_imageattr(att_value, TMEDIA_CODEC_VIDEO(self)->pref_size, &in_width, &in_height, &out_width, &out_height) != 0){
+			return tsk_false;
+		}
+		TMEDIA_CODEC_VIDEO(self)->in.width = in_width;
+		TMEDIA_CODEC_VIDEO(self)->in.height = in_height;
+		TMEDIA_CODEC_VIDEO(self)->out.width = out_width;
+		TMEDIA_CODEC_VIDEO(self)->out.height = out_height;
+	}
+
+	return tsk_true;
+}
+
+/* Build outgoing SDP attributes: "fmtp" advertises a profile-level-id derived
+ * from the (deprecated) bandwidth level, "imageattr" the preferred resolutions.
+ * Returned strings are heap-allocated (caller frees). */
+char* tdav_codec_mp4ves_sdp_att_get(const tmedia_codec_t* _self, const char* att_name)
+{
+	tdav_codec_mp4ves_t *self = (tdav_codec_mp4ves_t *)_self;
+
+	if(tsk_striequals(att_name, "fmtp")){
+		char* fmtp = tsk_null;
+		switch(_self->bl){//FIXME: deprecated
+			case tmedia_bl_low:
+			default:
+				self->profile = Simple_Profile_Level_1;
+				break;
+			case tmedia_bl_medium:
+				self->profile = Simple_Profile_Level_2;
+				break;
+			case tmedia_bl_hight:
+			case tmedia_bl_unrestricted:
+				self->profile = Simple_Profile_Level_3;
+				break;
+		}
+		tsk_sprintf(&fmtp, "profile-level-id=%d", self->profile);
+		return fmtp;
+	}
+	else if(tsk_striequals(att_name, "imageattr")){
+		return tmedia_get_video_imageattr(TMEDIA_CODEC_VIDEO(self)->pref_size,
+			TMEDIA_CODEC_VIDEO(self)->in.width, TMEDIA_CODEC_VIDEO(self)->in.height, TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height);
+	}
+	return tsk_null;
+}
+
+
+/* ============ Internal functions ================= */
+/* Create and open the FFmpeg MPEG-4 encoder (legacy pre-0.11 avcodec API).
+ * Returns 0 on success, negative on failure. */
+int tdav_codec_mp4ves_open_encoder(tdav_codec_mp4ves_t* self)
+{
+	int ret, size;
+	int32_t max_bw_kpbs;
+	if(!self->encoder.codec && !(self->encoder.codec = avcodec_find_encoder(CODEC_ID_MPEG4))){
+		TSK_DEBUG_ERROR("Failed to find mp4v encoder");
+		return -1;
+	}
+
+	if(self->encoder.context){
+		TSK_DEBUG_ERROR("Encoder already opened");
+		return -1;
+	}
+	self->encoder.context = avcodec_alloc_context();
+	avcodec_get_context_defaults(self->encoder.context);
+
+	self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+	self->encoder.context->time_base.num = 1;
+	self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->in.fps;
+	/* a 90/270 degree rotation swaps the encoded width and height */
+	self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
+	self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
+	self->encoder.context->mb_decision = FF_MB_DECISION_RD;
+	self->encoder.context->noise_reduction = 250;
+	/* constant-quantizer mode driven by encoder.quality (see the "action" setter) */
+	self->encoder.context->flags |= CODEC_FLAG_QSCALE;
+	self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
+
+	/* target bitrate: resolution/fps-based estimate, capped by the configured upload maximum */
+	max_bw_kpbs = TSK_CLAMP(
+		0,
+		tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
+		self->encoder.max_bw_kpbs
+	);
+	self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps
+	self->encoder.context->rtp_payload_size = MP4V_RTP_PAYLOAD_SIZE;
+	self->encoder.context->opaque = tsk_null;
+	/* profile-level-id: high nibble = profile, low nibble = level */
+	self->encoder.context->profile = self->profile>>4;
+	self->encoder.context->level = self->profile & 0x0F;
+	self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->in.fps * MP4V_GOP_SIZE_IN_SECONDS);
+	self->encoder.context->max_b_frames = 0; /* no B-frames: low-latency RTP */
+	self->encoder.context->b_frame_strategy = 1;
+	self->encoder.context->flags |= CODEC_FLAG_AC_PRED;
+
+	// Picture (YUV 420)
+	if(!(self->encoder.picture = avcodec_alloc_frame())){
+		TSK_DEBUG_ERROR("Failed to create MP4V-ES encoder picture");
+		return -2;
+	}
+	avcodec_get_frame_defaults(self->encoder.picture);
+
+	/* output buffer sized for one uncompressed frame (upper bound for the encoded frame) */
+	size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
+	if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+		TSK_DEBUG_ERROR("Failed to allocate MP4V-ES encoder buffer");
+		return -2;
+	}
+
+	// Open encoder
+	if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
+		TSK_DEBUG_ERROR("Failed to open MP4V-ES encoder");
+		return ret;
+	}
+
+	TSK_DEBUG_INFO("[MP4V-ES] bitrate=%d bps", self->encoder.context->bit_rate);
+
+	return ret;
+}
+
+/* Create and open the FFmpeg MPEG-4 decoder and its reassembly buffer.
+ * Returns 0 on success, negative on failure. */
+int tdav_codec_mp4ves_open_decoder(tdav_codec_mp4ves_t* self)
+{
+	int ret, size;
+
+	if(!self->decoder.codec && !(self->decoder.codec = avcodec_find_decoder(CODEC_ID_MPEG4))){
+		TSK_DEBUG_ERROR("Failed to find MP4V-ES decoder");
+		return -1;
+	}
+
+	if(self->decoder.context){
+		TSK_DEBUG_ERROR("Decoder already opened");
+		return -1;
+	}
+
+	self->decoder.context = avcodec_alloc_context();
+	avcodec_get_context_defaults(self->decoder.context);
+
+	self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+	self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->out.width;
+	self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->out.height;
+
+	// Picture (YUV 420)
+	if(!(self->decoder.picture = avcodec_alloc_frame())){
+		TSK_DEBUG_ERROR("Failed to create decoder picture");
+		return -2;
+	}
+	avcodec_get_frame_defaults(self->decoder.picture);
+
+	/* reassembly buffer, padded as required by avcodec_decode_video2().
+	   BUGFIX: this block was duplicated; the second tsk_calloc() leaked the
+	   first allocation on every open. */
+	size = avpicture_get_size(PIX_FMT_YUV420P, self->decoder.context->width, self->decoder.context->height);
+	if(!(self->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
+		TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
+		return -2;
+	}
+
+	// Open decoder
+	if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
+		TSK_DEBUG_ERROR("Failed to open MP4V-ES decoder");
+		return ret;
+	}
+
+	self->decoder.last_seq = 0;
+
+	return ret;
+}
+
+/* Tear down the FFmpeg encoder and release encode-side buffers.
+ * Idempotent: every branch is guarded and pointers are reset so the encoder
+ * can be reopened (e.g. after a "rotation" change). */
+int tdav_codec_mp4ves_close_encoder(tdav_codec_mp4ves_t* self)
+{
+	if(self->encoder.context){
+		avcodec_close(self->encoder.context);
+		av_free(self->encoder.context);
+		self->encoder.context = tsk_null;
+	}
+	if(self->encoder.picture){
+		av_free(self->encoder.picture);
+		self->encoder.picture = tsk_null; /* BUGFIX: was left dangling (close_decoder already nulls it) */
+	}
+	if(self->encoder.buffer){
+		TSK_FREE(self->encoder.buffer); /* TSK_FREE also resets the pointer */
+	}
+	return 0;
+}
+
+/* Tear down the FFmpeg decoder and release decode-side buffers.
+ * Idempotent: every branch is guarded. */
+int tdav_codec_mp4ves_close_decoder(tdav_codec_mp4ves_t* self)
+{
+	if(self->decoder.context){
+		avcodec_close(self->decoder.context);
+		if(self->decoder.context->extradata){
+			/* NOTE(review): assumes extradata ownership is ours (set from SDP config),
+			   not allocated by libavcodec — confirm before freeing elsewhere */
+			TSK_FREE(self->decoder.context->extradata);
+			self->decoder.context->extradata_size = 0;
+		}
+		av_free(self->decoder.context);
+		self->decoder.context = tsk_null;
+	}
+	if(self->decoder.picture){
+		av_free(self->decoder.picture);
+		self->decoder.picture = tsk_null;
+	}
+	if(self->decoder.accumulator){
+		TSK_FREE(self->decoder.accumulator);
+	}
+
+	return 0;
+}
+
+/* Split one encoded MPEG-4 Visual frame into RTP-sized fragments (RFC 3016)
+ * and hand each fragment to tdav_codec_mp4ves_rtp_callback(). Fragments are
+ * cut at start-code boundaries when the frame exceeds MP4V_RTP_PAYLOAD_SIZE. */
+static void tdav_codec_mp4ves_encap(tdav_codec_mp4ves_t* mp4v, const uint8_t* pdata, tsk_size_t size)
+{
+	uint32_t scode; // start code
+
+	if(size <= 4/*32bits: start code size*/){
+		TSK_DEBUG_ERROR("Too short");
+		return;
+	}
+	// first 32bits (network byte order)
+	scode = tnet_htonl_2(pdata);
+
+/* RFC 3016 - 3.3 Examples of packetized MPEG-4 Visual bitstream
+
+	VS= Visual Object Sequence
+	VO= Visual Object
+	VOL= Visual Object Layer
+	VOP= Visual Object Plane
+	GOV= Group of Visual Object Plane
+	VP= Video Plane
+
+      +------+------+------+------+
+(a)   | RTP  |  VS  |  VO  | VOL  |
+      |header|header|header|header|
+      +------+------+------+------+
+
+      +------+------+------+------+------------+
+(b)   | RTP  |  VS  |  VO  | VOL  |Video Packet|
+      |header|header|header|header|            |
+      +------+------+------+------+------------+
+
+      +------+-----+------------------+
+(c)   | RTP  | GOV |Video Object Plane|
+      |header|     |                  |
+      +------+-----+------------------+
+
+      +------+------+------------+  +------+------+------------+
+(d)   | RTP  | VOP  |Video Packet|  | RTP  |  VP  |Video Packet|
+      |header|header|    (1)     |  |header|header|    (2)     |
+      +------+------+------------+  +------+------+------------+
+
+      +------+------+------------+------+------------+------+------------+
+(e)   | RTP  |  VP  |Video Packet|  VP  |Video Packet|  VP  |Video Packet|
+      |header|header|    (1)     |header|    (2)     |header|    (3)     |
+      +------+------+------------+------+------------+------+------------+
+
+      +------+------+------------+  +------+------------+
+(f)   | RTP  | VOP  |VOP fragment|  | RTP  |VOP fragment|
+      |header|header|    (1)     |  |header|    (2)     | ___
+      +------+------+------------+  +------+------------+
+
+     Figure 2 - Examples of RTP packetized MPEG-4 Visual bitstream
+*/
+
+/* RFC 3016 - 3.2 Fragmentation of MPEG-4 Visual bitstream
+
+   A fragmented MPEG-4 Visual bitstream is mapped directly onto the RTP
+   payload without any addition of extra header fields or any removal of
+   Visual syntax elements.  The Combined Configuration/Elementary
+   streams mode is used.
+
+   In the following, header means one of the following:
+
+   -  Configuration information (Visual Object Sequence Header, Visual
+      Object Header and Video Object Layer Header)
+   -  visual_object_sequence_end_code
+   -  The header of the entry point function for an elementary stream
+      (Group_of_VideoObjectPlane() or the header of VideoObjectPlane(),
+      video_plane_with_short_header(), MeshObject() or FaceObject())
+   -  The video packet header (video_packet_header() excluding
+      next_resync_marker())
+   -  The header of gob_layer()
+   See 6.2.1 "Start codes" of ISO/IEC 14496-2 [2][9][4] for the
+   definition of the configuration information and the entry point
+   functions.
+*/
+
+	switch(scode){
+		case visual_object_sequence_start_code:
+		case visual_object_start_code:
+		case user_data_start_code:
+		// NOTE(review): video_object_layer_start_code (0x12) can never equal a full
+		// 32-bit start code (wire values are 0x120..0x12F) — confirm intended.
+		case video_object_layer_start_code:
+		case group_of_vop_start_code:
+		case vop_start_code:
+		{
+			register uint32_t i, last_index = 0;
+			int startcode = 0xffffffff;
+
+			if(scode == visual_object_sequence_start_code && size >=5){
+				//uint8_t profile_and_level_indication = pdata[4]; /* IEC 14496-2: 6.3.2 Visual Object Sequence and Visual Object */
+				// TSK_DEBUG_INFO("profile_and_level_indication=%d", profile_and_level_indication);
+			}
+
+			/* small frame: send as a single RTP payload */
+			if(size < MP4V_RTP_PAYLOAD_SIZE){
+				goto last;
+			}
+
+			/* scan for start codes and emit a fragment at each boundary */
+			for(i = 4; i<(size - 4); i++){
+				startcode = (startcode <<8) | pdata[i];
+				switch(startcode){
+					case visual_object_sequence_start_code:
+					case group_of_vop_start_code:
+					case vop_start_code:
+						// NOTE(review): the marker argument "(last_index == size)" is always
+						// tsk_false here (last_index < i < size) — confirm intended.
+						tdav_codec_mp4ves_rtp_callback(mp4v, pdata + last_index, (i - last_index), (last_index == size));
+						// NOTE(review): the 4-byte start code began at (i - 3); splitting at i
+						// leaves its first bytes in the previous fragment — verify vs RFC 3016.
+						last_index = i;
+				}
+			}
+last:
+			/* trailing fragment always carries the marker (end of frame) */
+			if(last_index < size){
+				tdav_codec_mp4ves_rtp_callback(mp4v, pdata + last_index, (size - last_index), tsk_true);
+			}
+			break;
+		}
+		default:
+			TSK_DEBUG_ERROR("%x is an invalide start code", scode);
+			break;
+	}
+}
+
+/* Deliver one encoded chunk to the registered transport callback (if any). */
+static void tdav_codec_mp4ves_rtp_callback(tdav_codec_mp4ves_t *mp4v, const void *data, tsk_size_t size, tsk_bool_t marker)
+{
+	if(!TMEDIA_CODEC_VIDEO(mp4v)->out.callback){
+		return; /* no transport attached: drop the chunk */
+	}
+	TMEDIA_CODEC_VIDEO(mp4v)->out.result.buffer.ptr = data;
+	TMEDIA_CODEC_VIDEO(mp4v)->out.result.buffer.size = size;
+	/* duration of one frame expressed in RTP clock-rate units */
+	TMEDIA_CODEC_VIDEO(mp4v)->out.result.duration = (uint32_t)((1./(double)TMEDIA_CODEC_VIDEO(mp4v)->out.fps) * TMEDIA_CODEC(mp4v)->plugin->rate);
+	TMEDIA_CODEC_VIDEO(mp4v)->out.result.last_chunck = marker;
+	TMEDIA_CODEC_VIDEO(mp4v)->out.callback(&TMEDIA_CODEC_VIDEO(mp4v)->out.result);
+}
+
+
+/* ============ MP4V-ES Plugin interface ================= */
+
+/* constructor */
+/* constructor (invoked by tmedia_codec_create(), which initializes the base part) */
+static tsk_object_t* tdav_codec_mp4ves_ctor(tsk_object_t * _self, va_list * app)
+{
+	tdav_codec_mp4ves_t *self = _self;
+	if(!self){
+		return tsk_null;
+	}
+	/* codec-specific defaults */
+	self->profile = DEFAULT_PROFILE_LEVEL_ID;
+	self->encoder.quality = 1; /* best quantizer */
+	self->encoder.max_bw_kpbs = tmedia_defaults_get_bandwidth_video_upload_max();
+	return self;
+}
+/* destructor */
+/* destructor */
+static tsk_object_t* tdav_codec_mp4ves_dtor(tsk_object_t * _self)
+{
+	tdav_codec_mp4ves_t *self = _self;
+	if(!self){
+		return _self;
+	}
+	/* deinit base (closes the codec if still opened) */
+	tmedia_codec_video_deinit(self);
+	/* deinit self: drop the RTP packetization scratch buffer */
+	TSK_FREE(self->rtp.ptr);
+	self->rtp.size = 0;
+	return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_mp4ves_def_s =
+{
+	sizeof(tdav_codec_mp4ves_t), // object size
+	tdav_codec_mp4ves_ctor, // constructor
+	tdav_codec_mp4ves_dtor, // destructor
+	tmedia_codec_cmp, // comparator (shared by all codecs)
+};
+/* plugin definition*/
+/* Plugin descriptor registered with the media framework. */
+static const tmedia_codec_plugin_def_t tdav_codec_mp4ves_plugin_def_s =
+{
+	&tdav_codec_mp4ves_def_s,
+
+	tmedia_video, // media type
+	tmedia_codec_id_mp4ves_es,
+	"MP4V-ES", // encoding name used in SDP
+	"MP4V-ES Codec",
+	TMEDIA_CODEC_FORMAT_MP4V_ES,
+	tsk_true, // dynamic payload type
+	90000, // rate
+
+	/* audio */
+	{ 0 },
+
+	/* video (width, height, fps) */
+	{176, 144, 0},// fps is @deprecated
+
+	tdav_codec_mp4ves_set,
+	tdav_codec_mp4ves_open,
+	tdav_codec_mp4ves_close,
+	tdav_codec_mp4ves_encode,
+	tdav_codec_mp4ves_decode,
+	tdav_codec_mp4ves_sdp_att_match,
+	tdav_codec_mp4ves_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_mp4ves_plugin_def_t = &tdav_codec_mp4ves_plugin_def_s;
+
+/* Whether the linked FFmpeg build provides both MPEG-4 Part 2 encode and decode. */
+tsk_bool_t tdav_codec_ffmpeg_mp4ves_is_supported()
+{
+	const AVCodec* enc = avcodec_find_encoder(CODEC_ID_MPEG4);
+	const AVCodec* dec = avcodec_find_decoder(CODEC_ID_MPEG4);
+	return (enc && dec) ? tsk_true : tsk_false;
+}
+
+#endif /* HAVE_FFMPEG */
+
diff --git a/tinyDAV/src/codecs/msrp/tdav_codec_msrp.c b/tinyDAV/src/codecs/msrp/tdav_codec_msrp.c
new file mode 100644
index 0000000..5b72ded
--- /dev/null
+++ b/tinyDAV/src/codecs/msrp/tdav_codec_msrp.c
@@ -0,0 +1,106 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_msrp.c
+ * @brief The Message Session Relay Protocol (MSRP) fake codec.
+ * Used for both Message (RFC 4975) and file transfer (RFC 5547).
+ *
+ */
+#include "tinydav/codecs/msrp/tdav_codec_msrp.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+/* ============ MSRP Plugin interface ================= */
+/* MSRP is a fake codec (MSRP data is not transcoded), so the media
+ * callbacks are stubbed out with tsk_null.
+ * BUGFIX: "tdav_codec_msrp_sdp_att_get" used to be #defined twice (copy-paste);
+ * the duplicate line has been removed. */
+#define tdav_codec_msrp_open tsk_null
+#define tdav_codec_msrp_close tsk_null
+#define tdav_codec_msrp_sdp_att_get tsk_null
+#define tdav_codec_msrp_encode tsk_null
+#define tdav_codec_msrp_decode tsk_null
+
+static tsk_bool_t tdav_codec_msrp_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ /* MSRP is a fake codec: every SDP attribute is accepted. */
+ (void)codec;
+ (void)att_name;
+ (void)att_value;
+ return tsk_true;
+}
+
+//
+// MSRP Plugin definition
+//
+
+/* constructor: nothing to initialize beyond the base (done by tmedia_codec_create()) */
+static tsk_object_t* tdav_codec_msrp_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_msrp_t *msrp = self;
+ if(msrp){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor: releases the base MSRP codec resources */
+static tsk_object_t* tdav_codec_msrp_dtor(tsk_object_t * self)
+{
+ tdav_codec_msrp_t *msrp = self;
+ if(msrp){
+ /* deinit base */
+ tmedia_codec_msrp_deinit(msrp);
+ /* deinit self */
+ }
+
+ return self;
+}
+/* object definition (vtable for the tsk object system) */
+static const tsk_object_def_t tdav_codec_msrp_def_s =
+{
+ sizeof(tdav_codec_msrp_t),
+ tdav_codec_msrp_ctor,
+ tdav_codec_msrp_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition: registers the fake MSRP "codec" used for Message (RFC 4975)
+ and file transfer (RFC 5547) sessions */
+static const tmedia_codec_plugin_def_t tdav_codec_msrp_plugin_def_s =
+{
+ &tdav_codec_msrp_def_s,
+
+ tmedia_msrp,
+ tmedia_codec_id_none, // fake codec without real id
+ "message", // NOTE(review): presumably matches the SDP "m=message" media name -- verify
+ "MSRP fake codec",
+ TMEDIA_CODEC_FORMAT_MSRP,
+ tsk_false, // not a dynamic payload type
+ 0, // rate
+
+ /* audio */
+ {0},
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_msrp_open,
+ tdav_codec_msrp_close,
+ tdav_codec_msrp_encode,
+ tdav_codec_msrp_decode,
+ tdav_codec_msrp_sdp_att_match,
+ tdav_codec_msrp_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_msrp_plugin_def_t = &tdav_codec_msrp_plugin_def_s;
diff --git a/tinyDAV/src/codecs/opus/tdav_codec_opus.c b/tinyDAV/src/codecs/opus/tdav_codec_opus.c
new file mode 100644
index 0000000..355fc73
--- /dev/null
+++ b/tinyDAV/src/codecs/opus/tdav_codec_opus.c
@@ -0,0 +1,363 @@
+/*
+* Copyright (C) 2010-2013 Doubango Telecom <http://www.doubango.org>.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_opus.c
+ * @brief OPUS audio codec.
+ * SDP: http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
+ */
+#include "tinydav/codecs/opus/tdav_codec_opus.h"
+
+#if HAVE_LIBOPUS
+
+#include <opus/opus.h>
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#if !defined(TDAV_OPUS_MAX_FRAME_SIZE_IN_SAMPLES)
+# define TDAV_OPUS_MAX_FRAME_SIZE_IN_SAMPLES (5760) /* 120ms@48kHz */
+#endif
+#if !defined(TDAV_OPUS_MAX_FRAME_SIZE_IN_BYTES)
+# define TDAV_OPUS_MAX_FRAME_SIZE_IN_BYTES (TDAV_OPUS_MAX_FRAME_SIZE_IN_SAMPLES << 1) /* 120ms@48kHz */
+#endif
+#if !defined(TDAV_OPUS_FEC_ENABLED)
+# define TDAV_OPUS_FEC_ENABLED 0
+#endif
+#if !defined(TDAV_OPUS_DTX_ENABLED)
+# define TDAV_OPUS_DTX_ENABLED 0
+#endif
+
+/* Opus codec context: base audio codec plus the libopus encoder/decoder state. */
+typedef struct tdav_codec_opus_s
+{
+ TMEDIA_DECLARE_CODEC_AUDIO;
+
+ struct {
+ OpusEncoder *inst; // libopus encoder (created in open(), destroyed in dtor())
+ } encoder;
+
+ struct {
+ OpusDecoder *inst; // libopus decoder (created in open(), destroyed in dtor())
+ opus_int16 buff[TDAV_OPUS_MAX_FRAME_SIZE_IN_SAMPLES]; // scratch PCM buffer for decoded samples
+ tsk_bool_t fec_enabled; // use in-band FEC when concealing lost packets; advertised in fmtp
+ tsk_bool_t dtx_enabled; // advertised in fmtp (usedtx)
+ uint16_t last_seq; // last RTP seq-num seen; used to detect packet loss in decode()
+ } decoder;
+}
+tdav_codec_opus_t;
+
+
+static tsk_bool_t _tdav_codec_opus_rate_is_valid(const int32_t rate)
+{
+ /* Opus only operates at these sampling rates (RFC 6716). */
+ static const int32_t __opus_valid_rates[] = { 8000, 12000, 16000, 24000, 48000 };
+ tsk_size_t i;
+ for(i = 0; i < sizeof(__opus_valid_rates)/sizeof(__opus_valid_rates[0]); ++i){
+ if(__opus_valid_rates[i] == rate){
+ return tsk_true;
+ }
+ }
+ return tsk_false;
+}
+
+/* open(): lazily creates the libopus decoder (in.rate/in.channels) and encoder
+ * (out.rate/out.channels). Safe to call on an already-open codec (instances are
+ * reused). Returns 0 on success, negative on error. */
+static int tdav_codec_opus_open(tmedia_codec_t* self)
+{
+ tdav_codec_opus_t* opus = (tdav_codec_opus_t*)self;
+ int opus_err;
+
+ if(!opus){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // Initialize the decoder
+ if(!opus->decoder.inst){
+ TSK_DEBUG_INFO("[OPUS] Open decoder: rate=%d, channels=%d", (int)self->in.rate, (int)TMEDIA_CODEC_AUDIO(self)->in.channels);
+ if(!(opus->decoder.inst = opus_decoder_create((opus_int32)self->in.rate, (int)TMEDIA_CODEC_AUDIO(self)->in.channels, &opus_err)) || opus_err != OPUS_OK){
+ TSK_DEBUG_ERROR("Failed to create Opus decoder(rate=%d, channels=%d) instance with error code=%d.", (int)self->in.rate, (int)TMEDIA_CODEC_AUDIO(self)->in.channels, opus_err);
+ return -2;
+ }
+ }
+ opus->decoder.last_seq = 0; // reset loss detection
+
+ // Initialize the encoder
+ if(!opus->encoder.inst){
+ TSK_DEBUG_INFO("[OPUS] Open encoder: rate=%d, channels=%d", (int)self->out.rate, (int)TMEDIA_CODEC_AUDIO(self)->out.channels);
+ if(!(opus->encoder.inst = opus_encoder_create((opus_int32)self->out.rate, (int)TMEDIA_CODEC_AUDIO(self)->out.channels, OPUS_APPLICATION_VOIP, &opus_err)) || opus_err != OPUS_OK){
+ // BUGFIX: message used to say "decoder" (copy-paste) -- this is the encoder path
+ TSK_DEBUG_ERROR("Failed to create Opus encoder(rate=%d, channels=%d) instance with error code=%d.", (int)self->out.rate, (int)TMEDIA_CODEC_AUDIO(self)->out.channels, opus_err);
+ return -2;
+ }
+ }
+#if TDAV_UNDER_MOBILE /* iOS, Android and WP8 */
+ opus_encoder_ctl(opus->encoder.inst, OPUS_SET_COMPLEXITY(3)); // trade quality for CPU on mobile
+#endif
+ opus_encoder_ctl(opus->encoder.inst, OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE));
+
+ return 0;
+}
+
+static int tdav_codec_opus_close(tmedia_codec_t* self)
+{
+ /* Intentionally a no-op: the encoder/decoder instances are kept alive until the
+ dtor() so that open() can reuse them (it only creates missing instances). */
+ (void)self;
+ return 0;
+}
+
+/* encode(): encodes one raw PCM frame (16-bit samples) into an Opus packet.
+ * *out_data is grown (tsk_realloc) when needed; caller keeps ownership.
+ * Returns the encoded size in bytes, 0 on error. */
+static tsk_size_t tdav_codec_opus_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tdav_codec_opus_t* opus = (tdav_codec_opus_t*)self;
+ opus_int32 ret;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if(!opus->encoder.inst){
+ TSK_DEBUG_ERROR("Encoder not ready");
+ return 0;
+ }
+
+ // we're sure that the output (encoded) size cannot be higher than the input (raw)
+ if(*out_max_size < in_size){
+ if(!(*out_data = tsk_realloc(*out_data, in_size))){
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size = %u", in_size);
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = in_size;
+ }
+
+ // (in_size >> 1): bytes -> number of 16-bit samples per channel (channels==1, see ctor)
+ ret = opus_encode(opus->encoder.inst,
+ (const opus_int16 *)in_data, (int)(in_size >> 1),
+ (unsigned char *)*out_data, (opus_int32)*out_max_size);
+
+ if(ret < 0){
+ TSK_DEBUG_ERROR("opus_encode() failed with error code = %d", ret);
+ return 0;
+ }
+
+ return (tsk_size_t)ret;
+}
+
+/* decode(): decodes one RTP payload into 16-bit PCM. Packet loss is detected via
+ * the RTP sequence number; libopus is then asked to conceal the gap (PLC, or FEC
+ * when enabled) before decoding the current packet.
+ * Returns the decoded size in bytes, 0 on error/duplicate. */
+static tsk_size_t tdav_codec_opus_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tdav_codec_opus_t* opus = (tdav_codec_opus_t*)self;
+ int frame_size;
+ const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if(!opus->decoder.inst){
+ TSK_DEBUG_ERROR("Decoder not ready");
+ return 0;
+ }
+
+ /* Packet loss? (last_seq == 0 means "no packet seen yet": skip detection)
+ BUGFIX: compare modulo 2^16 so the 65535 -> 0 sequence wrap is not reported as a loss */
+ if(opus->decoder.last_seq && ((uint16_t)(opus->decoder.last_seq + 1) != rtp_hdr->seq_num)){
+ if(opus->decoder.last_seq == rtp_hdr->seq_num){
+ // Could happen on some stupid emulators
+ //TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+ return 0;
+ }
+ TSK_DEBUG_INFO("[Opus] Packet loss, seq_num=%d", rtp_hdr->seq_num);
+ // tsk_null payload asks libopus for loss concealment
+ opus_decode(opus->decoder.inst, tsk_null/*packet loss*/, (opus_int32)0, opus->decoder.buff, TDAV_OPUS_MAX_FRAME_SIZE_IN_SAMPLES, opus->decoder.fec_enabled ? 1 : 0);
+ }
+ opus->decoder.last_seq = rtp_hdr->seq_num;
+
+ frame_size = opus_decode(opus->decoder.inst, (const unsigned char *)in_data, (opus_int32)in_size, opus->decoder.buff, TDAV_OPUS_MAX_FRAME_SIZE_IN_SAMPLES, opus->decoder.fec_enabled ? 1 : 0);
+ if(frame_size > 0){
+ tsk_size_t frame_size_inbytes = (frame_size << 1); // 16-bit samples -> bytes (channels==1, see ctor)
+ if(*out_max_size < frame_size_inbytes){
+ if(!(*out_data = tsk_realloc(*out_data, frame_size_inbytes))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = frame_size_inbytes;
+ }
+ memcpy(*out_data, opus->decoder.buff, frame_size_inbytes);
+ return frame_size_inbytes;
+ }
+ else{
+ return 0;
+ }
+}
+
+/* sdp_att_match(): negotiates the remote "fmtp" attribute.
+ * - maxplaybackrate (remote's decode limit) clamps our encoding (out) rate;
+ * - sprop-maxcapturerate (remote's capture rate) clamps our decoding (in) rate.
+ * Any other attribute always matches. Returns tsk_false only on an invalid rate. */
+static tsk_bool_t tdav_codec_opus_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ tdav_codec_opus_t* opus = (tdav_codec_opus_t*)codec;
+
+ if(!opus){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_false;
+ }
+
+ TSK_DEBUG_INFO("[OPUS] Trying to match [%s:%s]", att_name, att_value);
+
+ if(tsk_striequals(att_name, "fmtp")){
+ int val_int;
+ tsk_params_L_t* params;
+ /* parse "a=b; c=d; ..." pairs */
+ if((params = tsk_params_fromstring(att_value, ";", tsk_true))){
+ tsk_bool_t ret = tsk_false;
+ /* === maxplaybackrate ===*/
+ if((val_int = tsk_params_get_param_value_as_int(params, "maxplaybackrate")) != -1){
+ if(!_tdav_codec_opus_rate_is_valid(val_int)){
+ TSK_DEBUG_ERROR("[OPUS] %d not valid as maxplaybackrate value", val_int);
+ goto done;
+ }
+ // never raise the rate: take the min of ours and the remote's
+ TMEDIA_CODEC(opus)->out.rate = TSK_MIN((int32_t)TMEDIA_CODEC(opus)->out.rate, val_int);
+ TMEDIA_CODEC_AUDIO(opus)->out.timestamp_multiplier = tmedia_codec_audio_get_timestamp_multiplier(codec->id, codec->out.rate);
+ }
+ /* === sprop-maxcapturerate ===*/
+ if((val_int = tsk_params_get_param_value_as_int(params, "sprop-maxcapturerate")) != -1){
+ if(!_tdav_codec_opus_rate_is_valid(val_int)){
+ TSK_DEBUG_ERROR("[OPUS] %d not valid as sprop-maxcapturerate value", val_int);
+ goto done;
+ }
+ TMEDIA_CODEC(opus)->in.rate = TSK_MIN((int32_t)TMEDIA_CODEC(opus)->in.rate, val_int);
+ TMEDIA_CODEC_AUDIO(opus)->in.timestamp_multiplier = tmedia_codec_audio_get_timestamp_multiplier(codec->id, codec->in.rate);
+ }
+ ret = tsk_true;
+done:
+ TSK_OBJECT_SAFE_FREE(params);
+ return ret;
+ }
+ }
+
+ return tsk_true;
+}
+
+/* sdp_att_get(): builds our local "fmtp" attribute value. The mapping mirrors
+ * sdp_att_match(): maxplaybackrate advertises our decode (in) rate and
+ * sprop-maxcapturerate our encode (out) rate.
+ * Returns a heap string (caller frees) or tsk_null. */
+static char* tdav_codec_opus_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ tdav_codec_opus_t* opus = (tdav_codec_opus_t*)codec;
+
+ if(!opus){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ if(tsk_striequals(att_name, "fmtp")){
+ char* fmtp = tsk_null;
+ tsk_sprintf(&fmtp, "maxplaybackrate=%d; sprop-maxcapturerate=%d; stereo=%d; sprop-stereo=%d; useinbandfec=%d; usedtx=%d",
+ TMEDIA_CODEC(opus)->in.rate,
+ TMEDIA_CODEC(opus)->out.rate,
+ (TMEDIA_CODEC_AUDIO(opus)->in.channels == 2) ? 1 : 0,
+ (TMEDIA_CODEC_AUDIO(opus)->out.channels == 2) ? 1 : 0,
+ opus->decoder.fec_enabled ? 1 : 0,
+ opus->decoder.dtx_enabled ? 1 : 0
+ );
+ return fmtp;
+ }
+
+ return tsk_null;
+}
+
+//
+// OPUS Plugin definition
+//
+
+/* constructor: seeds default rates/channels and the FEC/DTX compile-time flags */
+static tsk_object_t* tdav_codec_opus_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_opus_t *opus = self;
+ if(opus){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ TMEDIA_CODEC(opus)->in.rate = tmedia_defaults_get_opus_maxplaybackrate(); // decode rate
+ TMEDIA_CODEC(opus)->out.rate = tmedia_defaults_get_opus_maxcapturerate(); // encode rate
+ TMEDIA_CODEC_AUDIO(opus)->in.channels = 1; // mono: encode()/decode() rely on this (size >> 1)
+ TMEDIA_CODEC_AUDIO(opus)->out.channels = 1;
+#if TDAV_OPUS_FEC_ENABLED
+ opus->decoder.fec_enabled = tsk_true;
+#endif
+#if TDAV_OPUS_DTX_ENABLED
+ opus->decoder.dtx_enabled = tsk_true;
+#endif
+ }
+ return self;
+}
+/* destructor: releases the libopus instances then the base audio codec resources */
+static tsk_object_t* tdav_codec_opus_dtor(tsk_object_t * self)
+{
+ tdav_codec_opus_t *opus = self;
+ if(!opus){
+ return self;
+ }
+ /* deinit base */
+ tmedia_codec_audio_deinit(opus);
+ /* deinit self */
+ if(opus->decoder.inst){
+ opus_decoder_destroy(opus->decoder.inst);
+ opus->decoder.inst = tsk_null;
+ }
+ if(opus->encoder.inst){
+ opus_encoder_destroy(opus->encoder.inst);
+ opus->encoder.inst = tsk_null;
+ }
+ return self;
+}
+/* object definition (vtable for the tsk object system) */
+static const tsk_object_def_t tdav_codec_opus_def_s =
+{
+ sizeof(tdav_codec_opus_t),
+ tdav_codec_opus_ctor,
+ tdav_codec_opus_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition: registers Opus as an audio codec with the media framework */
+static const tmedia_codec_plugin_def_t tdav_codec_opus_plugin_def_s =
+{
+ &tdav_codec_opus_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_opus,
+ "opus", // encoding name used in SDP rtpmap
+ "opus Codec",
+ TMEDIA_CODEC_FORMAT_OPUS,
+ tsk_true, // dynamic payload type
+ 48000, // this is the default sample rate
+
+ { /* audio */
+ 2, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_opus_open,
+ tdav_codec_opus_close,
+ tdav_codec_opus_encode,
+ tdav_codec_opus_decode,
+ tdav_codec_opus_sdp_att_match,
+ tdav_codec_opus_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_opus_plugin_def_t = &tdav_codec_opus_plugin_def_s;
+
+
+#endif /* HAVE_LIBOPUS */
diff --git a/tinyDAV/src/codecs/speex/tdav_codec_speex.c b/tinyDAV/src/codecs/speex/tdav_codec_speex.c
new file mode 100644
index 0000000..18c4440
--- /dev/null
+++ b/tinyDAV/src/codecs/speex/tdav_codec_speex.c
@@ -0,0 +1,286 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_speex.c
+ * @brief Speex codecs
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/speex/tdav_codec_speex.h"
+
+#if HAVE_LIB_SPEEX
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define SPEEX_BUFFER_MAX_SIZE 1024
+#define SPEEX_DEFAULT_QUALITY 6
+
+/* ============ Common ================= */
+int tdav_codec_speex_init(tdav_codec_speex_t* self, tdav_codec_speex_type_t type);
+int tdav_codec_speex_deinit(tdav_codec_speex_t* self);
+
+/* ============ Speex Plugin interface ================= */
+
+/* open(): creates the Speex encoder/decoder pair for the configured band mode,
+ * allocates the decoder output buffer and initializes both bit-streams.
+ * Returns 0 on success, a negative value otherwise. */
+int tdav_codec_speex_open(tmedia_codec_t* self)
+{
+ static int quality = SPEEX_DEFAULT_QUALITY;
+ tdav_codec_speex_t* speex = (tdav_codec_speex_t*)self;
+
+ switch(speex->type){
+ case tdav_codec_speex_type_nb:
+ speex->encoder.state = speex_encoder_init(&speex_nb_mode);
+ speex->decoder.state = speex_decoder_init(&speex_nb_mode);
+ break;
+ case tdav_codec_speex_type_wb:
+ speex->encoder.state = speex_encoder_init(&speex_wb_mode);
+ speex->decoder.state = speex_decoder_init(&speex_wb_mode);
+ break;
+ case tdav_codec_speex_type_uwb:
+ speex->encoder.state = speex_encoder_init(&speex_uwb_mode);
+ speex->decoder.state = speex_decoder_init(&speex_uwb_mode);
+ break;
+ default:
+ TSK_DEBUG_ERROR("Not implemented");
+ return -2;
+ }
+
+ // ROBUSTNESS: fail early instead of passing a null state to speex_*_ctl()
+ if(!speex->encoder.state || !speex->decoder.state){
+ TSK_DEBUG_ERROR("Failed to create Speex encoder/decoder state");
+ return -4;
+ }
+
+ speex_decoder_ctl(speex->decoder.state, SPEEX_GET_FRAME_SIZE, &speex->decoder.size);
+ speex->decoder.size *= sizeof(spx_int16_t); // frame size: samples -> bytes
+ if(!(speex->decoder.buffer = tsk_calloc(speex->decoder.size, 1))){
+ speex->decoder.size = 0; // BUGFIX: was "speex->decoder.size = speex->decoder.size = 0;" (duplicated assignment)
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return -3;
+ }
+
+ speex_encoder_ctl(speex->encoder.state, SPEEX_SET_QUALITY, &quality);
+ speex_encoder_ctl(speex->encoder.state, SPEEX_GET_FRAME_SIZE, &speex->encoder.size);
+
+ speex_bits_init(&speex->encoder.bits);
+ speex_bits_init(&speex->decoder.bits);
+ speex_bits_reset(&speex->encoder.bits);
+ speex_bits_reset(&speex->decoder.bits);
+
+ return 0;
+}
+
+int tdav_codec_speex_close(tmedia_codec_t* self)
+{
+ /* Intentionally a no-op: all resources are released by tdav_codec_speex_deinit()
+ which the dtor() calls. */
+ (void)self;
+ return 0;
+}
+
+/* encode(): encodes exactly one raw PCM frame into a Speex bit-stream.
+ * *out_data is grown (tsk_realloc) when needed; caller keeps ownership.
+ * Returns the number of encoded bytes written, 0 on error. */
+tsk_size_t tdav_codec_speex_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tdav_codec_speex_t* speex = (tdav_codec_speex_t*)self;
+ tsk_size_t outsize = 0;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ speex_bits_reset(&speex->encoder.bits);
+ speex_encode_int(speex->encoder.state, (spx_int16_t*)in_data, &speex->encoder.bits);
+
+ // grow the caller's buffer when it is smaller than one encoded frame
+ if(*out_max_size <speex->encoder.size){
+ if((*out_data = tsk_realloc(*out_data, speex->encoder.size))){
+ *out_max_size = speex->encoder.size;
+ }
+ else{
+ *out_max_size = 0;
+ return 0;
+ }
+ }
+
+ // NOTE(review): max bytes is (encoder.size >> 1) -- presumably assumes >= 2:1
+ // compression of the 16-bit samples; confirm against speex_bits_write() contract
+ outsize = speex_bits_write(&speex->encoder.bits, *out_data, (speex->encoder.size >> 1));
+
+ return outsize;
+}
+
+/* decode(): decodes every Speex frame contained in the input payload, appending
+ * the PCM output frame by frame into *out_data (grown as needed).
+ * Returns the total decoded size in bytes, 0 on error. */
+tsk_size_t tdav_codec_speex_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ int ret;
+ tsk_size_t out_size = 0;
+ tdav_codec_speex_t* speex = (tdav_codec_speex_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ // initializes the bit-stream
+ speex_bits_read_from(&speex->decoder.bits, (char*)in_data, in_size);
+
+ do{
+ // performs decode()
+ if((ret = speex_decode_int(speex->decoder.state, &speex->decoder.bits, speex->decoder.buffer))){
+ TSK_DEBUG_ERROR("Failed to decode the buffer. retcode=%d", ret);
+ break;
+ }
+
+ // grow the output buffer to hold one more decoded frame
+ if(*out_max_size <(out_size + speex->decoder.size)){
+ if((*out_data = tsk_realloc(*out_data, (out_size + speex->decoder.size)))){
+ *out_max_size = (out_size + speex->decoder.size);
+ }
+ else{
+ *out_max_size = 0;
+ return 0;
+ }
+ }
+
+ // copy output buffer
+ memcpy(&((uint8_t*)*out_data)[out_size], speex->decoder.buffer, speex->decoder.size);
+ out_size += speex->decoder.size;
+ }
+ // NOTE(review): 5 looks like the minimum number of bits for another frame -- confirm
+ while(speex_bits_remaining(&speex->decoder.bits) >= 5);
+
+
+ return out_size;
+}
+
+char* tdav_codec_speex_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ /* Speex advertises no SDP attributes. */
+ (void)codec;
+ (void)att_name;
+ return tsk_null;
+}
+
+tsk_bool_t tdav_codec_speex_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ /* Speex negotiation accepts any SDP attribute. */
+ (void)codec;
+ (void)att_name;
+ (void)att_value;
+ return tsk_true;
+}
+
+
+//
+// Speex Codec Object definition
+// One macro expansion per band mode: generates the ctor/dtor, the tsk object
+// definition and the tmedia codec plugin definition for that mode.
+//
+#define SPEEX_OBJECT_DEFINITION(mode,name,description,format,rate) \
+ static tsk_object_t* tdav_codec_speex_##mode##_ctor(tsk_object_t * self, va_list * app) \
+ { \
+ tdav_codec_speex_t *speex = self; \
+ if(speex){ \
+ tdav_codec_speex_init(speex, tdav_codec_speex_type_##mode); \
+ } \
+ return self; \
+ } \
+ static tsk_object_t* tdav_codec_speex_##mode##_dtor(tsk_object_t * self) \
+ { \
+ tdav_codec_speex_t *speex = self; \
+ if(speex){ \
+ /* deinit base */ \
+ tmedia_codec_audio_deinit(speex); \
+ /* deinit self */ \
+ tdav_codec_speex_deinit(speex); \
+ } \
+ \
+ return self; \
+ } \
+ static const tsk_object_def_t tdav_codec_speex_##mode##_def_s = \
+ { \
+ sizeof(tdav_codec_speex_t), \
+ tdav_codec_speex_##mode##_ctor, \
+ tdav_codec_speex_##mode##_dtor, \
+ tmedia_codec_cmp, \
+ }; \
+ static const tmedia_codec_plugin_def_t tdav_codec_speex_##mode##_plugin_def_s = \
+ { \
+ &tdav_codec_speex_##mode##_def_s, \
+ \
+ tmedia_audio, \
+ tmedia_codec_id_speex_##mode, \
+ name, \
+ description, \
+ format, \
+ tsk_true, \
+ rate, /* rate*/ \
+ \
+ { /* audio */ \
+ 1, /* channels*/ \
+ 0 /* ptime @deprecated*/ \
+ }, \
+ \
+ /* video */ \
+ {0}, \
+ \
+ tsk_null, /* set()*/ \
+ tdav_codec_speex_open, \
+ tdav_codec_speex_close, \
+ tdav_codec_speex_encode, \
+ tdav_codec_speex_decode, \
+ tdav_codec_speex_sdp_att_match, \
+ tdav_codec_speex_sdp_att_get \
+ }; \
+ const tmedia_codec_plugin_def_t *tdav_codec_speex_##mode##_plugin_def_t = &tdav_codec_speex_##mode##_plugin_def_s;
+
+
+/* Instantiate the three variants: narrowband @8kHz, wideband @16kHz, ultra-wideband @32kHz */
+SPEEX_OBJECT_DEFINITION(nb,"SPEEX","Speex-NB Codec",TMEDIA_CODEC_FORMAT_SPEEX_NB,8000);
+SPEEX_OBJECT_DEFINITION(wb,"SPEEX","Speex-WB Codec",TMEDIA_CODEC_FORMAT_SPEEX_WB,16000);
+SPEEX_OBJECT_DEFINITION(uwb,"SPEEX","Speex-UWB Codec",TMEDIA_CODEC_FORMAT_SPEEX_UWB,32000);
+
+//
+// Common functions
+//
+int tdav_codec_speex_init(tdav_codec_speex_t* self, tdav_codec_speex_type_t type)
+{
+ /* Records the band mode (nb/wb/uwb); the actual encoder/decoder are created in open(). */
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ self->type = type;
+ return 0;
+}
+
+/* deinit(): releases the encoder/decoder states, bit-streams and the decoder
+ * scratch buffer. Safe to call when open() was never invoked. */
+int tdav_codec_speex_deinit(tdav_codec_speex_t* self)
+{
+ if(self){
+ if(self->decoder.state){
+ speex_decoder_destroy(self->decoder.state);
+ self->decoder.state = tsk_null;
+ }
+ speex_bits_destroy(&self->decoder.bits);
+ if(self->decoder.buffer){
+ TSK_FREE(self->decoder.buffer);
+ self->decoder.size = 0;
+ }
+
+ if(self->encoder.state){
+ speex_encoder_destroy(self->encoder.state);
+ self->encoder.state = tsk_null;
+ }
+ speex_bits_destroy(&self->encoder.bits);
+ self->encoder.size = 0;
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+}
+
+#endif /* HAVE_LIB_SPEEX */
diff --git a/tinyDAV/src/codecs/t140/tdav_codec_t140.c b/tinyDAV/src/codecs/t140/tdav_codec_t140.c
new file mode 100644
index 0000000..b401321
--- /dev/null
+++ b/tinyDAV/src/codecs/t140/tdav_codec_t140.c
@@ -0,0 +1,175 @@
+/*
+* Copyright (C) 2012 Mamadou Diop.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_t140.c
+ * @brief T140 codec implementation (RFC 4103)
+ */
+#include "tinydav/codecs/t140/tdav_codec_t140.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+// RFC 4103 - 6. Parameter for Character Transmission Rate
+#define TDAV_CODEC_T140_CPS 30
+
+/* set(): T.140 exposes no tunable parameters; accept and ignore */
+static int tdav_codec_t140_set(tmedia_codec_t* self, const struct tmedia_param_s* param)
+{
+ return 0;
+}
+
+/* open(): nothing to initialize for this pass-through codec */
+static int tdav_codec_t140_open(tmedia_codec_t* self)
+{
+ return 0;
+}
+
+/* close(): nothing to release */
+static int tdav_codec_t140_close(tmedia_codec_t* self)
+{
+ return 0;
+}
+
+
+/* encode(): T.140 is a pass-through "codec" -- the text payload is copied verbatim.
+ * Returns the number of bytes copied, 0 on error. */
+static tsk_size_t tdav_codec_t140_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* grow the caller's buffer when it is too small */
+ if(*out_max_size < in_size){
+ void* new_buff = tsk_realloc(*out_data, in_size);
+ if(!new_buff){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_data = new_buff;
+ *out_max_size = in_size;
+ }
+ memcpy(*out_data, in_data, in_size);
+
+ return in_size;
+}
+
+/* decode(): pass-through, mirror of encode() -- copies the payload verbatim.
+ * Returns the number of bytes copied, 0 on error. */
+static tsk_size_t tdav_codec_t140_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size = in_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* allocate new buffer */
+ if(*out_max_size < out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+ memcpy(*out_data, in_data, out_size);
+
+ return out_size;
+}
+
+/* always match: T.140 has no constraining SDP attributes */
+static tsk_bool_t tdav_codec_t140_sdp_att_match(const tmedia_codec_t* self, const char* att_name, const char* att_value)
+{
+ return tsk_true;
+}
+
+/* sdp_att_get(): advertises the character transmission rate ("cps", RFC 4103 section 6).
+ * Returns a heap string (caller frees) or tsk_null. */
+static char* tdav_codec_t140_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+ if(!self || !att_name){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ if(tsk_striequals("fmtp", att_name)){
+ char* fmtp = tsk_null;
+ tsk_sprintf(&fmtp, "cps=%d", TDAV_CODEC_T140_CPS);
+ return fmtp;
+ }
+ return tsk_null;
+}
+
+
+//
+// T.140 Plugin definition
+//
+
+/* constructor: nothing to initialize beyond the base (done by tmedia_codec_create()) */
+static tsk_object_t* tdav_codec_t140_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_t140_t *t140 = self;
+ if(t140){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor: no codec-specific resources to release */
+static tsk_object_t* tdav_codec_t140_dtor(tsk_object_t * self)
+{
+ tdav_codec_t140_t *t140 = self;
+ if(t140){
+ /* deinit base */
+ /* deinit self */
+ }
+
+ return self;
+}
+/* object definition (vtable for the tsk object system) */
+static const tsk_object_def_t tdav_codec_t140_def_s =
+{
+ sizeof(tdav_codec_t140_t),
+ tdav_codec_t140_ctor,
+ tdav_codec_t140_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition: registers T.140 real-time text with the media framework */
+static const tmedia_codec_plugin_def_t tdav_codec_t140_plugin_def_s =
+{
+ &tdav_codec_t140_def_s,
+
+ tmedia_t140,
+ tmedia_codec_id_t140,
+ "t140", // encoding name used in SDP rtpmap
+ "T140 codec (From tinyDAV)",
+ TMEDIA_CODEC_FORMAT_T140,
+ tsk_true, // dynamic payload type
+ 1000, // rate (RFC 4103 uses a 1000 Hz RTP clock)
+
+ /* audio */
+ {0},
+
+ /* video */
+ {0},
+
+ tdav_codec_t140_set,
+ tdav_codec_t140_open,
+ tdav_codec_t140_close,
+ tdav_codec_t140_encode,
+ tdav_codec_t140_decode,
+ tdav_codec_t140_sdp_att_match,
+ tdav_codec_t140_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_t140_plugin_def_t = &tdav_codec_t140_plugin_def_s;
diff --git a/tinyDAV/src/codecs/theora/tdav_codec_theora.c b/tinyDAV/src/codecs/theora/tdav_codec_theora.c
new file mode 100644
index 0000000..01072c3
--- /dev/null
+++ b/tinyDAV/src/codecs/theora/tdav_codec_theora.c
@@ -0,0 +1,862 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_theora.c
+ * @brief Theora codec plugin
+ * RTP payloader/depayloader follows draft-barbato-avt-rtp-theora-01.
+ * For more information about Theora, http://www.theora.org/doc/Theora.pdf.
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/theora/tdav_codec_theora.h"
+
+#if HAVE_FFMPEG
+
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_string.h"
+#include "tsk_buffer.h"
+#include "tsk_time.h"
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <libavcodec/avcodec.h>
+
+#define THEORA_RTP_PAYLOAD_SIZE 900
+#define THEORA_GOP_SIZE_IN_SECONDS 25
+#define THEORA_PAYLOAD_HEADER_SIZE 4 /* 2.2. Payload Header */
+#define THEORA_PAYLOAD_LENGTH_SIZE 2 /* 2.2. Payload Header */
+#define THEORA_IDENT_HEADER_SIZE 42 /* 6.2 Identification Header Decode */
+#define THEORA_CONF_SEND_COUNT 10 /* at 250ms, 500ms, 1000ms, .... */
+
+/* Theora codec context: RTP scratch buffer + encoder and decoder state. */
+typedef struct tdav_codec_theora_s
+{
+ TMEDIA_DECLARE_CODEC_VIDEO;
+
+ /* scratch buffer reused for every outgoing RTP payload (grown on demand) */
+ struct{
+  uint8_t* ptr;
+  tsk_size_t size;
+ } rtp;
+
+ // Encoder
+ struct{
+  AVCodec* codec;
+  AVCodecContext* context;
+  AVFrame* picture; /* wraps the caller's YUV420 buffer (no copy) */
+  void* buffer; /* destination buffer for the encoded bitstream */
+
+  uint64_t conf_last; /* last time (ms) the config headers were sent */
+  int conf_count; /* number of times the config headers were sent */
+  tsk_bool_t force_idr; /* request a key (intra) frame on next encode */
+  int quality; /* 1 (best) .. 31 (worst); drives context->global_quality */
+  int rotation;
+  int32_t max_bw_kpbs; /* upload bandwidth cap, kilobits per second */
+ } encoder;
+
+ // decoder
+ struct{
+  AVCodec* codec;
+  AVCodecContext* context;
+  AVFrame* picture;
+
+  tsk_bool_t opened; /* context opened once valid extradata was received */
+  uint8_t conf_ident[3]; /* 24-bit Configuration Ident from the payload header */
+  tsk_buffer_t* conf_pkt; /* accumulates fragmented configuration payloads */
+
+  void* accumulator; /* reassembly buffer for fragmented frames */
+  uint8_t ebit;
+  tsk_size_t accumulator_pos;
+  uint16_t last_seq; /* last RTP sequence number, for loss detection */
+ } decoder;
+}
+tdav_codec_theora_t;
+
+
+/* 2.2. Payload Header field 'F' (fragment type) */
+typedef enum theora_frag_type_e{
+ Not_Fragmented = 0,
+ Start_Fragment = 1,
+ Continuation_Fragment = 2,
+ End_Fragment = 3,
+}
+theora_frag_type_t;
+
+/* 2.2. Payload Header field 'TDT' (Theora Data Type) */
+typedef enum theora_datatype_e{
+ Raw_Theora_payload = 0,
+ Theora_Packed_Configuration_payload = 1,
+ Legacy_Theora_Comment_payload = 2,
+ Reserved = 3,
+}
+theora_datatype_t;
+
+static int tdav_codec_theora_open_encoder(tdav_codec_theora_t* self);
+static int tdav_codec_theora_open_decoder(tdav_codec_theora_t* self);
+static int tdav_codec_theora_close_encoder(tdav_codec_theora_t* self);
+static int tdav_codec_theora_close_decoder(tdav_codec_theora_t* self);
+
+static int tdav_codec_theora_send(tdav_codec_theora_t* self, const uint8_t* data, tsk_size_t size, theora_datatype_t tdt);
+static void tdav_codec_theora_rtp_callback(tdav_codec_theora_t *self, const void *data, tsk_size_t size, tsk_bool_t marker);
+
+static void tdav_codec_theora_encap(tdav_codec_theora_t* theora, const uint8_t* pdata, tsk_size_t size);
+
+/* ============ Theora Plugin interface functions ================= */
+
+/* Applies a runtime parameter to an opened codec.
+ * Only the int32 "action" parameter is handled (force IDR, bandwidth
+ * up/down via quality/QP adjustment).
+ * @param self codec instance (must be opened)
+ * @param param parameter to apply
+ * @retval 0 if the parameter was handled, negative otherwise. */
+static int tdav_codec_theora_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+ tdav_codec_theora_t* theora = (tdav_codec_theora_t*)self;
+ if(!self || !param){ /* guard before any dereference (was missing for 'param') */
+  TSK_DEBUG_ERROR("Invalid parameter");
+  return -1;
+ }
+ if(!self->opened){
+  TSK_DEBUG_ERROR("Codec not opened");
+  return -1;
+ }
+ if(param->value_type == tmedia_pvt_int32){
+  if(tsk_striequals(param->key, "action")){
+   tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+   switch(action){
+    case tmedia_codec_action_encode_idr:
+    {
+     /* force next encoded frame to be a key (intra) frame */
+     theora->encoder.force_idr = tsk_true;
+     break;
+    }
+    case tmedia_codec_action_bw_down:
+    {
+     /* raise QP (lower quality) to reduce bandwidth usage */
+     theora->encoder.quality = TSK_CLAMP(1, (theora->encoder.quality + 1), 31);
+     theora->encoder.context->global_quality = FF_QP2LAMBDA * theora->encoder.quality;
+     break;
+    }
+    case tmedia_codec_action_bw_up:
+    {
+     /* lower QP (higher quality) when more bandwidth is available */
+     theora->encoder.quality = TSK_CLAMP(1, (theora->encoder.quality - 1), 31);
+     theora->encoder.context->global_quality = FF_QP2LAMBDA * theora->encoder.quality;
+     break;
+    }
+    default:
+    {
+     /* unknown action: ignore (also keeps the enum switch warning-free) */
+     break;
+    }
+   }
+   /* was: fell through to 'return -1' and reported failure even on success */
+   return 0;
+  }
+ }
+ return -1;
+}
+
+/* Opens both the encoder and the decoder sides of the codec.
+ * The base class guarantees the codec is not already opened.
+ * @retval 0 on success, the failing helper's error code otherwise. */
+int tdav_codec_theora_open(tmedia_codec_t* self)
+{
+ tdav_codec_theora_t* theora = (tdav_codec_theora_t*)self;
+ int ret;
+
+ if(!theora){
+  TSK_DEBUG_ERROR("Invalid parameter");
+  return -1;
+ }
+
+ if((ret = tdav_codec_theora_open_encoder(theora)) != 0){
+  return ret;
+ }
+ if((ret = tdav_codec_theora_open_decoder(theora)) != 0){
+  return ret;
+ }
+ return 0;
+}
+
+/* Closes encoder then decoder; the base class guarantees the codec
+ * is opened. Helper return values are intentionally ignored (best effort). */
+int tdav_codec_theora_close(tmedia_codec_t* self)
+{
+ tdav_codec_theora_t* theora = (tdav_codec_theora_t*)self;
+
+ if(!theora){
+  TSK_DEBUG_ERROR("Invalid parameter");
+  return -1;
+ }
+
+ (void)tdav_codec_theora_close_encoder(theora);
+ (void)tdav_codec_theora_close_decoder(theora);
+
+ return 0;
+}
+
+//#include "tsk_time.h"
+/* Encodes one raw YUV420 frame. The encoded bitstream is NOT returned via
+ * out_data: it is packetized and pushed through the out.callback by
+ * tdav_codec_theora_encap(); this function always returns 0.
+ * @param in_data raw YUV420 frame matching the negotiated width/height
+ * @retval always 0 (data is delivered through the RTP callback). */
+tsk_size_t tdav_codec_theora_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ int ret;
+ int size;
+
+ tdav_codec_theora_t* theora = (tdav_codec_theora_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+  TSK_DEBUG_ERROR("Invalid parameter");
+  return 0;
+ }
+
+ // wrap yuv420 buffer (avpicture_fill sets plane pointers; no copy)
+ size = avpicture_fill((AVPicture *)theora->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, theora->encoder.context->width, theora->encoder.context->height);
+ if(size != in_size){
+  /* guard */
+  TSK_DEBUG_ERROR("Invalid size");
+  return 0;
+ }
+
+ // Encode data
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ theora->encoder.picture->pict_type = theora->encoder.force_idr ? FF_I_TYPE : 0;
+#else
+ theora->encoder.picture->pict_type = theora->encoder.force_idr ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
+#endif
+ theora->encoder.picture->pts = AV_NOPTS_VALUE;
+ theora->encoder.picture->quality = theora->encoder.context->global_quality;
+ ret = avcodec_encode_video(theora->encoder.context, theora->encoder.buffer, size, theora->encoder.picture);
+ if(ret > 0){
+  /* ret == number of encoded bytes; packetize + send via RTP callback */
+  tdav_codec_theora_encap(theora, theora->encoder.buffer, (tsk_size_t)ret);
+ }
+ theora->encoder.force_idr = tsk_false; /* one-shot flag */
+
+ return 0;
+}
+
+/* Depayloads RTP packets per draft-barbato-avt-rtp-theora-01 and decodes
+ * complete frames with FFmpeg.
+ * Raw payloads are accumulated until the last fragment then decoded;
+ * Packed Configuration payloads are reassembled into FFmpeg extradata
+ * (ident + setup + synthetic comment header) and used to open the decoder.
+ * @param proto_hdr the trtp_rtp_header_t of the incoming packet
+ * @retval size in bytes of the decoded YUV frame written to *out_data,
+ * or 0 when no frame is ready (fragment, config packet, or error). */
+tsk_size_t tdav_codec_theora_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ const uint8_t* pdata = in_data;
+ int pkts; /* '# pkts.' header field; 0 for fragmented packets */
+ const uint8_t* pay_ptr;
+ tsk_size_t pay_size;
+ //tsk_size_t hdr_size;
+ tsk_size_t xsize, retsize = 0;
+ int got_picture_ptr;
+ int ret;
+
+ tdav_codec_theora_t* theora = (tdav_codec_theora_t*)self;
+ const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+
+ if(!self || !in_data || (in_size<(THEORA_PAYLOAD_HEADER_SIZE + THEORA_PAYLOAD_LENGTH_SIZE)) || !out_data || !theora->decoder.context){
+  TSK_DEBUG_ERROR("Invalid parameter");
+  return 0;
+ }
+
+ /* Packet lost? */
+ if(theora->decoder.last_seq != (rtp_hdr->seq_num - 1) && theora->decoder.last_seq){
+  if(theora->decoder.last_seq == rtp_hdr->seq_num){
+   // Could happen on some stupid emulators
+   //TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+   return 0;
+  }
+  TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num);
+ }
+ theora->decoder.last_seq = rtp_hdr->seq_num;
+
+ /* upper bound for one decoded frame (also the accumulator capacity) */
+ xsize = avpicture_get_size(theora->decoder.context->pix_fmt, theora->decoder.context->width, theora->decoder.context->height);
+
+ /* 2.2. Payload Header
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Configuration Ident | F |TDT|# pkts.|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ /* 2.3. Payload Data
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Payload Length | Theora Data ..
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ pkts = (pdata[3] & 0x0F); /* low nibble of 4th header byte */
+ pay_ptr = (pdata + THEORA_PAYLOAD_HEADER_SIZE);
+
+ do{ /* pkts=0 for fragmented packets */
+
+  pay_size = pay_ptr[0], pay_size<<=8, pay_size |= pay_ptr[1]; /* Big Endian read */
+  pay_ptr += THEORA_PAYLOAD_LENGTH_SIZE;
+  /* check size validity */
+  if((pay_ptr + pay_size)>(pdata + in_size)){
+   TSK_DEBUG_ERROR("Too short");
+   break;
+  }
+
+  switch((pdata[3]>>4) & 0x03){ /* TDT: Theora Data Type */
+  case Raw_Theora_payload:
+  { /* ====== Theora data (2.2. Payload Header, 2.3. Payload Data) ====== */
+   /* append buffer */
+   if((int)(theora->decoder.accumulator_pos + pay_size) <= xsize){
+    memcpy(&((uint8_t*)theora->decoder.accumulator)[theora->decoder.accumulator_pos], pay_ptr, pay_size);
+    theora->decoder.accumulator_pos += pay_size;
+   }
+   else{
+    TSK_DEBUG_WARN("Buffer overflow");
+    theora->decoder.accumulator_pos = 0;
+    break;
+   }
+   /* only take care if last packet (What about the RTP marker?) */
+   if(((pdata[3]>>6) == Not_Fragmented || (pdata[3]>>6) == End_Fragment /*|| rtp_hdr->marker*/) && theora->decoder.opened){
+    AVPacket packet;
+    /* Perform decoding */
+    av_init_packet(&packet);
+    packet.size = (int)theora->decoder.accumulator_pos;
+    packet.data = theora->decoder.accumulator;
+    ret = avcodec_decode_video2(theora->decoder.context, theora->decoder.picture, &got_picture_ptr, &packet);
+
+    if(ret < 0){
+     /* report the decode error upstream so the peer can be asked for an IDR */
+     TSK_DEBUG_WARN("Failed to decode the buffer with error code = %d", ret);
+     if(TMEDIA_CODEC_VIDEO(self)->in.callback){
+      TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+      TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+      TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+     }
+    }
+    else if(got_picture_ptr){
+     retsize = xsize;
+     TMEDIA_CODEC_VIDEO(theora)->in.width = theora->decoder.context->width;
+     TMEDIA_CODEC_VIDEO(theora)->in.height = theora->decoder.context->height;
+
+     /* allocate buffer */
+     if(*out_max_size <xsize){
+      if((*out_data = tsk_realloc(*out_data, xsize))){
+       *out_max_size = xsize;
+      }
+      else{
+       TSK_DEBUG_ERROR("Failed to allocate new buffer");
+       *out_max_size = 0;
+       return 0;
+      }
+     }
+     /* copy picture into a linear buffer */
+     avpicture_layout((AVPicture *)theora->decoder.picture, theora->decoder.context->pix_fmt, (int)theora->decoder.context->width, (int)theora->decoder.context->height,
+      *out_data, (int)retsize);
+    }
+    /* in all cases: reset accumulator */
+    theora->decoder.accumulator_pos = 0;
+   }
+   break;
+  }
+  case Theora_Packed_Configuration_payload:
+  {/* ====== Configuration packet (3.1.1. Packed Configuration) ====== */
+   /* synthetic minimal comment header: FFmpeg requires all three headers */
+   static uint8_t __theora_comment_hdr[] = {0x81, 0x74, 0x68, 0x65, 0x6F, 0x72, 0x61,
+    0x00, 0x00, 0x00, 0x08, /* 4-byte length */
+    'd', 'o', 'u', 'b', 'a', 'n', 'g', 'o', /* UTF-8 encoded string */
+   };
+
+   /* http://www.theora.org/doc/Theora.pdf - Chapter 6
+    A Theora bitstream begins with three header packets. The header packets
+    are, in order, the identifcation header, the comment header, and the setup
+    header. All are required for decode compliance. An end-of-packet condition
+    encountered while decoding the identification or setup header packets renders
+    the stream undecodable. An end-of-packet condition encountered while decode
+    the comment header is a non-fatal error condition, and MAY be ignored by a
+    decoder.
+
+    Decode continues according to HEADERTYPE. The identification header
+    is type 0x80, the comment header is type 0x81, and the setup header is type
+    0x82.
+   */
+   /*TSK_DEBUG_INFO("Theora_Packed_Configuration_payload");*/
+
+   if(!theora->decoder.opened /*|| (conf_ident changed)*/){
+    if(!theora->decoder.conf_pkt){
+     theora->decoder.conf_pkt = tsk_buffer_create(pay_ptr, pay_size);
+    }
+    else{
+     tsk_buffer_append(theora->decoder.conf_pkt, pay_ptr, pay_size);
+    }
+
+    if((pdata[3]>>6) == Not_Fragmented || (pdata[3]>>6) == End_Fragment || rtp_hdr->marker){
+     if(theora->decoder.conf_pkt->size > THEORA_IDENT_HEADER_SIZE){
+      const uint8_t* conf_ptr = theora->decoder.conf_pkt->data;
+      int setup_size = (int)theora->decoder.conf_pkt->size - THEORA_IDENT_HEADER_SIZE;
+      int extradata_size = (2 + THEORA_IDENT_HEADER_SIZE) + (2 + setup_size) + (2 + sizeof(__theora_comment_hdr));
+      if(conf_ptr[0] == 0x80 && conf_ptr[THEORA_IDENT_HEADER_SIZE] == 0x82){ /* Do not check for 't'h'e'o'r'a' */
+       /* save configration identification */
+       memcpy(theora->decoder.conf_ident, &pdata[0], sizeof(theora->decoder.conf_ident));
+       if(theora->decoder.context->extradata){
+        TSK_FREE(theora->decoder.context->extradata);
+       }
+       if((theora->decoder.context->extradata = tsk_calloc(extradata_size + FF_INPUT_BUFFER_PADDING_SIZE, 1))){
+        int index = 0;
+        /* Because of endianess pb. do not use uint16_t or uint32_t */
+        theora->decoder.context->extradata[index++] = 0x00;
+        theora->decoder.context->extradata[index++] = THEORA_IDENT_HEADER_SIZE;
+        memcpy(&theora->decoder.context->extradata[index], &conf_ptr[0], THEORA_IDENT_HEADER_SIZE);
+        index += THEORA_IDENT_HEADER_SIZE;
+
+        theora->decoder.context->extradata[index++] = (setup_size >>8) & 0xFF;
+        theora->decoder.context->extradata[index++] = (setup_size & 0xFF);
+        memcpy(&theora->decoder.context->extradata[index], &conf_ptr[THEORA_IDENT_HEADER_SIZE], setup_size);
+        index+=setup_size;
+
+        theora->decoder.context->extradata[index++] = 0x00;
+        theora->decoder.context->extradata[index++] = sizeof(__theora_comment_hdr);/* <0xFF */
+        memcpy(&theora->decoder.context->extradata[index], __theora_comment_hdr, sizeof(__theora_comment_hdr));
+
+        theora->decoder.context->extradata_size = extradata_size;
+
+        /* open deferred from open_decoder(): extradata is now complete */
+        if((ret = avcodec_open(theora->decoder.context, theora->decoder.codec)) == 0){
+         theora->decoder.opened = tsk_true;
+        }
+        else{
+         TSK_DEBUG_ERROR("Failed to open theora decoder %d", ret);
+         TSK_FREE(theora->decoder.context->extradata);
+         theora->decoder.context->extradata_size = 0;
+        }
+       }
+      }
+      else{
+       TSK_DEBUG_ERROR("Invalid configuration packet");
+      }
+     }
+     else{
+      TSK_DEBUG_ERROR("Too short");
+     }
+     tsk_buffer_cleanup(theora->decoder.conf_pkt);
+    }
+   }
+   break;
+  }
+  case Legacy_Theora_Comment_payload:
+   /*TSK_DEBUG_INFO("Legacy_Theora_Comment_payload");*/
+   break;
+  case Reserved:
+   /*TSK_DEBUG_INFO("Reserved");*/
+   break;
+  }
+ }
+ while(--pkts>0);
+
+
+
+ return retsize;
+}
+
+/* Negotiates the codec against a remote SDP attribute.
+ * "fmtp" (width/height) and "imageattr" update the negotiated in/out sizes;
+ * any other attribute (or parse failure inside fmtp) is accepted as-is
+ * because the decoder can handle arbitrary sizes. */
+tsk_bool_t tdav_codec_theora_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ tsk_bool_t ret = tsk_true; // accept decoding any size
+
+ if(tsk_striequals(att_name, "fmtp")){
+  tsk_params_L_t* params;
+  if((params = tsk_params_fromstring(att_value, ";", tsk_true))){
+   int pref_width, pref_height;
+   int prop_width = tsk_params_get_param_value_as_int(params, "width");
+   int prop_height = tsk_params_get_param_value_as_int(params, "height");
+
+   if(prop_width > 0 && prop_height > 0){
+    if(tmedia_video_get_size(TMEDIA_CODEC_VIDEO(codec)->pref_size, (unsigned *)&pref_width, (unsigned *)&pref_height) != 0){
+     TSK_OBJECT_SAFE_FREE(params);
+     return tsk_false;
+    }
+    /* never exceed our locally preferred size */
+    TMEDIA_CODEC_VIDEO(codec)->in.width = TMEDIA_CODEC_VIDEO(codec)->out.width = TSK_MIN(pref_width, prop_width);
+    TMEDIA_CODEC_VIDEO(codec)->in.height = TMEDIA_CODEC_VIDEO(codec)->out.height = TSK_MIN(pref_height, prop_height);
+   }
+   TSK_OBJECT_SAFE_FREE(params);
+  }
+ }
+ else if(tsk_striequals(att_name, "imageattr")){
+  unsigned in_width, in_height, out_width, out_height;
+  if(tmedia_parse_video_imageattr(att_value, TMEDIA_CODEC_VIDEO(codec)->pref_size, &in_width, &in_height, &out_width, &out_height) != 0){
+   return tsk_false;
+  }
+  TMEDIA_CODEC_VIDEO(codec)->in.width = in_width;
+  TMEDIA_CODEC_VIDEO(codec)->in.height = in_height;
+  TMEDIA_CODEC_VIDEO(codec)->out.width = out_width;
+  TMEDIA_CODEC_VIDEO(codec)->out.height = out_height;
+ }
+
+ return ret;
+}
+
+/* Builds the local value for the requested SDP attribute ("fmtp" or
+ * "imageattr"); the caller owns the returned string. */
+char* tdav_codec_theora_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ if(tsk_striequals(att_name, "imageattr")){
+  return tmedia_get_video_imageattr(TMEDIA_CODEC_VIDEO(codec)->pref_size,
+   TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height, TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height);
+ }
+ if(tsk_striequals(att_name, "fmtp")){
+  char* fmtp = tsk_null;
+  tsk_sprintf(&fmtp, "sampling=YCbCr-4:2:0; width=%u; height=%u", TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height);
+  return fmtp;
+ }
+ return tsk_null;
+}
+
+
+
+/* constructor: base init is done by tmedia_codec_create() */
+static tsk_object_t* tdav_codec_theora_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_theora_t *theora = (tdav_codec_theora_t*)self;
+ if(!theora){
+  return self;
+ }
+ /* start at best quality; runtime "action" params adjust it later */
+ theora->encoder.quality = 1;
+ theora->encoder.max_bw_kpbs = tmedia_defaults_get_bandwidth_video_upload_max();
+ return self;
+}
+/* destructor: releases self-owned buffers, then deinits the video base */
+static tsk_object_t* tdav_codec_theora_dtor(tsk_object_t * self)
+{
+ tdav_codec_theora_t *theora = (tdav_codec_theora_t*)self;
+ if(!theora){
+  return self;
+ }
+ /* deinit base */
+ tmedia_codec_video_deinit(self);
+ /* deinit self: encoder/decoder state was freed by close() */
+ TSK_OBJECT_SAFE_FREE(theora->decoder.conf_pkt);
+ TSK_FREE(theora->rtp.ptr);
+ theora->rtp.size = 0;
+ return self;
+}
+/* object definition: object size plus ctor/dtor/comparator for the
+ * tsk object system */
+static const tsk_object_def_t tdav_codec_theora_def_s =
+{
+ sizeof(tdav_codec_theora_t),
+ tdav_codec_theora_ctor,
+ tdav_codec_theora_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition: registered with the media layer to expose the codec */
+static const tmedia_codec_plugin_def_t tdav_codec_theora_plugin_def_s =
+{
+ &tdav_codec_theora_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_theora,
+ "theora", /* encoding name as it appears in the SDP "a=rtpmap" line */
+ "Theora Codec",
+ TMEDIA_CODEC_FORMAT_THEORA,
+ tsk_true, /* presumably "dynamic payload type" — confirm against tmedia_codec_plugin_def_t */
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (width, height, fps)*/
+ {176, 144, 0},// fps is @deprecated
+
+ /* callbacks wired to the functions above */
+ tdav_codec_theora_set,
+ tdav_codec_theora_open,
+ tdav_codec_theora_close,
+ tdav_codec_theora_encode,
+ tdav_codec_theora_decode,
+ tdav_codec_theora_sdp_att_match,
+ tdav_codec_theora_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_theora_plugin_def_t = &tdav_codec_theora_plugin_def_s;
+
+
+
+/* Allocates and opens the FFmpeg Theora encoder.
+ * Bit-rate is derived from the negotiated size/fps, clamped by the
+ * user-configured upload maximum. Uses the legacy (pre-avcodec_alloc_context3)
+ * FFmpeg API throughout.
+ * @retval 0 on success, negative on failure. */
+int tdav_codec_theora_open_encoder(tdav_codec_theora_t* self)
+{
+ int ret, size;
+ int32_t max_bw_kpbs;
+ if(!self->encoder.codec && !(self->encoder.codec = avcodec_find_encoder(CODEC_ID_THEORA))){
+  TSK_DEBUG_ERROR("Failed to find Theora encoder");
+  return -1;
+ }
+ if(self->encoder.context){
+  TSK_DEBUG_ERROR("Encoder already initialized");
+  return -1;
+ }
+ self->encoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(self->encoder.context);
+
+ self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+ self->encoder.context->time_base.num = 1;
+ self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
+ /* a 90/270 degree rotation swaps the encoded width and height */
+ self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
+ self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
+ self->encoder.context->mb_decision = FF_MB_DECISION_RD;
+
+ // Theoraenc doesn't honor 'CODEC_FLAG_QSCALE'
+ max_bw_kpbs = TSK_CLAMP(
+  0,
+  tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
+  self->encoder.max_bw_kpbs
+ );
+ self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->rc_lookahead = 0;
+#endif
+ self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
+
+ self->encoder.context->thread_count = 0;
+ self->encoder.context->rtp_payload_size = THEORA_RTP_PAYLOAD_SIZE;
+ self->encoder.context->opaque = tsk_null;
+ self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * THEORA_GOP_SIZE_IN_SECONDS);
+
+ // Picture (YUV 420): plane pointers are filled per-frame in encode()
+ if(!(self->encoder.picture = avcodec_alloc_frame())){
+  TSK_DEBUG_ERROR("Failed to create encoder picture");
+  return -2;
+ }
+ avcodec_get_frame_defaults(self->encoder.picture);
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
+ if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+  TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
+  return -2;
+ }
+
+ // Open encoder
+ if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
+  TSK_DEBUG_ERROR("Failed to open Theora encoder");
+  return ret;
+ }
+
+ /* reset the periodic configuration-header retransmission state */
+ self->encoder.conf_last = 0;
+ self->encoder.conf_count = 0;
+
+ TSK_DEBUG_INFO("[THEORA] bitrate=%d bps", self->encoder.context->bit_rate);
+
+ return ret;
+}
+
+/* Allocates the FFmpeg decoding context, the decoded picture and the RTP
+ * reassembly accumulator. The AVCodecContext is deliberately NOT opened
+ * here: opening is deferred until a "Packed Configuration" payload provides
+ * the extradata (see tdav_codec_theora_decode()).
+ * @retval 0 on success, negative on failure. */
+int tdav_codec_theora_open_decoder(tdav_codec_theora_t* self)
+{
+ int size;
+ if(!self->decoder.codec && !(self->decoder.codec = avcodec_find_decoder(CODEC_ID_THEORA))){
+  TSK_DEBUG_ERROR("Failed to find Theora decoder");
+  return -1;
+ }
+ if(self->decoder.context){
+  TSK_DEBUG_ERROR("Decoder already opened");
+  return -1;
+ }
+ self->decoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(self->decoder.context);
+
+ self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+ self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->in.width;
+ self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->in.height;
+
+ // Picture (YUV 420)
+ if(!(self->decoder.picture = avcodec_alloc_frame())){
+  TSK_DEBUG_ERROR("Failed to create decoder picture");
+  return -2;
+ }
+ avcodec_get_frame_defaults(self->decoder.picture);
+
+ /* Single allocation: the previous code allocated the accumulator twice,
+  * leaking the first buffer when the second calloc overwrote the pointer. */
+ size = avpicture_get_size(PIX_FMT_YUV420P, self->decoder.context->width, self->decoder.context->height);
+ if(!(self->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
+  TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
+  return -2;
+ }
+
+ /* avcodec_open() is called later, once valid extradata is available */
+ return 0;
+}
+
+/* Releases all encoder-side resources; safe to call when never opened. */
+int tdav_codec_theora_close_encoder(tdav_codec_theora_t* self)
+{
+ if(self->encoder.context){
+  avcodec_close(self->encoder.context); /* must precede av_free() */
+  av_free(self->encoder.context);
+  self->encoder.context = tsk_null;
+ }
+ if(self->encoder.picture){
+  av_free(self->encoder.picture);
+  self->encoder.picture = tsk_null;
+ }
+ if(self->encoder.buffer){
+  TSK_FREE(self->encoder.buffer);
+ }
+ return 0;
+}
+
+/* Releases all decoder-side resources; safe to call when never opened. */
+int tdav_codec_theora_close_decoder(tdav_codec_theora_t* self)
+{
+ if(self->decoder.context){
+  avcodec_close(self->decoder.context);
+  /* NOTE(review): extradata is freed manually here — assumes avcodec_close()
+   * does not free it in this FFmpeg version; confirm to rule out double-free. */
+  if(self->decoder.context->extradata){
+   TSK_FREE(self->decoder.context->extradata);
+   self->decoder.context->extradata_size = 0;
+  }
+  av_free(self->decoder.context);
+  self->decoder.context = tsk_null;
+ }
+ if(self->decoder.picture){
+  av_free(self->decoder.picture);
+  self->decoder.picture = tsk_null;
+ }
+ if(self->decoder.accumulator){
+  TSK_FREE(self->decoder.accumulator);
+ }
+ return 0;
+}
+
+/* Sends the encoded frame, preceded (up to THEORA_CONF_SEND_COUNT times,
+ * with increasing 250ms*count backoff) by a Packed Configuration payload
+ * rebuilt from the encoder extradata so late joiners can synchronize.
+ * extradata layout: up to 3 (16-bit big-endian length, header) pairs;
+ * the 0x81 comment header is skipped, 0x80 ident and 0x82 setup are kept. */
+static void tdav_codec_theora_encap(tdav_codec_theora_t* theora, const uint8_t* pdata, tsk_size_t size)
+{
+ if((theora->encoder.conf_count < THEORA_CONF_SEND_COUNT) && theora->encoder.context && theora->encoder.context->extradata){
+  if((theora->encoder.conf_last + (250 *theora->encoder.conf_count)) < tsk_time_now()){
+   int hdr_size, i, exd_size = theora->encoder.context->extradata_size, conf_pkt_size = 0;
+   uint8_t *conf_pkt_ptr = tsk_null, *exd_ptr = theora->encoder.context->extradata;
+   for(i=0; i<3 && exd_size; i++){
+    hdr_size = exd_ptr[0], hdr_size<<=8, hdr_size |= exd_ptr[1]; /* Big Endian read */
+    exd_ptr += 2;
+    exd_size -= 2;
+    if(hdr_size > exd_size){
+     TSK_DEBUG_ERROR("Invalid extradata");
+     TSK_FREE(conf_pkt_ptr);
+     conf_pkt_size = 0;
+     /* was missing: without the break the loop kept parsing with
+      * inconsistent sizes, risking an out-of-bounds read below */
+     break;
+    }
+
+    if(exd_ptr[0] == 0x80 || exd_ptr[0] == 0x82){ /* Ignore 'comment' which is equal to '0x81' */
+     if((conf_pkt_ptr = tsk_realloc(conf_pkt_ptr, (conf_pkt_size + hdr_size)))){
+      memcpy((conf_pkt_ptr + conf_pkt_size), exd_ptr, hdr_size);
+      conf_pkt_size += hdr_size;
+     }
+    }
+    exd_size -= hdr_size;
+    exd_ptr += hdr_size;
+   }
+
+   /* Send the conf pack */
+   if(conf_pkt_ptr && conf_pkt_size){
+    /*TSK_DEBUG_INFO("Sending Configuration Packet");*/
+    tdav_codec_theora_send(theora, conf_pkt_ptr, conf_pkt_size, Theora_Packed_Configuration_payload);
+    TSK_FREE(conf_pkt_ptr);
+   }
+
+   theora->encoder.conf_last = tsk_time_now();
+   theora->encoder.conf_count++;
+  }
+ }
+
+ /* Send Theora Raw data */
+ tdav_codec_theora_send(theora, pdata, size, Raw_Theora_payload);
+}
+
+/* Packetizes 'data' into RTP payloads of at most THEORA_RTP_PAYLOAD_SIZE
+ * bytes, prefixing each with the 4-byte payload header (F/TDT/#pkts fields
+ * in byte 3) and the 2-byte big-endian payload length, then pushes each
+ * payload through the out.callback.
+ * @param tdt Theora Data Type to advertise in the header
+ * @retval 0 on success, negative on allocation failure. */
+int tdav_codec_theora_send(tdav_codec_theora_t* self, const uint8_t* data, tsk_size_t size, theora_datatype_t tdt)
+{
+ /* 2.2. Payload Header
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Configuration Ident | F |TDT|# pkts.|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ /* first 3 bytes: hard-coded Configuration Ident (0x011983) */
+ uint8_t pay_hdr[THEORA_PAYLOAD_HEADER_SIZE/*4*/ + THEORA_PAYLOAD_LENGTH_SIZE/*2*/] = {0x01, 0x19, 0x83, 0x00, 0x00, 0x00};
+ //uint8_t* pay_ptr = tsk_null;
+ tsk_size_t pay_size;
+ tsk_bool_t frag, first = tsk_true;
+
+ pay_hdr[3] = (tdt & 0xFF) <<4; /* TDT occupies bits 4-5 of byte 3 */
+
+ /* whether the packet will be fragmented or not */
+ frag = (size > THEORA_RTP_PAYLOAD_SIZE);
+
+ while(size){
+  pay_size = TSK_MIN(THEORA_RTP_PAYLOAD_SIZE, size);
+  /* 16-bit big-endian payload length */
+  pay_hdr[4] = (uint8_t)(pay_size >> 8), pay_hdr[5] = (uint8_t)(pay_size & 0xFF);
+
+  if(frag){
+   if(first){
+    first = tsk_false;
+    pay_hdr[3] &= 0x3F, pay_hdr[3] |= (Start_Fragment <<6);
+   }
+   else{ /* could not be 'first' and 'last' */
+    if(size<=THEORA_RTP_PAYLOAD_SIZE){
+     /* Last frag */
+     pay_hdr[3] &= 0x3F, pay_hdr[3] |= (End_Fragment <<6);
+    }
+    else{
+     /* Continuation frag */
+     pay_hdr[3] &= 0x3F, pay_hdr[3] |= (Continuation_Fragment <<6);
+    }
+   }
+  }
+  else{
+   pay_hdr[3] |= 0x01; /* 'pkts' */
+   pay_hdr[3] &= 0x3F, pay_hdr[3] |= (Not_Fragmented <<6);
+  }
+
+  /* grow the reusable RTP buffer on demand */
+  if(self->rtp.size < (pay_size + sizeof(pay_hdr))){
+   if(!(self->rtp.ptr = tsk_realloc(self->rtp.ptr, (pay_size + sizeof(pay_hdr))))){
+    TSK_DEBUG_ERROR("Failed to allocate new buffer");
+    return -2;
+   }
+   self->rtp.size = (pay_size + sizeof(pay_hdr));
+  }
+
+  memcpy(self->rtp.ptr, pay_hdr, sizeof(pay_hdr));
+  memcpy((self->rtp.ptr + sizeof(pay_hdr)), data, pay_size);
+  data += pay_size;
+  size -= pay_size;
+
+  // Send data over the network
+  if(TMEDIA_CODEC_VIDEO(self)->out.callback){
+   TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = self->rtp.ptr;
+   TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = (pay_size + sizeof(pay_hdr));
+   TMEDIA_CODEC_VIDEO(self)->out.result.duration = (1./(double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate;
+   TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = (size == 0);
+   TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+  }
+ }
+
+ return 0;
+}
+
+/* Whether this FFmpeg build provides both Theora encode and decode support. */
+tsk_bool_t tdav_codec_ffmpeg_theora_is_supported()
+{
+ if(!avcodec_find_encoder(CODEC_ID_THEORA)){
+  return tsk_false;
+ }
+ return avcodec_find_decoder(CODEC_ID_THEORA) ? tsk_true : tsk_false;
+}
+
+#endif /* HAVE_FFMPEG */ \ No newline at end of file
diff --git a/tinyDAV/src/codecs/vpx/tdav_codec_vp8.c b/tinyDAV/src/codecs/vpx/tdav_codec_vp8.c
new file mode 100644
index 0000000..2c097e4
--- /dev/null
+++ b/tinyDAV/src/codecs/vpx/tdav_codec_vp8.c
@@ -0,0 +1,1059 @@
+/*
+* Copyright (C) 2011-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_vp8.c
+* @brief VP8 codec
+* The RTP packetizer/depacketizer follows draft-ietf-payload-vp8 and draft-bankoski-vp8-bitstream-05
+* Google's VP8 (http://www.webmproject.org/) encoder/decoder
+*
+* We require v1.3.0 (2013-12-02 10:37:51) or later. For iOS, because of issue 423 (https://code.google.com/p/doubango/issues/detail?id=423) we require a version after "Mon, 28 Apr 2014 22:42:23 +0100 (14:42 -0700)" integrating fix in http://git.chromium.org/gitweb/?p=webm/libvpx.git;a=commit;h=33df6d1fc1d268b4901b74b4141f83594266f041
+*
+*/
+#include "tinydav/codecs/vpx/tdav_codec_vp8.h"
+
+#if HAVE_LIBVPX
+
+#if TDAV_UNDER_WINDOWS
+# include <windows.h>
+#endif
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_time.h"
+#include "tsk_debug.h"
+
+#define VPX_CODEC_DISABLE_COMPAT 1 /* strict compliance with the latest SDK by disabling some backwards compatibility */
+#include <vpx/vpx_encoder.h>
+#include <vpx/vpx_decoder.h>
+#include <vpx/vp8cx.h>
+#include <vpx/vp8dx.h>
+
+#if !defined(TDAV_VP8_DISABLE_EXTENSION)
+# define TDAV_VP8_DISABLE_EXTENSION 0 /* Set X field value to zero */
+#endif
+
+#if TDAV_VP8_DISABLE_EXTENSION
+# define TDAV_VP8_PAY_DESC_SIZE 1
+#else
+# define TDAV_VP8_PAY_DESC_SIZE 4
+#endif
+#define TDAV_SYSTEM_CORES_COUNT 0
+#define TDAV_VP8_GOP_SIZE_IN_SECONDS 60
+#define TDAV_VP8_RTP_PAYLOAD_MAX_SIZE 1050
+#if !defined(TDAV_VP8_MAX_BANDWIDTH_KB)
+# define TDAV_VP8_MAX_BANDWIDTH_KB 6000
+#endif
+#if !defined(TDAV_VP8_MIN_BANDWIDTH_KB)
+# define TDAV_VP8_MIN_BANDWIDTH_KB 100
+#endif
+
+/* VP8 codec */
+/* VP8 codec context: one libvpx encoder and one decoder per codec instance. */
+typedef struct tdav_codec_vp8_s
+{
+ TMEDIA_DECLARE_CODEC_VIDEO;
+
+ // Encoder
+ struct {
+ vpx_codec_enc_cfg_t cfg; // libvpx encoder configuration (bitrate, size, threads, ...)
+ tsk_bool_t initialized;
+ vpx_codec_pts_t pts; // presentation timestamp, incremented once per encoded frame
+ vpx_codec_ctx_t context;
+ unsigned pic_id : 15; // 15-bit PictureID carried in the RTP payload descriptor (M=1)
+ uint64_t frame_count;
+ tsk_bool_t force_idr; // when set, the next encode() forces a key frame then clears it
+ int rotation; // 0/90/180/270; 90 and 270 swap the encoded width/height
+
+ struct {
+ uint8_t* ptr; // scratch buffer used to build outgoing RTP payloads
+ tsk_size_t size;
+ } rtp;
+
+ tsk_mutex_handle_t* mutex; // serializes encode() and config updates (encoder is NOT thread-safe)
+ } encoder;
+
+ // decoder
+ struct {
+ vpx_codec_dec_cfg_t cfg;
+ unsigned initialized : 1;
+ vpx_codec_ctx_t context;
+ void* accumulator; // reassembly buffer for the RTP payloads of the current frame
+ tsk_size_t accumulator_pos; // write offset into 'accumulator'
+ tsk_size_t accumulator_size; // allocated size of 'accumulator'
+ tsk_size_t first_part_size; // size of the first VP8 partition (from the payload header)
+ uint16_t last_seq; // last RTP sequence number seen (used for loss detection)
+ uint32_t last_timestamp; // RTP timestamp of the frame being reassembled
+ tsk_bool_t idr; // current frame is a key frame
+ tsk_bool_t corrupted; // tsk_true when a packet was lost within the current frame
+ } decoder;
+}
+tdav_codec_vp8_t;
+
+#define vp8_interface_enc (vpx_codec_vp8_cx())
+#define vp8_interface_dec (vpx_codec_vp8_dx())
+
+static int tdav_codec_vp8_open_encoder(tdav_codec_vp8_t* self);
+static int tdav_codec_vp8_open_decoder(tdav_codec_vp8_t* self);
+static int tdav_codec_vp8_close_encoder(tdav_codec_vp8_t* self);
+static int tdav_codec_vp8_close_decoder(tdav_codec_vp8_t* self);
+
+static void tdav_codec_vp8_encap(tdav_codec_vp8_t* self, const vpx_codec_cx_pkt_t *pkt);
+static void tdav_codec_vp8_rtp_callback(tdav_codec_vp8_t *self, const void *data, tsk_size_t size, uint32_t partID, tsk_bool_t part_start, tsk_bool_t non_ref, tsk_bool_t last);
+
+/* ============ VP8 Plugin interface ================= */
+
+/* tmedia "set" plugin callback: updates codec parameters at runtime.
+* Handles int32 parameters: "action" (force IDR / bandwidth probing),
+* "bw_kbps" (SDP-negotiated bandwidth), "bandwidth-max-upload" and "rotation".
+* Returns 0 on success, -2 if the live encoder reconfiguration failed and
+* -1 when the parameter is unknown or required no reconfiguration. */
+static int tdav_codec_vp8_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+ tdav_codec_vp8_t* vp8 = (tdav_codec_vp8_t*)self;
+ vpx_codec_err_t vpx_ret = VPX_CODEC_OK;
+ tsk_bool_t reconf = tsk_false;
+
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "action")) {
+ tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+ switch (action) {
+ case tmedia_codec_action_encode_idr:
+ {
+ // request a key frame from the next encode() call; no reconfiguration needed
+ vp8->encoder.force_idr = tsk_true;
+ return 0;
+ }
+ case tmedia_codec_action_bw_down:
+ {
+ // multiplicative decrease: new = old * 2/3, capped to the negotiated max upload
+ vp8->encoder.cfg.rc_target_bitrate = TSK_CLAMP(0, (int32_t)((vp8->encoder.cfg.rc_target_bitrate << 1) / 3), TMEDIA_CODEC(vp8)->bandwidth_max_upload);
+ TSK_DEBUG_INFO("New target bitrate = %d kbps", vp8->encoder.cfg.rc_target_bitrate);
+ reconf = tsk_true;
+ break;
+ }
+ case tmedia_codec_action_bw_up:
+ {
+ // multiplicative increase: new = old * 3/2, capped to the negotiated max upload
+ vp8->encoder.cfg.rc_target_bitrate = TSK_CLAMP(0, (int32_t)((vp8->encoder.cfg.rc_target_bitrate * 3) >> 1), TMEDIA_CODEC(vp8)->bandwidth_max_upload);
+ TSK_DEBUG_INFO("New target bitrate = %d kbps", vp8->encoder.cfg.rc_target_bitrate);
+ reconf = tsk_true;
+ break;
+ }
+ }
+ }
+ else if (tsk_striequals(param->key, "bw_kbps")) { // both up and down (from the SDP)
+ int32_t max_bw_userdefine = tmedia_defaults_get_bandwidth_video_upload_max();
+ int32_t max_bw_new = *((int32_t*)param->value);
+ if (max_bw_userdefine > 0) {
+ // do not use more than what the user defined in it's configuration
+ TMEDIA_CODEC(vp8)->bandwidth_max_upload = TSK_MIN(max_bw_new, max_bw_userdefine);
+ }
+ else {
+ TMEDIA_CODEC(vp8)->bandwidth_max_upload = max_bw_new;
+ }
+ vp8->encoder.cfg.rc_target_bitrate = TSK_CLAMP(0, vp8->encoder.cfg.rc_target_bitrate, TMEDIA_CODEC(vp8)->bandwidth_max_upload);
+ TSK_DEBUG_INFO("New target bitrate = %d kbps", vp8->encoder.cfg.rc_target_bitrate);
+ reconf = tsk_true;
+ }
+ else if (tsk_striequals(param->key, "bandwidth-max-upload")) {
+ int32_t bw_max_upload = *((int32_t*)param->value);
+ TSK_DEBUG_INFO("VP8 codec: bandwidth-max-upload=%d", bw_max_upload);
+ TMEDIA_CODEC(vp8)->bandwidth_max_upload = bw_max_upload;
+ reconf = tsk_true;
+ }
+ else if (tsk_striequals(param->key, "rotation")) {
+ // IMPORTANT: changing resolution requires at least libvpx v1.1.0 "Eider"
+ int32_t rotation = *((int32_t*)param->value);
+ if (vp8->encoder.rotation != rotation) {
+ // 90/270 degrees swap the encoded width and height
+ vp8->encoder.rotation = rotation;
+ vp8->encoder.cfg.g_w = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(vp8)->out.height : TMEDIA_CODEC_VIDEO(vp8)->out.width;
+ vp8->encoder.cfg.g_h = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(vp8)->out.width : TMEDIA_CODEC_VIDEO(vp8)->out.height;
+ reconf = tsk_true;
+ }
+ }
+ }
+
+ if (reconf) {
+ if (vp8->encoder.initialized) {
+ // The encoder isn't thread safe. Without this lock (and the one in the encode() function) we may have corruptions in the video (issue report from GE).
+ // Google says the encoder is thread-safe but this is not the case. But it is *multi-instance* thread-safe.
+ tsk_mutex_lock(vp8->encoder.mutex);
+ if ((vpx_ret = vpx_codec_enc_config_set(&vp8->encoder.context, &vp8->encoder.cfg)) != VPX_CODEC_OK) {
+ TSK_DEBUG_ERROR("vpx_codec_enc_config_set failed with error =%s", vpx_codec_err_to_string(vpx_ret));
+ }
+ tsk_mutex_unlock(vp8->encoder.mutex);
+ }
+ return (vpx_ret == VPX_CODEC_OK) ? 0 : -2;
+ }
+
+ return -1;
+}
+
+/* tmedia "open" plugin callback: brings up the encoder then the decoder.
+* Returns 0 on success, otherwise the error code of the failing open. */
+static int tdav_codec_vp8_open(tmedia_codec_t* self)
+{
+    tdav_codec_vp8_t* vp8 = (tdav_codec_vp8_t*)self;
+    int ret;
+
+    if (!vp8) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    /* the caller (base class) already checked that the codec is not opened */
+
+    /* Encoder first; the decoder is opened only when the encoder succeeded. */
+    ret = tdav_codec_vp8_open_encoder(vp8);
+    if (ret == 0) {
+        ret = tdav_codec_vp8_open_decoder(vp8);
+    }
+    return ret;
+}
+
+/* tmedia "close" plugin callback: tears down both directions.
+* Always returns 0 once the instance pointer is valid; the per-direction
+* close helpers are no-ops when the direction was never opened. */
+static int tdav_codec_vp8_close(tmedia_codec_t* self)
+{
+    tdav_codec_vp8_t* vp8 = (tdav_codec_vp8_t*)self;
+
+    if (!vp8) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tdav_codec_vp8_close_encoder(vp8);
+    tdav_codec_vp8_close_decoder(vp8);
+    return 0;
+}
+
+/* tmedia "encode" plugin callback: encodes one raw I420 frame.
+* Always returns 0: the encoded/packetized data is delivered to the consumer
+* through TMEDIA_CODEC_VIDEO(self)->out.callback (see tdav_codec_vp8_encap),
+* not through 'out_data'. */
+static tsk_size_t tdav_codec_vp8_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tdav_codec_vp8_t* vp8 = (tdav_codec_vp8_t*)self;
+ vpx_enc_frame_flags_t flags = 0;
+ vpx_codec_err_t vpx_ret = VPX_CODEC_OK;
+ const vpx_codec_cx_pkt_t *pkt;
+ vpx_codec_iter_t iter = tsk_null;
+ vpx_image_t image = {0};
+
+ if (!vp8 || !in_data || !in_size) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ // I420: one luma plane + two quarter-size chroma planes => w*h*3/2 bytes
+ if (in_size != (vp8->encoder.context.config.enc->g_w * vp8->encoder.context.config.enc->g_h * 3) >> 1) {
+ TSK_DEBUG_ERROR("Invalid size");
+ return 0;
+ }
+
+ // wrap yuv420 buffer
+ if (!vpx_img_wrap(&image, VPX_IMG_FMT_I420, vp8->encoder.context.config.enc->g_w, vp8->encoder.context.config.enc->g_h, 1, (unsigned char*)in_data)) {
+ TSK_DEBUG_ERROR("vpx_img_wrap failed");
+ return 0;
+ }
+
+ // encode data
+ ++vp8->encoder.pts;
+ if (vp8->encoder.force_idr) {
+ // one-shot request set via tdav_codec_vp8_set("action", encode_idr)
+ flags |= VPX_EFLAG_FORCE_KF;
+ vp8->encoder.force_idr = tsk_false;
+ }
+ // lock: the libvpx encoder instance is not thread-safe against concurrent reconfiguration
+ tsk_mutex_lock(vp8->encoder.mutex); // must
+ vpx_ret = vpx_codec_encode(&vp8->encoder.context, &image, vp8->encoder.pts, 1, flags, VPX_DL_REALTIME);
+ tsk_mutex_unlock(vp8->encoder.mutex);
+
+ if (vpx_ret != VPX_CODEC_OK) {
+ TSK_DEBUG_ERROR("vpx_codec_encode failed with error =%s", vpx_codec_err_to_string(vpx_ret));
+ goto bail;
+ }
+
+ ++vp8->encoder.frame_count;
+ ++vp8->encoder.pic_id; // 15-bit field: wraps naturally
+
+ // drain all output packets and packetize each encoded frame/partition
+ while ((pkt = vpx_codec_get_cx_data(&vp8->encoder.context, &iter))) {
+ switch (pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT:
+ {
+ tdav_codec_vp8_encap(vp8, pkt);
+ break;
+ }
+ default:
+ case VPX_CODEC_STATS_PKT: /**< Two-pass statistics for this frame */
+ case VPX_CODEC_PSNR_PKT: /**< PSNR statistics for this frame */
+ case VPX_CODEC_CUSTOM_PKT: /**< Algorithm extensions */
+ {
+ TSK_DEBUG_INFO("pkt->kind=%d not supported", (int)pkt->kind);
+ break;
+ }
+ }
+ }
+
+bail:
+ vpx_img_free(&image);
+ return 0;
+}
+
+/* tmedia "decode" plugin callback: depacketizes one RTP payload
+* (draft-ietf-payload-vp8 / RFC 7741), accumulates it until the frame is
+* complete (RTP marker) and then decodes it with libvpx.
+* @param in_data RTP payload (payload descriptor + VP8 data)
+* @param proto_hdr the trtp_rtp_header_t of the packet (seq num, timestamp, marker)
+* @param out_data [in/out] decoded I420 frame, reallocated as needed
+* @retval number of bytes written into *out_data (0 when no frame was produced)
+* Fatal errors are reported to the consumer via in.callback so it can request an IDR. */
+static tsk_size_t tdav_codec_vp8_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+    tdav_codec_vp8_t* vp8 = (tdav_codec_vp8_t*)self;
+    const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+    const uint8_t* pdata = in_data;
+    const uint8_t* pdata_end = (pdata + in_size);
+    tsk_size_t ret = 0;
+    tsk_bool_t fatal_error = tsk_false;
+    static const tsk_size_t xmax_size = (3840 * 2160 * 3) >> 3; // >>3 instead of >>1 (not an error): cap for *compressed* data
+    uint8_t S = 0, PartID = 0; // initialized: the descriptor parsing below may bail out before setting them
+
+    if (!self || !in_data || in_size < 1 || !out_data || !vp8->decoder.initialized) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    { /* 4.2. VP8 Payload Descriptor */
+        uint8_t X, R, N, I, L, T, K;//TODO: store
+
+        X = ((*pdata & 0x80) >> 7);
+        R = ((*pdata & 0x40) >> 6);
+        if (R) {
+            TSK_DEBUG_ERROR("R<>0");
+            fatal_error = tsk_true;
+            goto bail;
+        }
+        N = ((*pdata & 0x20) >> 5);
+        S = ((*pdata & 0x10) >> 4);
+        PartID = (*pdata & 0x0F);
+        // skip "REQUIRED" header
+        if (++pdata >= pdata_end) {
+            TSK_DEBUG_ERROR("Too short"); goto bail;
+        }
+        // check "OPTIONAL" headers
+        if (X) {
+            I = (*pdata & 0x80);
+            L = (*pdata & 0x40);
+            T = (*pdata & 0x20);
+            K = (*pdata & 0x10);
+            if (++pdata >= pdata_end) {
+                TSK_DEBUG_ERROR("Too short"); goto bail;
+            }
+
+            if (I) {
+                if (*pdata & 0x80) { // M
+                    // PictureID on 16bits
+                    if ((pdata += 2) >= pdata_end) {
+                        TSK_DEBUG_ERROR("Too short"); goto bail;
+                    }
+                }
+                else {
+                    // PictureID on 8bits
+                    if (++pdata >= pdata_end) {
+                        TSK_DEBUG_ERROR("Too short"); goto bail;
+                    }
+                }
+            }
+            if (L) {
+                if (++pdata >= pdata_end) {
+                    TSK_DEBUG_ERROR("Too short"); goto bail;
+                }
+            }
+            if (T || K) {
+                if (++pdata >= pdata_end) {
+                    TSK_DEBUG_ERROR("Too short"); goto bail;
+                }
+            }
+        }
+    }
+
+    in_size = (pdata_end - pdata); // remaining VP8 data after the payload descriptor
+
+    // Packet lost? Compare in 16-bit arithmetic so that the 65535 -> 0 wrap-around
+    // of the RTP sequence number is not misreported as a loss (integer promotion
+    // would otherwise make (0xFFFF + 1) != 0 always true).
+    if (vp8->decoder.last_seq && ((uint16_t)(vp8->decoder.last_seq + 1)) != rtp_hdr->seq_num) {
+        TSK_DEBUG_INFO("[VP8] Packet loss, seq_num=%d", ((uint16_t)(vp8->decoder.last_seq + 1)));
+        vp8->decoder.corrupted = tsk_true;
+    }
+    vp8->decoder.last_seq = rtp_hdr->seq_num;
+
+    // New frame ? (RTP timestamp changed)
+    if (vp8->decoder.last_timestamp != rtp_hdr->timestamp) {
+        /* 4.3. VP8 Payload Header
+           Note that the header is present only in packets
+           which have the S bit equal to one and the PartID equal to zero in the
+           payload descriptor. Subsequent packets for the same frame do not
+           carry the payload header.
+           0 1 2 3 4 5 6 7
+           +-+-+-+-+-+-+-+-+
+           |Size0|H| VER |P|
+           +-+-+-+-+-+-+-+-+
+           | Size1 |
+           +-+-+-+-+-+-+-+-+
+           | Size2 |
+           +-+-+-+-+-+-+-+-+
+           | Bytes 4..N of |
+           | VP8 payload |
+           : :
+           +-+-+-+-+-+-+-+-+
+           | OPTIONAL RTP |
+           | padding |
+           : :
+           +-+-+-+-+-+-+-+-+
+           P: Inverse key frame flag. When set to 0 the current frame is a key
+           frame. When set to 1 the current frame is an interframe. Defined
+           in [RFC6386]
+        */
+
+        // Reset accumulator position
+        vp8->decoder.accumulator_pos = 0;
+
+        // Make sure the header is present
+        if (S != 1 || PartID != 0 || in_size < 3) {
+            TSK_DEBUG_WARN("VP8 payload header is missing");
+#if 0
+            if (in_size < 3)
+#endif
+            {
+                fatal_error = tsk_true;
+                goto bail;
+            }
+        }
+        {
+            /* SizeN: The size of the first partition in bytes is calculated from
+               the 19 bits in Size0, Size1, and Size2 as 1stPartitionSize = Size0
+               + 8 * Size1 + 2048 * Size2. [RFC6386]. */
+            vp8->decoder.first_part_size = ((pdata[0] >> 5) & 0xFF) + 8 * pdata[1] + 2048 * pdata[2];
+        }
+
+        // Starting new frame...reset "corrupted" value
+        vp8->decoder.corrupted = tsk_false;
+
+        // Key frame? (P bit inverted: 0 => key frame)
+        vp8->decoder.idr = !(pdata[0] & 0x01);
+
+        // Update timestamp
+        vp8->decoder.last_timestamp = rtp_hdr->timestamp;
+    }
+
+    if (in_size > xmax_size) {
+        vp8->decoder.accumulator_pos = 0;
+        TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", (unsigned)in_size, (unsigned)xmax_size);
+        fatal_error = tsk_true;
+        goto bail;
+    }
+    // start-accumulator: grow-only reassembly buffer, bounded by xmax_size
+    if (!vp8->decoder.accumulator) {
+        if (!(vp8->decoder.accumulator = tsk_calloc(in_size, sizeof(uint8_t)))) {
+            TSK_DEBUG_ERROR("Failed to allocate new buffer");
+            fatal_error = tsk_true;
+            goto bail;
+        }
+        vp8->decoder.accumulator_size = in_size;
+    }
+    if ((vp8->decoder.accumulator_pos + in_size) >= xmax_size) {
+        TSK_DEBUG_ERROR("BufferOverflow");
+        vp8->decoder.accumulator_pos = 0;
+        fatal_error = tsk_true;
+        goto bail;
+    }
+    if ((vp8->decoder.accumulator_pos + in_size) > vp8->decoder.accumulator_size) {
+        if (!(vp8->decoder.accumulator = tsk_realloc(vp8->decoder.accumulator, (vp8->decoder.accumulator_pos + in_size)))) {
+            TSK_DEBUG_ERROR("Failed to reallocate new buffer");
+            vp8->decoder.accumulator_pos = 0;
+            vp8->decoder.accumulator_size = 0;
+            fatal_error = tsk_true;
+            goto bail;
+        }
+        vp8->decoder.accumulator_size = (vp8->decoder.accumulator_pos + in_size);
+    }
+
+    memcpy(&((uint8_t*)vp8->decoder.accumulator)[vp8->decoder.accumulator_pos], pdata, in_size);
+    vp8->decoder.accumulator_pos += in_size;
+    // end-accumulator
+
+    // Decode the frame if we have a marker or the first partition is complete and not corrupted
+    if (rtp_hdr->marker /*|| (!vp8->decoder.corrupted && vp8->decoder.first_part_size == vp8->decoder.accumulator_pos)*/) {
+        vpx_image_t *img;
+        vpx_codec_iter_t iter = tsk_null;
+        vpx_codec_err_t vpx_ret;
+        const uint8_t* pay_ptr = (const uint8_t*)vp8->decoder.accumulator;
+        const tsk_size_t pay_size = vp8->decoder.accumulator_pos;
+
+        // in all cases: reset accumulator position
+        vp8->decoder.accumulator_pos = 0;
+
+#if 0 /* http://groups.google.com/a/webmproject.org/group/apps-devel/browse_thread/thread/c84438e70fe122fa/2dfc322018aa22a8 */
+        // libvpx will crash very ofen when the frame is corrupted => for now we decided not to decode such frame
+        // according to the latest release there is a function to check if the frame
+        // is corrupted or not => To be checked
+        if(vp8->decoder.corrupted) {
+            vp8->decoder.corrupted = tsk_false;
+            goto bail;
+        }
+#endif
+
+        if (pay_size < vp8->decoder.first_part_size) {
+            TSK_DEBUG_WARN("[VP8] No enough bytes for the first part: %u < %u", (unsigned)pay_size, (unsigned)vp8->decoder.first_part_size);
+            // Not a fatal error
+            goto bail;
+        }
+
+        vpx_ret = vpx_codec_decode(&vp8->decoder.context, pay_ptr, (int)pay_size, tsk_null, 0);
+
+        if (vpx_ret != VPX_CODEC_OK) {
+            TSK_DEBUG_INFO("vpx_codec_decode failed with error =%s", vpx_codec_err_to_string(vpx_ret));
+            fatal_error = tsk_true;
+            goto bail;
+        }
+        else if (vp8->decoder.idr) {
+            // notify the consumer that a key frame has been decoded (stream is now clean)
+            TSK_DEBUG_INFO("Decoded VP8 IDR");
+            if (TMEDIA_CODEC_VIDEO(self)->in.callback) {
+                TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr;
+                TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+                TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+            }
+        }
+
+        // copy decoded data
+        ret = 0;
+        while ((img = vpx_codec_get_frame(&vp8->decoder.context, &iter))) {
+            unsigned int plane, y;
+            tsk_size_t xsize;
+
+            // update sizes
+            TMEDIA_CODEC_VIDEO(vp8)->in.width = img->d_w;
+            TMEDIA_CODEC_VIDEO(vp8)->in.height = img->d_h;
+            xsize = (TMEDIA_CODEC_VIDEO(vp8)->in.width * TMEDIA_CODEC_VIDEO(vp8)->in.height * 3) >> 1; // I420 output
+            // allocate destination buffer
+            if (*out_max_size < xsize) {
+                if (!(*out_data = tsk_realloc(*out_data, xsize))) {
+                    TSK_DEBUG_ERROR("Failed to allocate new buffer");
+                    *out_max_size = 0;
+                    goto bail;
+                }
+                *out_max_size = xsize;
+            }
+
+            // layout picture: copy each plane row by row (strides may exceed visible width)
+            for (plane = 0; plane < 3; plane++) {
+                unsigned char *buf = img->planes[plane];
+                for (y = 0; y < img->d_h >> (plane ? 1 : 0); y++) {
+                    unsigned int w_count = img->d_w >> (plane ? 1 : 0);
+                    if ((ret + w_count) > *out_max_size) {
+                        TSK_DEBUG_ERROR("BufferOverflow");
+                        ret = 0;
+                        goto bail;
+                    }
+                    memcpy(((uint8_t*)*out_data) + ret, buf, w_count);
+                    ret += w_count;
+                    buf += img->stride[plane];
+                }
+            }
+        }
+    }
+
+bail:
+    // on fatal error, ask the producer (via the consumer callback) for a new IDR
+    if (fatal_error && TMEDIA_CODEC_VIDEO(self)->in.callback) {
+        TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+        TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+        TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+    }
+
+    // vp8->decoder.last_PartID = PartID;
+    // vp8->decoder.last_S = S;
+    // vp8->decoder.last_N = N;
+    return ret;
+}
+
+/* SDP attribute matcher: only "imageattr" (RFC 6236) is negotiated here;
+* every other attribute is accepted as-is. Returns tsk_false when the
+* remote "imageattr" value cannot be parsed. */
+static tsk_bool_t tdav_codec_vp8_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+    if (tsk_striequals(att_name, "imageattr")) {
+        unsigned w_in, h_in, w_out, h_out;
+        if (tmedia_parse_video_imageattr(att_value, TMEDIA_CODEC_VIDEO(codec)->pref_size, &w_in, &h_in, &w_out, &h_out) != 0) {
+            return tsk_false;
+        }
+        /* apply the negotiated receive/send resolutions */
+        TMEDIA_CODEC_VIDEO(codec)->in.width = w_in;
+        TMEDIA_CODEC_VIDEO(codec)->in.height = h_in;
+        TMEDIA_CODEC_VIDEO(codec)->out.width = w_out;
+        TMEDIA_CODEC_VIDEO(codec)->out.height = h_out;
+    }
+    return tsk_true;
+}
+
+/* SDP attribute generator: produces the local "imageattr" (RFC 6236) value;
+* returns tsk_null for any other attribute name. Caller owns the returned string. */
+static char* tdav_codec_vp8_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+    if (!tsk_striequals(att_name, "imageattr")) {
+        return tsk_null;
+    }
+    return tmedia_get_video_imageattr(TMEDIA_CODEC_VIDEO(codec)->pref_size,
+                                      TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height,
+                                      TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height);
+}
+
+/* ============ VP8 object definition ================= */
+
+/* constructor: invoked by tsk_object_new(); all fields are zero-initialized
+* by the allocator, the heavy setup happens in tdav_codec_vp8_open(). */
+static tsk_object_t* tdav_codec_vp8_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_vp8_t *vp8 = self;
+ if (vp8) {
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor: releases the base video codec then both codec directions.
+* close_encoder()/close_decoder() are safe to call even if open() never ran. */
+static tsk_object_t* tdav_codec_vp8_dtor(tsk_object_t * self)
+{
+ tdav_codec_vp8_t *vp8 = self;
+ TSK_DEBUG_INFO("*** tdav_codec_vp8_dtor destroyed ***");
+ if (vp8) {
+ /* deinit base */
+ tmedia_codec_video_deinit(vp8);
+ /* deinit self */
+ tdav_codec_vp8_close_encoder(vp8);
+ tdav_codec_vp8_close_decoder(vp8);
+ }
+
+ return self;
+}
+/* object definition (tsk_object vtable: size, ctor, dtor, comparator) */
+static const tsk_object_def_t tdav_codec_vp8_def_s =
+{
+ sizeof(tdav_codec_vp8_t),
+ tdav_codec_vp8_ctor,
+ tdav_codec_vp8_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition: registers VP8 with the tinyMEDIA codec framework */
+static const tmedia_codec_plugin_def_t tdav_codec_vp8_plugin_def_s =
+{
+ &tdav_codec_vp8_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_vp8,
+ "VP8",
+ "VP8 codec (libvpx)",
+ TMEDIA_CODEC_FORMAT_VP8,
+ tsk_true, // dynamic payload type
+ 90000, // rate (standard RTP video clock rate)
+
+ /* audio */
+ { 0 },
+
+ /* video (default width,height,fps) */
+ { 176, 144, 0 }, // fps is @deprecated
+
+ /* callbacks */
+ tdav_codec_vp8_set,
+ tdav_codec_vp8_open,
+ tdav_codec_vp8_close,
+ tdav_codec_vp8_encode,
+ tdav_codec_vp8_decode,
+ tdav_codec_vp8_sdp_att_match,
+ tdav_codec_vp8_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_vp8_plugin_def_t = &tdav_codec_vp8_plugin_def_s;
+
+/* ============ Internal functions ================= */
+
+/* Initializes the libvpx encoder: CBR one-pass real-time configuration,
+* target bitrate clamped to the negotiated max upload, key frame at most
+* every TDAV_VP8_GOP_SIZE_IN_SECONDS, error-resilient mode and (when
+* available) multiple token partitions sized from the output resolution.
+* Also creates the mutex guarding encode()/reconfiguration.
+* Returns 0 on success, a negative code otherwise. */
+int tdav_codec_vp8_open_encoder(tdav_codec_vp8_t* self)
+{
+    vpx_codec_err_t vpx_ret;
+    vpx_enc_frame_flags_t enc_flags = 0; // VPX_EFLAG_XXX
+
+    if (self->encoder.initialized) {
+        TSK_DEBUG_ERROR("VP8 encoder already initialized");
+        return -1;
+    }
+
+    if ((vpx_ret = vpx_codec_enc_config_default(vp8_interface_enc, &self->encoder.cfg, 0)) != VPX_CODEC_OK) {
+        TSK_DEBUG_ERROR("vpx_codec_enc_config_default failed with error =%s", vpx_codec_err_to_string(vpx_ret));
+        return -2;
+    }
+    self->encoder.cfg.g_timebase.num = 1;
+    self->encoder.cfg.g_timebase.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
+    // start from the resolution/fps-derived bitrate, bounded by the negotiated max upload
+    self->encoder.cfg.rc_target_bitrate = TSK_CLAMP(
+        0,
+        tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
+        TMEDIA_CODEC(self)->bandwidth_max_upload
+    );
+    // 90/270 degree rotation swaps the encoded width and height
+    self->encoder.cfg.g_w = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
+    self->encoder.cfg.g_h = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
+    self->encoder.cfg.kf_mode = VPX_KF_AUTO;
+    /*self->encoder.cfg.kf_min_dist =*/ self->encoder.cfg.kf_max_dist = (TDAV_VP8_GOP_SIZE_IN_SECONDS * TMEDIA_CODEC_VIDEO(self)->out.fps);
+#if defined(VPX_ERROR_RESILIENT_DEFAULT)
+    self->encoder.cfg.g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT;
+#else
+    self->encoder.cfg.g_error_resilient = 1;
+#endif
+#if defined(VPX_ERROR_RESILIENT_PARTITIONS)
+    self->encoder.cfg.g_error_resilient |= VPX_ERROR_RESILIENT_PARTITIONS;
+#endif
+#if defined(VPX_CODEC_USE_OUTPUT_PARTITION)
+    enc_flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
+#endif
+    self->encoder.cfg.g_lag_in_frames = 0; // real-time: no frame lagging
+#if TDAV_UNDER_WINDOWS
+    {
+        SYSTEM_INFO SystemInfo;
+        GetSystemInfo(&SystemInfo);
+        self->encoder.cfg.g_threads = SystemInfo.dwNumberOfProcessors;
+    }
+#endif
+    self->encoder.cfg.rc_end_usage = VPX_CBR;
+    self->encoder.cfg.g_pass = VPX_RC_ONE_PASS;
+#if 0
+    self->encoder.cfg.rc_dropframe_thresh = 30;
+    self->encoder.cfg.rc_resize_allowed = 0;
+    self->encoder.cfg.rc_min_quantizer = 2;
+    self->encoder.cfg.rc_max_quantizer = 56;
+    self->encoder.cfg.rc_undershoot_pct = 100;
+    self->encoder.cfg.rc_overshoot_pct = 15;
+    self->encoder.cfg.rc_buf_initial_sz = 500;
+    self->encoder.cfg.rc_buf_optimal_sz = 600;
+    self->encoder.cfg.rc_buf_sz = 1000;
+#endif
+
+    if ((vpx_ret = vpx_codec_enc_init(&self->encoder.context, vp8_interface_enc, &self->encoder.cfg, enc_flags)) != VPX_CODEC_OK) {
+        TSK_DEBUG_ERROR("vpx_codec_enc_init failed with error =%s", vpx_codec_err_to_string(vpx_ret));
+        return -3;
+    }
+    self->encoder.pic_id = /*(rand() ^ rand()) % 0x7FFF*/0/*Use zero: why do you want to make your life harder?*/;
+
+    /* vpx_codec_control(&self->encoder.context, VP8E_SET_STATIC_THRESHOLD, 800); */
+#if !TDAV_UNDER_MOBILE /* must not remove: crash on Android for sure and probably on iOS also (all ARM devices ?) */
+    vpx_codec_control(&self->encoder.context, VP8E_SET_NOISE_SENSITIVITY, 2);
+#elif TDAV_UNDER_WINDOWS_CE
+    // low-end devices: trade quality for speed
+    vpx_codec_control(&self->encoder.context, VP8E_SET_NOISE_SENSITIVITY, 16);
+    vpx_codec_control(&self->encoder.context, VP8E_SET_CPUUSED, 16);
+    vpx_codec_control(&self->encoder.context, VP8E_SET_STATIC_THRESHOLD, 16);
+    vpx_codec_control(&self->encoder.context, VP8E_SET_SHARPNESS, 16);
+#endif
+
+    // Set number of partitions (more partitions for larger resolutions)
+#if defined(VPX_CODEC_USE_OUTPUT_PARTITION)
+    {
+        unsigned _s = TMEDIA_CODEC_VIDEO(self)->out.height * TMEDIA_CODEC_VIDEO(self)->out.width;
+        if (_s < (352 * 288)) {
+            vpx_codec_control(&self->encoder.context, VP8E_SET_TOKEN_PARTITIONS, VP8_ONE_TOKENPARTITION);
+        }
+        else if (_s < (352 * 288) * 2 * 2) {
+            vpx_codec_control(&self->encoder.context, VP8E_SET_TOKEN_PARTITIONS, VP8_TWO_TOKENPARTITION);
+        }
+        else if (_s < (352 * 288) * 4 * 4) {
+            vpx_codec_control(&self->encoder.context, VP8E_SET_TOKEN_PARTITIONS, VP8_FOUR_TOKENPARTITION);
+        }
+        else if (_s < (352 * 288) * 8 * 8) {
+            vpx_codec_control(&self->encoder.context, VP8E_SET_TOKEN_PARTITIONS, VP8_EIGHT_TOKENPARTITION);
+        }
+    }
+#endif
+
+    // Create the mutex if not already done (kept across close/open cycles)
+    if (!self->encoder.mutex && !(self->encoder.mutex = tsk_mutex_create())) {
+        vpx_codec_destroy(&self->encoder.context);
+        TSK_DEBUG_ERROR("Failed to create mutex");
+        return -4;
+    }
+
+    self->encoder.frame_count = 0;
+
+    self->encoder.initialized = tsk_true;
+
+    TSK_DEBUG_INFO("[VP8] target_bitrate=%u kbps", self->encoder.cfg.rc_target_bitrate);
+
+    return 0;
+}
+
+/* Initializes the libvpx decoder. Enables post-processing (deblocking +
+* demacroblocking) on non-mobile builds and error concealment when the
+* interface advertises support. Returns 0 on success, negative otherwise. */
+int tdav_codec_vp8_open_decoder(tdav_codec_vp8_t* self)
+{
+    vpx_codec_err_t vpx_ret;
+    vpx_codec_caps_t dec_caps;
+    vpx_codec_flags_t dec_flags = 0;
+#if !TDAV_UNDER_MOBILE
+    /* deblock + demacroblock, strength 4 (renamed from "__pp": double-underscore
+       identifiers are reserved for the implementation) */
+    static vp8_postproc_cfg_t s_postproc_cfg = { VP8_DEBLOCK | VP8_DEMACROBLOCK, 4, 0 };
+#endif
+
+    if (self->decoder.initialized) {
+        TSK_DEBUG_ERROR("VP8 decoder already initialized");
+        return -1;
+    }
+
+    // size hint only; libvpx adapts to the actual stream dimensions
+    self->decoder.cfg.w = TMEDIA_CODEC_VIDEO(self)->out.width;
+    self->decoder.cfg.h = TMEDIA_CODEC_VIDEO(self)->out.height;
+#if TDAV_UNDER_WINDOWS
+    {
+        SYSTEM_INFO SystemInfo;
+        GetSystemInfo(&SystemInfo);
+        self->decoder.cfg.threads = SystemInfo.dwNumberOfProcessors;
+    }
+#endif
+
+    // Query capabilities through the same interface macro used for
+    // vpx_codec_dec_init() below (the raw &vpx_codec_vp8_dx_algo symbol is
+    // the deprecated spelling of the same interface).
+    dec_caps = vpx_codec_get_caps(vp8_interface_dec);
+#if !TDAV_UNDER_MOBILE
+    if (dec_caps & VPX_CODEC_CAP_POSTPROC) {
+        dec_flags |= VPX_CODEC_USE_POSTPROC;
+    }
+#endif
+#if defined(VPX_CODEC_CAP_ERROR_CONCEALMENT)
+    if (dec_caps & VPX_CODEC_CAP_ERROR_CONCEALMENT) {
+        dec_flags |= VPX_CODEC_USE_ERROR_CONCEALMENT;
+    }
+#endif
+
+    if ((vpx_ret = vpx_codec_dec_init(&self->decoder.context, vp8_interface_dec, &self->decoder.cfg, dec_flags)) != VPX_CODEC_OK) {
+        TSK_DEBUG_ERROR("vpx_codec_dec_init failed with error =%s", vpx_codec_err_to_string(vpx_ret));
+        return -4;
+    }
+#if !TDAV_UNDER_MOBILE
+    // non-fatal: post-processing is a quality tweak only
+    if ((vpx_ret = vpx_codec_control(&self->decoder.context, VP8_SET_POSTPROC, &s_postproc_cfg))) {
+        TSK_DEBUG_WARN("vpx_codec_control(VP8_SET_POSTPROC) failed with error =%s", vpx_codec_err_to_string(vpx_ret));
+    }
+#endif
+    self->decoder.initialized = tsk_true;
+
+    return 0;
+}
+
+/* Tears down the encoder side: RTP scratch buffer, libvpx context and mutex.
+* Safe to call when the encoder was never opened. Always returns 0. */
+int tdav_codec_vp8_close_encoder(tdav_codec_vp8_t* self)
+{
+    TSK_DEBUG_INFO("tdav_codec_vp8_close_encoder(begin)");
+    /* release the RTP packetization scratch buffer and reset rotation */
+    TSK_FREE(self->encoder.rtp.ptr);
+    self->encoder.rtp.size = 0;
+    self->encoder.rotation = 0; /* back to the default (no rotation) */
+    /* destroy the libvpx context, then the mutex that guarded it */
+    if (self->encoder.initialized) {
+        vpx_codec_destroy(&self->encoder.context);
+        self->encoder.initialized = tsk_false;
+    }
+    if (self->encoder.mutex) {
+        tsk_mutex_destroy(&self->encoder.mutex);
+    }
+    TSK_DEBUG_INFO("tdav_codec_vp8_close_encoder(end)");
+    return 0;
+}
+
+/* Tears down the decoder side: reassembly buffer and libvpx context.
+* Safe to call when the decoder was never opened. Always returns 0. */
+int tdav_codec_vp8_close_decoder(tdav_codec_vp8_t* self)
+{
+    TSK_DEBUG_INFO("tdav_codec_vp8_close_decoder(begin)");
+    /* drop the frame reassembly accumulator */
+    TSK_FREE(self->decoder.accumulator);
+    self->decoder.accumulator_size = 0;
+    self->decoder.accumulator_pos = 0;
+    /* destroy the libvpx context */
+    if (self->decoder.initialized) {
+        vpx_codec_destroy(&self->decoder.context);
+        self->decoder.initialized = tsk_false;
+    }
+    TSK_DEBUG_INFO("tdav_codec_vp8_close_decoder(end)");
+    return 0;
+}
+
+/* ============ VP8 RTP packetizer/depacketizer ================= */
+
+
+/* Splits one encoded VP8 frame (or partition, when the build uses
+* VPX_CODEC_USE_OUTPUT_PARTITION) into RTP-sized fragments of at most
+* TDAV_VP8_RTP_PAYLOAD_MAX_SIZE bytes and hands each one to
+* tdav_codec_vp8_rtp_callback() for payload-descriptor framing and delivery. */
+static void tdav_codec_vp8_encap(tdav_codec_vp8_t* self, const vpx_codec_cx_pkt_t *pkt)
+{
+ tsk_bool_t non_ref, is_keyframe, part_start;
+ uint8_t *frame_ptr;
+ uint32_t part_size, part_ID, pkt_size, index;
+
+ if (!self || !pkt || !pkt->data.frame.buf || !pkt->data.frame.sz) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return;
+ }
+
+ index = 0;
+ frame_ptr = pkt->data.frame.buf;
+ pkt_size = (uint32_t)pkt->data.frame.sz;
+ non_ref = (pkt->data.frame.flags & VPX_FRAME_IS_DROPPABLE); // frame not used as a reference => N bit
+ is_keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY);
+
+
+#if defined(VPX_CODEC_USE_OUTPUT_PARTITION)
+ // one packet per partition: libvpx already split the frame, just fragment to MTU
+ part_ID = pkt->data.frame.partition_id;
+ part_start = tsk_true;
+ part_size = pkt_size;
+ while (index < part_size) {
+ uint32_t frag_size = TSK_MIN(TDAV_VP8_RTP_PAYLOAD_MAX_SIZE, (part_size - index));
+ tdav_codec_vp8_rtp_callback(
+ self,
+ &frame_ptr[index],
+ frag_size,
+ part_ID,
+ part_start,
+ non_ref,
+ ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0 && (index + frag_size) == part_size) // RTP marker?
+ );
+ part_start = tsk_false;
+ index += frag_size;
+ }
+#else
+ // first partition (contains modes and motion vectors)
+ part_ID = 0; // The first VP8 partition(containing modes and motion vectors) MUST be labeled with PartID = 0
+ part_start = tsk_true;
+ // first-partition size = 19 bits (5..23) of the little-endian 3-byte header,
+ // i.e. Size0 + 8*Size1 + 2048*Size2 [RFC6386] (mirrors the decoder side)
+ part_size = (frame_ptr[2] << 16) | (frame_ptr[1] << 8) | frame_ptr[0];
+ part_size = (part_size >> 5) & 0x7FFFF;
+ if (part_size > pkt_size) {
+ TSK_DEBUG_ERROR("part_size is > pkt_size(%u,%u)", part_size, pkt_size);
+ return;
+ }
+
+ // first,first,....partitions (or fragment if part_size > TDAV_VP8_RTP_PAYLOAD_MAX_SIZE)
+ while (index<part_size) {
+ uint32_t frag_size = TSK_MIN(TDAV_VP8_RTP_PAYLOAD_MAX_SIZE, (part_size - index));
+ tdav_codec_vp8_rtp_callback(self, &frame_ptr[index], frag_size, part_ID, part_start, non_ref, tsk_false);
+ part_start = tsk_false;
+ index += frag_size;
+ }
+
+ // second,third,... partitions (or fragment if part_size > TDAV_VP8_RTP_PAYLOAD_MAX_SIZE)
+ // FIXME: low FEC
+ part_start = tsk_true;
+ while (index<pkt_size) {
+ if (part_start) {
+ /* PartID SHOULD be incremented for each subsequent partition,
+ but MAY be kept at 0 for all packets. PartID MUST NOT be larger
+ than 8.
+ */
+ part_ID++;
+ }
+ part_size = TSK_MIN(TDAV_VP8_RTP_PAYLOAD_MAX_SIZE, (pkt_size - index));
+
+ // last fragment of the frame carries the RTP marker
+ tdav_codec_vp8_rtp_callback(self, &frame_ptr[index], part_size, part_ID, part_start, non_ref, (index + part_size)==pkt_size);
+ index += part_size;
+ /*
+ If more than one packet in an encoded frame contains the
+ same PartID, the S bit MUST NOT be set for any other packet than
+ the first packet with that PartID.
+ */
+ part_start = tsk_false;
+ }
+#endif /* VPX_CODEC_USE_OUTPUT_PARTITION */
+}
+
+/* Prepends the VP8 RTP payload descriptor (draft-ietf-payload-vp8) to one
+* fragment of encoded data and pushes the result to the consumer via
+* out.callback. 'last' sets the RTP marker bit (end of frame). */
+static void tdav_codec_vp8_rtp_callback(tdav_codec_vp8_t *self, const void *data, tsk_size_t size, uint32_t partID, tsk_bool_t part_start, tsk_bool_t non_ref, tsk_bool_t last)
+{
+ tsk_size_t paydesc_and_hdr_size = TDAV_VP8_PAY_DESC_SIZE;
+ tsk_bool_t has_hdr;
+ /* draft-ietf-payload-vp8-04 - 4.2. VP8 Payload Descriptor
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+
+ |X|R|N|S|PartID | (REQUIRED)
+ +-+-+-+-+-+-+-+-+
+ X: |I|L|T|K| RSV | (OPTIONAL)
+ +-+-+-+-+-+-+-+-+
+ I: |M| PictureID | (OPTIONAL)
+ +-+-+-+-+-+-+-+-+
+ L: | TL0PICIDX | (OPTIONAL)
+ +-+-+-+-+-+-+-+-+
+ T/K: |TID|Y| KEYIDX | (OPTIONAL)
+ +-+-+-+-+-+-+-+-+
+
+ draft-ietf-payload-vp8-04 - 4.3. VP8 Payload Header
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+
+ |Size0|H| VER |P|
+ +-+-+-+-+-+-+-+-+
+ | Size1 |
+ +-+-+-+-+-+-+-+-+
+ | Size2 |
+ +-+-+-+-+-+-+-+-+
+ | Bytes 4..N of |
+ | VP8 payload |
+ : :
+ +-+-+-+-+-+-+-+-+
+ | OPTIONAL RTP |
+ | padding |
+ : :
+ +-+-+-+-+-+-+-+-+
+ */
+
+ /*
+ Note that the header is present only in packets which have the S bit equal to one and the
+ PartID equal to zero in the payload descriptor.
+ */
+ // NOTE(review): the "+= 0" is deliberate: the VP8 payload header is already
+ // part of the encoded stream bytes, so no extra room is reserved for it.
+ if ((has_hdr = (part_start && partID == 0))) {
+ has_hdr = tsk_true;
+ paydesc_and_hdr_size += 0; // encoded data already contains payload header?
+ }
+
+ if (!data || !size) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return;
+ }
+ // grow-only scratch buffer: descriptor + fragment payload
+ if (self->encoder.rtp.size < (size + paydesc_and_hdr_size)) {
+ if (!(self->encoder.rtp.ptr = tsk_realloc(self->encoder.rtp.ptr, (size + paydesc_and_hdr_size)))) {
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return;
+ }
+ self->encoder.rtp.size = (size + paydesc_and_hdr_size);
+ }
+ memcpy((self->encoder.rtp.ptr + paydesc_and_hdr_size), data, size);
+
+ /* VP8 Payload Descriptor */
+ // |X|R|N|S|PartID|
+ self->encoder.rtp.ptr[0] = (partID & 0x0F) // PartID
+ | ((part_start << 4) & 0x10)// S
+ | ((non_ref << 5) & 0x20) // N
+ // R = 0
+#if TDAV_VP8_DISABLE_EXTENSION
+ | (0x00) // X=0
+#else
+ | (0x80) // X=1
+#endif
+ ;
+
+#if !TDAV_VP8_DISABLE_EXTENSION
+ // X: |I|L|T|K| RSV |
+ self->encoder.rtp.ptr[1] = 0x80; // I = 1, L = 0, T = 0, K = 0, RSV = 0
+ // I: |M| PictureID |
+ self->encoder.rtp.ptr[2] = (0x80 | ((self->encoder.pic_id >> 8) & 0x7F)); // M = 1 (PictureID on 15 bits)
+ self->encoder.rtp.ptr[3] = (self->encoder.pic_id & 0xFF);
+#endif
+
+ /* 4.2. VP8 Payload Header */
+ //if(has_hdr) {
+ // already part of the encoded stream
+ //}
+
+ // Send data over the network
+ if (TMEDIA_CODEC_VIDEO(self)->out.callback) {
+ TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = self->encoder.rtp.ptr;
+ TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = (size + TDAV_VP8_PAY_DESC_SIZE);
+ TMEDIA_CODEC_VIDEO(self)->out.result.duration = (uint32_t)((1. / (double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate);
+ TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = last;
+ TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+ }
+}
+
+#endif /* HAVE_LIBVPX */
diff --git a/tinyDAV/src/msrp/tdav_consumer_msrp.c b/tinyDAV/src/msrp/tdav_consumer_msrp.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tinyDAV/src/msrp/tdav_consumer_msrp.c
diff --git a/tinyDAV/src/msrp/tdav_producer_msrp.c b/tinyDAV/src/msrp/tdav_producer_msrp.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tinyDAV/src/msrp/tdav_producer_msrp.c
diff --git a/tinyDAV/src/msrp/tdav_session_msrp.c b/tinyDAV/src/msrp/tdav_session_msrp.c
new file mode 100644
index 0000000..325dfe3
--- /dev/null
+++ b/tinyDAV/src/msrp/tdav_session_msrp.c
@@ -0,0 +1,984 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_msrp.c
+ * @brief The Message Session Relay Protocol (MSRP) session.
+ * Used for both Message (RFC 4975) and file transfer (RFC 5547).
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/msrp/tdav_session_msrp.h"
+
+#if !defined(HAVE_TINYMSRP) || HAVE_TINYMSRP
+
+#include "tsk_memory.h" /* TSK_FREE */
+
+#define TDAV_MSRP_CONNECT_TIMEOUT 2000
+
+static void send_pending_file(tdav_session_msrp_t *session);
+static void send_bodiless(tdav_session_msrp_t *msrp);
+
+/*
+ * http://tools.ietf.org/html/draft-ietf-simple-msrp-acm-09
+ * http://tools.ietf.org/html/draft-ietf-simple-msrp-sessmatch-06
+ * http://www.openmobilealliance.org/technical/release_program/docs/SIMPLE_IM/V1_0-20100322-C/OMA-TS-SIMPLE_IM-V1_0-20100322-C.pdf
+*/
+
/* Proxy between the MSRP stack and the user-registered session callback.
 * @param _event MSRP event (borrowed: this function does NOT take ownership).
 * @retval the user callback's return code, 0 when no callback is registered,
 *         -1 on invalid arguments. */
int tdav_msrp_event_proxy_cb(tmsrp_event_t* _event/*!Not the owner of the object*/)
{
	tdav_session_msrp_t* msrp;
	int ret = 0;

	if(!_event || !_event->callback_data){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	/* hold a reference so the session cannot be destroyed while the callback runs */
	msrp = tsk_object_ref((void*)_event->callback_data);
	if(TMEDIA_SESSION_MSRP(msrp)->callback.func){
		_event->callback_data = TMEDIA_SESSION_MSRP(msrp)->callback.data; // steal callback data
		ret = TMEDIA_SESSION_MSRP(msrp)->callback.func(_event); // call callback function()
	}
	tsk_object_unref(msrp);

	return ret;
}
+
/* Network transport callback: dispatches TCP events (accept/connect/close/data)
 * for the MSRP/MSRPS transport.
 * - event_accepted: we are passive; adopt the incoming fd as the session socket.
 * - event_connected: our active connect completed.
 * - event_closed: the session socket went away; user is alerted.
 * - event_data: payload is handed to the MSRP receiver below the switch.
 * @param e transport event carrying fd, type and optional raw data.
 * @retval 0 for handled/ignored events, the receiver status for data events,
 *         -1 otherwise. */
int tdav_transport_layer_stream_cb(const tnet_transport_event_t* e)
{
	const tdav_session_msrp_t *session = e->callback_data;
	tmsrp_receiver_t* receiver;
	int ret = -1;

/* Takes/releases a session reference and forwards a one-shot MSRP event to the user. */
#define TMSRP_ALERT_USER(type) \
	{ \
		tdav_session_msrp_t *msrp = tsk_object_ref((void*)session); \
		tmsrp_event_t* _event = tmsrp_event_create(msrp, tsk_false, type, tsk_null); \
		tdav_msrp_event_proxy_cb(_event); \
		TSK_OBJECT_SAFE_FREE(_event); \
		tsk_object_unref(msrp); \
	}

	switch(e->type){
		case event_data: {
			/* handled after the switch: data is pushed to the receiver */
			break;
		}
		case event_accepted:
			if(!session->connectedFD){
				tdav_session_msrp_t *msrp = tsk_object_ref((void*)session);
				/* We are passive ==> update connection info */
				msrp->connectedFD = e->local_fd;
				tmsrp_sender_set_fd(msrp->sender, msrp->connectedFD);
				tmsrp_receiver_set_fd(msrp->receiver, msrp->connectedFD);
				msrp->fresh_conn = tsk_true;
				/* Send Bodiless request (peer authenticates the TCP session - RFC 4975 7.1) */
				if(msrp->send_bodiless){
					send_bodiless(msrp);
					msrp->send_bodiless = tsk_false;
				}
				/* Alert user */
				TMSRP_ALERT_USER(tmsrp_event_type_connected);

				tsk_object_unref(msrp);
			}
			break;
		case event_closed:
			if(e->local_fd == session->connectedFD){
				TSK_DEBUG_INFO("MSRP Socket closed");
				TMSRP_ALERT_USER(tmsrp_event_type_disconnected);
			}
			return 0;

		case event_connected:
		{
			tdav_session_msrp_t *msrp = tsk_object_ref((void*)session);
			if(e->local_fd == msrp->connectedFD){
				msrp->fresh_conn = tsk_true;
				/* Send Bodiless request */
				if(msrp->send_bodiless){
					send_bodiless(msrp);
					msrp->send_bodiless = tsk_false;
				}
				/* Alert user */
				TMSRP_ALERT_USER(tmsrp_event_type_connected);
			}
			tsk_object_unref(msrp);
		}
		break;
		default:{
			return 0;
		}
	}

	/* push any received bytes to the MSRP receiver (referenced while in use) */
	if(e->data && (receiver = tsk_object_ref((void*)session->receiver))){
		ret = tmsrp_receiver_recv(receiver, e->data, e->size);
		tsk_object_unref(receiver);
	}

	/* on a brand new connection, flush any pending outgoing file (offerer side) */
	if(session->fresh_conn){
		tdav_session_msrp_t *msrp = tsk_object_ref((void*)session);
		/* send pending file - and data?*/
		if(session->offerer){
			send_pending_file(msrp);
		}
		msrp->fresh_conn = tsk_false;
		tsk_object_unref(msrp);
	}

	return ret;
}
+
+static void send_pending_file(tdav_session_msrp_t *msrp){
+ if(msrp && msrp->file.path && !msrp->file.sent){
+ msrp->file.sent = tsk_true;
+ tsmrp_sender_send_file(msrp->sender, msrp->file.path);
+ }
+}
+
/* Sends an empty (bodiless) MSRP request over the connected socket.
 * Required when we are the active endpoint so the peer can authenticate the
 * TCP session (draft-denis-simple-msrp-comedia-02 4.2.3 / RFC 4975 7.1).
 * Requires both To-Path and From-Path to be known; silently does nothing otherwise. */
static void send_bodiless(tdav_session_msrp_t *msrp){
	tmsrp_request_t* BODILESS;
	if(msrp->config->To_Path && msrp->config->From_Path){
		if((BODILESS = tmsrp_create_bodiless(msrp->config->To_Path->uri, msrp->config->From_Path->uri))){
			char* str;
			if((str = tmsrp_message_tostring(BODILESS))){
				/* NOTE(review): warns only when tnet_sockfd_send() returns 0 — assumes
				   0 == failure (bytes-sent semantics); confirm against the tnet API. */
				if(!tnet_sockfd_send(msrp->connectedFD, str, tsk_strlen(str), 0)){
					TSK_DEBUG_WARN("Failed to send bodiless request.");
				}
				TSK_FREE(str);
			}

			TSK_OBJECT_SAFE_FREE(BODILESS);
		}
	}
}
+
+static tdav_msrp_setup_t setup_from_string(const char* setup)
+{
+ tdav_msrp_setup_t ret = msrp_setup_active;
+
+ if(setup){
+ if(tsk_strequals(setup, "holdconn")){
+ ret = msrp_setup_holdconn;
+ }
+ else if(tsk_strequals(setup, "passive")){
+ ret = msrp_setup_passive;
+ }
+ else if(tsk_strequals(setup, "actpass")){
+ ret = msrp_setup_actpass;
+ }
+ else{
+ ret = msrp_setup_active;
+ }
+ }
+ return ret;
+}
+
+static const char* setup_to_string(tdav_msrp_setup_t setup)
+{
+ switch(setup){
+ case msrp_setup_active:
+ return "active";
+ case msrp_setup_passive:
+ return "passive";
+ case msrp_setup_actpass:
+ return "actpass";
+ case msrp_setup_holdconn:
+ return "holdconn";
+ }
+ return "active";
+}
+
+static int init_neg_types(tdav_session_msrp_t* msrp, const tsdp_header_M_t* m)
+{
+ const tsdp_header_A_t* A;
+
+ if((A = tsdp_header_M_findA(m, "accept-types"))){
+ char *saveptr;
+ char* atype = tsk_strtok_r((char*)A->value, " ", &saveptr);
+ const char* default_atype = atype;
+ while(atype){
+ if(tsk_striequals(atype, "message/CPIM")){
+ tsk_strupdate(&msrp->neg_accept_type, atype);
+ if((A = tsdp_header_M_findA(m, "accept-wrapped-types"))){
+ char* awtype = tsk_strtok_r((char*)A->value, " ", &saveptr);
+ tsk_strupdate(&msrp->neg_accept_w_type, awtype); // first
+ }
+ break;
+ }
+ atype = tsk_strtok_r(tsk_null, " ", &saveptr);
+ }
+
+ if(!msrp->neg_accept_type){
+ tsk_strupdate(&msrp->neg_accept_type, default_atype);
+ }
+
+ return 0;
+ }
+ return -1;
+}
+
/* Populates the local "m=message" line with the MSRP-specific SDP attributes:
 * setup, accept-types / accept-wrapped-types, direction and — for file
 * transfer (RFC 5547) — the file-* attributes.
 * @param self the MSRP session; M.lo must already exist.
 * @param initial attributes are only added on the initial offer.
 * @retval 0 on success, -1 on invalid parameter. */
static int populate_lo(tdav_session_msrp_t* self, tsk_bool_t initial)
{
	if(!self || !TMEDIA_SESSION(self)->M.lo){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	if(initial){
		const char* att_dir = tsk_null;
		tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
			TSDP_HEADER_A_VA_ARGS("setup", setup_to_string(self->setup)),

			tsk_null
		);

		/* user-configured types take precedence over negotiated ones */
		if(self->accept_types || self->neg_accept_type){
			/* a=accept-types:message/CPIM application/octet-stream */
			tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
				TSDP_HEADER_A_VA_ARGS("accept-types", self->accept_types ? self->accept_types : self->neg_accept_type),
				tsk_null);
		}
		if(self->accept_w_types || self->neg_accept_w_type){
			/* a=accept-wrapped-types:application/octet-stream */
			tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
				TSDP_HEADER_A_VA_ARGS("accept-wrapped-types", self->accept_w_types ? self->accept_w_types : self->neg_accept_w_type),
				tsk_null);
		}

		/* direction (none defaults to sendrecv) */
		switch(self->dir){
			case tdav_msrp_dir_sendonly: att_dir = "sendonly"; break;
			case tdav_msrp_dir_recvonly: att_dir = "recvonly"; break;
			case tdav_msrp_dir_sendrecv:
			case tdav_msrp_dir_none:
				{
					att_dir = "sendrecv";
					break;
				}
		}
		tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
			TSDP_HEADER_A_VA_ARGS(att_dir, tsk_null),
			tsk_null);

		/*=== File Transfer ===*/
		if(self->file.path){
			/* Compute default 'file-selector': basename of the path
			   (handles both '\\' and '/' separators) */
			if(!self->file.selector && !TMEDIA_SESSION(self)->M.ro){
				int index = tsk_strLastIndexOf(self->file.path, tsk_strlen(self->file.path), "\\");
				if(index == -1){
					index = tsk_strLastIndexOf(self->file.path, tsk_strlen(self->file.path), "/");
				}
				index++; /* -1 (no separator) becomes 0 => whole path used as name */
				tsk_sprintf(&self->file.selector, "name:\"%s\" type:application/octet-stream", (self->file.path + index));
			}
			/* Compute default 'file-transfer-id': random token */
			if(!self->file.transfer_id && !TMEDIA_SESSION(self)->M.ro){
				tsk_istr_t rand_string;
				tsk_strrandom(&rand_string);
				self->file.transfer_id = tsk_strdup(rand_string);
			}
		}

		if(self->file.selector){
			/* a=file-selector:name:"test.pcap" type:application/octet-stream size:11376 hash:sha-1:8D:55:24:2B:F4:F9:9B:A2:54:A3:5B:91:00:15:9E:A3:D4:48:D7:DF */
			tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
				TSDP_HEADER_A_VA_ARGS("file-selector", self->file.selector),
				tsk_null);
		}
		if(self->file.transfer_id){
			/* a=file-transfer-id:vscxggbwkfnqduxwfnfozvsrtkjprepg */
			tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
				TSDP_HEADER_A_VA_ARGS("file-transfer-id", self->file.transfer_id),
				tsk_null);
		}
		if(self->file.disposition){
			/* a=file-disposition:attachment */
			tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
				TSDP_HEADER_A_VA_ARGS("file-disposition", self->file.disposition),
				tsk_null);
		}
		if(self->file.date){
			/* a=file-date:creation:2010-02-13T17:50:31.763Z */
			tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
				TSDP_HEADER_A_VA_ARGS("file-date", self->file.date),
				tsk_null);
		}
		if(self->file.icon){
			/* a=file-icon:cid:test@doubango.org */
			tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
				TSDP_HEADER_A_VA_ARGS("file-icon", self->file.icon),
				tsk_null);
		}
	}

	return 0;
}
+
/* Checks whether the remote "m=message" offer is acceptable.
 * Currently a stub: every offer is accepted without inspection. */
static tsk_bool_t match_offer(const tdav_session_msrp_t* self, const tsdp_header_M_t* M)
{
	return tsk_true;
}
+
+/* ============ Plugin interface ================= */
+
+int tdav_session_msrp_set(tmedia_session_t* self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_msrp_t* msrp;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("tdav_session_msrp_set");
+
+ msrp = (tdav_session_msrp_t*)self;
+
+ if(param->value_type == tmedia_pvt_pchar){
+ if(tsk_striequals(param->key, "remote-ip")){
+ // only if no ip associated to the "m=" line
+ if(param->value && !msrp->remote_ip){
+ msrp->remote_ip = tsk_strdup(param->value);
+ }
+ }
+ else if(tsk_striequals(param->key, "local-ip")){
+ tsk_strupdate(&msrp->local_ip, param->value);
+ }
+ else if(tsk_striequals(param->key, "local-ipver")){
+ msrp->useIPv6 = tsk_striequals(param->value, "ipv6");
+ }
+ else if(tsk_striequals(param->key, "accept-types")){
+ tsk_strupdate(&msrp->accept_types, param->value);
+ }
+ else if(tsk_striequals(param->key, "accept-wrapped-types")){
+ tsk_strupdate(&msrp->accept_w_types, param->value);
+ }
+
+ /* Configuration */
+ else if(tsk_striequals(param->key, "Failure-Report")){
+ msrp->config->Failure_Report = tsk_striequals(param->value, "yes");
+ }
+ else if(tsk_striequals(param->key, "Success-Report")){
+ msrp->config->Success_Report = tsk_striequals(param->value, "yes");
+ }
+
+ /* File Transfer */
+ else if(tsk_striequals(param->key, "file-path") && !tsk_strnullORempty((const char*)param->value)){
+ tsk_strupdate(&msrp->file.path, param->value);
+ }
+ else if(tsk_striequals(param->key, "file-selector")){
+ tsk_strupdate(&msrp->file.selector, param->value);
+ }
+ else if(tsk_striequals(param->key, "file-disposition")){
+ tsk_strupdate(&msrp->file.disposition, param->value);
+ }
+ else if(tsk_striequals(param->key, "file-date")){
+ tsk_strupdate(&msrp->file.date, param->value);
+ }
+ else if(tsk_striequals(param->key, "file-icon")){
+ tsk_strupdate(&msrp->file.icon, param->value);
+ }
+ else if(tsk_striequals(param->key, "file-transfer-id")){
+ tsk_strupdate(&msrp->file.transfer_id, param->value);
+ }
+ }
+ else if(param->value_type == tmedia_pvt_pobject){
+ if(tsk_striequals(param->key, "natt-ctx")){
+ TSK_OBJECT_SAFE_FREE(msrp->natt_ctx);
+ msrp->natt_ctx = tsk_object_ref(param->value);
+ }
+ }
+ else if(param->value_type == tmedia_pvt_int64 || param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "chunck-duration")){
+ msrp->chunck_duration = TSK_TO_UINT32((uint8_t*)param->value);
+ if(msrp->sender){
+ msrp->sender->chunck_duration = msrp->chunck_duration;
+ }
+ }
+ }
+
+ return ret;
+}
+
/* tmedia_session "get" hook: the MSRP session exposes no gettable
 * parameters, so this always fails with -1. */
int tdav_session_msrp_get(tmedia_session_t* self, tmedia_param_t* param)
{
	return -1;
}
+
/* tmedia_session "prepare" hook: creates (once) the TCP transport used for
 * MSRP/MSRPS, bound to the configured or default local address, and attaches
 * the NAT traversal context when present. The sender/receiver are created
 * later, in tdav_session_msrp_start().
 * @retval 0 on success. */
int tdav_session_msrp_prepare(tmedia_session_t* self)
{
	tdav_session_msrp_t* msrp;
	tnet_socket_type_t socket_type = tnet_socket_type_tcp_ipv4;
	int ret = 0;

/* NOTE(review): the #else branch (fixed port 2000) is dead code kept for debugging */
#if 1
	tnet_port_t local_port = TNET_SOCKET_PORT_ANY;
#else
	tnet_port_t local_port = 2000;
#endif

	TSK_DEBUG_INFO("tdav_session_msrp_prepare");

	msrp = (tdav_session_msrp_t*)self;

	/* set local port */
	if(!msrp->transport){
		if(msrp->useIPv6){
			TNET_SOCKET_TYPE_SET_IPV6Only(socket_type);
		}

		/* no explicit local ip => bind to the host's default address */
		if(!msrp->local_ip){
			tnet_host_t local;
			tnet_gethostname(&local);
			msrp->transport = tnet_transport_create(local, local_port, socket_type, "MSRP/MSRPS transport");
		}
		else{
			msrp->transport = tnet_transport_create(msrp->local_ip, local_port, socket_type, "MSRP/MSRPS transport");
		}
		/* Set NAT context */
		if(msrp->natt_ctx){
			tnet_transport_set_natt_ctx(msrp->transport, msrp->natt_ctx);
		}
	}

	/* sender will be prepared in tdav_session_msrp_start() */
	/* receiver will be prepared in tdav_session_msrp_start() */

	return ret;
}
+
/* tmedia_session "start" hook: starts the TCP transport, connects to the peer
 * when we are the active endpoint (setup = active/actpass), then creates and
 * starts the MSRP receiver and sender (once each).
 * @retval 0 on success, negative on failure. */
int tdav_session_msrp_start(tmedia_session_t* self)
{
	tdav_session_msrp_t* msrp;
	int ret;

	TSK_DEBUG_INFO("tdav_session_msrp_start");

	if(!self){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	msrp = (tdav_session_msrp_t*)self;

	/* start the transport */
	if((ret = tnet_transport_start(msrp->transport))){
		goto bail;
	}

	switch(msrp->setup){
		case msrp_setup_active:
		case msrp_setup_actpass: /* actpass is resolved to active on our side */
		{
			//
			// ACTIVE
			//
			TSK_DEBUG_INFO("connectto(%s:%d)", msrp->remote_ip, msrp->remote_port);
			if((msrp->connectedFD = tnet_transport_connectto_2(msrp->transport, msrp->remote_ip, msrp->remote_port)) == TNET_INVALID_FD){
				TSK_DEBUG_ERROR("Failed to connect to the remote party");
				ret = -2;
				goto bail;
			}
			else{
				//TSK_DEBUG_INFO("Msrp connected FD=%d", msrp->connectedFD);
				//if((ret = tnet_sockfd_waitUntilWritable(msrp->connectedFD, TDAV_MSRP_CONNECT_TIMEOUT)) && msrp->offerer){
				//	TSK_DEBUG_ERROR("%d milliseconds elapsed and the socket is still not connected to (%s:%d).", TDAV_MSRP_CONNECT_TIMEOUT, msrp->remote_ip, msrp->remote_port);
				//	goto bail;
				//}
				/* draft-denis-simple-msrp-comedia-02 - 4.2.3. Setting up the connection
					Once the TCP session is established, and if the answerer was the
					active connection endpoint, it MUST send an MSRP request. In
					particular, if it has no pending data to send, it MUST send an empty
					MSRP SEND request. That is necessary for the other endpoint to
					authenticate this TCP session.

					...RFC 4975 - 7.1
				*/
				/* actually sent from the transport callback once connected */
				msrp->send_bodiless = tsk_true;
			}
			break;
		}
		default:
		{
			//
			// PASSIVE
			//
			break;
		}
	}

	// create and start the receiver
	if(!msrp->receiver){
		if((msrp->receiver = tmsrp_receiver_create(msrp->config, msrp->connectedFD))){
			tnet_transport_set_callback(msrp->transport, TNET_TRANSPORT_CB_F(tdav_transport_layer_stream_cb), msrp);
			if((ret = tmsrp_receiver_start(msrp->receiver, msrp, tdav_msrp_event_proxy_cb))){
				TSK_DEBUG_ERROR("Failed to start the MSRP receiver");
				goto bail;
			}
		}
	}

	// create and start the sender
	if(!msrp->sender){
		if((msrp->sender = tmsrp_sender_create(msrp->config, msrp->connectedFD))){
			msrp->sender->chunck_duration = msrp->chunck_duration;
			if((ret = tmsrp_sender_start(msrp->sender))){
				TSK_DEBUG_ERROR("Failed to start the MSRP sender");
				goto bail;
			}
		}
	}

bail:
	return ret;
}
+
/* tmedia_session "pause" hook: pausing an MSRP session is not supported.
 * Always returns -1. */
int tdav_session_msrp_pause(tmedia_session_t* self)
{
	TSK_DEBUG_ERROR("Not Implemented");
	return -1;
}
+
/* tmedia_session "stop" hook: best-effort shutdown of sender, receiver and
 * transport. Individual failures are logged but the function still returns 0
 * (the component return codes are deliberately not propagated).
 * @retval 0 always (except -1 on invalid parameter). */
int tdav_session_msrp_stop(tmedia_session_t* self)
{
	tdav_session_msrp_t* msrp;
	int ret;

	if(!self){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	msrp = (tdav_session_msrp_t*)self;

	if(msrp->sender){
		if((ret = tmsrp_sender_stop(msrp->sender))){
			TSK_DEBUG_ERROR("Failed to stop the MSRP sender");
		}
	}
	if(msrp->receiver){
		if((ret = tmsrp_receiver_stop(msrp->receiver))){
			TSK_DEBUG_ERROR("Failed to stop the MSRP receiver");
		}
	}

	if(msrp->transport){
		if((ret = tnet_transport_shutdown(msrp->transport))){
			TSK_DEBUG_ERROR("Failed to stop the MSRP transport");
		}
	}

	return 0;
}
+
/* tmedia_session "get local offer" hook: builds (or refreshes) the local
 * "m=message" SDP line — path URI, connection info, setup/connection
 * attributes and (via populate_lo) the accept-types/file-* attributes.
 * Also decides our setup role from the remote offer when answering.
 * @retval the local media line, or tsk_null on error. */
const tsdp_header_M_t* tdav_session_msrp_get_lo(tmedia_session_t* self)
{
	tdav_session_msrp_t* msrp;
	tsk_bool_t changed = tsk_false;

	const char* proto = "TCP/MSRP";
	const char* sheme = "msrp";

	TSK_DEBUG_INFO("tdav_session_msrp_get_lo");

	if(!self || !self->plugin){
		TSK_DEBUG_ERROR("Invalid parameter");
		return tsk_null;
	}

	msrp = (tdav_session_msrp_t*)self;

	if(!msrp->transport){
		TSK_DEBUG_ERROR("Not prepared");
		return tsk_null;
	}

	/* TLS branch is currently disabled (condition hard-coded to false) */
	if(/*TNET_SOCKET_TYPE_IS_TLS(type)*/ tsk_false){
		proto = "TCP/TLS/MSRP";
		sheme = "msrps";
	}

	/* remote offer changed => strip attributes that will be re-negotiated */
	if(self->ro_changed && self->M.lo){
		/* Codecs */
		tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "fmtp");
		tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "rtpmap");
		tsk_list_clear_items(self->M.lo->FMTs);

		/* QoS */
		tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "curr");
		tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "des");
		tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "conf");
	}

	changed = (self->ro_changed || !self->M.lo);

	if(!self->M.lo){
		tsk_istr_t id;
		char* path = tsk_null;
		tnet_ip_t ip = "0.0.0.0";
		tnet_port_t port = 0;

		/* build the MSRP path URI: <scheme>://<ip>:<port>/<random-id>;tcp */
		tsk_strrandom(&id);
		tnet_transport_get_public_ip_n_port(msrp->transport, msrp->transport->master->fd, &ip, &port);
		tsk_sprintf(&path, "%s://%s:%u/%s;tcp", sheme, ip, port, id); //tcp is ok even if tls is used.

		if((self->M.lo = tsdp_header_M_create(self->plugin->media, port, proto))){
			tmsrp_uri_t* uri;

			tsdp_header_M_add_headers(self->M.lo,
				TSDP_FMT_VA_ARGS("*"),
				TSDP_HEADER_C_VA_ARGS("IN", msrp->useIPv6 ? "IP6" : "IP4", ip),
				TSDP_HEADER_A_VA_ARGS("path", path),
				tsk_null);

			/* our own path becomes the From-Path of outgoing requests */
			if((uri = tmsrp_uri_parse(path, tsk_strlen(path)))){
				TSK_OBJECT_SAFE_FREE(msrp->config->From_Path);
				msrp->config->From_Path = tmsrp_header_From_Path_create(uri);
				TSK_OBJECT_SAFE_FREE(uri);
			}
		}
		TSK_FREE(path);

		if(self->M.ro){ /* We are probably about to send 2xx INVITE(sdp) */
			/* [OMA-TS-SIMPLE_IM-V1_0-20100322-C] - 5.8.1 Negotiate direction of the MSRP connection setup
				Offer      Answer
				________________
				active     passive / holdconn
				passive    active / holdconn
				actpass    active / passive / holdconn
				holdconn   holdconn
			*/
			const tsdp_header_A_t* A;
			if((A = tsdp_header_M_findA(self->M.ro, "setup"))){
				tdav_msrp_setup_t setup = setup_from_string(A->value);
				switch(setup){
					case msrp_setup_passive:
					case msrp_setup_actpass: // Because of NAT issues "actpass -(always)-> active"
						msrp->setup = msrp_setup_active;
						break;
					case msrp_setup_active:
						msrp->setup = msrp_setup_passive;
						break;
					default: break;
				}
				tsdp_header_M_add_headers(self->M.lo,
					TSDP_HEADER_A_VA_ARGS("connection", "new"),
					tsk_null
				);
			}
			msrp->offerer = tsk_false;
		}
		else{ /* We are probably about to send initial INVITE */
			tsdp_header_M_add_headers(self->M.lo,
				TSDP_HEADER_A_VA_ARGS("connection", "new"),
				tsk_null
			);
			msrp->offerer = tsk_true;
		}

		/* direction: a file sender defaults to sendonly, a chat to sendrecv */
		if(msrp->dir == tdav_msrp_dir_none){
			msrp->dir = msrp->file.path ? tdav_msrp_dir_sendonly : tdav_msrp_dir_sendrecv;
		}

		/* Other SDP fields */
		populate_lo(msrp, tsk_true);
	} // !lo


	return self->M.lo;
}
+
+int tdav_session_msrp_set_ro(tmedia_session_t* self, const tsdp_header_M_t* m)
+{
+ tdav_session_msrp_t* msrp;
+ const tsdp_header_A_t* A;
+ tsk_bool_t answer;
+
+ TSK_DEBUG_INFO("tdav_session_msrp_set_ro");
+
+ if(!self || !m){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ msrp = (tdav_session_msrp_t*)self;
+
+ // answer or initial offer?
+ answer = (self->M.lo != tsk_null);
+
+ /* update remote offer */
+ TSK_OBJECT_SAFE_FREE(self->M.ro);
+ self->M.ro = tsk_object_ref((void*)m);
+
+ if(self->M.lo){
+ if((match_offer(msrp, m))){
+
+ }
+ else{
+ TSK_DEBUG_ERROR("MSRP offer doesn't match");
+ return -1;
+ }
+ /* QoS */
+ if(self->qos){
+ tmedia_qos_tline_t* ro_tline;
+ if(self->M.ro && (ro_tline = tmedia_qos_tline_from_sdp(self->M.ro))){
+ tmedia_qos_tline_set_ro(self->qos, ro_tline);
+ TSK_OBJECT_SAFE_FREE(ro_tline);
+ }
+ }
+ }
+
+ /* To-Path */
+ if((A = tsdp_header_M_findA(m, "path"))){
+ tmsrp_uri_t* uri;
+ if((uri = tmsrp_uri_parse(A->value, tsk_strlen(A->value)))){
+ TSK_OBJECT_SAFE_FREE(msrp->config->To_Path);
+ msrp->config->To_Path = tmsrp_header_To_Path_create(uri);
+ TSK_OBJECT_SAFE_FREE(uri);
+ }
+ }
+
+ // OMA-TS-SIMPLE_IM-V1_0-20080903-C - 5.8.1 Negotiate direction of the MSRP connection setup
+ if((A = tsdp_header_M_findA(m, "setup"))){
+ tdav_msrp_setup_t setup = setup_from_string(A->value);
+ switch(setup){
+ case msrp_setup_actpass:
+ case msrp_setup_passive:
+ msrp->setup = msrp_setup_active;
+ break;
+ case msrp_setup_active:
+ msrp->setup = msrp_setup_passive;
+ break;
+ default: break;
+ }
+ }
+
+ /* direction attribute */
+ if(msrp->dir == tdav_msrp_dir_none){
+ if((A = tsdp_header_M_findA(m, "sendonly"))){
+ msrp->dir = tdav_msrp_dir_recvonly;
+ }
+ else if((A = tsdp_header_M_findA(m, "sendrecv"))){
+ msrp->dir = tdav_msrp_dir_sendrecv;
+ }
+ else if((A = tsdp_header_M_findA(m, "recvonly"))){
+ msrp->dir = tdav_msrp_dir_sendonly;
+ }
+ }
+
+ /* Neg parameters */
+ init_neg_types(msrp, m);
+
+
+ /* [OMA-TS-SIMPLE_IM-V1_0-20100322-C] - 5.8.2 Support of Application Level Gateway */
+
+ /* get connection associated to this media line
+ * If the connnection is global, then the manager will call tdav_session_audio_set() */
+ if(m->C && m->C->addr && !msrp->remote_ip){
+ tsk_strupdate(&msrp->remote_ip, m->C->addr);
+ msrp->useIPv6 = tsk_striequals(m->C->addrtype, "IP6");
+ }
+ /* set remote port */
+ msrp->remote_port = m->port;
+
+ return 0;
+}
+
+/* ============ Public functions ================= */
+int tdav_session_msrp_send_file(tmedia_session_msrp_t* self, const char* path, va_list *app)
+{
+ tdav_session_msrp_t* msrp;
+ int ret;
+
+ if(!path || !(msrp = (tdav_session_msrp_t*)self) || !msrp->sender){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ //if(1 || !msrp->file.selector){
+ // /* File transfer without selector line
+ // - a=file-selector:name:"test.pcap" type:application/octet-stream size:20312
+ // */
+ // FILE* file = fopen(path, "rb");
+ // const char* fname = path + tsk_strlen(path);
+ // while(fname && (fname> path) && *fname != '\\' && *fname != '/'){
+ // fname--;
+ // }
+
+ // if(file){
+ // tsk_size_t size = 0;
+ // fseek(file, 0L, SEEK_END);
+ // size = ftell(file);
+ // fclose(file);
+
+ // tsk_sprintf(&msrp->file.selector, "name:\"%s\" type:application/octet-stream size:%u",
+ // fname, size);
+ // tsdp_header_M_add_headers(TMEDIA_SESSION(self)->M.lo,
+ // TSDP_HEADER_A_VA_ARGS("file-selector", msrp->file.selector),
+ // tsk_null);
+ // }
+ //}
+
+ ret = tsmrp_sender_send_file(msrp->sender, path);
+
+ return ret;
+}
+
/* Sends an instant message over the MSRP session.
 * Optional "content-type" / "w-content-type" entries in 'params' override the
 * negotiated content types.
 * @param self the MSRP session.
 * @param data message payload (must not be empty).
 * @param size payload size in bytes.
 * @param params optional list of tmedia_param_t overrides (may be tsk_null).
 * @retval the sender's status code, -1 on invalid parameters. */
int tdav_session_msrp_send_message(tmedia_session_msrp_t* self, const void* data, tsk_size_t size, const tmedia_params_L_t *params)
{
	const tdav_session_msrp_t* msrp;
	const tmedia_param_t* param;
	int ret;
	const tsk_list_item_t* item;
	const char* content_type = tsk_null;
	const char* w_content_type = tsk_null;

	if(!data || !size || !(msrp = (tdav_session_msrp_t*)self) || !msrp->sender){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	/* scan params for MSRP-session string overrides */
	tsk_list_foreach(item, params){
		if((param = TMEDIA_PARAM(item->data))){
			if((param->media_type & tmedia_msrp) == param->media_type
				&& param->plugin_type == tmedia_ppt_session
				&& param->value_type == tmedia_pvt_pchar){

				if(tsk_striequals(param->key, "content-type")){
					content_type = (const char*)param->value;
				}
				else if(tsk_striequals(param->key, "w-content-type")){
					w_content_type = (const char*)param->value;
				}
			}
		}
	}

	if(content_type || w_content_type){ // user-defined content-types
		ret = tsmrp_sender_send_data(msrp->sender, data, size, content_type, w_content_type);
	}
	else{ // neg. content-types
		ret = tsmrp_sender_send_data(msrp->sender, data, size,
			msrp->neg_accept_type, msrp->neg_accept_w_type
		);
	}

	return ret;
}
+
+
+
+
+
+//=================================================================================================
+// Session MSRP Plugin object definition
+//
+/* constructor */
/* tsk_object constructor: wires the public send hooks and sets the defaults
 * (actpass setup per draft-denis-simple-msrp-comedia-02 4.1.1, no direction). */
static tsk_object_t* tdav_session_msrp_ctor(tsk_object_t * self, va_list * app)
{
	tdav_session_msrp_t *session = self;
	if(session){
		/* init base: called by tmedia_session_create() */
		/* init self */
		TMEDIA_SESSION_MSRP(session)->send_file = tdav_session_msrp_send_file;
		TMEDIA_SESSION_MSRP(session)->send_message = tdav_session_msrp_send_message;

		session->config = tmsrp_config_create();
		session->setup = msrp_setup_actpass; /* draft-denis-simple-msrp-comedia-02 - 4.1.1. Sending the offer */
		session->dir = tdav_msrp_dir_none;
	}
	return self;
}
+/* destructor */
/* tsk_object destructor: releases every owned object/string, then deinits the
 * base session. Objects go through TSK_OBJECT_SAFE_FREE (ref-counted), plain
 * strings through TSK_FREE. */
static tsk_object_t* tdav_session_msrp_dtor(tsk_object_t * self)
{
	tdav_session_msrp_t *session = self;
	if(session){

		/* deinit self (rtp manager should be destroyed after the producer) */
		TSK_OBJECT_SAFE_FREE(session->config);

		TSK_OBJECT_SAFE_FREE(session->receiver);
		TSK_OBJECT_SAFE_FREE(session->sender);
		TSK_OBJECT_SAFE_FREE(session->transport);

		TSK_FREE(session->remote_ip);
		TSK_FREE(session->local_ip);

		TSK_FREE(session->neg_accept_type);
		TSK_FREE(session->neg_accept_w_type);
		TSK_FREE(session->accept_types);
		TSK_FREE(session->accept_w_types);

		/* File */
		TSK_FREE(session->file.path);
		TSK_FREE(session->file.selector);
		TSK_FREE(session->file.disposition);
		TSK_FREE(session->file.date);
		TSK_FREE(session->file.icon);
		TSK_FREE(session->file.transfer_id);

		/* NAT Traversal context */
		TSK_OBJECT_SAFE_FREE(session->natt_ctx);

		/* deinit base */
		tmedia_session_deinit(self);
	}

	return self;
}
/* object definition: tsk_object meta-data (size + ctor/dtor/comparator) */
static const tsk_object_def_t tdav_session_msrp_def_s =
{
	sizeof(tdav_session_msrp_t),
	tdav_session_msrp_ctor,
	tdav_session_msrp_dtor,
	tmedia_session_cmp,
};
/* plugin definition: registers the MSRP session with the tmedia layer
   under the SDP media name "message" */
static const tmedia_session_plugin_def_t tdav_session_msrp_plugin_def_s =
{
	&tdav_session_msrp_def_s,

	tmedia_msrp,
	"message",

	tdav_session_msrp_set,
	tdav_session_msrp_get,
	tdav_session_msrp_prepare,
	tdav_session_msrp_start,
	tdav_session_msrp_pause,
	tdav_session_msrp_stop,

	/* Audio part */
	{ tsk_null },

	tdav_session_msrp_get_lo,
	tdav_session_msrp_set_ro
};
const tmedia_session_plugin_def_t *tdav_session_msrp_plugin_def_t = &tdav_session_msrp_plugin_def_s;
+
+#endif /* !defined(HAVE_TINYMSRP) || HAVE_TINYMSRP */ \ No newline at end of file
diff --git a/tinyDAV/src/t140/tdav_consumer_t140.c b/tinyDAV/src/t140/tdav_consumer_t140.c
new file mode 100644
index 0000000..36779f4
--- /dev/null
+++ b/tinyDAV/src/t140/tdav_consumer_t140.c
@@ -0,0 +1,137 @@
+/*
+* Copyright (C) 2012 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_t140.c
+ * @brief Consumer for T140 protocol (RFC 4103)
+ */
#include "tinydav/t140/tdav_consumer_t140.h"

#include "tsk_debug.h"

#include <string.h> /* memcpy() */
+
/* tmedia_consumer "set" hook: the T.140 consumer has no runtime parameters. */
static int tdav_consumer_t140_set(tmedia_consumer_t* self, const tmedia_param_t* param)
{
	return 0;
}
+
/* tmedia_consumer "prepare" hook: nothing to set up for T.140. */
static int tdav_consumer_t140_prepare(tmedia_consumer_t* self, const tmedia_codec_t* param)
{
	return 0;
}
+
/* tmedia_consumer "start" hook: nothing to start for T.140. */
static int tdav_consumer_t140_start(tmedia_consumer_t* self)
{
	return 0;
}
+
+// input data = [type:4bytes][data]
+static int tdav_consumer_t140_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ if(!self || (size < 4)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(TDAV_CONSUMER_T140(self)->cb_ondata.func){
+ return TDAV_CONSUMER_T140(self)->cb_ondata.func(TDAV_CONSUMER_T140(self)->cb_ondata.context,
+ (enum tmedia_t140_data_type_e)*((int32_t*)buffer),
+ &((uint8_t*)buffer)[4],
+ (unsigned int)(size - 4));
+ }
+
+ return 0;
+}
+
/* tmedia_consumer "pause" hook: nothing to pause for T.140. */
static int tdav_consumer_t140_pause(tmedia_consumer_t* self)
{
	return 0;
}
+
/* tmedia_consumer "stop" hook: nothing to stop for T.140. */
static int tdav_consumer_t140_stop(tmedia_consumer_t* self)
{
	return 0;
}
+
+int tdav_consumer_t140_set_ondata_cbfn(tdav_consumer_t140_t* self, const void* context, tmedia_session_t140_ondata_cb_f func)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ self->cb_ondata.context = context;
+ self->cb_ondata.func = func;
+ return 0;
+}
+
+//
+// T.140 consumer object definition
+//
+/* constructor */
/* tsk_object constructor: nothing beyond zero-initialized state is needed. */
static tsk_object_t* tdav_consumer_t140_ctor(tsk_object_t * self, va_list * app)
{
	tdav_consumer_t140_t *consumer = self;
	if(consumer){
		/* init base */

		/* init self */

	}
	return self;
}
+/* destructor */
/* tsk_object destructor: stops the consumer if it is still running. */
static tsk_object_t* tdav_consumer_t140_dtor(tsk_object_t * self)
{
	tdav_consumer_t140_t *consumer = self;
	if(consumer){
		/* stop */
		if(consumer->started){
			tdav_consumer_t140_stop(self);
		}

		/* deinit base */

		/* deinit self */
	}

	return self;
}
/* object definition: tsk_object meta-data (size + ctor/dtor, no comparator) */
static const tsk_object_def_t tdav_consumer_t140_def_s =
{
	sizeof(tdav_consumer_t140_t),
	tdav_consumer_t140_ctor,
	tdav_consumer_t140_dtor,
	tsk_null,
};
/* plugin definition: registers the T.140 consumer with the tmedia layer */
static const tmedia_consumer_plugin_def_t tdav_consumer_t140_plugin_def_s =
{
	&tdav_consumer_t140_def_s,

	tmedia_t140,
	"T.140 consumer",

	tdav_consumer_t140_set,
	tdav_consumer_t140_prepare,
	tdav_consumer_t140_start,
	tdav_consumer_t140_consume,
	tdav_consumer_t140_pause,
	tdav_consumer_t140_stop
};
const tmedia_consumer_plugin_def_t *tdav_consumer_t140_plugin_def_t = &tdav_consumer_t140_plugin_def_s;
diff --git a/tinyDAV/src/t140/tdav_producer_t140.c b/tinyDAV/src/t140/tdav_producer_t140.c
new file mode 100644
index 0000000..8d147c9
--- /dev/null
+++ b/tinyDAV/src/t140/tdav_producer_t140.c
@@ -0,0 +1,139 @@
+/*
+* Copyright (C) 2012 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_t140.c
+ * @brief Producer for T140 protocol (RFC 4103)
+ */
+#include "tinydav/t140/tdav_producer_t140.h"
+
+#include "tsk_debug.h"
+
+/* Applies a media parameter to the producer. No-op: this producer has no tunable parameters. Always returns 0. */
+static int tdav_producer_t140_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+    return 0;
+}
+
+/* Prepares the producer against the negotiated codec. No-op: nothing to allocate or configure. Always returns 0. */
+static int tdav_producer_t140_prepare(tmedia_producer_t* self, const tmedia_codec_t* param)
+{
+    return 0;
+}
+
+/* Starts the producer. No-op: data is pushed on demand via tdav_producer_send_data(). Always returns 0. */
+static int tdav_producer_t140_start(tmedia_producer_t* self)
+{
+    return 0;
+}
+
+/* Pauses the producer. No-op: there is no running capture to suspend. Always returns 0. */
+static int tdav_producer_t140_pause(tmedia_producer_t* self)
+{
+    return 0;
+}
+
+/* Stops the producer. No-op: nothing was started. Always returns 0. */
+static int tdav_producer_t140_stop(tmedia_producer_t* self)
+{
+    return 0;
+}
+
+/* Pushes T.140 data (UTF-8 text or a control command) toward the encoder
+ * callback registered by the session layer.
+ * @param self the T.140 producer. Must not be null.
+ * @param data_type either tmedia_t140_data_type_utf8 (raw text in @a data_ptr)
+ *        or a command type whose byte sequence is encoded in the enum value.
+ * @param data_ptr/data_size payload; only meaningful for utf8, ignored (with
+ *        a warning) for commands.
+ * @retval 0 on success (also when no callback is registered), -1 on bad args. */
+int tdav_producer_send_data(tdav_producer_t140_t* self, enum tmedia_t140_data_type_e data_type, const void* data_ptr, unsigned data_size)
+{
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+    if(TMEDIA_PRODUCER(self)->enc_cb.callback){
+        if(data_type != tmedia_t140_data_type_utf8){
+            // build data
+            tsk_size_t cmd_size = 0, i;
+            int32_t cmd_val = (int32_t)data_type;
+            if(data_ptr || data_size){
+                TSK_DEBUG_WARN("Data not expected for commands");
+            }
+            // Count the non-zero bytes of the command value.
+            // NOTE(review): the first cmd_size bytes of the native-endian cmd_val
+            // are then forwarded as-is; this assumes the command bytes are
+            // contiguous from the low end and that the host is little-endian —
+            // confirm against the command layout of tmedia_t140_data_type_e.
+            // TODO: use ASM POPCNT
+            for(i = 0; i < 32; i+= 8){
+                if(((cmd_val >> i) & 0xFF)){
+                    ++cmd_size;
+                }
+            }
+            if(cmd_size){
+                TMEDIA_PRODUCER(self)->enc_cb.callback(TMEDIA_PRODUCER(self)->enc_cb.callback_data, &cmd_val, cmd_size);
+            }
+        }
+        else{
+            TMEDIA_PRODUCER(self)->enc_cb.callback(TMEDIA_PRODUCER(self)->enc_cb.callback_data, data_ptr, data_size);
+        }
+    }
+    return 0;
+}
+
+//
+// T.140 producer object definition
+//
+/* constructor */
+/* constructor: tsk_object allocator hook; memory is zeroed by the object
+ * framework, so there is currently nothing to initialize explicitly. */
+static tsk_object_t* tdav_producer_t140_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_producer_t140_t *producer = self;
+    if(producer){
+        /* init base */
+
+        /* init self */
+    }
+    return self;
+}
+/* destructor */
+/* destructor: tsk_object deallocator hook; stops the producer if it is
+ * still running before the framework frees the memory. */
+static tsk_object_t* tdav_producer_t140_dtor(tsk_object_t * self)
+{
+    tdav_producer_t140_t *producer = self;
+    if(producer){
+
+        /* stop */
+        if(producer->started){
+            tdav_producer_t140_stop(self);
+        }
+
+        /* deinit base */
+
+        /* deinit self */
+
+    }
+
+    return self;
+}
+/* object definition */
+/* object definition: object size, constructor, destructor, comparator (none) */
+static const tsk_object_def_t tdav_producer_t140_def_s =
+{
+    sizeof(tdav_producer_t140_t),
+    tdav_producer_t140_ctor,
+    tdav_producer_t140_dtor,
+    tsk_null,
+};
+/* plugin definition*/
+/* plugin definition: binds the object definition, the media type and the
+ * producer entry points (set/prepare/start/pause/stop) exposed to the
+ * tinyMEDIA producer framework. */
+static const tmedia_producer_plugin_def_t tdav_producer_t140_plugin_def_s =
+{
+    &tdav_producer_t140_def_s,
+
+    tmedia_t140,
+    "T.140 producer",
+
+    tdav_producer_t140_set,
+    tdav_producer_t140_prepare,
+    tdav_producer_t140_start,
+    tdav_producer_t140_pause,
+    tdav_producer_t140_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_t140_plugin_def_t = &tdav_producer_t140_plugin_def_s; \ No newline at end of file
diff --git a/tinyDAV/src/t140/tdav_session_t140.c b/tinyDAV/src/t140/tdav_session_t140.c
new file mode 100644
index 0000000..cd4b17c
--- /dev/null
+++ b/tinyDAV/src/t140/tdav_session_t140.c
@@ -0,0 +1,1165 @@
+/*
+* Copyright (C) 2012 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_t140.c
+ * @brief Session for T140 protocol (RFC 4103/5194/2198)
+ */
+#include "tinydav/t140/tdav_session_t140.h"
+#include "tinydav/t140/tdav_consumer_t140.h"
+#include "tinydav/t140/tdav_producer_t140.h"
+#include "tinydav/codecs/fec/tdav_codec_red.h"
+
+#include "tinymedia/tmedia_consumer.h"
+#include "tinymedia/tmedia_producer.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tinyrtp/trtp_manager.h"
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_debug.h"
+
+#include <limits.h>
+
+#define T140_BLOCKS_MAX_COUNT 20 /* number of blocks to buffer for packet loss detection and reordering */
+#define T140_BLOCK_MAX_TRANSMIT_COUNT 4 /* maximum number of times to retransmit a bloc when RED is ON */
+
+#define T140_ZERO_WIDTH_NO_BREAK_SPACE 0xEFBBBF /* send at the begining of the T.140 session */
+#define T140_BACKSPACE 0x08
+#define T140_ESC 0x1B
+#define T140_CR 0x0D
+#define T140_LF 0x0A
+#define T140_CR_LF 0x0D0A
+#define T140_BELL 0x07
+#define T140_SOS 0x98
+#define T140_STRING_TERM 0x9C
+#define T140_GRAPHIC_START 0x9B
+#define T140_GRAPHIC_END 0x6D
+#define T140_LOSS_CHAR_CHAR 0xFFFD
+#define T140_LOSS_UTF8 0xEFBFBD
+
+#define T140_WAIT_FOR_MISSING_PKT_RED_OFF 500 /*Time to wait for missing pkts when RED is OFF (in ms) */
+#define T140_WAIT_FOR_MISSING_PKT_RED_ON 3000 /*Time to wait for missing pkts when RED is ON (in ms) */
+#define T140_WAIT_FOR_BUFFERING 300 /* Time to wait for buffering (T.140 blocks forming) */
+#define T140_WAIT_FOR_IDLE 10000 /* Time to wait before entering in IDLE state */
+
+/* UTF-8 byte sequence (0xEF 0xBB 0xBF) of the ZERO WIDTH NO-BREAK SPACE,
+ * sent at the beginning of the T.140 session (see RFC 4103). */
+static const char zero_width_no_break_space[3] =
+{
+    (T140_ZERO_WIDTH_NO_BREAK_SPACE >> 16) & 0xFF,
+    (T140_ZERO_WIDTH_NO_BREAK_SPACE >> 8) & 0xFF,
+    T140_ZERO_WIDTH_NO_BREAK_SPACE & 0xFF
+};
+
+/* One buffered T.140 text block (primary or redundant transmission), keyed
+ * by its RTP sequence number. Used by the encoder as a retransmission queue
+ * and by the decoder as a reordering / loss-detection queue. */
+typedef struct t140_block_s
+{
+    TSK_DECLARE_OBJECT;
+
+    uint8_t pt; // RTP payload type carried in the RED sub-header
+    uint16_t seq_num; // RTP sequence number the block belongs to
+    uint32_t timestamp; // RTP timestamp (plus RED offset for redundant blocks)
+    uint32_t transmit_count; // number of times this block have been transmitted
+    // whether the block have been forwarded to the end user for UI display or sent through the RTP channel
+    tsk_bool_t delivered;
+    // whether the block contains data skipped because too late
+    tsk_bool_t too_late;
+    struct{
+        uint8_t* ptr;
+        tsk_size_t size;
+    }data;
+}
+t140_block_t;
+#define T140_BLOCK(self) ((t140_block_t*)(self))
+extern const tsk_object_def_t *t140_block_def_t;
+
+static int _tdav_session_t140_producer_enc_cb(const void* callback_data, const void* buffer, tsk_size_t size);
+static int _tdav_session_t140_save_outgoing_data(tdav_session_t140_t* self, const void* data_ptr, tsk_size_t data_size);
+static tsk_size_t _tdav_session_t140_encap_red(tdav_session_t140_t* self, void** out_data_ptr, tsk_size_t* out_data_size, tsk_bool_t *last_data);
+static int _tdav_session_t140_rtp_cb(const void* callback_data, const struct trtp_rtp_packet_s* packet);
+static int _tdav_session_t140_recv_red(tdav_session_t140_t* self, const struct trtp_rtp_packet_s* packet);
+static int _tdav_session_t140_recv_raw(tdav_session_t140_t* self, const void* data_ptr, tsk_size_t data_size);
+static int _tdav_session_t140_decode_and_deliver_blocks(tdav_session_t140_t* self, tsk_bool_t called_from_timer);
+static int _tdav_session_t140_consume_data(tdav_session_t140_t* self, enum tmedia_t140_data_type_e data_type, const void *data_ptr, tsk_size_t data_size);
+static int _tdav_session_t140_timer_cb(const void* arg, tsk_timer_id_t timer_id);
+static int _tdav_session_t140_pred_block_with_transmit_count_more_than_or_equal(const tsk_list_item_t *item, const void *transmit_count);
+
+static tsk_bool_t _tdav_session_t140_blocks_has_gap(tdav_session_t140_blocks_L_t* blocks);
+static tsk_bool_t _tdav_session_t140_blocks_has_seqnum(tdav_session_t140_blocks_L_t* blocks, uint16_t seq_num);
+static int _tdav_session_t140_blocks_add(tdav_session_t140_blocks_L_t* blocks, int64_t* blocks_count, t140_block_t** block, int64_t blocks_count_max);
+static t140_block_t* _tdav_session_t140_block_create(uint8_t pt, uint16_t seq_num, uint32_t timestamp, tsk_bool_t delivered, const void* data_ptr, tsk_size_t data_size);
+
+/* Grows (never shrinks) a heap buffer to at least _new_size bytes.
+ * On realloc failure the pointer is NULL and the recorded size stays 0 so a
+ * later call retries the allocation; previously the size was unconditionally
+ * set to _new_size even on failure, leaving a NULL buffer with a non-zero
+ * capacity. Callers must still check the pointer before use. */
+#define T140_RESIZE_BUFFER(_in_buff_ptr, _in_buff_size, _new_size) \
+if((_in_buff_size) < (_new_size)){ \
+    if(!((_in_buff_ptr) = tsk_realloc((_in_buff_ptr), (_new_size)))){ \
+        TSK_DEBUG_ERROR("Failed to allocate new buffer"); \
+        (_in_buff_size) = 0; \
+    } \
+    else{ \
+        (_in_buff_size) = (_new_size); \
+    } \
+}
+
+static int tdav_session_t140_send_data(tmedia_session_t* self, enum tmedia_t140_data_type_e data_type, const void* data_ptr, unsigned data_size);
+#define tdav_session_t140_send_data_2(self, data_type) tdav_session_t140_send_data((self), (data_type), tsk_null, 0)
+
+/* Applies a media parameter to the T.140 session. Parameters understood by
+ * the audio/video base session are handled there; anything else is logged
+ * as unexpected (this session defines no parameters of its own).
+ * @retval 0 on success, -1 when @a self is null. */
+static int tdav_session_t140_set(tmedia_session_t* self, const tmedia_param_t* param)
+{
+    int ret = 0;
+
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    // give the base class the first chance to consume the parameter
+    if(tdav_session_av_set(TDAV_SESSION_AV(self), param) == tsk_true){
+        return 0;
+    }
+
+    if(param->plugin_type == tmedia_ppt_consumer){
+        TSK_DEBUG_ERROR("Not expected consumer(%s)", param->key);
+    }
+    else if(param->plugin_type == tmedia_ppt_producer){
+        TSK_DEBUG_ERROR("Not expected producer_set(%s)", param->key);
+    }
+    else{
+        // was a copy-paste of the producer message; log the session scope instead
+        TSK_DEBUG_ERROR("Not expected session_set(%s)", param->key);
+    }
+
+    return ret;
+}
+
+/* Reads a media parameter from the T.140 session; delegates to the
+ * audio/video base session and reports anything it does not handle.
+ * @retval 0 on success, -1 on bad args, -2 for an unknown parameter. */
+static int tdav_session_t140_get(tmedia_session_t* self, tmedia_param_t* param)
+{
+    if(!self || !param){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if(tdav_session_av_get(TDAV_SESSION_AV(self), param) == tsk_true){
+        return 0;
+    }
+
+    TSK_DEBUG_ERROR("(%s) not expected param key for plugin_type=%d", param->key, param->plugin_type);
+    return -2;
+}
+
+/* Prepares the session: runs the audio/video base preparation (RTP manager,
+ * codecs, ...) then installs the incoming-RTP callback.
+ * @retval 0 on success, otherwise a negative error from the base layer. */
+static int tdav_session_t140_prepare(tmedia_session_t* self)
+{
+    tdav_session_av_t* base = (tdav_session_av_t*)(self);
+    int ret;
+
+    if((ret = tdav_session_av_prepare(base))){
+        TSK_DEBUG_ERROR("tdav_session_av_prepare(t140) failed");
+        return ret;
+    }
+
+    if(base->rtp_manager){
+        // route every received RTP packet to _tdav_session_t140_rtp_cb()
+        ret = trtp_manager_set_rtp_callback(base->rtp_manager, _tdav_session_t140_rtp_cb, base);
+    }
+
+    return ret;
+}
+
+/* Starts the session: picks the best negotiated codec as encoder, starts the
+ * base audio/video layer and the timer manager, selects the outgoing RTP
+ * payload type (RED when negotiated, otherwise plain T.140) and schedules
+ * the IDLE keep-alive timer.
+ * @retval 0 on success, negative on error. */
+static int tdav_session_t140_start(tmedia_session_t* self)
+{
+    int ret;
+    tdav_session_t140_t* t140;
+    const tmedia_codec_t* best_codec;
+    tdav_session_av_t* base;
+
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    t140 = (tdav_session_t140_t*)self;
+    base = (tdav_session_av_t*)self;
+
+    if(!(best_codec = tdav_session_av_get_best_neg_codec(base))){
+        TSK_DEBUG_ERROR("No codec matched");
+        return -2;
+    }
+
+    // take a reference on the chosen encoder codec
+    TSK_OBJECT_SAFE_FREE(t140->encoder.codec);
+    t140->encoder.codec = tsk_object_ref((tsk_object_t*)best_codec);
+
+    if((ret = tdav_session_av_start(base, best_codec))){
+        TSK_DEBUG_ERROR("tdav_session_av_start(t140) failed");
+        return ret;
+    }
+
+    if((ret = tsk_timer_manager_start(t140->h_timer)) != 0){
+        TSK_DEBUG_ERROR("Failed to start the timer");
+        return ret;
+    }
+
+    if(base->rtp_manager){
+        int pt;
+        // outgoing payload type: the RED payload when RED was negotiated,
+        // otherwise the plain T.140 payload; prefer the negotiated format
+        t140->encoder.payload_type = t140->encoder.codec->neg_format ? atoi(t140->encoder.codec->neg_format) : atoi(t140->encoder.codec->format);
+        pt = base->red.codec ?
+            ( base->red.codec->neg_format ? atoi(base->red.codec->neg_format) : atoi(base->red.codec->format) ) :
+            ( t140->encoder.payload_type );
+        trtp_manager_set_payload_type(base->rtp_manager, pt);
+    }
+
+    t140->decoder.last_seq_num = -1; // -1 == "no packet received yet"
+
+    t140->started = (ret == 0);
+
+    if(t140->started){
+        t140->encoder.timer_idle.timeout = T140_WAIT_FOR_IDLE;
+        t140->encoder.timer_buffering.timeout = T140_WAIT_FOR_BUFFERING;
+        t140->encoder.timer_idle.id = tsk_timer_manager_schedule(t140->h_timer, t140->encoder.timer_idle.timeout, _tdav_session_t140_timer_cb, t140);
+        TSK_DEBUG_INFO("T.140 - Start IDLE timer");
+    }
+
+    return ret;
+}
+
+/* Pauses the session. No-op for T.140. Always returns 0. */
+static int tdav_session_t140_pause(tmedia_session_t* self)
+{
+    return 0;
+}
+
+/* Stops the session: halts the timer manager and the base layer, releases
+ * both codec references and clears the started flag.
+ * @retval the result of tdav_session_av_stop() (the timer-stop result is overwritten). */
+static int tdav_session_t140_stop(tmedia_session_t* self)
+{
+    int ret;
+    ret = tsk_timer_manager_stop(TDAV_SESSION_T140(self)->h_timer);
+    ret = tdav_session_av_stop(TDAV_SESSION_AV(self));
+    TSK_OBJECT_SAFE_FREE(TDAV_SESSION_T140(self)->encoder.codec);
+    TSK_OBJECT_SAFE_FREE(TDAV_SESSION_T140(self)->decoder.codec);
+    TDAV_SESSION_T140(self)->started = tsk_false;
+    return ret;
+}
+
+/* Builds (or returns the cached) local SDP "m=" line. When the base layer
+ * reports that the offer changed, the encoder codec is dropped so that the
+ * next start() re-selects it against the new negotiation.
+ * @retval the local media header, or tsk_null on failure. */
+static const tsdp_header_M_t* tdav_session_t140_get_lo(tmedia_session_t* self)
+{
+    tsk_bool_t updated = tsk_false;
+    const tsdp_header_M_t* ret;
+    tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+
+    if(!(ret = tdav_session_av_get_lo(base, &updated))){
+        TSK_DEBUG_ERROR("tdav_session_av_get_lo(t140) failed");
+        return tsk_null;
+    }
+
+    if(updated){
+        tsk_safeobj_lock(base);
+        TSK_OBJECT_SAFE_FREE(TDAV_SESSION_T140(self)->encoder.codec);
+        tsk_safeobj_unlock(base);
+    }
+
+    return ret;
+}
+
+/* Applies the remote SDP "m=" line. As in get_lo(), a changed negotiation
+ * invalidates the cached encoder codec.
+ * @retval 0 on success, negative on error from the base layer. */
+static int tdav_session_t140_set_ro(tmedia_session_t* self, const tsdp_header_M_t* ro)
+{
+    int ret;
+    tsk_bool_t updated = tsk_false;
+    tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+    if((ret = tdav_session_av_set_ro(base, ro, &updated))){
+        TSK_DEBUG_ERROR("tdav_session_av_set_ro(t140) failed");
+        return ret;
+    }
+
+    if(updated){
+        tsk_safeobj_lock(base);
+        TSK_OBJECT_SAFE_FREE(TDAV_SESSION_T140(self)->encoder.codec);
+        tsk_safeobj_unlock(base);
+    }
+
+    return ret;
+}
+
+/* Registers the application callback that receives decoded T.140 data; the
+ * registration is stored on the session and forwarded to the consumer.
+ * @retval 0 on success, -1 on bad args, -2 when no consumer is attached. */
+static int tdav_session_t140_set_ondata_cbfn(tmedia_session_t* self, const void* context, tmedia_session_t140_ondata_cb_f func)
+{
+    tdav_session_t140_t* t140 = (tdav_session_t140_t*)self;
+    tdav_session_av_t* base = (tdav_session_av_t*)self;
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    t140->cb_ondata.context = context;
+    t140->cb_ondata.func = func;
+
+    if(base->consumer){
+        return tdav_consumer_t140_set_ondata_cbfn(TDAV_CONSUMER_T140(base->consumer), context, func);
+    }
+    return -2;
+}
+
+/* Sends T.140 data (UTF-8 text or a command) by forwarding it to the
+ * attached producer, which pushes it back through the encoder callback.
+ * @retval 0 on success, -1 on bad args, -2 when no producer is attached. */
+static int tdav_session_t140_send_data(tmedia_session_t* self, enum tmedia_t140_data_type_e data_type, const void* data_ptr, unsigned data_size)
+{
+    tdav_session_av_t* base = (tdav_session_av_t*)self;
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if(base->producer){
+        return tdav_producer_send_data(TDAV_PRODUCER_T140(base->producer), data_type, data_ptr, data_size);
+    }
+    return -2;
+}
+
+// RTP/RTCP callback (From the network to the consumer).
+// RED packets go through _tdav_session_t140_recv_red(); plain T.140 packets
+// are decoded with the matching negotiated codec and then parsed by
+// _tdav_session_t140_recv_raw().
+static int _tdav_session_t140_rtp_cb(const void* callback_data, const struct trtp_rtp_packet_s* packet)
+{
+    tdav_session_t140_t* t140 = (tdav_session_t140_t*)callback_data;
+    tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+
+    if(!t140 || !packet || !packet->header){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    if(!t140->started || !base->consumer){
+        TSK_DEBUG_ERROR("Not ready yet");
+        return -1;
+    }
+
+    if(packet->header->payload_type == base->red.payload_type){
+        //static void* __red_buffer_ptr = tsk_null; // Never used
+        //static tsk_size_t __red_buffer_size = 0; // Never used
+        if(!base->red.codec){
+            TSK_DEBUG_ERROR("No RED codec could be found");
+            return -2;
+        }
+        // Decode RED data
+        /* base->red.codec->plugin->decode(
+            base->red.codec,
+            (packet->payload.data ? packet->payload.data : packet->payload.data_const), packet->payload.size,
+            &__red_buffer_ptr, &__red_buffer_size,
+            packet->header
+            );
+        return 0;
+        */
+        _tdav_session_t140_recv_red(t140, packet);
+    }
+    else{
+        tsk_size_t out_size;
+        // Find the codec to use to decode the RTP payload
+        if(!t140->decoder.codec || t140->decoder.payload_type != packet->header->payload_type){
+            tsk_istr_t format;
+            TSK_OBJECT_SAFE_FREE(t140->decoder.codec);
+            tsk_itoa(packet->header->payload_type, &format);
+            if(!(t140->decoder.codec = tmedia_codec_find_by_format(TMEDIA_SESSION(t140)->neg_codecs, format)) || !t140->decoder.codec->plugin || !t140->decoder.codec->plugin->decode){
+                TSK_DEBUG_ERROR("%s is not a valid payload for this session", format);
+                return -2;
+            }
+            t140->decoder.payload_type = packet->header->payload_type;
+        }
+
+        // Open codec if not already done
+        if(!TMEDIA_CODEC(t140->decoder.codec)->opened){
+            int ret;
+            tsk_safeobj_lock(base);
+            if((ret = tmedia_codec_open(t140->decoder.codec))){
+                tsk_safeobj_unlock(base);
+                TSK_OBJECT_SAFE_FREE(t140->decoder.codec);
+                TSK_DEBUG_ERROR("Failed to open [%s] codec", t140->decoder.codec->plugin->desc);
+                return ret;
+            }
+            tsk_safeobj_unlock(base);
+        }
+        // Decode data
+        out_size = t140->decoder.codec->plugin->decode(t140->decoder.codec, packet->payload.data, packet->payload.size, &t140->decoder.buffer, &t140->decoder.buffer_size, packet->header);
+        if(out_size){
+            _tdav_session_t140_recv_raw(t140, t140->decoder.buffer, out_size);
+        }
+    }
+
+    return 0;
+}
+
+// Producer callback (From the producer to the network). The data is not sent
+// immediately: it is accumulated in the outgoing buffer and the buffering
+// timer is scheduled; the timer callback later encapsulates and transmits it.
+static int _tdav_session_t140_producer_enc_cb(const void* callback_data, const void* buffer, tsk_size_t size)
+{
+    int ret = 0;
+
+    tdav_session_t140_t* t140 = (tdav_session_t140_t*)callback_data;
+    tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+
+    if(!t140){
+        TSK_DEBUG_ERROR("Null session");
+        return 0;
+    }
+
+    if(t140->started && base->rtp_manager && base->rtp_manager->is_started && t140->encoder.codec){
+        /* encode */
+        //tsk_size_t out_size = 0;
+
+        if(!base->rtp_manager->is_started){
+            TSK_DEBUG_ERROR("Not started");
+            return 0;
+        }
+
+        // Open codec if not already done
+        if(!t140->encoder.codec->opened){
+            tsk_safeobj_lock(base);
+            if((ret = tmedia_codec_open(t140->encoder.codec))){
+                tsk_safeobj_unlock(base);
+                TSK_DEBUG_ERROR("Failed to open [%s] codec", t140->encoder.codec->plugin->desc);
+                return -4;
+            }
+            tsk_safeobj_unlock(base);
+        }
+
+        // queue the data; it will be sent when the buffering timer fires
+        ret = _tdav_session_t140_save_outgoing_data(t140, buffer, size);
+
+        // start buffering timer if not already done
+        if(!TSK_TIMER_ID_IS_VALID(t140->encoder.timer_buffering.id)){
+            TSK_DEBUG_INFO("T.140 - Schedule buffering timer");
+            t140->encoder.timer_buffering.id = tsk_timer_manager_schedule(t140->h_timer, t140->encoder.timer_buffering.timeout, _tdav_session_t140_timer_cb, t140);
+        }
+
+        //if((t140->encoder.codec = tsk_object_ref(t140->encoder.codec))){ /* Thread safeness (SIP reINVITE or UPDATE could update the encoder) */
+        //out_size = t140->encoder.codec->plugin->encode(t140->encoder.codec, buffer, size, &t140->encoder.buffer, &t140->encoder.buffer_size);
+        //if(out_size){
+        //	trtp_manager_send_rtp(base->rtp_manager, t140->encoder.buffer, out_size, TMEDIA_CODEC_PCM_FRAME_SIZE(t140->encoder.codec), tsk_false/*Marker*/, tsk_true/*lastPacket*/);
+        //}
+        //tsk_object_unref(t140->encoder.codec);
+        //}
+    }
+    return ret;
+}
+
+// save data before sending (when timer fires): appends the bytes to the
+// encoder accumulation buffer, growing it as needed. buffer_idx tracks the
+// number of pending bytes; it is reset by _tdav_session_t140_encap_red().
+static int _tdav_session_t140_save_outgoing_data(tdav_session_t140_t* self, const void* data_ptr, tsk_size_t data_size)
+{
+    tdav_session_av_t* base = (tdav_session_av_t*)self;
+
+    if(!self || !data_ptr || !data_size){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(base);
+
+    T140_RESIZE_BUFFER(self->encoder.buffer, self->encoder.buffer_size, (self->encoder.buffer_idx + data_size));
+    if(self->encoder.buffer && self->encoder.buffer_size){ // skip silently on allocation failure
+        memcpy(&((uint8_t*)self->encoder.buffer)[self->encoder.buffer_idx], data_ptr, data_size);
+        self->encoder.buffer_idx += data_size;
+    }
+
+    tsk_safeobj_unlock(base);
+
+    return 0;
+}
+
+// encapsulated data for sending: builds an RFC 2198 RED payload containing
+// the redundant (previously sent) blocks followed by the current block, and
+// registers the current block for future retransmission.
+// Returns the total payload size (0 on error); *last_data is set when the
+// payload carries only empty headers (idle), in which case the queue is
+// cleared and the IDLE timer re-armed.
+static tsk_size_t _tdav_session_t140_encap_red(tdav_session_t140_t* self, void** out_data_ptr, tsk_size_t* out_data_size, tsk_bool_t *last_data)
+{
+    tdav_session_av_t* base = (tdav_session_av_t*)self;
+    t140_block_t* block, *block_curr = tsk_null;
+    t140_block_t* red_blocks[T140_BLOCK_MAX_TRANSMIT_COUNT] = { tsk_null };
+    tsk_size_t red_blocks_idx, idx, red_blocks_size, red_blocks_hdrs_size;
+    tsk_list_item_t* item;
+    uint16_t rtp_seq_num;
+    uint32_t* pout_data_ptr32;
+    uint8_t* pout_data_ptr8;
+    static const uint32_t max_transmit_count = T140_BLOCK_MAX_TRANSMIT_COUNT;
+
+    if(!self || !base->rtp_manager || !out_data_ptr || !out_data_size || !last_data){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return 0;
+    }
+
+    *last_data = tsk_false;
+
+    tsk_safeobj_lock(base);
+    tsk_list_lock(self->encoder.blocks);
+
+    // cancel IDLE timer (we are about to send real data)
+    if(TSK_TIMER_ID_IS_VALID(self->encoder.timer_idle.id)){
+        tsk_timer_manager_cancel(self->h_timer, self->encoder.timer_idle.id);
+        self->encoder.timer_idle.id = TSK_INVALID_TIMER_ID;
+        TSK_DEBUG_INFO("T.140 - Cancel IDLE timer");
+    }
+
+    rtp_seq_num = base->rtp_manager->rtp.seq_num;
+
+    // wrap the pending outgoing bytes into the "current" (primary) block
+    if(self->encoder.buffer && self->encoder.buffer_idx){
+        block_curr = _tdav_session_t140_block_create(self->encoder.payload_type, rtp_seq_num, 0, tsk_false, self->encoder.buffer, self->encoder.buffer_idx);
+        self->encoder.buffer_idx = 0;
+    }
+
+    // build RED blocks: pick queued blocks whose sequence distance from the
+    // current packet fits in the redundancy window, newest in the last slot
+    red_blocks_idx = 0;
+    tsk_list_foreach(item, self->encoder.blocks){
+        if(!(block = item->data) || (block->transmit_count + 1) >= T140_BLOCK_MAX_TRANSMIT_COUNT){
+            continue;
+        }
+
+        // NOTE(review): seq_num comparisons do not handle 16-bit wraparound — confirm
+        if(block->seq_num <= rtp_seq_num && (idx = (rtp_seq_num - block->seq_num)) < T140_BLOCK_MAX_TRANSMIT_COUNT){
+            ++block->transmit_count; // increment transmission count
+            red_blocks[(T140_BLOCK_MAX_TRANSMIT_COUNT - idx - 1)] = block; // backwards counting
+            ++red_blocks_idx;
+        }
+
+        // NOTE(review): breaks after MAX-1 picks; presumably the last slot is
+        // reserved for the current block — confirm
+        if((red_blocks_idx + 1) >= T140_BLOCK_MAX_TRANSMIT_COUNT){
+            break;
+        }
+    }
+
+    // compute RED blocks size: (MAX-1) 4-byte redundancy headers + 1 final 1-byte header
+    red_blocks_hdrs_size = ((T140_BLOCK_MAX_TRANSMIT_COUNT - 1) << 2) + 1;
+    red_blocks_size = red_blocks_hdrs_size;
+    for(idx = 0; idx < T140_BLOCK_MAX_TRANSMIT_COUNT; ++idx){
+        if(red_blocks[idx] && red_blocks[idx]->data.ptr && red_blocks[idx]->data.size){
+            red_blocks_size += red_blocks[idx]->data.size;
+        }
+    }
+    if(block_curr && block_curr->data.ptr && block_curr->data.size){
+        red_blocks_size += block_curr->data.size;
+    }
+
+    // resize output buffer
+    T140_RESIZE_BUFFER(*out_data_ptr, *out_data_size, red_blocks_size);
+    if(!*out_data_ptr) {
+        red_blocks_size = 0;
+        goto bail;
+    }
+
+    // compute RED headers
+    pout_data_ptr32 = (uint32_t*)*out_data_ptr;
+    for(idx = 0; idx < T140_BLOCK_MAX_TRANSMIT_COUNT; ++idx){
+        if((idx + 1) == T140_BLOCK_MAX_TRANSMIT_COUNT){ // last header ?
+            *((uint8_t*)&pout_data_ptr32[idx]) = (self->encoder.payload_type & 0x7F); // F=0 | PT
+        }
+        else{
+            // NOTE(review): the RED "block length" field is 10 bits but the
+            // size is masked with 0xFF here, truncating blocks of 256..1023
+            // bytes — confirm against RFC 2198
+            pout_data_ptr32[idx] = tnet_htonl(
+                ( (0x80 | (self->encoder.payload_type & 0x7F)) << 24 ) | // F=1 | PT
+                ( 0x0000 << 8 ) | // timestamp offset
+                ( red_blocks[idx] ? (red_blocks[idx]->data.size & 0xFF) : 0x00 ) // block length
+                );
+        }
+    }
+
+    // compute RED data: redundant payloads first, then the current block
+    pout_data_ptr8 = &((uint8_t*)*out_data_ptr)[red_blocks_hdrs_size];
+    for(idx = 0; idx < T140_BLOCK_MAX_TRANSMIT_COUNT; ++idx){
+        if(red_blocks[idx] && red_blocks[idx]->data.ptr && red_blocks[idx]->data.size){
+            memcpy(pout_data_ptr8, red_blocks[idx]->data.ptr, red_blocks[idx]->data.size);
+            pout_data_ptr8 += red_blocks[idx]->data.size;
+        }
+    }
+    if(block_curr && block_curr->data.ptr && block_curr->data.size){
+        memcpy(pout_data_ptr8, block_curr->data.ptr, block_curr->data.size);
+        pout_data_ptr8 += block_curr->data.size;
+    }
+
+    if((*last_data = (red_blocks_hdrs_size == red_blocks_size))){
+        // no data available -> clear items, send empty block, schedule idle timer, cancel buffering timer
+        if(TSK_TIMER_ID_IS_VALID(self->encoder.timer_buffering.id)){
+            tsk_timer_manager_cancel(self->h_timer, self->encoder.timer_buffering.id);
+            self->encoder.timer_buffering.id = TSK_INVALID_TIMER_ID;
+            TSK_DEBUG_INFO("T.140 - Cancel buffering timer");
+        }
+        self->encoder.timer_idle.id = tsk_timer_manager_schedule(self->h_timer, self->encoder.timer_idle.timeout, _tdav_session_t140_timer_cb, self);
+        TSK_DEBUG_INFO("T.140 - Schedule IDLE timer");
+        tsk_list_clear_items(self->encoder.blocks);
+    }
+    else{
+        // clean up all blocks retransmitted more than max times
+        while(tsk_list_remove_item_by_pred(self->encoder.blocks, _tdav_session_t140_pred_block_with_transmit_count_more_than_or_equal, &max_transmit_count)) ;
+    }
+
+bail:
+    // queue the current block for future retransmission (list takes its own reference)
+    if(block_curr){
+        _tdav_session_t140_blocks_add(self->encoder.blocks, &self->encoder.blocks_count, &block_curr, INT_MAX);
+        TSK_OBJECT_SAFE_FREE(block_curr);
+    }
+    tsk_list_unlock(self->encoder.blocks);
+    tsk_safeobj_unlock(base);
+
+    return red_blocks_size;
+}
+
+// decode data from RTP channel: parses an RFC 2198 RED payload, detects
+// sequence-number gaps, stores the primary block and any redundant block
+// that fills a detected loss into the decoder queue, then triggers
+// decoding/delivery of the queued blocks.
+static int _tdav_session_t140_recv_red(tdav_session_t140_t* self, const struct trtp_rtp_packet_s* packet)
+{
+    const uint8_t* pdata;
+    const uint8_t* red_hdr;
+    tsk_size_t red_hdrs_count, i, in_size;
+    tsk_bool_t last, block_add;
+    uint8_t F, pt;
+    int32_t pkt_loss_start = -1, pkt_loss_count, seq_num;
+    uint16_t timestamp_offset, block_length;
+
+    if(!self || !packet || !packet->header || !(packet->payload.data || packet->payload.data_const) || (packet->payload.size < TDAV_CODEC_RED_MIN_PKT_SIZE)){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    pdata = red_hdr = packet->payload.data ? packet->payload.data : packet->payload.data_const;
+    in_size = packet->payload.size;
+
+    // count the RED headers: the F bit marks "more headers follow";
+    // a clear F bit marks the final 1-byte header
+    if((F = (pdata[0] & 0x80)) == 0){
+        i = 1;
+        red_hdrs_count = 1;
+    }
+    else{
+        for(i = 0, red_hdrs_count = 0; i < in_size; i+= 4, ++red_hdrs_count){
+            if((F = (pdata[i] & 0x80)) == 0){ ++i; ++red_hdrs_count; break; }
+        }
+    }
+
+    if(i == in_size){
+        TSK_DEBUG_INFO("T.140 - Empty block");
+        return 0;
+    }
+
+    pdata += i;
+    in_size -= i;
+    // detect a gap between the last delivered packet and this one
+    // NOTE(review): no handling of 16-bit sequence-number wraparound — confirm
+    if(self->decoder.last_seq_num != -1 && (self->decoder.last_seq_num + 1) != packet->header->seq_num){
+        pkt_loss_start = (self->decoder.last_seq_num + 1);
+        pkt_loss_count = (packet->header->seq_num - pkt_loss_start);
+        TSK_DEBUG_INFO("T.140 - Packet loss[curr=%d, loss_start=%d, loss_count=%d]", packet->header->seq_num, pkt_loss_start, pkt_loss_count);
+    }
+
+    for(i = 0, block_add = tsk_false; i < red_hdrs_count; ++i, block_add = tsk_false){
+        last = (i == (red_hdrs_count - 1));
+        F = (red_hdr[0] & 0x80);
+        pt = (red_hdr[0] & 0x7F);
+        if(last || !F){
+            /*
+             0 1 2 3 4 5 6 7
+             +-+-+-+-+-+-+-+-+
+             |0|   Block PT  |
+             +-+-+-+-+-+-+-+-+
+            */
+            // primary block: all remaining bytes, carried in this packet
+            block_length = (uint16_t)in_size;
+            seq_num = packet->header->seq_num;
+            timestamp_offset = 0;
+            block_add = tsk_true;
+        }
+        else{
+            /*
+             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+             |1| block PT=7  |  timestamp offset         |   block length    |
+             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            */
+            seq_num = (int32_t)(packet->header->seq_num - (red_hdrs_count - 1 - i)); // inferred by counting backwards
+            // NOTE(review): "seq_num <= pkt_loss_start && pkt_loss_start >= seq_num"
+            // is the same test written twice; presumably the intent was a range
+            // check against pkt_loss_start..pkt_loss_start+pkt_loss_count-1 — confirm
+            block_add = (pkt_loss_start != -1 && (seq_num <= pkt_loss_start && pkt_loss_start >= seq_num));
+            timestamp_offset = ((red_hdr[1] << 8) | red_hdr[2]) >> 2;
+            block_length = ((red_hdr[2] & 0x03) << 8) | red_hdr[3];
+            /* block_add &= (block_length > 0); */ // take empty block to avoid scheduling pkt loss timer
+            if(pkt_loss_start != -1){
+                TSK_DEBUG_INFO("retransmit seq_num=%d, takken=%d", seq_num, block_add);
+            }
+            if(block_length > in_size){
+                TSK_DEBUG_ERROR("Invalid 'block length'");
+                break;
+            }
+            red_hdr += 4;
+        }
+
+        // queue the block unless this sequence number was already received
+        if(block_add && !_tdav_session_t140_blocks_has_seqnum(self->decoder.blocks, seq_num)){
+            t140_block_t* block = _tdav_session_t140_block_create(
+                pt,
+                seq_num,
+                (packet->header->timestamp + timestamp_offset),
+                tsk_false,
+                pdata,
+                block_length);
+            _tdav_session_t140_blocks_add(self->decoder.blocks, &self->decoder.blocks_count, &block, T140_BLOCKS_MAX_COUNT);
+            TSK_OBJECT_SAFE_FREE(block);
+        }
+
+
+        pdata += block_length;
+        in_size -= block_length;
+    }
+
+    self->decoder.last_seq_num = packet->header->seq_num;
+
+    return _tdav_session_t140_decode_and_deliver_blocks(self, tsk_false);
+}
+
+/* Scans decoded T.140 bytes, splitting them into plain UTF-8 runs and control
+ * commands (BACKSPACE, ESC, CR/LF, BELL, SOS, ST, graphic start/end, loss
+ * marks, ZWNBSP) and hands each piece to _tdav_session_t140_consume_data().
+ * i is the scan position, j the start of the pending UTF-8 run.
+ * @retval 0 on success, -1 on bad args. */
+static int _tdav_session_t140_recv_raw(tdav_session_t140_t* self, const void* data_ptr, tsk_size_t data_size)
+{
+    tsk_size_t i, j, cmd_extra_len, utf8_start, utf8_end;
+    tsk_bool_t is_cmd;
+    enum tmedia_t140_data_type_e cmd_type;
+    const uint8_t* _data_ptr = (const uint8_t*)data_ptr;
+
+    if(!self || !data_ptr || !data_size){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    // NOTE(review): single-byte cases (e.g. SOS=0x98, ST=0x9C) match raw byte
+    // values that can also appear as UTF-8 continuation bytes inside
+    // multi-byte characters — confirm the intended encoding of commands
+    for(i = 0, j = 0, is_cmd = tsk_false, cmd_extra_len = 0; i < data_size; ++i, is_cmd = tsk_false, cmd_extra_len = 0){
+        switch(_data_ptr[i]){
+            case T140_BACKSPACE:
+                {
+                    TSK_DEBUG_INFO("T.140 - BACKSPACE");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_backspace;
+                    break;
+                }
+            case T140_ESC:
+                {
+                    TSK_DEBUG_INFO("T.140 - ESC");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_esc;
+                    break;
+                }
+            case T140_LF:
+                {
+                    TSK_DEBUG_INFO("T.140 - LF");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_lf;
+                    break;
+                }
+            case T140_CR:
+                {
+                    // CR followed by LF is reported as a single CRLF command
+                    if((i + 1) < data_size && _data_ptr[i + 1] == T140_LF){
+                        // CR_LF
+                        TSK_DEBUG_INFO("T.140 - CRLF");
+                        cmd_extra_len = 1;
+                        is_cmd = tsk_true;
+                        cmd_type = tmedia_t140_data_type_cr_lf;
+                    }
+                    else{
+                        // CR
+                        TSK_DEBUG_INFO("T.140 - CR");
+                        is_cmd = tsk_true;
+                        cmd_type = tmedia_t140_data_type_cr;
+                    }
+                    break;
+                }
+            case T140_BELL:
+                {
+                    TSK_DEBUG_INFO("T.140 - BELL");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_bell;
+                    break;
+                }
+            case T140_SOS:
+                {
+                    TSK_DEBUG_INFO("T.140 - SOS");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_sos;
+                    break;
+                }
+            case T140_STRING_TERM:
+                {
+                    TSK_DEBUG_INFO("T.140 - ST");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_string_term;
+                    break;
+                }
+            case (T140_LOSS_CHAR_CHAR >> 8):
+                {
+                    // two-byte loss marker 0xFFFD
+                    if((i + 1) < data_size && _data_ptr[i + 1] == (T140_LOSS_CHAR_CHAR & 0xFF)){
+                        TSK_DEBUG_INFO("T.140 - LOSS_CHAR_CHAR");
+                        cmd_extra_len = 1;
+                        is_cmd = tsk_true;
+                        cmd_type = tmedia_t140_data_type_loss_char_char;
+                    }
+                    break;
+                }
+            case T140_GRAPHIC_START:
+                {
+                    TSK_DEBUG_INFO("T.140 - GRAPHIC_START");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_graphic_start;
+                    break;
+                }
+            case T140_GRAPHIC_END:
+                {
+                    TSK_DEBUG_INFO("T.140 - GRAPHIC_END");
+                    is_cmd = tsk_true;
+                    cmd_type = tmedia_t140_data_type_graphic_end;
+                    break;
+                }
+            // case (T140_ZERO_WIDTH_NO_BREAK_SPACE >> 16):
+            // case (T140_LOSS_UTF8 >> 16):
+            case 0xEF: // shared lead byte of the two 3-byte UTF-8 markers below
+                {
+                    if((i + 2) < data_size && _data_ptr[i + 1] == ((T140_ZERO_WIDTH_NO_BREAK_SPACE >> 8) & 0xFF) && _data_ptr[i + 2] == (T140_ZERO_WIDTH_NO_BREAK_SPACE & 0xFF)){
+                        TSK_DEBUG_INFO("T.140 - ZERO_WIDTH_NO_BREAK_SPACE");
+                        cmd_extra_len = 2;
+                        is_cmd = tsk_true;
+                        cmd_type = tmedia_t140_data_type_zero_width_no_break_space;
+                    }
+                    else if((i + 2) < data_size && _data_ptr[i + 1] == ((T140_LOSS_UTF8 >> 8) & 0xFF) && _data_ptr[i + 2] == (T140_LOSS_UTF8 & 0xFF)){
+                        TSK_DEBUG_INFO("T.140 - LOSS_UTF8");
+                        cmd_extra_len = 2;
+                        is_cmd = tsk_true;
+                        cmd_type = tmedia_t140_data_type_loss_utf8;
+                    }
+                    break;
+                }
+        }//switch
+
+        // flush on command boundary or at end of input
+        if(is_cmd || ((i + 1) >= data_size)){
+            utf8_start = j;
+            utf8_end = utf8_start + ( (j < i || (j == i && !is_cmd)) ? (i - j + 1) : 0 );
+            // NOTE(review): resynchronizing with "j = i" (not i + 1) appears to
+            // let the command byte itself be counted in the next UTF-8 run —
+            // confirm against the intended segmentation
+            if(utf8_start < utf8_end){ // there is utf8 data to deliver
+                _tdav_session_t140_consume_data(self, tmedia_t140_data_type_utf8, &_data_ptr[utf8_start], (utf8_end - utf8_start));
+            }
+            if(is_cmd){ // there is cmd data to deliver
+                i += cmd_extra_len;
+                _tdav_session_t140_consume_data(self, cmd_type, tsk_null, 0);
+            }
+            j = i;
+        }
+    }// for
+
+    return 0;
+}
+
+/* Delivers the queued decoder blocks to the consumer. If a sequence gap is
+ * detected (and we were not already called back by the packet-loss timer),
+ * delivery is postponed by scheduling that timer to give late/redundant
+ * packets a chance to fill the gap; otherwise each pending block is parsed
+ * via _tdav_session_t140_recv_raw() and marked delivered.
+ * @retval 0 on success, -1 on bad args. */
+static int _tdav_session_t140_decode_and_deliver_blocks(tdav_session_t140_t* self, tsk_bool_t called_from_timer)
+{
+    tdav_session_av_t* base = (tdav_session_av_t*)self;
+    if(!self){
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(base);
+    tsk_list_lock(self->decoder.blocks);
+
+    if(!called_from_timer && _tdav_session_t140_blocks_has_gap(self->decoder.blocks)){
+        // wait longer when RED is on: redundancy may still repair the gap
+        self->decoder.timer_pkt_loss.timeout = (base->red.codec ? T140_WAIT_FOR_MISSING_PKT_RED_ON : T140_WAIT_FOR_MISSING_PKT_RED_OFF);
+        self->decoder.timer_pkt_loss.id = tsk_timer_manager_schedule(self->h_timer, self->decoder.timer_pkt_loss.timeout, _tdav_session_t140_timer_cb, self);
+    }
+    else{
+        const tsk_list_item_t* item;
+        const t140_block_t* block;
+
+        tsk_list_foreach(item, self->decoder.blocks){
+            if(!(block = (const t140_block_t*)item->data) || block->delivered || block->too_late){
+                continue;
+            }
+            if(block->data.ptr && block->data.size){
+                _tdav_session_t140_recv_raw(self, block->data.ptr, block->data.size);
+            }
+            ((t140_block_t*)block)->delivered = tsk_true;
+        }
+    }
+
+    tsk_list_unlock(self->decoder.blocks);
+    tsk_safeobj_unlock(base);
+
+    return 0;
+}
+
static int _tdav_session_t140_consume_data(tdav_session_t140_t* self, enum tmedia_t140_data_type_e data_type, const void *data_ptr, tsk_size_t data_size)
{
    // Forwards decoded T.140 payload (or a zero-length control command) to the
    // attached consumer. Layout handed to the consumer: [int32 data_type][payload].
    // NOTE(review): the 4-byte type header is written in host byte order — fine as
    // long as the consumer lives in the same process; confirm no wire use.
    tdav_session_av_t* base = (tdav_session_av_t*)self;
    if(!self || !base->consumer){ // '!self' short-circuits, so 'base->consumer' never dereferences null
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    T140_RESIZE_BUFFER(self->decoder.consumer_buffer, self->decoder.consumer_buffer_size, (data_size + 4));
    if(self->decoder.consumer_buffer){ // resize may fail: deliver only when the buffer is valid
        *((int32_t*)self->decoder.consumer_buffer) = data_type;
        if(data_ptr && data_size){ // commands (e.g. BELL) carry no payload
            memcpy(&((uint8_t*)self->decoder.consumer_buffer)[4], data_ptr, data_size);
        }
        tmedia_consumer_consume(base->consumer, self->decoder.consumer_buffer, (data_size + 4), tsk_null);
    }
    return 0;
}
+
+static int _tdav_session_t140_pred_block_with_transmit_count_more_than_or_equal(const tsk_list_item_t *item, const void *transmit_count)
+{
+ if(item && item->data){
+ t140_block_t *block = item->data;
+ return (block->transmit_count >= *((uint32_t*)transmit_count)) ? 0 : -1;
+ }
+ return -1;
+}
+
static int _tdav_session_t140_timer_cb(const void* arg, tsk_timer_id_t timer_id)
{
    // Single callback shared by the session's three timers:
    //  - decoder.timer_pkt_loss : retry delivery of buffered blocks after waiting for missing packets
    //  - encoder.timer_idle     : keep-alive, periodically sends ZERO_WIDTH_NO_BREAK_SPACE while idle
    //  - encoder.timer_buffering: flushes buffered outgoing text (RED-encapsulated when enabled)
    tdav_session_t140_t* t140 = (tdav_session_t140_t*)arg;
    tdav_session_av_t* base = (tdav_session_av_t*)arg;
    static const tsk_bool_t called_from_timer = tsk_true;
    int ret = 0;

    tsk_safeobj_lock(base);

    if(timer_id == t140->decoder.timer_pkt_loss.id){
        ret = _tdav_session_t140_decode_and_deliver_blocks(t140, called_from_timer);
    }
    else if(timer_id == t140->encoder.timer_idle.id){
        // send keep-alive then re-arm the idle timer (it is one-shot)
        ret = tdav_session_t140_send_data_2(TMEDIA_SESSION(t140), tmedia_t140_data_type_zero_width_no_break_space);
        t140->encoder.timer_idle.id = tsk_timer_manager_schedule(t140->h_timer, t140->encoder.timer_idle.timeout, _tdav_session_t140_timer_cb, t140);
    }
    else if(timer_id == t140->encoder.timer_buffering.id){
        void* buffer_ptr;
        tsk_size_t buffer_size;
        tsk_bool_t was_idle = TSK_TIMER_ID_IS_VALID(t140->encoder.timer_idle.id);
        tsk_bool_t last_data = tsk_false;
        if(base->red.codec){
            // encapsulated red data and schedule timers if required
            buffer_size = _tdav_session_t140_encap_red(t140, &t140->encoder.red_buffer, &t140->encoder.red_buffer_size, &last_data);
            buffer_ptr = t140->encoder.red_buffer;
        }
        else{
            // send buffered data
            buffer_ptr = t140->encoder.buffer;
            buffer_size = t140->encoder.buffer_idx;
            last_data = tsk_true;
            t140->encoder.buffer_idx = 0; // buffer consumed: reset write index
        }
        if(buffer_ptr && buffer_size){
            // NOTE(review): 300 looks like the RTP duration/rate argument — confirm against trtp_manager_send_rtp()
            trtp_manager_send_rtp(base->rtp_manager, buffer_ptr, buffer_size, 300, was_idle, last_data);
        }
        // re-arm only while RED still has redundant generations to flush
        t140->encoder.timer_buffering.id = last_data
            ? TSK_INVALID_TIMER_ID
            : tsk_timer_manager_schedule(t140->h_timer, t140->encoder.timer_buffering.timeout, _tdav_session_t140_timer_cb, t140);
    }

    tsk_safeobj_unlock(base);

    return ret;
}
+
+
//=================================================================================================
// T.140 Session Plugin object definition
//
+/* constructor */
+static tsk_object_t* tdav_session_t140_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_t140_t *t140 = self;
+ if(t140){
+ int ret;
+ tdav_session_av_t *base = TDAV_SESSION_AV(self);
+
+ /* init() base */
+ if((ret = tdav_session_av_init(base, tmedia_t140)) != 0){
+ TSK_DEBUG_ERROR("tdav_session_av_init(t140) failed");
+ return tsk_null;
+ }
+
+ /* init() self */
+ t140->encoder.timer_buffering.id = t140->decoder.timer_pkt_loss.id = TSK_INVALID_TIMER_ID;
+ if(base->producer){
+ tmedia_producer_set_enc_callback(base->producer, _tdav_session_t140_producer_enc_cb, t140);
+ }
+ if(base->consumer){
+ }
+ if(!(t140->encoder.blocks = tsk_list_create())){
+ TSK_DEBUG_ERROR("Failed to create encoder blocks list");
+ return tsk_null;
+ }
+ if(!(t140->decoder.blocks = tsk_list_create())){
+ TSK_DEBUG_ERROR("Failed to create decoder blocks list");
+ return tsk_null;
+ }
+ if(!(t140->h_timer = tsk_timer_manager_create())){
+ TSK_DEBUG_ERROR("Failed to create timer manager");
+ return tsk_null;
+ }
+ }
+ return self;
+}
+/* destructor */
static tsk_object_t* tdav_session_t140_dtor(tsk_object_t * self)
{
    // Destructor: stops the session (cancels timers, halts RTP), then releases
    // everything the constructor and the encode/decode paths allocated.
    tdav_session_t140_t *t140 = self;
    if(t140){
        tdav_session_t140_stop((tmedia_session_t*)t140); // must run before freeing members used by callbacks
        TSK_OBJECT_SAFE_FREE(t140->h_timer);
        // Do it in this order (deinit self first)
        TSK_OBJECT_SAFE_FREE(t140->encoder.codec);
        TSK_FREE(t140->encoder.buffer);
        TSK_FREE(t140->encoder.red_buffer);
        TSK_OBJECT_SAFE_FREE(t140->encoder.blocks);
        TSK_OBJECT_SAFE_FREE(t140->decoder.codec);
        TSK_FREE(t140->decoder.buffer);
        TSK_FREE(t140->decoder.consumer_buffer);
        TSK_OBJECT_SAFE_FREE(t140->decoder.blocks);

        /* deinit base */
        tdav_session_av_deinit(TDAV_SESSION_AV(self));
    }

    return self;
}
+/* object definition */
static const tsk_object_def_t tdav_session_t140_def_s =
{
    sizeof(tdav_session_t140_t), // instance size, used by tsk_object_new()
    tdav_session_t140_ctor,      // constructor
    tdav_session_t140_dtor,      // destructor
    tmedia_session_cmp,          // comparator shared by all media sessions
};
+/* plugin definition*/
static const tmedia_session_plugin_def_t tdav_session_t140_plugin_def_s =
{
    &tdav_session_t140_def_s,

    tmedia_t140,
    "text", // SDP media name ("m=text ...")

    tdav_session_t140_set,
    tdav_session_t140_get,
    tdav_session_t140_prepare,
    tdav_session_t140_start,
    tdav_session_t140_pause,
    tdav_session_t140_stop,

    /* Audio part (unused by the T.140 session, left zeroed) */
    {0},

    tdav_session_t140_get_lo,
    tdav_session_t140_set_ro,

    /* T.140 part */
    {
        tdav_session_t140_set_ondata_cbfn,
        tdav_session_t140_send_data
    }
};
const tmedia_session_plugin_def_t *tdav_session_t140_plugin_def_t = &tdav_session_t140_plugin_def_s;
+
+//=================================================================================================
+// T140Block object definition
+//
+static tsk_object_t* t140_block_ctor(tsk_object_t * self, va_list * app)
+{
+ t140_block_t *block = self;
+ if(block){
+ }
+ return self;
+}
+static tsk_object_t* t140_block_dtor(tsk_object_t * self)
+{
+ t140_block_t *block = self;
+ if(block){
+ TSK_FREE(block->data.ptr);
+ }
+
+ return self;
+}
static int t140_block_cmp(const tsk_object_t *_b1, const tsk_object_t *_b2)
{
    // Orders blocks by RTP sequence number (used by tsk_list_push_ascending_data).
    // NOTE(review): plain subtraction does not handle 16-bit sequence-number
    // wrap-around (65535 -> 0); confirm whether ordering across the wrap matters here.
    const t140_block_t *b1 = _b1;
    const t140_block_t *b2 = _b2;

    if(b1 && b2){
        return (int)(b1->seq_num - b2->seq_num); // <0, 0 or >0 like strcmp
    }
    else if(!b1 && !b2) return 0; // two nulls compare equal
    else return -1;
}
static const tsk_object_def_t t140_block_def_s =
{
    sizeof(t140_block_t), // instance size, used by tsk_object_new()
    t140_block_ctor,      // constructor (no-op)
    t140_block_dtor,      // destructor (frees the payload copy)
    t140_block_cmp,       // comparator (ascending by seq_num)
};
const tsk_object_def_t *t140_block_def_t = &t140_block_def_s;
+
+
+static tsk_bool_t _tdav_session_t140_blocks_has_gap(tdav_session_t140_blocks_L_t* blocks)
+{
+ if(blocks && !TSK_LIST_IS_EMPTY(blocks)){
+ const tsk_list_item_t* item;
+ int32_t last_seq_num = -1;
+ tsk_bool_t has_gap = tsk_false;
+ tsk_list_lock(blocks);
+ tsk_list_foreach(item, blocks){
+ if(last_seq_num > 0 && ((last_seq_num + 1) != T140_BLOCK(item->data)->seq_num)){
+ has_gap = tsk_true;
+ }
+ last_seq_num = T140_BLOCK(item->data)->seq_num;
+ if(has_gap){
+ break;
+ }
+ }
+ tsk_list_unlock(blocks);
+ return has_gap;
+ }
+ return tsk_false;
+}
+
+static tsk_bool_t _tdav_session_t140_blocks_has_seqnum(tdav_session_t140_blocks_L_t* blocks, uint16_t seq_num)
+{
+ if(blocks){
+ const tsk_list_item_t* item;
+ tsk_bool_t has_seqnum = tsk_false;
+ tsk_list_lock(blocks);
+ tsk_list_foreach(item, blocks){
+ if(seq_num == T140_BLOCK(item->data)->seq_num){
+ has_seqnum = tsk_true;
+ break;
+ }
+ }
+ tsk_list_unlock(blocks);
+ return has_seqnum;
+ }
+ return tsk_false;
+}
+
+static int _tdav_session_t140_blocks_add(tdav_session_t140_blocks_L_t* blocks, int64_t* blocks_count, t140_block_t** block, int64_t blocks_count_max)
+{
+ if(blocks && blocks_count && block){
+ tsk_list_lock(blocks);
+ if(tsk_list_push_ascending_data(blocks, (void**)block) == 0){
+ if((*blocks_count = (*blocks_count) + 1) > blocks_count_max){
+ tsk_list_item_t* first_item = tsk_list_pop_first_item(blocks);
+ TSK_OBJECT_SAFE_FREE(first_item);
+ }
+ }
+ tsk_list_unlock(blocks);
+ }
+ return 0;
+}
+
+static t140_block_t* _tdav_session_t140_block_create(uint8_t pt, uint16_t seq_num, uint32_t timestamp, tsk_bool_t delivered, const void* data_ptr, tsk_size_t data_size)
+{
+ void* _data_ptr = tsk_null;
+ t140_block_t* block = tsk_null;
+
+ if(data_ptr && data_size){
+ if(!(_data_ptr = tsk_malloc(data_size))){
+ TSK_DEBUG_ERROR("Failed to alloc data with size = %u", data_size);
+ goto bail;
+ }
+ memcpy(_data_ptr, data_ptr, data_size);
+ }
+ if(!(block = tsk_object_new(t140_block_def_t))){
+ TSK_DEBUG_ERROR("Failed to create new block object");
+ goto bail;
+ }
+ if(_data_ptr){
+ block->data.ptr = _data_ptr, _data_ptr = tsk_null;
+ block->data.size = data_size;
+ }
+ block->pt = pt;
+ block->seq_num = seq_num;
+ block->timestamp = timestamp;
+
+bail:
+ TSK_FREE(_data_ptr);
+ return block;
+} \ No newline at end of file
diff --git a/tinyDAV/src/tdav.c b/tinyDAV/src/tdav.c
new file mode 100644
index 0000000..fedf028
--- /dev/null
+++ b/tinyDAV/src/tdav.c
@@ -0,0 +1,758 @@
+/*
+* Copyright (C) 2010-2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+#include "tinydav/tdav.h"
+
static tsk_bool_t __b_initialized = tsk_false; // set once tdav_init() completes; guards double init/deinit
static tsk_bool_t __b_ipsec_supported = tsk_false; // set when the WFP IPSec plugin loads successfully
static const struct tmedia_codec_plugin_def_s* __codec_plugins_all[0xFF] = { tsk_null }; // list of all codecs BEFORE filtering (null-terminated snapshot, 0xFF = capacity)
static const tsk_size_t __codec_plugins_all_count = sizeof(__codec_plugins_all)/sizeof(__codec_plugins_all[0]);
+
+#if TDAV_UNDER_WINDOWS
+# include "tinydav/tdav_win32.h"
+#elif TDAV_UNDER_APPLE
+# include "tinydav/tdav_apple.h"
+#endif
+
+// Shared libraries not allowed on WP8 and iOS
+#if !TDAV_UNDER_WINDOWS_PHONE && !TDAV_UNDER_IPHONE
+#include "tsk_plugin.h"
+# if TDAV_UNDER_WINDOWS
+# define TDAV_HAVE_PLUGIN_EXT_WIN32 1
+ static struct tsk_plugin_s* __dll_plugin_wasapi = tsk_null; /* Windows Audio Session API (WASAPI): Windows [Vista - 8] */
+ static struct tsk_plugin_s* __dll_plugin_dshow = tsk_null; /* DirectShow: Windows [XP - 8] */
+ static struct tsk_plugin_s* __dll_plugin_mf = tsk_null; /* Media Foundation and WASAPI : Windows [7 - 8] */
+ static struct tsk_plugin_s* __dll_plugin_dd = tsk_null; /* Microsoft Desktop Duplication API : Windows [8 - ] */
+ static struct tsk_plugin_s* __dll_plugin_cuda = tsk_null; /* Media Foundation and WASAPI : Windows [XP - 8] */
+ static struct tsk_plugin_s* __dll_plugin_audio_dsp = tsk_null; /* Audio DSP, Resampler, AEC, NS, AGC...: Windows [Vista - 8] */
+ static struct tsk_plugin_s* __dll_plugin_ipsec_wfp = tsk_null; /* IPSec implementation using WFP (Windows Filtering platform): Windows [Vista - 8] */
+# endif /* TDAV_UNDER_WINDOWS */
+#endif
+
+// Media Contents, plugins definition...
+#include "tinymedia.h"
+
+// IPSec
+#if !defined(HAVE_TINYIPSEC) || HAVE_TINYIPSEC
+#include "tipsec.h"
+#endif
+
+// Converters
+#include "tinymedia/tmedia_converter_video.h"
+// Converters
+#include "tinymedia/tmedia_converter_video.h"
+#include "tinydav/video/tdav_converter_video.h"
+
+// Sessions
+#include "tinymedia/tmedia_session_ghost.h"
+#include "tinydav/audio/tdav_session_audio.h"
+#include "tinydav/video/tdav_session_video.h"
+#include "tinydav/msrp/tdav_session_msrp.h"
+#include "tinydav/bfcp/tdav_session_bfcp.h"
+#include "tinydav/t140/tdav_session_t140.h"
+
+// Codecs
+#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+#include "tinydav/codecs/t140/tdav_codec_t140.h"
+#include "tinydav/codecs/fec/tdav_codec_ulpfec.h"
+#include "tinydav/codecs/fec/tdav_codec_red.h"
+#if !defined(HAVE_TINYMSRP) || HAVE_TINYMSRP
+#include "tinydav/codecs/msrp/tdav_codec_msrp.h"
+#endif
+#if !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP
+#include "tinydav/codecs/bfcp/tdav_codec_bfcp.h"
+#endif
+#include "tinydav/codecs/amr/tdav_codec_amr.h"
+#include "tinydav/codecs/bv/tdav_codec_bv16.h"
+#include "tinydav/codecs/g711/tdav_codec_g711.h"
+#include "tinydav/codecs/gsm/tdav_codec_gsm.h"
+#include "tinydav/codecs/ilbc/tdav_codec_ilbc.h"
+#include "tinydav/codecs/g729/tdav_codec_g729.h"
+#include "tinydav/codecs/g722/tdav_codec_g722.h"
+#include "tinydav/codecs/speex/tdav_codec_speex.h"
+#include "tinydav/codecs/opus/tdav_codec_opus.h"
+#include "tinydav/codecs/h261/tdav_codec_h261.h"
+#include "tinydav/codecs/h263/tdav_codec_h263.h"
+#include "tinydav/codecs/h264/tdav_codec_h264.h"
+#include "tinydav/codecs/h264/tdav_codec_h264_cuda.h"
+#include "tinydav/codecs/h264/tdav_codec_h264_cisco.h"
+#include "tinydav/codecs/h264/tdav_codec_h264_intel.h"
+#include "tinydav/codecs/theora/tdav_codec_theora.h"
+#include "tinydav/codecs/mp4ves/tdav_codec_mp4ves.h"
+#include "tinydav/codecs/vpx/tdav_codec_vp8.h"
+
+// Consumers
+#include "tinydav/audio/waveapi/tdav_consumer_waveapi.h"
+#include "tinydav/audio/directsound/tdav_consumer_dsound.h"
+#include "tinydav/audio/coreaudio/tdav_consumer_audioqueue.h"
+#include "tinydav/audio/coreaudio/tdav_consumer_audiounit.h"
+#include "tinydav/audio/wasapi/tdav_consumer_wasapi.h"
+#include "tinydav/audio/alsa/tdav_consumer_alsa.h"
+#include "tinydav/audio/oss/tdav_consumer_oss.h"
+#include "tinydav/video/winm/tdav_consumer_winm.h"
+#include "tinydav/video/mf/tdav_consumer_video_mf.h"
+#include "tinydav/video/gdi/tdav_consumer_video_gdi.h"
+#include "tinydav/t140/tdav_consumer_t140.h"
+
+// Producers
+#include "tinydav/audio/waveapi/tdav_producer_waveapi.h"
+#include "tinydav/audio/directsound/tdav_producer_dsound.h"
+#include "tinydav/video/gdi/tdav_producer_screencast_gdi.h"
+#include "tinydav/video/directx/tdav_producer_screencast_ddraw.h"
+#include "tinydav/video/v4linux/tdav_producer_video_v4l2.h"
+#include "tinydav/audio/coreaudio/tdav_producer_audioqueue.h"
+#include "tinydav/audio/coreaudio/tdav_producer_audiounit.h"
+#include "tinydav/audio/wasapi/tdav_producer_wasapi.h"
+#include "tinydav/audio/alsa/tdav_producer_alsa.h"
+#include "tinydav/audio/oss/tdav_producer_oss.h"
+#include "tinydav/video/winm/tdav_producer_winm.h"
+#include "tinydav/video/mf/tdav_producer_video_mf.h"
+#include "tinydav/t140/tdav_producer_t140.h"
+
+// Audio Denoise (AGC, Noise Suppression, VAD and AEC)
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_DENOISE) || HAVE_SPEEX_DENOISE)
+# include "tinydav/audio/tdav_speex_denoise.h"
+#endif
+#if HAVE_WEBRTC && (!defined(HAVE_WEBRTC_DENOISE) || HAVE_WEBRTC_DENOISE)
+# include "tinydav/audio/tdav_webrtc_denoise.h"
+#endif
+
+// Audio resampler
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_RESAMPLER) || HAVE_SPEEX_RESAMPLER)
+# include "tinydav/audio/tdav_speex_resampler.h"
+#endif
+
+// Audio/Video JitterBuffer
+#if HAVE_SPEEX_DSP && HAVE_SPEEX_JB
+# include "tinydav/audio/tdav_speex_jitterbuffer.h"
+#else
+# include "tinydav/audio/tdav_speakup_jitterbuffer.h"
+#endif
+
+#if HAVE_FFMPEG
+# include <libavcodec/avcodec.h>
+#endif
+
+static inline int _tdav_codec_plugins_collect();
+static inline int _tdav_codec_plugins_disperse();
+static inline tsk_bool_t _tdav_codec_is_supported(tdav_codec_id_t codec, const tmedia_codec_plugin_def_t* plugin);
+
/**
* Initializes tinyDAV: loads OS-specific state and stand-alone plugins, then
* registers media contents, sessions, codecs, converters, consumers, producers,
* denoisers, resamplers and jitter buffers. Must be called once before any
* other tinyDAV function; a second call is a harmless no-op.
* @return zero on success, non-zero error code otherwise.
*/
int tdav_init()
{
    int ret = 0;

    if(__b_initialized){
        TSK_DEBUG_INFO("TINYDAV already initialized");
        return 0;
    }

    /* === OS specific === */
#if TDAV_UNDER_WINDOWS
    if ((ret = tdav_win32_init())) {
        return ret;
    }
#elif TDAV_UNDER_APPLE
    if ((ret = tdav_apple_init())) {
        return ret;
    }
#endif

    /* === Initialize ffmpeg === */
#if HAVE_FFMPEG
#	if LIBAVCODEC_VERSION_MAJOR <= 53
    avcodec_init();
#	endif
#endif

    /* === stand-alone plugins === */
#if TDAV_HAVE_PLUGIN_EXT_WIN32
    {
        tsk_size_t plugins_count = 0;
        char* full_path = tsk_null; // Loading plugins from ActiveX fails when using relative path.
        /* WASAPI (Audio consumer, Audio producer) */
#if 0 // disable WASAPI by default (AEC issue because of codec/consumer rate mismatch)
        if(tdav_win32_is_winvista_or_later()){
            tsk_sprintf(&full_path, "%s/pluginWASAPI.dll", tdav_get_current_directory_const());
            if(tsk_plugin_file_exist(full_path) && (__dll_plugin_wasapi = tsk_plugin_create(full_path))){
                plugins_count += tmedia_plugin_register(__dll_plugin_wasapi, tsk_plugin_def_type_all, tsk_plugin_def_media_type_all);
            }
        }
#endif
        /* CUDA (H.264 codec) */
#if 1 // Enable CUDA by default
        tsk_sprintf(&full_path, "%s/pluginCUDA.dll", tdav_get_current_directory_const()); // CUDA works on all Windows versions
        if(tsk_plugin_file_exist(full_path) && (__dll_plugin_cuda = tsk_plugin_create(full_path))){
            plugins_count += tmedia_plugin_register(__dll_plugin_cuda, tsk_plugin_def_type_all, tsk_plugin_def_media_type_all);
        }
#endif
        /* Microsoft Desktop Duplication API (Screen capture) */
        if (tdav_win32_is_win8_or_later()){
            tsk_sprintf(&full_path, "%s/pluginWinDD.dll", tdav_get_current_directory_const());
            if (tsk_plugin_file_exist(full_path) && (__dll_plugin_dd = tsk_plugin_create(full_path))){
                plugins_count += tmedia_plugin_register(__dll_plugin_dd, tsk_plugin_def_type_all, tsk_plugin_def_media_type_all);
            }
        }

        /* Media Foundation (Video converter, Video consumer, Video producer, Microsoft H.264 codec, Intel Quick Sync H.264 codec) */
        if(tdav_win32_is_win7_or_later()){
            tsk_sprintf(&full_path, "%s/pluginWinMF.dll", tdav_get_current_directory_const());
            if(tsk_plugin_file_exist(full_path) && (__dll_plugin_mf = tsk_plugin_create(full_path))){
                plugins_count += tmedia_plugin_register(__dll_plugin_mf, tsk_plugin_def_type_all, tsk_plugin_def_media_type_all);
            }
        }
        /* DirectShow (Video consumer, Video producer) */
        if (tdav_win32_is_winxp_or_later()) {
            tsk_sprintf(&full_path, "%s/pluginDirectShow.dll", tdav_get_current_directory_const());
            if (tsk_plugin_file_exist(full_path) && (__dll_plugin_dshow = tsk_plugin_create(full_path))) {
                plugins_count += tmedia_plugin_register(__dll_plugin_dshow, tsk_plugin_def_type_all, tsk_plugin_def_media_type_all);
            }
        }
        /* Audio DSP (Resampler, AEC, NS, AGC...) */
        if (tdav_win32_is_winvista_or_later()){
            tsk_sprintf(&full_path, "%s/pluginWinAudioDSP.dll", tdav_get_current_directory_const());
            if(tsk_plugin_file_exist(full_path) && (__dll_plugin_audio_dsp = tsk_plugin_create(full_path))) {
                plugins_count += tmedia_plugin_register(__dll_plugin_audio_dsp, tsk_plugin_def_type_all, tsk_plugin_def_media_type_all);
            }
        }
        /* IPSec implementation using Windows Filtering Platform (WFP) */
#if !defined(HAVE_TINYIPSEC) || HAVE_TINYIPSEC
        if (tdav_win32_is_winvista_or_later()) {
            tsk_sprintf(&full_path, "%s/pluginWinIPSecVista.dll", tdav_get_current_directory_const());
            if (tsk_plugin_file_exist(full_path) && (tipsec_plugin_register_file(full_path, &__dll_plugin_ipsec_wfp) == 0)) {
                plugins_count += 1; // at least one
                __b_ipsec_supported = tsk_true;
            }
        }
#endif

        TSK_FREE(full_path);
        TSK_DEBUG_INFO("Windows stand-alone plugins loaded = %u", plugins_count);
    }
#endif

    /* === Register media contents === */
    tmedia_content_plugin_register("text/html", tmedia_content_dummy_plugin_def_t);
    tmedia_content_plugin_register("text/plain", tmedia_content_dummy_plugin_def_t);
    tmedia_content_plugin_register("application/octet-stream", tmedia_content_dummy_plugin_def_t);
    tmedia_content_plugin_register("message/CPIM", tmedia_content_cpim_plugin_def_t);
    /* To be implemented
    tmedia_content_plugin_register("message/sipfrag", tmedia_content_sipfrag_plugin_def_t);
    tmedia_content_plugin_register("multipart/digest", tmedia_content_multipart_plugin_def_t);
    tmedia_content_plugin_register("multipart/mixed", tmedia_content_multipart_plugin_def_t);
    tmedia_content_plugin_register("multipart/related", tmedia_content_multipart_plugin_def_t);
    tmedia_content_plugin_register("multipart/alternative", tmedia_content_multipart_plugin_def_t);
    tmedia_content_plugin_register("multipart/encrypted", tmedia_content_multipart_plugin_def_t);
    tmedia_content_plugin_register("multipart/parallel", tmedia_content_multipart_plugin_def_t);
    tmedia_content_plugin_register("multipart/signed", tmedia_content_multipart_plugin_def_t);
    */

    /* === Register sessions === */
    tmedia_session_plugin_register(tmedia_session_ghost_plugin_def_t);
    tmedia_session_plugin_register(tdav_session_audio_plugin_def_t);
    tmedia_session_plugin_register(tdav_session_video_plugin_def_t);
#if !defined(HAVE_TINYMSRP) || HAVE_TINYMSRP
    tmedia_session_plugin_register(tdav_session_msrp_plugin_def_t);
#endif
    tmedia_session_plugin_register(tdav_session_t140_plugin_def_t);
#if !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP
    tmedia_session_plugin_register(tdav_session_bfcp_plugin_def_t);
#endif
    tmedia_session_plugin_register(tdav_session_bfcpaudio_plugin_def_t);
    tmedia_session_plugin_register(tdav_session_bfcpvideo_plugin_def_t);

    /* === Register codecs === */
#if HAVE_FFMPEG
    avcodec_register_all();
#endif
#if !defined(HAVE_TINYMSRP) || HAVE_TINYMSRP
    tmedia_codec_plugin_register(tdav_codec_msrp_plugin_def_t);
#endif
    tmedia_codec_plugin_register(tdav_codec_t140_plugin_def_t);
#if !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP
    tmedia_codec_plugin_register(tdav_codec_bfcp_plugin_def_t);
#endif
    tmedia_codec_plugin_register(tdav_codec_red_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_g711a_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_g711u_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_g722_plugin_def_t);
#if HAVE_OPENCORE_AMR
    tmedia_codec_plugin_register(tdav_codec_amrnb_oa_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_amrnb_be_plugin_def_t);
#endif
#if HAVE_BV16
    tmedia_codec_plugin_register(tdav_codec_bv16_plugin_def_t);
#endif
#if HAVE_LIBGSM
    tmedia_codec_plugin_register(tdav_codec_gsm_plugin_def_t);
#endif
#if HAVE_ILBC
    tmedia_codec_plugin_register(tdav_codec_ilbc_plugin_def_t);
#endif
#if HAVE_LIB_SPEEX
    tmedia_codec_plugin_register(tdav_codec_speex_nb_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_speex_wb_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_speex_uwb_plugin_def_t);
#endif
#if HAVE_LIBOPUS
    tmedia_codec_plugin_register(tdav_codec_opus_plugin_def_t);
#endif
#if HAVE_G729
    tmedia_codec_plugin_register(tdav_codec_g729ab_plugin_def_t);
#endif
    // last: dtmf, ULPFEC and RED
    tmedia_codec_plugin_register(tdav_codec_dtmf_plugin_def_t);
    // tmedia_codec_plugin_register(tdav_codec_ulpfec_plugin_def_t);
    // tmedia_codec_plugin_register(tdav_codec_red_plugin_def_t);

#if HAVE_LIBVPX
    tmedia_codec_plugin_register(tdav_codec_vp8_plugin_def_t);
#endif
#if HAVE_CUDA
    #error "Support for H.264 Cuda is deprecated"
    if(tdav_codec_h264_cuda_is_supported()){
        tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp10_plugin_def_t);
        tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp20_plugin_def_t);
        tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp30_plugin_def_t);
    }
#endif
#if HAVE_FFMPEG
    if(tdav_codec_ffmpeg_mp4ves_is_supported()){
        tmedia_codec_plugin_register(tdav_codec_mp4ves_plugin_def_t);
    }
    if(tdav_codec_ffmpeg_h264_is_supported()){
        if(!tmedia_codec_plugin_is_registered_2(tmedia_codec_id_h264_bp)) { // could be already registered by stand alone plugins (e.g. pluginWinMF.DLL)
            tmedia_codec_plugin_register(tdav_codec_h264_base_plugin_def_t);
        }
        if(!tmedia_codec_plugin_is_registered_2(tmedia_codec_id_h264_mp)) { // could be already registered by stand alone plugins (e.g. pluginWinMF.DLL)
            tmedia_codec_plugin_register(tdav_codec_h264_main_plugin_def_t);
        }
    }
    tmedia_codec_plugin_register(tdav_codec_h263p_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_h263pp_plugin_def_t);
    if(tdav_codec_ffmpeg_theora_is_supported()){
        tmedia_codec_plugin_register(tdav_codec_theora_plugin_def_t);
    }
    tmedia_codec_plugin_register(tdav_codec_h263_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_h261_plugin_def_t);
#elif HAVE_H264_PASSTHROUGH
    tmedia_codec_plugin_register(tdav_codec_h264_base_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_h264_main_plugin_def_t);
#endif
#if HAVE_INTEL_MEDIA_SDK
    tmedia_codec_plugin_register(tdav_codec_h264_intel_base_plugin_def_t);
    tmedia_codec_plugin_register(tdav_codec_h264_intel_main_plugin_def_t);
#endif
#if HAVE_OPENH264
    tmedia_codec_plugin_register(tdav_codec_h264_cisco_base_plugin_def_t);
#endif

    /* === Register converters === */
    // register several convertors and try them all (e.g. LIBYUV only support to/from I420)
#if HAVE_LIBYUV
    tmedia_converter_video_plugin_register(tdav_converter_video_libyuv_plugin_def_t);
#endif
#if HAVE_FFMPEG || HAVE_SWSSCALE
    tmedia_converter_video_plugin_register(tdav_converter_video_ffmpeg_plugin_def_t);
#endif

    /* === Register consumers === */
    tmedia_consumer_plugin_register(tdav_consumer_t140_plugin_def_t); /* T140 */
#if HAVE_DSOUND_H
    tmedia_consumer_plugin_register(tdav_consumer_dsound_plugin_def_t);
#elif HAVE_WAVE_API
    tmedia_consumer_plugin_register(tdav_consumer_waveapi_plugin_def_t);
#elif HAVE_WASAPI
    tmedia_consumer_plugin_register(tdav_consumer_wasapi_plugin_def_t);
#endif
#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT // Windows GDI
    tmedia_consumer_plugin_register(tdav_consumer_video_gdi_plugin_def_t);
#endif
#if HAVE_WINM // Windows Media (WP8)
    tmedia_consumer_plugin_register(tdav_consumer_winm_plugin_def_t);
#endif
#if HAVE_ALSA_ASOUNDLIB_H // Linux
    tmedia_consumer_plugin_register(tdav_consumer_alsa_plugin_def_t);
#endif
#if HAVE_LINUX_SOUNDCARD_H // Linux
    tmedia_consumer_plugin_register(tdav_consumer_oss_plugin_def_t);
#endif

#if HAVE_COREAUDIO_AUDIO_UNIT // CoreAudio based on AudioUnit
    tmedia_consumer_plugin_register(tdav_consumer_audiounit_plugin_def_t);
#elif HAVE_COREAUDIO_AUDIO_QUEUE // CoreAudio based on AudioQueue
    tmedia_consumer_plugin_register(tdav_consumer_audioqueue_plugin_def_t);
#endif

#if HAVE_OSS_H
    // NOTE(review): 'tmedia_consumer_oss_plugin_def_t' breaks the 'tdav_' naming used
    // everywhere else (cf. tdav_consumer_oss_plugin_def_t above) — confirm this symbol exists.
    tmedia_consumer_plugin_register(tmedia_consumer_oss_plugin_def_t);
#endif

    /* === Register producers === */
    tmedia_producer_plugin_register(tdav_producer_t140_plugin_def_t); /* T140 */
#if HAVE_DSOUND_H // DirectSound
    tmedia_producer_plugin_register(tdav_producer_dsound_plugin_def_t);
#elif HAVE_WAVE_API // WaveAPI
    tmedia_producer_plugin_register(tdav_producer_waveapi_plugin_def_t);
#elif HAVE_WASAPI // WASAPI
    tmedia_producer_plugin_register(tdav_producer_wasapi_plugin_def_t);
#endif
#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT // Windows DirectDraw (DirectX)
    if (tdav_producer_screencast_ddraw_plugin_is_supported()) {
        tmedia_producer_plugin_register(tdav_producer_screencast_ddraw_plugin_def_t);
    }
#endif
#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT // Windows GDI
    tmedia_producer_plugin_register(tdav_producer_screencast_gdi_plugin_def_t);
#endif
#if HAVE_WINM // Windows Media (WP8)
    tmedia_producer_plugin_register(tdav_producer_winm_plugin_def_t);
#endif
#if HAVE_ALSA_ASOUNDLIB_H // Linux
    tmedia_producer_plugin_register(tdav_producer_alsa_plugin_def_t);
#endif
#if HAVE_LINUX_SOUNDCARD_H // Linux
    tmedia_producer_plugin_register(tdav_producer_oss_plugin_def_t);
#endif
#if HAVE_LINUX_VIDEODEV2_H // V4L2 (Linux)
    tmedia_producer_plugin_register(tdav_producer_video_v4l2_plugin_def_t);
    tmedia_producer_plugin_register(tdav_producer_screencast_v4l2_plugin_def_t);
#endif

#if HAVE_COREAUDIO_AUDIO_UNIT // CoreAudio based on AudioUnit
    tmedia_producer_plugin_register(tdav_producer_audiounit_plugin_def_t);
#elif HAVE_COREAUDIO_AUDIO_QUEUE // CoreAudio based on AudioQueue
    tmedia_producer_plugin_register(tdav_producer_audioqueue_plugin_def_t);
#endif

    /* === Register Audio Denoise (AGC, VAD, Noise Suppression and AEC) === */
#if HAVE_WEBRTC && (!defined(HAVE_WEBRTC_DENOISE) || HAVE_WEBRTC_DENOISE)
    tmedia_denoise_plugin_register(tdav_webrtc_denoise_plugin_def_t);
#endif
#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_DENOISE) || HAVE_SPEEX_DENOISE)
    tmedia_denoise_plugin_register(tdav_speex_denoise_plugin_def_t);
#endif

    /* === Register Audio Resampler === */
#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_RESAMPLER) || HAVE_SPEEX_RESAMPLER)
    tmedia_resampler_plugin_register(tdav_speex_resampler_plugin_def_t);
#endif

    /* === Register Audio/video JitterBuffer === */
#if HAVE_SPEEX_DSP && HAVE_SPEEX_JB
    tmedia_jitterbuffer_plugin_register(tdav_speex_jitterbuffer_plugin_def_t);
#else
    tmedia_jitterbuffer_plugin_register(tdav_speakup_jitterbuffer_plugin_def_t);
#endif

    // collect all codecs before filtering
    _tdav_codec_plugins_collect();

    __b_initialized = tsk_true;

    return ret;
}
+
+int tdav_codec_set_priority(tdav_codec_id_t codec_id, int priority)
+{
+ tsk_size_t i;
+
+ if(priority < 0){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ for(i = 0; i < __codec_plugins_all_count && __codec_plugins_all[i]; ++i){
+ if(__codec_plugins_all[i]->codec_id == codec_id){
+ const struct tmedia_codec_plugin_def_s *codec_decl_1, *codec_decl_2;
+ priority = TSK_MIN(priority, (int)__codec_plugins_all_count-1);
+ codec_decl_1 = __codec_plugins_all[priority];
+ codec_decl_2 = __codec_plugins_all[i];
+
+ __codec_plugins_all[i] = codec_decl_1;
+ __codec_plugins_all[priority] = codec_decl_2;
+
+ // change priority if already registered and supported
+ if(_tdav_codec_is_supported((tdav_codec_id_t)codec_decl_2->codec_id, codec_decl_2) && tmedia_codec_plugin_is_registered(codec_decl_2)){
+ return tmedia_codec_plugin_register_2(codec_decl_2, priority);
+ }
+ return 0;
+ }
+ }
+
+ TSK_DEBUG_INFO("Cannot find codec with id=%d for priority setting", codec_id);
+ return 0;
+}
+
+int tdav_set_codecs(tdav_codec_id_t codecs)
+{
+ tsk_size_t i, prio;
+
+ // unregister all codecs
+ tmedia_codec_plugin_unregister_all();
+ // register "selected" and "fake" codecs. "fake" codecs have "none" as id (e.g. MSRP or DTMF)
+ for(i=0,prio=0; i<__codec_plugins_all_count && __codec_plugins_all[i]; ++i){
+ if((codecs & __codec_plugins_all[i]->codec_id) || __codec_plugins_all[i]->codec_id == tmedia_codec_id_none){
+ if(_tdav_codec_is_supported((tdav_codec_id_t)__codec_plugins_all[i]->codec_id, __codec_plugins_all[i])){
+ tmedia_codec_plugin_register_2(__codec_plugins_all[i], (int)prio++);
+ }
+ }
+ }
+ return 0;
+}
+
+static inline int _tdav_codec_plugins_collect()
+{
+#if defined(_MSC_VER) // TODO: Why next code crash on CentOS64 when built with debug enabled ("-g -O0")
+ const struct tmedia_codec_plugin_def_s* (* plugins)[TMED_CODEC_MAX_PLUGINS];
+ tsk_size_t i, count;
+ int ret;
+ static const tsk_size_t __codec_plugins_all_count = sizeof(__codec_plugins_all)/sizeof(__codec_plugins_all[0]);
+
+ ret = _tdav_codec_plugins_disperse();
+ if((ret = tmedia_codec_plugin_registered_get_all(&plugins, &count)) == 0) {
+ for(i = 0; i < count && i < __codec_plugins_all_count; ++i) {
+ __codec_plugins_all[i] = (*plugins)[i];
+ }
+ }
+ return 0;
+#else
+ extern const tmedia_codec_plugin_def_t* __tmedia_codec_plugins[TMED_CODEC_MAX_PLUGINS];
+
+ static const tsk_size_t __codec_plugins_all_count = sizeof(__codec_plugins_all)/sizeof(__codec_plugins_all[0]);
+
+ int ret = _tdav_codec_plugins_disperse();
+ if (ret == 0) {
+ tsk_size_t i, count_max = sizeof(__tmedia_codec_plugins)/sizeof(__tmedia_codec_plugins[0]);
+ for(i = 0; i < count_max && i < __codec_plugins_all_count; ++i) {
+ __codec_plugins_all[i] = __tmedia_codec_plugins[i];
+ }
+ }
+ return ret;
+#endif
+}
+
+/**
+* Clears the file-scope codec-plugin snapshot ("__codec_plugins_all").
+* @return 0 (always).
+*/
+static inline int _tdav_codec_plugins_disperse()
+{
+ tsk_size_t idx;
+ for (idx = 0; idx < sizeof(__codec_plugins_all)/sizeof(__codec_plugins_all[0]); ++idx) {
+ __codec_plugins_all[idx] = tsk_null;
+ }
+ return 0;
+}
+
+
+/*
+ Must be called after tdav_init()
+ Checks whether "codec" (or, when "plugin" is not tsk_null, that exact plugin
+ instance) is present in the collected plugin snapshot ("__codec_plugins_all").
+ Assumes the snapshot is contiguous: the scan stops at the first empty slot.
+*/
+static inline tsk_bool_t _tdav_codec_is_supported(tdav_codec_id_t codec, const tmedia_codec_plugin_def_t* plugin)
+{
+ tsk_size_t i;
+ for(i = 0; i < __codec_plugins_all_count && __codec_plugins_all[i]; ++i) {
+ if((plugin && __codec_plugins_all[i] == plugin) || __codec_plugins_all[i]->codec_id == codec) {
+ return tsk_true;
+ }
+ }
+ return tsk_false;
+}
+
+/**
+* Checks whether a codec is supported. Being supported doesn't mean it's enabled and ready for use.
+* @param codec the codec id to check.
+* @return @ref tsk_true if supported and @ref tsk_false otherwise.
+* @sa @ref tdav_codec_is_enabled()
+*/
+tsk_bool_t tdav_codec_is_supported(tdav_codec_id_t codec)
+{
+ return _tdav_codec_is_supported(codec, tsk_null);
+}
+
+/**
+* Checks whether a codec is enabled (i.e. currently registered as a plugin).
+* @param codec the codec id to check.
+* @return @ref tsk_true if enabled and @ref tsk_false otherwise.
+* @sa @ref tdav_codec_is_supported()
+*/
+tsk_bool_t tdav_codec_is_enabled(tdav_codec_id_t codec)
+{
+ return tmedia_codec_plugin_is_registered_2((tmedia_codec_id_t)codec);
+}
+
+/**
+* Checks whether IPSec is supported (flag computed elsewhere at init time).
+* @return @ref tsk_true if supported and @ref tsk_false otherwise.
+*/
+tsk_bool_t tdav_ipsec_is_supported()
+{
+ return __b_ipsec_supported;
+}
+
+/**
+* Deinitializes tinyDAV (counterpart of tdav_init()): releases OS-specific
+* resources, then unregisters media contents, sessions, codecs, converters,
+* consumers, producers, denoisers, resamplers and jitter buffers, frees the
+* Win32 plugin DLLs and finally clears the codec snapshot.
+* No-op (returns 0) if tinyDAV was never initialized.
+* @return 0 on success, otherwise a non-zero error code.
+*/
+int tdav_deinit()
+{
+ int ret = 0;
+
+ if(!__b_initialized){
+ TSK_DEBUG_INFO("TINYDAV not initialized");
+ return 0;
+ }
+
+ /* === OS specific === */
+#if TDAV_UNDER_WINDOWS
+ if ((ret = tdav_win32_deinit())) {
+ return ret;
+ }
+#elif TDAV_UNDER_APPLE
+ if ((ret = tdav_apple_deinit())) {
+ return ret;
+ }
+#endif
+
+ /* === UnRegister media contents === */
+ tmedia_content_plugin_unregister_all();
+
+ /* === UnRegister sessions === */
+ tmedia_session_plugin_unregister(tmedia_session_ghost_plugin_def_t);
+ tmedia_session_plugin_unregister(tdav_session_audio_plugin_def_t);
+ tmedia_session_plugin_unregister(tdav_session_video_plugin_def_t);
+#if !defined(HAVE_TINYMSRP) || HAVE_TINYMSRP
+ tmedia_session_plugin_unregister(tdav_session_msrp_plugin_def_t);
+#endif
+ tmedia_session_plugin_unregister(tdav_session_t140_plugin_def_t);
+#if !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP
+ tmedia_session_plugin_unregister(tdav_session_bfcp_plugin_def_t);
+#endif
+ tmedia_session_plugin_unregister(tdav_session_bfcpaudio_plugin_def_t);
+ tmedia_session_plugin_unregister(tdav_session_bfcpvideo_plugin_def_t);
+
+ /* === UnRegister codecs === */
+ tmedia_codec_plugin_unregister_all();
+
+
+ /* === unRegister converters === */
+#if HAVE_LIBYUV
+ tmedia_converter_video_plugin_unregister(tdav_converter_video_libyuv_plugin_def_t);
+#endif
+#if HAVE_FFMPEG || HAVE_SWSSCALE
+ tmedia_converter_video_plugin_unregister(tdav_converter_video_ffmpeg_plugin_def_t);
+#endif
+
+ /* === unRegister consumers === */
+ tmedia_consumer_plugin_unregister(tdav_consumer_t140_plugin_def_t); /* T140 */
+#if HAVE_DSOUND_H
+ tmedia_consumer_plugin_unregister(tdav_consumer_dsound_plugin_def_t);
+#endif
+#if HAVE_WAVE_API
+ tmedia_consumer_plugin_unregister(tdav_consumer_waveapi_plugin_def_t);
+#endif
+#if HAVE_WASAPI
+ tmedia_consumer_plugin_unregister(tdav_consumer_wasapi_plugin_def_t);
+#endif
+#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT // Windows GDI
+ tmedia_consumer_plugin_unregister(tdav_consumer_video_gdi_plugin_def_t);
+#endif
+#if HAVE_WINM // Windows Media (WP8)
+ tmedia_consumer_plugin_unregister(tdav_consumer_winm_plugin_def_t);
+#endif
+#if HAVE_COREAUDIO_AUDIO_UNIT // CoreAudio based on AudioUnit
+ tmedia_consumer_plugin_unregister(tdav_consumer_audiounit_plugin_def_t);
+#endif
+#if HAVE_COREAUDIO_AUDIO_QUEUE // CoreAudio based on AudioQueue
+ tmedia_consumer_plugin_unregister(tdav_consumer_audioqueue_plugin_def_t);
+#endif
+#if HAVE_ALSA_ASOUNDLIB_H // Linux
+ tmedia_consumer_plugin_unregister(tdav_consumer_alsa_plugin_def_t);
+#endif
+#if HAVE_LINUX_SOUNDCARD_H // Linux
+ tmedia_consumer_plugin_unregister(tdav_consumer_oss_plugin_def_t);
+#endif
+
+ /* === UnRegister producers === */
+ tmedia_producer_plugin_unregister(tdav_producer_t140_plugin_def_t); /* T140 */
+#if HAVE_DSOUND_H // DirectSound
+ tmedia_producer_plugin_unregister(tdav_producer_dsound_plugin_def_t);
+#endif
+#if HAVE_WAVE_API // WaveAPI
+ tmedia_producer_plugin_unregister(tdav_producer_waveapi_plugin_def_t);
+#endif
+#if HAVE_WASAPI // WASAPI
+ tmedia_producer_plugin_unregister(tdav_producer_wasapi_plugin_def_t);
+#endif
+#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT // Windows DirectDraw (DirectX)
+ tmedia_producer_plugin_unregister(tdav_producer_screencast_ddraw_plugin_def_t);
+#endif
+#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT // Windows GDI
+ tmedia_producer_plugin_unregister(tdav_producer_screencast_gdi_plugin_def_t);
+#endif
+#if HAVE_WINM // Windows Media (WP8)
+ tmedia_producer_plugin_unregister(tdav_producer_winm_plugin_def_t);
+#endif
+#if HAVE_ALSA_ASOUNDLIB_H // Linux
+ tmedia_producer_plugin_unregister(tdav_producer_alsa_plugin_def_t);
+#endif
+#if HAVE_LINUX_SOUNDCARD_H // Linux
+ tmedia_producer_plugin_unregister(tdav_producer_oss_plugin_def_t);
+#endif
+#if HAVE_LINUX_VIDEODEV2_H // V4L2 (Linux)
+ tmedia_producer_plugin_unregister(tdav_producer_video_v4l2_plugin_def_t);
+ tmedia_producer_plugin_unregister(tdav_producer_screencast_v4l2_plugin_def_t);
+#endif
+
+#if HAVE_COREAUDIO_AUDIO_UNIT // CoreAudio based on AudioUnit
+ tmedia_producer_plugin_unregister(tdav_producer_audiounit_plugin_def_t);
+#elif HAVE_COREAUDIO_AUDIO_QUEUE // CoreAudio based on AudioQueue
+ tmedia_producer_plugin_unregister(tdav_producer_audioqueue_plugin_def_t);
+#endif
+
+ // NOTE(review): guard/symbol differ from the OSS consumer unregistered above
+ // (tdav_consumer_oss under HAVE_LINUX_SOUNDCARD_H) — confirm both are intended
+#if HAVE_OSS_H
+ tmedia_consumer_plugin_unregister(tmedia_consumer_oss_plugin_def_t);
+#endif
+
+ /* === UnRegister Audio Denoise (AGC, VAD, Noise Suppression and AEC) === */
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_DENOISE) || HAVE_SPEEX_DENOISE)
+ tmedia_denoise_plugin_unregister(tdav_speex_denoise_plugin_def_t);
+#endif
+#if HAVE_WEBRTC && (!defined(HAVE_WEBRTC_DENOISE) || HAVE_WEBRTC_DENOISE)
+ tmedia_denoise_plugin_unregister(tdav_webrtc_denoise_plugin_def_t);
+#endif
+
+ /* === UnRegister Audio Resampler === */
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_RESAMPLER) || HAVE_SPEEX_RESAMPLER)
+ tmedia_resampler_plugin_unregister(tdav_speex_resampler_plugin_def_t);
+#endif
+
+ /* === UnRegister Audio/video JitterBuffer === */
+#if HAVE_SPEEX_DSP && HAVE_SPEEX_JB
+ tmedia_jitterbuffer_plugin_unregister(tdav_speex_jitterbuffer_plugin_def_t);
+#else
+ tmedia_jitterbuffer_plugin_unregister(tdav_speakup_jitterbuffer_plugin_def_t);
+#endif
+
+ /* === stand-alone plugins === */
+#if TDAV_HAVE_PLUGIN_EXT_WIN32
+ {
+ TSK_OBJECT_SAFE_FREE(__dll_plugin_cuda);
+ TSK_OBJECT_SAFE_FREE(__dll_plugin_wasapi);
+ TSK_OBJECT_SAFE_FREE(__dll_plugin_mf);
+ TSK_OBJECT_SAFE_FREE(__dll_plugin_dd);
+ TSK_OBJECT_SAFE_FREE(__dll_plugin_dshow);
+ TSK_OBJECT_SAFE_FREE(__dll_plugin_audio_dsp);
+ TSK_OBJECT_SAFE_FREE(__dll_plugin_ipsec_wfp);
+ }
+#endif
+
+ // disperse all collected codecs
+ _tdav_codec_plugins_disperse();
+
+ __b_initialized = tsk_false;
+
+ return ret;
+}
diff --git a/tinyDAV/src/tdav_apple.mm b/tinyDAV/src/tdav_apple.mm
new file mode 100644
index 0000000..9e68d23
--- /dev/null
+++ b/tinyDAV/src/tdav_apple.mm
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014-2015 Mamadou DIOP.
+ *
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/tdav_apple.h"
+
+#if TDAV_UNDER_APPLE
+
+#import <UIKit/UIKit.h>
+#include <AudioToolbox/AudioToolbox.h>
+#import <AVFoundation/AVFoundation.h>
+
+#include "tsk_debug.h"
+
+static tsk_bool_t g_apple_initialized = tsk_false;
+static tsk_bool_t g_apple_audio_enabled = tsk_false;
+
+#if TARGET_OS_IPHONE
+// deprecated in iOS7+
+// Interruption callback for the legacy (pre-iOS7) C AudioSession API,
+// registered in tdav_apple_init(). Only logs the interruption state;
+// actual handling is left to the application (see tdav_apple_init()).
+static void _AudioSessionInterruptionListener(void * inClientData, UInt32 inInterruptionState)
+{
+ switch(inInterruptionState) {
+ case kAudioSessionBeginInterruption:
+ {
+ TSK_DEBUG_INFO("_AudioSessionInterruptionListener:kAudioSessionBeginInterruption");
+ break;
+ }
+ case kAudioSessionEndInterruption:
+ {
+ TSK_DEBUG_INFO("_AudioSessionInterruptionListener:kAudioSessionEndInterruption");
+ break;
+ }
+ default:
+ {
+ TSK_DEBUG_INFO("_AudioSessionInterruptionListener:%u", (unsigned int)inInterruptionState);
+ break;
+ }
+ }
+}
+#endif
+
+/**
+* Initializes the Apple audio layer: configures the shared audio session
+* for play-and-record with mixing with other apps. On iOS 7+ uses
+* AVAudioSession; on older iOS uses the deprecated C AudioSession API.
+* No-op when already initialized.
+* @return 0 on success, otherwise a negative error code.
+*/
+int tdav_apple_init()
+{
+ if (g_apple_initialized) {
+ return 0;
+ }
+ // initialize audio session
+#if TARGET_OS_IPHONE
+ if ([[[UIDevice currentDevice] systemVersion] doubleValue] >= 7.0) {
+ // Listening to interruption must be done in your AppDelegate:
+ // https://code.google.com/p/idoubs/source/browse/branches/2.0/ios-idoubs/Classes/idoubs2AppDelegate.mm?r=264#433
+
+ /* Set the audio session category to allow for playback/recording and mixing */
+ NSError *setCategoryError = nil;
+ BOOL setCategorySuccess = [[AVAudioSession sharedInstance]
+ setCategory:AVAudioSessionCategoryPlayAndRecord
+ withOptions: AVAudioSessionCategoryOptionMixWithOthers
+ error:&setCategoryError];
+
+ if (setCategorySuccess == NO) {
+ // fixed typo in the error message: "catrgory" -> "category"
+ TSK_DEBUG_ERROR("Failed to set audio category. Error code=%ld", (long)[setCategoryError code]);
+ return -1;
+ }
+ }
+ else {
+ OSStatus status;
+ status = AudioSessionInitialize(NULL, NULL, _AudioSessionInterruptionListener, NULL);
+ if (status) {
+ TSK_DEBUG_ERROR("AudioSessionInitialize() failed with status code=%d", (int32_t)status);
+ return -1;
+ }
+
+ // enable record/playback
+ UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
+ status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
+ if(status){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_AudioCategory) failed with status code=%d", (int32_t)status);
+ return -2;
+ }
+
+ // allow mixing
+ UInt32 allowMixing = true;
+ status = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers, sizeof(allowMixing), &allowMixing);
+ if(status){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers) failed with status code=%d", (int32_t)status);
+ return -3;
+ }
+ }
+#endif
+ g_apple_initialized = tsk_true;
+ return 0;
+}
+
+/**
+* Activates the shared audio session (initializing the Apple layer first if
+* needed). If another app currently owns the session (insufficient priority /
+* other audio playing), activation is deferred and 0 is returned: the caller
+* is expected to retry on the interruption-ended notification.
+* @return 0 on success or deferral, otherwise a negative error code.
+*/
+int tdav_apple_enable_audio()
+{
+ if (g_apple_audio_enabled) {
+ return 0;
+ }
+ int ret = 0;
+ if (!g_apple_initialized) {
+ if ((ret = tdav_apple_init())) {
+ return ret;
+ }
+ }
+#if TARGET_OS_IPHONE
+ if ([[[UIDevice currentDevice] systemVersion] doubleValue] >= 7.0) {
+ NSError *activationError = nil;
+ BOOL activationResult = [[AVAudioSession sharedInstance] setActive: YES error: &activationError];
+
+ if (activationResult == NO) {
+ NSInteger code = [activationError code];
+ if (code == AVAudioSessionErrorInsufficientPriority || [[AVAudioSession sharedInstance] isOtherAudioPlaying]) {
+ TSK_DEBUG_WARN("Delaying audio initialization because another app is using it");
+ return 0; // application is interrupted -> wait for notification -> not error
+ }
+ else {
+ // fixed typo in the error message: "faile" -> "failed"
+ TSK_DEBUG_ERROR("AVAudioSession.setActive(YES) failed with error code: %ld", (long)code);
+ return -1;
+ }
+ }
+ }
+ else {
+ // enable audio session
+ OSStatus status = AudioSessionSetActive(true);
+ if (status) {
+ TSK_DEBUG_ERROR("AudioSessionSetActive(true) failed with status code=%d", (int32_t)status);
+ ret = -1;
+ }
+ }
+#endif /* TARGET_OS_IPHONE */
+ g_apple_audio_enabled = (ret == 0) ? tsk_true : tsk_false;
+ return ret;
+}
+
+/**
+* Deinitializes the Apple audio layer. Intentionally does NOT deactivate the
+* shared audio session, as other code may still be using it.
+* NOTE(review): g_apple_initialized/g_apple_audio_enabled are not reset here —
+* confirm that re-initialization after deinit is not expected.
+* @return 0 (always).
+*/
+int tdav_apple_deinit()
+{
+ // maybe other code use the session
+ // OSStatus status = AudioSessionSetActive(false);
+ return 0;
+}
+
+#endif /* TDAV_UNDER_APPLE */
+
diff --git a/tinyDAV/src/tdav_session_av.c b/tinyDAV/src/tdav_session_av.c
new file mode 100644
index 0000000..8c15c18
--- /dev/null
+++ b/tinyDAV/src/tdav_session_av.c
@@ -0,0 +1,2474 @@
+/*
+ * Copyright (C) 2012-2013 Doubango Telecom <http://www.doubango.org>
+ * Copyright (C) 2012 Diop Mamadou Ibrahima
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_session_av.c
+ * @brief Audio/Video/T.140 base Session plugin
+ */
+// http://c-faq.com/ansi/constmismatch.html: to be checked for warnings
+
+#include "tinydav/tdav_session_av.h"
+#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+#include "tinydav/codecs/fec/tdav_codec_red.h"
+#include "tinydav/codecs/fec/tdav_codec_ulpfec.h"
+
+#include "tinysdp/headers/tsdp_header_S.h"
+#include "tinysdp/headers/tsdp_header_B.h"
+
+#include "tinyrtp/trtp_manager.h"
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+#if HAVE_SRTP
+# include "tinyrtp/trtp_srtp.h"
+#endif
+
+#include "ice/tnet_ice_ctx.h"
+#include "ice/tnet_ice_candidate.h"
+
+#include "tinymedia/tmedia_consumer.h"
+#include "tinymedia/tmedia_producer.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tls/tnet_tls.h"
+#include "tls/tnet_dtls.h"
+
+#include <math.h> /* log10 */
+#include <limits.h> /* INT_MAX */
+#include <ctype.h> /* isspace */
+
+
+#if HAVE_SRTP
+static const tsk_bool_t __have_libsrtp = tsk_true;
+#else
+static const tsk_bool_t __have_libsrtp = tsk_false;
+#endif
+
+#define TDAV_IS_DTMF_CODEC(codec) (codec && TMEDIA_CODEC((codec))->plugin == tdav_codec_dtmf_plugin_def_t)
+#define TDAV_IS_ULPFEC_CODEC(codec) (codec && TMEDIA_CODEC((codec))->plugin == tdav_codec_ulpfec_plugin_def_t)
+#define TDAV_IS_RED_CODEC(codec) (codec && TMEDIA_CODEC((codec))->plugin == tdav_codec_red_plugin_def_t)
+#define TDAV_IS_VIDEO_CODEC(codec) (codec && TMEDIA_CODEC((codec))->plugin->type & tmedia_video)
+
+#if !defined(TDAV_DFAULT_FP_HASH)
+#define TDAV_DFAULT_FP_HASH tnet_dtls_hash_type_sha256
+#endif /* TDAV_DFAULT_FP_HASH */
+#if !defined(TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT)
+#define TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT 0
+#endif /* TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT */
+
+// rfc5763 - The endpoint MUST NOT use the connection attribute defined in [RFC4145].
+#if !defined(TDAV_DTLS_CONNECTION_ATT)
+# define TDAV_DTLS_CONNECTION_ATT 0
+#endif
+
+static void* TSK_STDCALL _tdav_session_av_error_async_thread(void* usrdata);
+static int _tdav_session_av_raise_error_async(struct tdav_session_av_s* self, tsk_bool_t is_fatal, const char* reason);
+#if HAVE_SRTP
+static int _tdav_session_av_srtp_dtls_cb(const void* usrdata, enum trtp_srtp_dtls_event_type_e type, const char* reason);
+#endif /* HAVE_SRTP */
+static int _tdav_session_av_red_cb(const void* usrdata, const struct trtp_rtp_packet_s* packet);
+static int _tdav_session_av_dtls_set_remote_setup(struct tdav_session_av_s* self, tnet_dtls_setup_t setup, tsk_bool_t connection_new, tsk_bool_t is_ro_null);
+
+#define SDP_CAPS_COUNT_MAX 0x1F
+#define SDP_DECLARE_TAG int32_t tag // [1 - *]
+#define SDP_TAG(self) ((self) ? *((int32_t*)(self)) : 0)
+
+typedef enum RTP_PROFILE_E
+{
+ RTP_PROFILE_NONE = 0x00,
+
+ RTP_PROFILE_AVP = (1 << 0),
+ RTP_PROFILE_AVPF = (1 << 1),
+
+ RTP_PROFILE_SECURE = (1 << 2),
+ RTP_PROFILE_SECURE_SDES = (RTP_PROFILE_SECURE | (1 << 3)),
+ RTP_PROFILE_SECURE_DTLS = (RTP_PROFILE_SECURE | (1 << 4)),
+
+ RTP_PROFILE_SAVP = (RTP_PROFILE_AVP | RTP_PROFILE_SECURE_SDES),
+ RTP_PROFILE_SAVPF = (RTP_PROFILE_AVPF | RTP_PROFILE_SECURE_SDES),
+
+ RTP_PROFILE_UDP_TLS_RTP_SAVP = (RTP_PROFILE_AVP | RTP_PROFILE_SECURE_DTLS),
+ RTP_PROFILE_UDP_TLS_RTP_SAVPF = (RTP_PROFILE_AVPF | RTP_PROFILE_SECURE_DTLS)
+}
+RTP_PROFILE_T;
+
+typedef struct RTP_PROFILE_XS
+{
+ enum RTP_PROFILE_E type;
+ const char* name;
+}
+RTP_PROFILE_XT;
+
+static const RTP_PROFILE_XT RTP_PROFILES[] =
+{
+ { RTP_PROFILE_AVP, "RTP/AVP" },
+ { RTP_PROFILE_AVPF, "RTP/AVPF" },
+ { RTP_PROFILE_SAVP, "RTP/SAVP" },
+ { RTP_PROFILE_SAVPF, "RTP/SAVPF" },
+ { RTP_PROFILE_UDP_TLS_RTP_SAVP, "UDP/TLS/RTP/SAVP" },
+ { RTP_PROFILE_UDP_TLS_RTP_SAVPF, "UDP/TLS/RTP/SAVPF" },
+};
+#define RTP_PROFILES_COUNT (sizeof(RTP_PROFILES) / sizeof(RTP_PROFILES[0]))
+
+typedef struct sdp_acap_xs
+{
+ SDP_DECLARE_TAG;
+ unsigned optional:1; // "e.g. [2]"
+ unsigned or:1; // "e.g.|2"
+ const char* value;
+}
+sdp_acap_xt;
+typedef sdp_acap_xt sdp_acaps_xt[SDP_CAPS_COUNT_MAX];
+
+typedef struct sdp_tcap_xs
+{
+ SDP_DECLARE_TAG;
+ RTP_PROFILE_T profile;
+}
+sdp_tcap_xt;
+typedef sdp_tcap_xt sdp_tcaps_xt[SDP_CAPS_COUNT_MAX];
+
+typedef struct sdp_pcfg_xs
+{
+ SDP_DECLARE_TAG;
+ sdp_tcap_xt tcap;
+ sdp_acaps_xt acaps;
+}
+sdp_pcfg_xt;
+typedef sdp_pcfg_xt sdp_acfg_xt;
+typedef sdp_pcfg_xt sdp_pcfgs_xt[SDP_CAPS_COUNT_MAX];
+typedef tsk_object_t sdp_headerM_Or_Message; /* tsdp_header_M_t or tsdp_message_t */
+
+#define _sdp_reset(self) if((self)) memset((self), 0, sizeof(*(self)));
+#define _sdp_pcfgs_reset(self) _sdp_reset((self))
+#define _sdp_acfgs_reset(self) _sdp_reset((self))
+#define _sdp_pcfg_reset(self) _sdp_reset((self))
+#define _sdp_acfg_reset(self) _sdp_reset((self))
+#define _sdp_tcaps_reset(self) _sdp_reset((self))
+#define _sdp_acaps_reset(self) _sdp_reset((self))
+#define _sdp_integer_length(self) ((self) ? ((int32_t)log10(abs(self)) + 1) : 1)
+#define _sdp_str_index_of(str, sub_str) tsk_strindexOf((str), tsk_strlen((str)), sub_str)
+#define _sdp_str_starts_with(str, sub_str) (_sdp_str_index_of((str), (sub_str)) == 0)
+#define _sdp_str_contains(str, sub_str) (_sdp_str_index_of((str), (sub_str)) != -1)
+#define _SDP_DECLARE_INDEX_OF(name) \
+static int32_t _sdp_##name##s_indexof(const sdp_##name##_xt (*name##s)[SDP_CAPS_COUNT_MAX], int32_t tag) \
+{ \
+if(name##s){ \
+int32_t i; \
+for(i = 0; i < SDP_CAPS_COUNT_MAX; ++i){ \
+if((*name##s)[i].tag == tag){ \
+return i; \
+} \
+} \
+} \
+return -1; \
+}
+
+// Storage for SDP capability-negotiation state (potential/accepted configs).
+typedef struct tdav_sdp_caps_s
+{
+ TSK_DECLARE_OBJECT;
+
+ sdp_pcfgs_xt local; // potential configurations — presumably ours, written to the local SDP (see _sdp_pcfgs_to_sdp())
+ sdp_pcfgs_xt remote; // potential configurations parsed from the remote SDP (see _sdp_pcfgs_from_sdp())
+ sdp_acfg_xt acfg; // accepted configuration (see _sdp_acfg_to_sdp())
+}
+tdav_sdp_caps_t;
+
+static tdav_sdp_caps_t* tdav_sdp_caps_create();
+
+static const tsdp_header_A_t* _sdp_findA_at(const sdp_headerM_Or_Message* sdp, const char* field, tsk_size_t index);
+static int _sdp_add_headerA(sdp_headerM_Or_Message* sdp, const char* field, const char* value);
+static RTP_PROFILE_T _sdp_profile_from_string(const char* profile);
+static const char* _sdp_profile_to_string(RTP_PROFILE_T profile);
+static int32_t _sdp_acaps_indexof(const sdp_acap_xt (*acaps)[SDP_CAPS_COUNT_MAX], int32_t tag);
+static const sdp_acap_xt* _sdp_acaps_find_by_field(const sdp_acap_xt (*acaps)[SDP_CAPS_COUNT_MAX], const char* field, int32_t index);
+static int _sdp_acaps_from_sdp(const sdp_headerM_Or_Message* sdp, sdp_acap_xt (*acaps)[SDP_CAPS_COUNT_MAX], tsk_bool_t reset);
+static int32_t _sdp_tcaps_indexof(const sdp_tcap_xt (*tcaps)[SDP_CAPS_COUNT_MAX], int32_t tag);
+static int _sdp_tcaps_from_sdp(const sdp_headerM_Or_Message* sdp, sdp_tcap_xt (*tcaps)[SDP_CAPS_COUNT_MAX], tsk_bool_t reset);
+static int _sdp_acfg_to_sdp(sdp_headerM_Or_Message* sdp, const sdp_acfg_xt *acfg);
+static int _sdp_pcfgs_from_sdp(const sdp_headerM_Or_Message* sdp, sdp_acap_xt (*acaps)[SDP_CAPS_COUNT_MAX], sdp_tcap_xt (*tcaps)[SDP_CAPS_COUNT_MAX], sdp_pcfg_xt (*pcfgs)[SDP_CAPS_COUNT_MAX], tsk_bool_t reset);
+static int _sdp_pcfgs_to_sdp(sdp_headerM_Or_Message* sdp, const sdp_pcfg_xt (*pcfg)[SDP_CAPS_COUNT_MAX]);
+static int _sdp_pcfg_ensure(sdp_headerM_Or_Message* sdp, const sdp_pcfg_xt* pcfg);
+static int _sdp_pcfgs_cat(const sdp_pcfg_xt (*pcfgs_src)[SDP_CAPS_COUNT_MAX], sdp_pcfg_xt (*pcfgs_dst)[SDP_CAPS_COUNT_MAX]);
+
+
+/**
+* Initializes the base audio/video session: loads media defaults (profile,
+* RTCP/RTCP-MUX, AVPF mode, bandwidth limits, SRTP type/mode...), creates
+* the consumer/producer pair and the SDP capability storage.
+* @param self the session to initialize.
+* @param media_type the media type (audio, video, bfcp-video...).
+* @return 0 on success, otherwise a negative error code.
+*/
+int tdav_session_av_init(tdav_session_av_t* self, tmedia_type_t media_type)
+{
+ uint64_t session_id;
+ tmedia_session_t* base = TMEDIA_SESSION(self);
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (!base->initialized) {
+ int ret = tmedia_session_init(base, media_type);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ /* base::init(): called by tmedia_session_create() */
+
+ self->media_type = media_type;
+ self->media_profile = tmedia_defaults_get_profile();
+ self->use_rtcp = tmedia_defaults_get_rtcp_enabled();
+ self->use_rtcpmux = tmedia_defaults_get_rtcpmux_enabled();
+ self->avpf_mode_set = self->avpf_mode_neg = tmedia_defaults_get_avpf_mode();
+ self->fps = -1; // use what is negotiated by the codec unless overrided by the user
+ self->pref_size = tmedia_defaults_get_pref_video_size(); // for the encoder
+ // bandwidth limits only apply to video (plain or BFCP); INT_MAX or <=0 means undefined
+ self->bandwidth_max_upload_kbps = ((media_type & tmedia_video || (media_type & tmedia_bfcp_video) == tmedia_bfcp_video) ? tmedia_defaults_get_bandwidth_video_upload_max() : INT_MAX); // INT_MAX or <=0 means undefined
+ self->bandwidth_max_download_kbps = ((media_type & tmedia_video || (media_type & tmedia_bfcp_video) == tmedia_bfcp_video) ? tmedia_defaults_get_bandwidth_video_download_max() : INT_MAX); // INT_MAX or <=0 means undefined
+ self->congestion_ctrl_enabled = tmedia_defaults_get_congestion_ctrl_enabled(); // whether to enable draft-alvestrand-rtcweb-congestion-03 and draft-alvestrand-rmcat-remb-01
+#if HAVE_SRTP
+ // this is the default value and can be updated by the user using "session_set('srtp-mode', mode_e)"
+ self->srtp_type = (self->media_profile == tmedia_profile_rtcweb) ? (tsk_strnullORempty(TMEDIA_SESSION(self)->dtls.file_pbk) ? tmedia_srtp_type_sdes : tmedia_srtp_type_dtls) : tmedia_defaults_get_srtp_type();
+ self->srtp_mode = (self->media_profile == tmedia_profile_rtcweb) ? tmedia_srtp_mode_mandatory : tmedia_defaults_get_srtp_mode();
+ self->use_srtp = (self->srtp_mode == tmedia_srtp_mode_mandatory); // if optional -> negotiate
+ // remove DTLS-SRTP option if not supported
+ if((self->srtp_type & tmedia_srtp_type_dtls) && !tnet_dtls_is_srtp_supported()){
+ TSK_DEBUG_WARN("DTLS-SRTP enabled but not supported. Please rebuild the code with this option enabled (requires OpenSSL 1.0.1+)");
+ if(!(self->srtp_type &= ~tmedia_srtp_type_dtls)){
+ // only DTLS-SRTP was enabled
+ self->srtp_mode = tmedia_srtp_mode_none;
+ self->use_srtp = tsk_false;
+ }
+ }
+ //!\ DTLS-SRTP requires certificates but do not check right now as it could be defined later
+#endif
+
+ tsk_safeobj_init(self);
+
+ // session id
+ if (!(session_id = TMEDIA_SESSION(self)->id)) { // set the session id if not already done
+ TMEDIA_SESSION(self)->id = session_id = tmedia_session_get_unique_id();
+ }
+ // consumer (creation failure is logged but not fatal)
+ TSK_OBJECT_SAFE_FREE(self->consumer);
+ if (!(self->consumer = tmedia_consumer_create((self->media_type & tmedia_video || (self->media_type & tmedia_bfcp_video) == tmedia_bfcp_video) ? tmedia_video : tmedia_audio, session_id))){ // get an audio (or video) consumer and ignore "bfcp" part
+ TSK_DEBUG_ERROR("Failed to create consumer for media type = %d", self->media_type);
+ }
+ // producer (creation failure is logged but not fatal)
+ TSK_OBJECT_SAFE_FREE(self->producer);
+ if (!(self->producer = tmedia_producer_create(self->media_type, session_id))){
+ TSK_DEBUG_ERROR("Failed to create producer for media type = %d", self->media_type);
+ }
+
+ // sdp caps
+ TSK_OBJECT_SAFE_FREE(self->sdp_caps);
+ if (!(self->sdp_caps = tdav_sdp_caps_create())) {
+ TSK_DEBUG_ERROR("Failed to create SDP caps");
+ return -1;
+ }
+
+ // pt mapping (when bypassing is enabled)
+ self->pt_map.local = self->pt_map.remote = self->pt_map.neg = -1;
+
+ return 0;
+}
+
+/**
+* Sets a parameter on the base audio/video session.
+* Resolution order: base tmedia session first, then the wrapped
+* consumer/producer, then the session-level keys handled here
+* (network addresses, SRTP, RTCP, AVPF, bandwidth, ICE/NAT contexts, SDP).
+* @param self the session to update.
+* @param param the key/value parameter to apply.
+* @return tsk_true if the parameter was consumed, tsk_false otherwise.
+*/
+tsk_bool_t tdav_session_av_set(tdav_session_av_t* self, const tmedia_param_t* param)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_false;
+ }
+
+ // try with base class first
+ if(tmedia_session_set_2(TMEDIA_SESSION(self), param)){
+ return tsk_true;
+ }
+
+ if(param->plugin_type == tmedia_ppt_consumer && self->consumer){
+ return (tmedia_consumer_set(self->consumer, param) == 0);
+ }
+ else if(param->plugin_type == tmedia_ppt_producer && self->producer){
+ return (tmedia_producer_set(self->producer, param) == 0);
+ }
+ else if(param->plugin_type == tmedia_ppt_session){
+ if(param->value_type == tmedia_pvt_pchar){
+ if(tsk_striequals(param->key, "remote-ip")){
+ if(param->value){
+ tsk_strupdate(&self->remote_ip, (const char*)param->value);
+ return tsk_true;
+ }
+ }
+ else if(tsk_striequals(param->key, "local-ip")){
+ tsk_strupdate(&self->local_ip, (const char*)param->value);
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "local-ipver")){
+ self->use_ipv6 = tsk_striequals(param->value, "ipv6");
+ return tsk_true;
+ }
+ }
+ else if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "srtp-mode")){
+#if HAVE_SRTP
+ self->srtp_mode = (tmedia_srtp_mode_t)TSK_TO_INT32((uint8_t*)param->value);
+ if(self->rtp_manager){
+ // propagate to the RTP manager if it already exists
+ trtp_manager_set_srtp_type_local(self->rtp_manager, self->srtp_type, self->srtp_mode);
+ }
+#else
+ TSK_DEBUG_INFO("'srtp-mode' param ignored beacuse SRTP not enabled. Please rebuild the source code with this option.");
+#endif /* HAVE_SRTP */
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "rtp-ssrc")){
+ self->rtp_ssrc = *((uint32_t*)param->value);
+ if(self->rtp_manager && self->rtp_ssrc){
+ self->rtp_manager->rtp.ssrc.local = self->rtp_ssrc;
+ }
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "rtcp-enabled")){
+ self->use_rtcp = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "rtcpmux-enabled")){
+ self->use_rtcpmux = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "avpf-mode")){
+ self->avpf_mode_set = (tmedia_mode_t)TSK_TO_INT32((uint8_t*)param->value);
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "webrtc2sip-mode-enabled")){
+ self->is_webrtc2sip_mode_enabled = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ return tsk_true;
+ }
+ else if (tsk_striequals(param->key, "bandwidth-max-upload")) {
+ self->bandwidth_max_upload_kbps = TSK_TO_INT32((uint8_t*)param->value);
+ return tsk_true;
+ }
+ else if (tsk_striequals(param->key, "bandwidth-max-download")) {
+ self->bandwidth_max_download_kbps = TSK_TO_INT32((uint8_t*)param->value);
+ return tsk_true;
+ }
+ else if (tsk_striequals(param->key, "fps")) {
+ self->fps = TSK_TO_INT32((uint8_t*)param->value);
+ return tsk_true;
+ }
+ else if (tsk_striequals(param->key, "pref-size")) {
+ self->pref_size = (tmedia_pref_video_size_t)TSK_TO_INT32((uint8_t*)param->value);
+ return tsk_true;
+ }
+ }
+ else if(param->value_type == tmedia_pvt_pobject){
+ // object-valued keys: take a new reference, releasing any previous one
+ if(tsk_striequals(param->key, "natt-ctx")){
+ TSK_OBJECT_SAFE_FREE(self->natt_ctx);
+ self->natt_ctx = tsk_object_ref(param->value);
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "ice-ctx")){
+ TSK_OBJECT_SAFE_FREE(self->ice_ctx);
+ self->ice_ctx = tsk_object_ref(param->value);
+ if(self->rtp_manager){
+ trtp_manager_set_ice_ctx(self->rtp_manager, self->ice_ctx);
+ }
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "remote-sdp-message")){
+ TSK_OBJECT_SAFE_FREE(self->remote_sdp);
+ self->remote_sdp = (struct tsdp_message_s*)tsk_object_ref(param->value);
+ return tsk_true;
+ }
+ else if(tsk_striequals(param->key, "local-sdp-message")){
+ TSK_OBJECT_SAFE_FREE(self->local_sdp);
+ self->local_sdp = (struct tsdp_message_s*)tsk_object_ref(param->value);
+ return tsk_true;
+ }
+ }
+ }
+
+ return tsk_false;
+}
+
+/**
+* Gets a session-level parameter value.
+* Supported keys: "codecs-negotiated" (int32 bitmask of negotiated codec ids),
+* "srtp-enabled" (int8 flag), "producer" (object reference — caller must
+* release the returned reference).
+* @param self the session to query.
+* @param param in/out parameter; "param->value" receives the result.
+* @return tsk_true if the key was handled, tsk_false otherwise.
+*/
+tsk_bool_t tdav_session_av_get(tdav_session_av_t* self, tmedia_param_t* param)
+{
+ if(!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_false;
+ }
+
+ if (param->plugin_type == tmedia_ppt_session){
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "codecs-negotiated")) { // negotiated codecs
+ tmedia_codecs_L_t* neg_codecs = tsk_object_ref(TMEDIA_SESSION(self)->neg_codecs);
+ if (neg_codecs) {
+ const tsk_list_item_t* item;
+ tsk_list_foreach(item, neg_codecs) {
+ ((int32_t*)param->value)[0] |= TMEDIA_CODEC(item->data)->id;
+ }
+ TSK_OBJECT_SAFE_FREE(neg_codecs);
+ }
+ return tsk_true;
+ }
+ else if (tsk_striequals(param->key, "srtp-enabled")) {
+#if HAVE_SRTP
+ if (self->rtp_manager) {
+ ((int8_t*)param->value)[0] = self->use_srtp ? 1 : 0;
+ return tsk_true;
+ }
+#else
+ ((int8_t*)param->value)[0] = 0;
+ TSK_DEBUG_INFO("Ignoring parameter 'srtp-enabled' because SRTP not supported. Please rebuild the source code with this option enabled.");
+ return tsk_true;
+#endif /* HAVE_SRTP */
+ }
+ }
+ else if (param->value_type == tmedia_pvt_pobject) {
+ if (tsk_striequals(param->key, "producer")) {
+ *((tsk_object_t**)param->value) = tsk_object_ref(self->producer); // up to the caller to release the object
+ return tsk_true;
+ }
+ }
+ }
+
+ return tsk_false;
+}
+
+/**
+* Applies session-level settings (max bandwidth, fps, preferred video size)
+* to a freshly negotiated encoder codec.
+* @param self the session holding the user/default settings.
+* @param encoder the codec to configure.
+* @return 0 on success, otherwise a negative error code.
+*/
+int tdav_session_av_init_encoder(tdav_session_av_t* self, struct tmedia_codec_s* encoder)
+{
+ tsk_bool_t is_video;
+ if (!self || !encoder) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ encoder->bandwidth_max_upload = self->bandwidth_max_upload_kbps;
+ encoder->bandwidth_max_download = self->bandwidth_max_download_kbps;
+ is_video = ((encoder->type & tmedia_video) || (encoder->type & tmedia_bfcp_video)) ? tsk_true : tsk_false;
+ if (is_video) {
+ tmedia_codec_video_t* video_codec = TMEDIA_CODEC_VIDEO(encoder);
+ // a user-defined fps overrides whatever the codec negotiated
+ if (self->fps > 0) {
+ video_codec->out.fps = self->fps;
+ }
+ // update the output size only when the preferred size changed and
+ // maps to a known width/height pair
+ if (video_codec->pref_size != self->pref_size) {
+ unsigned w, h;
+ if (tmedia_video_get_size(self->pref_size, &w, &h) == 0) {
+ video_codec->pref_size = self->pref_size;
+ video_codec->out.width = w;
+ video_codec->out.height = h;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+* Prepares the session for start: (re)computes the SRTP type for the rtcweb
+* profile, creates and configures the RTP manager (port range, web proxy,
+* DTLS certificates, NAT context, local SSRC) and prepares it.
+* The consumer/producer are prepared later, in the start() step.
+* @param self the session to prepare.
+* @return 0 on success, otherwise a non-zero error code.
+*/
+int tdav_session_av_prepare(tdav_session_av_t* self)
+{
+ int ret = 0;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* SRTPType */
+#if HAVE_SRTP
+ // Now that SSL certs are defined update SRTPType before creating the RTP manager
+ if (self->media_profile == tmedia_profile_rtcweb) {
+ self->srtp_type = tsk_strnullORempty(TMEDIA_SESSION(self)->dtls.file_pbk) ? tmedia_srtp_type_sdes : tmedia_srtp_type_dtls;
+ }
+#endif
+
+ /* set local port */
+ if (!self->rtp_manager){
+ self->rtp_manager = self->ice_ctx ? trtp_manager_create_2(self->ice_ctx, self->srtp_type, self->srtp_mode)
+ : trtp_manager_create(self->use_rtcp, self->local_ip, self->use_ipv6, self->srtp_type, self->srtp_mode);
+ }
+ if (self->rtp_manager) {
+ const char *webproxy_type = tsk_null, *webproxy_host = tsk_null, *webproxy_login = tsk_null, *webproxy_password = tsk_null;
+ unsigned short webproxy_port = 0;
+ // Port range
+ if ((ret = trtp_manager_set_port_range(self->rtp_manager, tmedia_defaults_get_rtp_port_range_start(), tmedia_defaults_get_rtp_port_range_stop()))) {
+ return ret;
+ }
+ // WebProxy
+ if ((ret = trtp_manager_set_proxy_auto_detect(self->rtp_manager, tmedia_defaults_get_webproxy_auto_detect()))) {
+ return ret;
+ }
+ // NOTE(review): the next two return values are deliberately ignored
+ // (empty bodies) — presumably proxy setup is best-effort; confirm intent
+ if ((ret = tmedia_defaults_get_webproxy_info(&webproxy_type, &webproxy_host, &webproxy_port, &webproxy_login, &webproxy_password))) {
+
+ }
+ if ((ret = trtp_manager_set_proxy_info(self->rtp_manager, tnet_proxy_type_from_string(webproxy_type), webproxy_host, webproxy_port, webproxy_login, webproxy_password))) {
+ }
+
+#if HAVE_SRTP
+ if (tsk_strnullORempty(TMEDIA_SESSION(self)->dtls.file_pbk)) {
+ // DTLS-SRTP requires certificates
+ if(self->srtp_type & tmedia_srtp_type_dtls){
+ TSK_DEBUG_WARN("DTLS-SRTP requested but no SSL certificates provided, disabling this option :(");
+ if(!(self->srtp_type &= ~tmedia_srtp_type_dtls)){
+ // only DTLS-SRTP was enabled
+ self->srtp_mode = tmedia_srtp_mode_none;
+ self->use_srtp = tsk_false;
+ // update rtpmanager
+ ret = trtp_manager_set_srtp_type_local(self->rtp_manager, self->srtp_type, self->srtp_mode);
+ }
+ }
+ }
+
+ if ((self->srtp_type & tmedia_srtp_type_dtls) && (self->srtp_mode == tmedia_srtp_mode_optional || self->srtp_mode == tmedia_srtp_mode_mandatory)){
+ if((ret = trtp_manager_set_dtls_certs(self->rtp_manager, TMEDIA_SESSION(self)->dtls.file_ca, TMEDIA_SESSION(self)->dtls.file_pbk, TMEDIA_SESSION(self)->dtls.file_pvk, TMEDIA_SESSION(self)->dtls.verify))){
+ return ret;
+ }
+ }
+#endif /* HAVE_SRTP */
+ if((ret = trtp_manager_prepare(self->rtp_manager))){
+ return ret;
+ }
+ if(self->natt_ctx){
+ if((ret = trtp_manager_set_natt_ctx(self->rtp_manager, self->natt_ctx))){
+ return ret;
+ }
+ }
+ if(self->rtp_ssrc){
+ self->rtp_manager->rtp.ssrc.local = self->rtp_ssrc;
+ }
+ }
+
+
+ /* SRTP */
+#if HAVE_SRTP
+ {
+ // nothing left to do here: SRTP was already configured above (placeholder block)
+ }
+#endif
+
+ /* Consumer will be prepared in tdav_session_audio_start() */
+ /* Producer will be prepared in tdav_session_audio_start() */
+
+ return ret;
+}
+
+int tdav_session_av_start(tdav_session_av_t* self, const tmedia_codec_t* best_codec)
+{
+ int ret;
+ if(!self || !best_codec){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // RED codec
+ TSK_OBJECT_SAFE_FREE(self->red.codec);
+ self->red.payload_type = 0;
+ if((self->red.codec = tsk_object_ref((tsk_object_t*)tdav_session_av_get_red_codec(self)))){
+ self->red.payload_type = atoi(self->red.codec->neg_format);
+ if(!TMEDIA_CODEC(self->red.codec)->opened){
+ if((ret = tmedia_codec_open(self->red.codec))){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", self->red.codec->plugin->desc);
+ return ret;
+ }
+ }
+ // set RED callback (unencapsulated data)
+ ret = tdav_codec_red_set_callback((struct tdav_codec_red_s*)self->red.codec, _tdav_session_av_red_cb, self);
+ }
+
+ // ULPFEC
+ TSK_OBJECT_SAFE_FREE(self->ulpfec.codec);
+ self->ulpfec.payload_type = 0;
+ if((self->ulpfec.codec = tsk_object_ref((tsk_object_t*)tdav_session_av_get_ulpfec_codec(self)))){
+ self->ulpfec.payload_type = atoi(self->ulpfec.codec->neg_format);
+ if(!TMEDIA_CODEC(self->ulpfec.codec)->opened){
+ if((ret = tmedia_codec_open(self->ulpfec.codec))){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", self->ulpfec.codec->plugin->desc);
+ return ret;
+ }
+ }
+ }
+
+ // Check if "RTCP-NACK", "RTC-FIR", "RTCP-GOOG-REMB".... are supported by the selected encoder
+ self->is_fb_fir_neg = self->is_fb_nack_neg = self->is_fb_googremb_neg = tsk_false;
+ if (TMEDIA_SESSION(self)->M.ro) {
+ // a=rtcp-fb:* ccm fir
+ // a=rtcp-fb:* nack
+ // a=rtcp-fb:* goog-remb
+ char attr_fir[256], attr_nack[256], attr_goog_remb[256];
+ int index = 0;
+ const tsdp_header_A_t* A;
+
+ sprintf(attr_fir, "%s ccm fir", best_codec->neg_format);
+ sprintf(attr_nack, "%s nack", best_codec->neg_format);
+ sprintf(attr_goog_remb, "%s goog-remb", best_codec->neg_format);
+
+ while ((A = tsdp_header_M_findA_at(TMEDIA_SESSION(self)->M.ro, "rtcp-fb", index++))) {
+ if (!self->is_fb_fir_neg) {
+ self->is_fb_fir_neg = (tsk_striequals(A->value, "* ccm fir") || tsk_striequals(A->value, attr_fir));
+ }
+ if (!self->is_fb_nack_neg) {
+ self->is_fb_nack_neg = (tsk_striequals(A->value, "* nack") || tsk_striequals(A->value, attr_nack));
+ }
+ if (!self->is_fb_googremb_neg) {
+ self->is_fb_googremb_neg = (tsk_striequals(A->value, "* goog-remb") || tsk_striequals(A->value, attr_goog_remb));
+ }
+ }
+ }
+
+ if (self->rtp_manager) {
+ int ret;
+ tmedia_param_t* media_param = tsk_null;
+ static const int32_t __ByPassIsYes = 1;
+ static const int32_t __ByPassIsNo = 0;
+ /* RTP/RTCP manager: use latest information. */
+
+ // set callbacks
+#if HAVE_SRTP
+ ret = trtp_manager_set_dtls_callback(self->rtp_manager, self, _tdav_session_av_srtp_dtls_cb);
+#endif /* HAVE_SRTP */
+
+ // network information will be updated when the RTP manager starts if ICE is enabled
+ ret = trtp_manager_set_rtp_remote(self->rtp_manager, self->remote_ip, self->remote_port);
+ self->rtp_manager->use_rtcpmux = self->use_rtcpmux;
+ ret = trtp_manager_set_payload_type(self->rtp_manager, best_codec->neg_format ? atoi(best_codec->neg_format) : atoi(best_codec->format));
+ {
+ int32_t bandwidth_max_upload_kbps = self->bandwidth_max_upload_kbps;
+ int32_t bandwidth_max_download_kbps = self->bandwidth_max_download_kbps;
+ if((self->media_type & tmedia_video || (self->media_type & tmedia_bfcp_video) == tmedia_bfcp_video)){
+ if(self->congestion_ctrl_enabled){
+ const tmedia_codec_t* best_codec = tdav_session_av_get_best_neg_codec(self); // use for encoding for sure and probably for decoding
+ if(TDAV_IS_VIDEO_CODEC(best_codec)){
+ // the up bandwidth will be updated once the decode the first frame as the current values (width, height, fps) are not really correct and based on the SDP negotiation
+ bandwidth_max_download_kbps = TSK_MIN(
+ tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(best_codec)->in.width, TMEDIA_CODEC_VIDEO(best_codec)->in.height, TMEDIA_CODEC_VIDEO(best_codec)->in.fps),
+ bandwidth_max_download_kbps);
+ bandwidth_max_upload_kbps = TSK_MIN(
+ tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(best_codec)->out.width, TMEDIA_CODEC_VIDEO(best_codec)->out.height, TMEDIA_CODEC_VIDEO(best_codec)->out.fps),
+ bandwidth_max_upload_kbps);
+ }
+ else if((self->media_type & tmedia_video || (self->media_type & tmedia_bfcp_video) == tmedia_bfcp_video)){
+ bandwidth_max_download_kbps = TSK_MIN(tmedia_get_video_bandwidth_kbps_3(), bandwidth_max_download_kbps);
+ bandwidth_max_upload_kbps = TSK_MIN(tmedia_get_video_bandwidth_kbps_3(), bandwidth_max_upload_kbps);
+ }
+ }
+ }
+
+ TSK_DEBUG_INFO("max_bw_up=%d kpbs, max_bw_down=%d kpbs, congestion_ctrl_enabled=%d, media_type=%d", bandwidth_max_upload_kbps, bandwidth_max_download_kbps, self->congestion_ctrl_enabled, self->media_type);
+ // forward up/down bandwidth info to rctp session (used in RTCP-REMB)
+ ret = trtp_manager_set_app_bandwidth_max(self->rtp_manager, bandwidth_max_upload_kbps, bandwidth_max_download_kbps);
+ }
+
+ // because of AudioUnit under iOS => prepare both consumer and producer then start() at the same time
+ /* prepare consumer and producer */
+ // Producer could output encoded frames:
+ // - On WP8 with built-in H.264 encoder
+ // - When Intel Quick Sync is used for encoding and added on the same Topology as the producer (camera MFMediaSource)
+ if (self->producer) {
+ if((ret = tmedia_producer_prepare(self->producer, best_codec)) == 0) {
+ media_param = tmedia_param_create(tmedia_pat_set,
+ best_codec->type,
+ tmedia_ppt_codec,
+ tmedia_pvt_int32,
+ "bypass-encoding",
+ (void*)(self->producer->encoder.codec_id == best_codec->id ? &__ByPassIsYes : &__ByPassIsNo));
+ if(media_param) {
+ tmedia_codec_set(TMEDIA_CODEC(best_codec), media_param);
+ TSK_OBJECT_SAFE_FREE(media_param);
+ }
+ }
+ }
+ // Consumer could accept encoded frames as input:
+ // - On WP8 with built-in H.264 decoder
+ // - When IMFTransform decoder is used for decoding and added on the same Topology as the consumer (EVR)
+ if (self->consumer) {
+ if ((ret = tmedia_consumer_prepare(self->consumer, best_codec)) == 0) {
+ media_param = tmedia_param_create(tmedia_pat_set,
+ best_codec->type,
+ tmedia_ppt_codec,
+ tmedia_pvt_int32,
+ "bypass-decoding",
+ (void*)(self->consumer->decoder.codec_id == best_codec->id ? &__ByPassIsYes : &__ByPassIsNo));
+ if(media_param) {
+ tmedia_codec_set(TMEDIA_CODEC(best_codec), media_param);
+ TSK_OBJECT_SAFE_FREE(media_param);
+ }
+ }
+ }
+
+ // Start RTP manager
+ ret = trtp_manager_start(self->rtp_manager);
+
+#if HAVE_SRTP
+ self->use_srtp = trtp_manager_is_srtp_activated(self->rtp_manager);
+
+ /* start consumer and producer */
+ if (trtp_manager_is_dtls_activated(self->rtp_manager) && !trtp_manager_is_dtls_started(self->rtp_manager)) {
+ // delay starting util DTLS-SRTP negotiation terminates (handshaking succeed)
+ TSK_DEBUG_INFO("Delaying consumer/producer starting until DTLS-SRTP negotiation complete");
+ }
+ else{
+#endif /* HAVE_SRTP */
+ tsk_safeobj_lock(self);
+ if (self->consumer && !self->consumer->is_started) ret = tmedia_consumer_start(self->consumer);
+ if (self->producer && !self->producer->is_started) ret = tmedia_producer_start(self->producer);
+ tsk_safeobj_unlock(self);
+#if HAVE_SRTP
+ }
+#endif /* HAVE_SRTP */
+
+ return ret;
+ }
+ else {
+ TSK_DEBUG_ERROR("Invalid RTP/RTCP manager");
+ return -3;
+ }
+
+ return 0;
+}
+
+int tdav_session_av_stop(tdav_session_av_t* self)
+{
+ tmedia_codec_t* codec;
+ tsk_list_item_t* item;
+ int ret = 0;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* stop Producer */
+ if (self->producer) {
+ ret = tmedia_producer_stop(self->producer);
+ }
+
+ /* stop RTP/RTCP manager */
+ if (self->rtp_manager) {
+ ret = trtp_manager_stop(self->rtp_manager);
+ }
+
+ /* stop Consumer (after RTP manager to silently discard in coming packets) */
+ if (self->consumer) {
+ ret = tmedia_consumer_stop(self->consumer);
+ }
+
+ /* close codecs to force open() for next start (e.g SIP UPDATE with SDP) */
+ if(TMEDIA_SESSION(self)->neg_codecs){
+ tsk_list_foreach(item, TMEDIA_SESSION(self)->neg_codecs){
+ if(!(codec = TMEDIA_CODEC(item->data))){
+ continue;
+ }
+ ret = tmedia_codec_close(codec);
+ }
+ }
+
+ self->bytes_in.count_last_time = self->bytes_out.count_last_time = 0;
+ self->bytes_in.count = self->bytes_out.count = 0;
+
+ return ret;
+}
+
+int tdav_session_av_pause(tdav_session_av_t* self)
+{
+ int ret = 0;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* Consumer */
+ if(self->consumer){
+ ret = tmedia_consumer_pause(self->consumer);
+ }
+ /* Producer */
+ if(self->producer){
+ ret = tmedia_producer_pause(self->producer);
+ }
+
+ return ret;
+}
+
+const tsdp_header_M_t* tdav_session_av_get_lo(tdav_session_av_t* self, tsk_bool_t *updated)
+{
+ tmedia_session_t* base = TMEDIA_SESSION(self);
+#if HAVE_SRTP
+ static const tsk_bool_t have_lib_srtp = tsk_true;
+#else
+ static const tsk_bool_t have_lib_srtp = tsk_false;
+#endif
+ const tsk_bool_t have_lib_srtp_dtls = tnet_dtls_is_srtp_supported();
+ tsk_bool_t is_srtp_enable = (self->srtp_type != tmedia_srtp_type_none) && (self->srtp_mode == tmedia_srtp_mode_optional || self->srtp_mode == tmedia_srtp_mode_mandatory);
+ tsk_bool_t is_srtp_dtls_enabled = is_srtp_enable && !!(self->srtp_type & tmedia_srtp_type_dtls);
+ tsk_bool_t is_srtp_sdes_enabled = is_srtp_enable && !!(self->srtp_type & tmedia_srtp_type_sdes);
+ tsk_bool_t is_srtp_local_mandatory = is_srtp_enable && (self->srtp_mode == tmedia_srtp_mode_mandatory);
+ tsk_bool_t is_bfcp_session = ((base->type & tmedia_bfcp) == tmedia_bfcp) ? tsk_true : tsk_false;
+ tsk_bool_t is_first_media;
+
+ if(!base || !base->plugin || !updated){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ *updated = tsk_false;
+
+ if(!self->rtp_manager || (!self->ice_ctx && !self->rtp_manager->transport)){
+ if(self->rtp_manager && (!self->ice_ctx && !self->rtp_manager->transport)){ // reINVITE or UPDATE (manager was destroyed when stoppped)
+ if(trtp_manager_prepare(self->rtp_manager)){
+ TSK_DEBUG_ERROR("Failed to prepare transport");
+ return tsk_null;
+ }
+ }
+ else{
+ TSK_DEBUG_ERROR("RTP/RTCP manager in invalid");
+ return tsk_null;
+ }
+ }
+
+ // only first media will add session-level attributes (e.g. DTLS setup and fingerprint)
+ if((is_first_media = !!self->local_sdp)){
+ const tsdp_header_M_t* firstM = (const tsdp_header_M_t*)tsdp_message_get_headerAt(self->local_sdp, tsdp_htype_M, 0);
+ if(!(is_first_media = !firstM)){
+ is_first_media = tsk_striequals(TMEDIA_SESSION(self)->plugin->media, firstM->media);
+ }
+ }
+
+ if(base->ro_changed && base->M.lo){
+ static const char* __fields[] =
+ {
+ /* Codecs */
+ "fmtp", "rtpmap", "imageattr",
+ /* QoS */
+ "curr", "des", "conf",
+ /* SRTP */
+ "crypto",
+ /* DTLS */
+ "setup", "fingerprint",
+ /* ICE */
+ "candidate", "ice-ufrag", "ice-pwd",
+ /* SDPCapNeg */
+ "tcap", "acap", "pcfg",
+ /* Others */
+ "mid", "rtcp-mux", "ssrc"
+ };
+ // remove media-level attributes
+ tsdp_header_A_removeAll_by_fields(base->M.lo->Attributes, __fields, sizeof(__fields)/sizeof(__fields[0]));
+ tsk_list_clear_items(base->M.lo->FMTs);
+ // remove session-level attributes
+ if(is_first_media){
+ // headers: contains all kind of headers but this is a smart function :)
+ tsdp_header_A_removeAll_by_fields((tsdp_headers_A_L_t*)self->local_sdp->headers, __fields, sizeof(__fields)/sizeof(__fields[0]));
+ }
+ }
+
+ *updated = (base->ro_changed || !base->M.lo);
+
+ if(!base->M.lo){
+ if((base->M.lo = tsdp_header_M_create(base->plugin->media, self->rtp_manager->rtp.public_port, "RTP/AVP"))){
+ /* If NATT is active, do not rely on the global IP address Connection line */
+ if(self->natt_ctx){
+ tsdp_header_M_add_headers(base->M.lo,
+ TSDP_HEADER_C_VA_ARGS("IN", self->use_ipv6 ? "IP6" : "IP4", self->rtp_manager->rtp.public_ip),
+ tsk_null);
+ }
+ /* 3GPP TS 24.229 - 6.1.1 General
+ In order to support accurate bandwidth calculations, the UE may include the "a=ptime" attribute for all "audio" media
+ lines as described in RFC 4566 [39]. If a UE receives an "audio" media line with "a=ptime" specified, the UE should
+ transmit at the specified packetization rate. If a UE receives an "audio" media line which does not have "a=ptime"
+ specified or the UE does not support the "a=ptime" attribute, the UE should transmit at the default codec packetization
+ rate as defined in RFC 3551 [55A]. The UE will transmit consistent with the resources available from the network.
+
+ For "video" and "audio" media types that utilize the RTP/RTCP, the UE shall specify the proposed bandwidth for each
+ media stream utilizing the "b=" media descriptor and the "AS" bandwidth modifier in the SDP.
+
+ The UE shall include the MIME subtype "telephone-event" in the "m=" media descriptor in the SDP for audio media
+ flows that support both audio codec and DTMF payloads in RTP packets as described in RFC 4733 [23].
+ */
+ if(self->media_type & tmedia_audio){
+ tsk_istr_t ptime;
+ tsk_itoa(tmedia_defaults_get_audio_ptime(), &ptime);
+ tsdp_header_M_add_headers(base->M.lo,
+ /* rfc3551 section 4.5 says the default ptime is 20 */
+ TSDP_HEADER_A_VA_ARGS("ptime", ptime),
+ TSDP_HEADER_A_VA_ARGS("minptime", "1"),
+ TSDP_HEADER_A_VA_ARGS("maxptime", "255"),
+ TSDP_HEADER_A_VA_ARGS("silenceSupp", "off - - - -"),
+ tsk_null);
+ // the "telephone-event" fmt/rtpmap is added below
+ }
+ else if((self->media_type & tmedia_video || (self->media_type & tmedia_bfcp_video) == tmedia_bfcp_video)){
+ tsk_istr_t session_id;
+ // https://code.google.com/p/webrtc2sip/issues/detail?id=81
+ // goog-remb: https://groups.google.com/group/discuss-webrtc/browse_thread/thread/c61ad3487e2acd52
+ // rfc5104 - 7.1. Extension of the rtcp-fb Attribute
+ tsdp_header_M_add_headers(base->M.lo,
+ TSDP_HEADER_A_VA_ARGS("rtcp-fb", "* ccm fir"),
+ TSDP_HEADER_A_VA_ARGS("rtcp-fb", "* nack"),
+ TSDP_HEADER_A_VA_ARGS("rtcp-fb", "* goog-remb"),
+ tsk_null);
+ // https://tools.ietf.org/html/rfc4574
+ // http://tools.ietf.org/html/rfc4796
+ tsk_itoa(base->id, &session_id);
+ tsdp_header_M_add_headers(base->M.lo,
+ TSDP_HEADER_A_VA_ARGS("label", session_id),
+ TSDP_HEADER_A_VA_ARGS("content", (self->media_type & tmedia_bfcp) ? "slides" : "main"),
+ tsk_null);
+ // http://tools.ietf.org/html/rfc3556
+ // https://tools.ietf.org/html/rfc3890
+ if(self->bandwidth_max_download_kbps > 0 && self->bandwidth_max_download_kbps != INT_MAX){ // INT_MAX or <=0 means undefined
+ tsdp_header_M_add_headers(base->M.lo,
+ TSDP_HEADER_B_VA_ARGS("AS", self->bandwidth_max_download_kbps),
+ TSDP_HEADER_B_VA_ARGS("TIAS", (self->bandwidth_max_download_kbps << 10)),
+ tsk_null);
+ }
+ }
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to create lo");
+ return tsk_null;
+ }
+ }
+
+ if(*updated){
+ tmedia_codecs_L_t* neg_codecs = tsk_null;
+
+ if(base->M.ro){
+ TSK_OBJECT_SAFE_FREE(base->neg_codecs);
+ /* update negociated codecs */
+ if((neg_codecs = tmedia_session_match_codec(base, base->M.ro))){
+ base->neg_codecs = neg_codecs;
+ }
+ /* from codecs to sdp */
+ if(TSK_LIST_IS_EMPTY(base->neg_codecs) || ((base->neg_codecs->tail == base->neg_codecs->head) && TDAV_IS_DTMF_CODEC(TSK_LIST_FIRST_DATA(base->neg_codecs)))){
+ base->M.lo->port = 0; /* Keep the RTP transport and reuse it when we receive a reINVITE or UPDATE request */
+ // To reject an offered stream, the port number in the corresponding stream in the answer
+ // MUST be set to zero. Any media formats listed are ignored. AT LEAST ONE MUST BE PRESENT, AS SPECIFIED BY SDP.
+ tsk_strupdate(&base->M.lo->proto, base->M.ro->proto);
+ if(base->M.ro->FMTs){
+ tsk_list_pushback_list(base->M.lo->FMTs, base->M.ro->FMTs);
+ }
+ TSK_DEBUG_INFO("No codec matching for media type = %d", (int32_t)self->media_type);
+ goto DONE;
+ }
+ else{
+ tmedia_codec_to_sdp(base->neg_codecs, base->M.lo);
+ }
+ }
+ else{
+ /* from codecs to sdp */
+ tmedia_codec_to_sdp(base->codecs, base->M.lo);
+ }
+
+ /* SRTP */
+#if HAVE_SRTP
+ { //start-of-HAVE_SRTP
+
+ /* DTLS-SRTP default values */
+ if(is_srtp_dtls_enabled){
+ /* "setup" and "connection" */
+ if (self->dtls.local.setup == tnet_dtls_setup_none || self->dtls.local.setup == tnet_dtls_setup_actpass) { // if setup already negotiated then, use the same
+ // rfc5763: the caller is server by default
+ self->dtls.remote.setup = (!base->M.ro) ? tnet_dtls_setup_active : tnet_dtls_setup_passive;
+ _tdav_session_av_dtls_set_remote_setup(self, self->dtls.remote.setup, self->dtls.remote.connection_new, (!base->M.ro));
+ }
+ if (self->rtp_manager) {
+ trtp_manager_set_dtls_local_setup(self->rtp_manager, self->dtls.local.setup, self->dtls.local.connection_new);
+ }
+ }
+
+ if(!base->M.ro){
+ // === RO IS NULL ===
+ const trtp_srtp_ctx_xt *srtp_ctxs[SRTP_CRYPTO_TYPES_MAX] = { tsk_null };
+ tsk_size_t ctx_count = 0, ctx_idx, acap_tag = 1;
+ tsk_size_t acap_tag_fp_sha1 = 0, acap_tag_fp_sha256 = 0, acap_tag_setup = 0, acap_tag_connection = 0, acap_tag_crypro_start = 0;
+ char* str = tsk_null;
+ tsdp_header_A_t* cryptoA = tsk_null;
+ tsk_bool_t negotiate_srtp = (self->srtp_mode == tmedia_srtp_mode_optional);
+ tsk_bool_t negotiate_avpf = (self->avpf_mode_set == tmedia_mode_optional);
+ tsk_bool_t is_srtp_remote_mandatory = (base->M.ro && _sdp_str_contains(base->M.ro->proto, "SAVP"));
+ tsk_size_t profiles_index = 0;
+ RTP_PROFILE_T profiles[RTP_PROFILES_COUNT] = { RTP_PROFILE_NONE };
+
+ // get local SRTP context
+ if(is_srtp_sdes_enabled){
+ ctx_count = trtp_srtp_get_local_contexts(self->rtp_manager, (const trtp_srtp_ctx_xt **)&srtp_ctxs, sizeof(srtp_ctxs)/sizeof(srtp_ctxs[0]));
+ }
+
+ // a=tcap:
+ if((negotiate_srtp || negotiate_avpf)){
+ static const int32_t __tcap_tag = 1;
+ char* tcap = tsk_null;
+ const char* fp_sha1 = tsk_null;
+ const char* fp_sha256 = tsk_null;
+#define _first_media_add_header()
+#define _first_media_strcat(ppstr, format, ...) if(is_first_media) tsk_strcat_2((ppstr), (format), ##__VA_ARGS__)
+#define _first_media_sprintf(ppstr, format, ...) if(is_first_media) tsk_sprintf((ppstr), (format), ##__VA_ARGS__)
+#define _first_media_add_headers(sdp, ...) if(is_first_media) tsdp_message_add_headers((sdp), ##__VA_ARGS__)
+
+ _first_media_strcat(&tcap, "%d", __tcap_tag);
+
+ if(is_srtp_dtls_enabled){
+ if(!tsk_strnullORempty(TMEDIA_SESSION(self)->dtls.file_pbk)){
+ fp_sha1 = trtp_manager_get_dtls_local_fingerprint(self->rtp_manager, tnet_dtls_hash_type_sha1);
+ fp_sha256 = trtp_manager_get_dtls_local_fingerprint(self->rtp_manager, tnet_dtls_hash_type_sha256);
+ }
+ _first_media_strcat(&tcap, negotiate_avpf ? " UDP/TLS/RTP/SAVPF UDP/TLS/RTP/SAVP" : " UDP/TLS/RTP/SAVP");
+ if(negotiate_avpf){
+ profiles[profiles_index++] = RTP_PROFILE_UDP_TLS_RTP_SAVPF;
+ }
+ profiles[profiles_index++] = RTP_PROFILE_UDP_TLS_RTP_SAVP;
+ }
+ if(is_srtp_sdes_enabled){
+ _first_media_strcat(&tcap, negotiate_avpf ? " RTP/SAVPF RTP/SAVP" : " RTP/SAVP");
+ if(negotiate_avpf){
+ profiles[profiles_index++] = RTP_PROFILE_SAVPF;
+ }
+ profiles[profiles_index++] = RTP_PROFILE_SAVP;
+ }
+
+ if(!is_srtp_local_mandatory){
+ _first_media_strcat(&tcap, " RTP/AVPF");
+ profiles[profiles_index++] = RTP_PROFILE_AVPF;
+ }
+
+ // DTLS "setup" and "fringerprint"s
+ if((fp_sha1 || fp_sha256) && negotiate_srtp){
+ char* acap_fp = tsk_null;
+ acap_tag_setup = 1, acap_tag_connection = 2;
+ _first_media_sprintf(&str, "%d setup:%s", acap_tag_setup, TNET_DTLS_SETUP_NAMES[self->dtls.local.setup]);
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("acap", str), tsk_null);
+#if TDAV_DTLS_CONNECTION_ATT
+ _first_media_sprintf(&str, "%d connection:%s", acap_tag_connection, self->dtls.local.connection_new ? "new" : "existing");
+#endif
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("acap", str), tsk_null);
+ // New Firefox Nightly repspond with SHA-256 when offered SHA-1 -> It's a bug in FF
+ // Just use SHA-256 as first choice
+ if(fp_sha256){
+ _first_media_sprintf(&acap_fp, "3 fingerprint:%s %s", TNET_DTLS_HASH_NAMES[tnet_dtls_hash_type_sha256], fp_sha256);
+ acap_tag_fp_sha256 = 3;
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("acap", acap_fp), tsk_null);
+ }
+ if(fp_sha1){
+ _first_media_sprintf(&acap_fp, "%d fingerprint:%s %s", fp_sha256 ? 4 : 3, TNET_DTLS_HASH_NAMES[tnet_dtls_hash_type_sha1], fp_sha1);
+ acap_tag_fp_sha1 = (fp_sha256 ? 4 : 3);
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("acap", acap_fp), tsk_null);
+ }
+
+ TSK_FREE(acap_fp);
+ }
+
+ // "tcap"
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("tcap", tcap), tsk_null);
+ TSK_FREE(tcap);
+
+ acap_tag = TSK_MAX(acap_tag, acap_tag_connection);
+ acap_tag = TSK_MAX(acap_tag, acap_tag_fp_sha1);
+ acap_tag = TSK_MAX(acap_tag, acap_tag_fp_sha256);
+ }
+ if(is_first_media && !negotiate_srtp && is_srtp_dtls_enabled){
+ // add DTLS-SRTP fingerprint and setup at session-level
+ const char* fp_str = trtp_manager_get_dtls_local_fingerprint(self->rtp_manager, TDAV_DFAULT_FP_HASH);
+ if(fp_str){
+ tsk_sprintf(&str, "%s %s", TNET_DTLS_HASH_NAMES[TDAV_DFAULT_FP_HASH], fp_str);
+ //!\ From RFC 5763 (DTLS-SRTP Framework) \A75: The endpoint MUST NOT use the connection attribute defined in [RFC4145].
+#if TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("fingerprint", str), tsk_null);
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("setup", TNET_DTLS_SETUP_NAMES[self->dtls.local.setup]), tsk_null);
+ // tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("connection", self->dtls.local.connection_new ? "new" : "existing"), tsk_null);
+#else
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("fingerprint", str), tsk_null);
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("setup", TNET_DTLS_SETUP_NAMES[self->dtls.local.setup]), tsk_null);
+ // _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("connection", self->dtls.local.connection_new ? "new" : "existing"), tsk_null);
+#endif
+ }
+ }
+
+ // "a=acap:crypto" or "a=crypto"
+ for(ctx_idx = 0; ctx_idx < ctx_count; ++ctx_idx){
+ if(acap_tag_crypro_start == 0){
+ acap_tag_crypro_start = (acap_tag == 1 ? acap_tag : ++acap_tag);
+ }
+ if(negotiate_srtp){
+ tsk_sprintf(&str, "%d crypto:%d %s inline:%s", acap_tag++, srtp_ctxs[ctx_idx]->rtp.tag, trtp_srtp_crypto_type_strings[srtp_ctxs[ctx_idx]->rtp.crypto_type], srtp_ctxs[ctx_idx]->rtp.key_str);
+ cryptoA = tsdp_header_A_create("acap", str);
+ }
+ else{
+ tsk_sprintf(&str, "%d %s inline:%s", srtp_ctxs[ctx_idx]->rtp.tag, trtp_srtp_crypto_type_strings[srtp_ctxs[ctx_idx]->rtp.crypto_type], srtp_ctxs[ctx_idx]->rtp.key_str);
+ cryptoA = tsdp_header_A_create("crypto", str);
+ }
+
+ tsdp_header_M_add(base->M.lo, (const tsdp_header_t*)cryptoA);
+
+ TSK_OBJECT_SAFE_FREE(cryptoA);
+ }
+
+ // a=pcfg:
+ if(negotiate_srtp || negotiate_avpf){
+ tsk_size_t pcfg_tag, pcfg_idx;
+ char *acap_crypto = tsk_null, *acap_dtls = tsk_null;
+
+ // crypto
+ for(ctx_idx = 0; ctx_idx < ctx_count; ++ctx_idx){
+ tsk_strcat_2(&acap_crypto, "%s%d",
+ acap_crypto ? "," : "",
+ (acap_tag_crypro_start + ctx_idx)
+ );
+ }
+
+ // dtls setup and fingerprints
+ if(acap_tag_setup > 0 && acap_tag_connection > 0 && (acap_tag_fp_sha1 > 0 || acap_tag_fp_sha256 > 0)){
+ tsk_sprintf(&acap_dtls, "%d,%d", acap_tag_setup, acap_tag_connection);
+ if(acap_tag_fp_sha1 > 0){
+ tsk_strcat_2(&acap_dtls, ",%d", acap_tag_fp_sha1);
+ }
+ if(acap_tag_fp_sha256 > 0){
+ tsk_strcat_2(&acap_dtls, "%s%d", acap_tag_fp_sha1 > 0 ? "|" : ",", acap_tag_fp_sha256);
+ }
+ }
+
+ for(pcfg_tag = 1, pcfg_idx = 0; pcfg_idx < profiles_index; ++pcfg_tag, ++pcfg_idx){
+ if(((profiles[pcfg_idx] & RTP_PROFILE_SECURE_SDES) == RTP_PROFILE_SECURE_SDES) && acap_crypto){
+ tsk_sprintf(&str, "%d t=%d a=%s", pcfg_tag, pcfg_tag, acap_crypto);
+ }
+ else if(((profiles[pcfg_idx] & RTP_PROFILE_SECURE_DTLS) == RTP_PROFILE_SECURE_DTLS) && acap_dtls){
+ tsk_sprintf(&str, "%d t=%d a=%s", pcfg_tag, pcfg_tag, acap_dtls);
+ }
+ else{
+ tsk_sprintf(&str, "%d t=%d", pcfg_tag, pcfg_tag);
+ }
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("pcfg", str), tsk_null);
+ }
+
+ TSK_FREE(acap_crypto);
+ TSK_FREE(acap_dtls);
+ }
+
+ if(is_srtp_remote_mandatory || is_srtp_local_mandatory || trtp_srtp_is_initialized(self->rtp_manager)){
+ self->use_srtp = tsk_true;
+ }
+
+ /* RFC 5939 - Session Description Protocol (SDP) Capability Negotiation */
+ {
+ sdp_acaps_xt acaps;
+ sdp_tcaps_xt tcaps;
+
+ _sdp_acaps_reset(&acaps);
+ _sdp_tcaps_reset(&tcaps);
+ _sdp_pcfgs_reset(&self->sdp_caps->local);
+ // session-level attributes
+ if(self->local_sdp){
+ _sdp_pcfgs_from_sdp(self->local_sdp, &acaps, &tcaps, &self->sdp_caps->local, tsk_false);
+ }
+ // media-level attributes
+ _sdp_pcfgs_from_sdp(base->M.lo, &acaps, &tcaps, &self->sdp_caps->local, tsk_false);
+ }
+
+ TSK_FREE(str);
+ } //end-of-if(!base->M.ro)
+ else{
+ // === RO IS NOT NULL ===
+ // the ro validity has been checked in "set_ro()"
+ RTP_PROFILE_T profile_remote = (self->sdp_caps->acfg.tag > 0 && self->sdp_caps->acfg.tcap.tag > 0)
+ ? self->sdp_caps->acfg.tcap.profile
+ : _sdp_profile_from_string(base->M.ro->proto);
+ tsk_bool_t is_srtp_sdes_activated = tsk_false, is_srtp_dtls_activated = tsk_false;
+
+ // intersect remote and local SRTP options
+ if (self->avpf_mode_neg == tmedia_mode_optional && ((profile_remote & RTP_PROFILE_AVPF) == RTP_PROFILE_AVPF)) {
+ self->avpf_mode_neg = tmedia_mode_mandatory;
+ }
+ is_srtp_sdes_enabled &= ((profile_remote & RTP_PROFILE_SECURE_SDES) == RTP_PROFILE_SECURE_SDES);
+ is_srtp_dtls_enabled &= ((profile_remote & RTP_PROFILE_SECURE_DTLS) == RTP_PROFILE_SECURE_DTLS);
+
+
+ // SDES-SRTP
+ if(is_srtp_sdes_enabled){
+ const trtp_srtp_ctx_xt *srtp_ctxs[SRTP_CRYPTO_TYPES_MAX] = { tsk_null };
+ tsk_size_t ctx_count = 0, ctx_idx;
+ // get local SRTP context
+ if((ctx_count = trtp_srtp_get_local_contexts(self->rtp_manager, (const trtp_srtp_ctx_xt **)&srtp_ctxs, sizeof(srtp_ctxs)/sizeof(srtp_ctxs[0]))) > 0){
+ char* str = tsk_null;
+ for(ctx_idx = 0; ctx_idx < ctx_count; ++ctx_idx){
+ is_srtp_sdes_activated = tsk_true;
+ tsk_sprintf(&str, "%d %s inline:%s", srtp_ctxs[ctx_idx]->rtp.tag, trtp_srtp_crypto_type_strings[srtp_ctxs[ctx_idx]->rtp.crypto_type], srtp_ctxs[ctx_idx]->rtp.key_str);
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("crypto", str), tsk_null);
+ }
+ TSK_FREE(str);
+ }
+ }
+
+ // DTLS-SRTP
+ if(!is_srtp_sdes_activated && is_srtp_dtls_enabled){
+ // get "fingerprint", "setup" and "connection" attributes
+ if(!tsk_strnullORempty(TMEDIA_SESSION(self)->dtls.file_pbk)){
+ tnet_dtls_hash_type_t fp_hash_remote;
+ char* str = tsk_null;
+ if((fp_hash_remote = trtp_manager_get_dtls_remote_fingerprint_hash(self->rtp_manager)) == tnet_dtls_hash_type_none){
+ fp_hash_remote = TDAV_DFAULT_FP_HASH;
+ }
+ tsk_sprintf(&str, "%s %s", TNET_DTLS_HASH_NAMES[fp_hash_remote], trtp_manager_get_dtls_local_fingerprint(self->rtp_manager, fp_hash_remote));
+#if TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("fingerprint", str), tsk_null);
+#else
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("fingerprint", str), tsk_null);
+#endif /* TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT */
+ TSK_FREE(str);
+ }
+#if TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("setup", TNET_DTLS_SETUP_NAMES[self->dtls.local.setup]), tsk_null);
+#if TDAV_DTLS_CONNECTION_ATT
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("connection", self->dtls.local.connection_new ? "new" : "existing"), tsk_null);
+#endif /* TDAV_DTLS_CONNECTION_ATT */
+#else
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("setup", TNET_DTLS_SETUP_NAMES[self->dtls.local.setup]), tsk_null);
+#if TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT
+ _first_media_add_headers(self->local_sdp, TSDP_HEADER_A_VA_ARGS("connection", self->dtls.local.connection_new ? "new" : "existing"), tsk_null);
+#endif /* TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT */
+#endif /* TDAV_FIXME_MEDIA_LEVEL_DTLS_ATT */
+
+ is_srtp_dtls_activated = tsk_true;
+ }
+
+ // activate the right SRTP type and disable others
+ trtp_manager_set_srtp_type_remote(self->rtp_manager,
+ is_srtp_sdes_activated ? tmedia_srtp_type_sdes : (is_srtp_dtls_activated ? tmedia_srtp_type_dtls : tmedia_srtp_type_none));
+
+ self->use_srtp |= (is_srtp_sdes_activated || is_srtp_dtls_activated);
+ is_srtp_sdes_enabled &= is_srtp_sdes_activated;
+ is_srtp_dtls_enabled &= is_srtp_dtls_activated;
+
+ }//end-of-else
+ }//end-of-HAVE_SRTP
+#endif /* HAVE_SRTP */
+
+
+ /* RFC 5939: acfg */
+ if(self->sdp_caps->acfg.tag > 0){
+ _sdp_acfg_to_sdp(base->M.lo, &self->sdp_caps->acfg);
+ }
+
+ /* Hold/Resume */
+#if 0
+ // BFCP sessions send media but not expected to receive any data.
+ // TODO: Radvision ignores "sendonly" and use the bfcp session as receiver for the mixed stream
+ tsdp_header_M_set_holdresume_att(base->M.lo, (base->lo_held | is_bfcp_session), base->ro_held);
+#else
+ tsdp_header_M_set_holdresume_att(base->M.lo, base->lo_held, base->ro_held);
+#endif
+
+ /* Update Proto*/
+ tsk_strupdate(&base->M.lo->proto,
+ self->use_srtp
+ ? ((self->avpf_mode_neg == tmedia_mode_mandatory) ? (is_srtp_dtls_enabled ? "UDP/TLS/RTP/SAVPF" : "RTP/SAVPF") : (is_srtp_dtls_enabled ? "UDP/TLS/RTP/SAVP" : "RTP/SAVP"))
+ : ((self->avpf_mode_neg == tmedia_mode_mandatory) ? "RTP/AVPF" : "RTP/AVP")
+ );
+
+ // RFC 5761: RTCP/RTP muxing
+ if(self->use_rtcpmux){
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("rtcp-mux", tsk_null), tsk_null);
+ }
+
+ // draft-lennox-mmusic-sdp-source-attributes-01
+ if((self->media_type & tmedia_audio) || (self->media_type & tmedia_video) || ((self->media_type & tmedia_bfcp_video) == tmedia_bfcp_video)){
+ char* str = tsk_null;
+ tsk_sprintf(&str, "%u cname:%s", self->rtp_manager->rtp.ssrc.local, self->rtp_manager->rtcp.cname); // also defined in RTCP session
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("ssrc", str), tsk_null);
+ tsk_sprintf(&str, "%u mslabel:%s", self->rtp_manager->rtp.ssrc.local, "6994f7d1-6ce9-4fbd-acfd-84e5131ca2e2");
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("ssrc", str), tsk_null);
+ tsk_sprintf(&str, "%u label:%s", self->rtp_manager->rtp.ssrc.local, (self->media_type & tmedia_audio) ? ((base->type & tmedia_bfcp) ? "doubango@bfcpaudio" : "doubango@audio") : ((base->type & tmedia_bfcp) ? "doubango@bfcpvideo" : "doubango@video")); /* https://groups.google.com/group/discuss-webrtc/browse_thread/thread/6c44106c8ce7d6dc */
+ tsdp_header_M_add_headers(base->M.lo, TSDP_HEADER_A_VA_ARGS("ssrc", str), tsk_null);
+ TSK_FREE(str);
+ }
+
+ /* ICE */
+ if(self->ice_ctx){
+ tsk_size_t index = 0;
+ const tnet_ice_candidate_t* candidate;
+ tsk_bool_t remote_use_rtcpmux = (base->M.ro && (tsdp_header_M_findA(base->M.ro, "rtcp-mux") != tsk_null));
+
+ // FIXME: for RTCP, use "RFC 3605"in addition to "rtcp-mux"
+
+ // "a=ice-mismatch" if "C=" line is not included in the candidates
+ if ((candidate = tnet_ice_ctx_get_local_candidate_first(self->ice_ctx))) { // at least one candidate
+ base->M.lo->port = candidate->socket->port;
+
+ tsdp_header_M_remove(base->M.lo, tsdp_htype_C);
+ tsdp_header_M_add_headers(base->M.lo,
+ TSDP_HEADER_C_VA_ARGS("IN", TNET_SOCKET_TYPE_IS_IPV6(candidate->socket->type) ? "IP6" : "IP4", candidate->socket->ip),
+ tsk_null);
+ tsdp_header_M_add_headers(base->M.lo,
+ TSDP_HEADER_A_VA_ARGS("ice-ufrag", candidate->ufrag),
+ TSDP_HEADER_A_VA_ARGS("ice-pwd", candidate->pwd),
+ tsk_null);
+ // RTCWeb
+ // "mid:" must not added without BUNDLE
+ // tsdp_header_M_add_headers(base->M.lo,
+ // TSDP_HEADER_A_VA_ARGS("mid", self->media_type & tmedia_audio ? "audio" : "video"),
+ // tsk_null);
+
+ while ((candidate = tnet_ice_ctx_get_local_candidate_at(self->ice_ctx, index++))) {
+ if (self->use_rtcpmux && remote_use_rtcpmux && candidate->comp_id == TNET_ICE_CANDIDATE_COMPID_RTCP) {
+ continue; // do not add RTCP candidates if RTCP-MUX is activated (local + remote)
+ }
+#if 0 //TURN:FORCE
+ if (candidate->type_e != tnet_ice_cand_type_relay) {
+ continue;
+ }
+#endif
+ tsdp_header_M_add_headers(base->M.lo,
+ TSDP_HEADER_A_VA_ARGS("candidate", tnet_ice_candidate_tostring((tnet_ice_candidate_t*)candidate)),
+ tsk_null);
+ }
+ }
+ }
+ else{
+ if(base->M.lo->C){
+ tsk_strupdate(&base->M.lo->C->addr, self->rtp_manager->rtp.public_ip);
+ tsk_strupdate(&base->M.lo->C->addrtype, (self->use_ipv6 ? "IP6" : "IP4"));
+ }
+ base->M.lo->port = self->rtp_manager->rtp.public_port;
+ }
+
+ if(self->media_type & tmedia_audio){
+ ///* 3GPP TS 24.229 - 6.1.1 General
+ // The UE shall include the MIME subtype "telephone-event" in the "m=" media descriptor in the SDP for audio media
+ // flows that support both audio codec and DTMF payloads in RTP packets as described in RFC 4733 [23].
+ //*/
+ //tsdp_header_M_add_fmt(base->M.lo, TMEDIA_CODEC_FORMAT_DTMF);
+ //tsdp_header_M_add_headers(base->M.lo,
+ // TSDP_HEADER_A_VA_ARGS("fmtp", TMEDIA_CODEC_FORMAT_DTMF" 0-15"),
+ // tsk_null);
+ //tsdp_header_M_add_headers(base->M.lo,
+ // TSDP_HEADER_A_VA_ARGS("rtpmap", TMEDIA_CODEC_FORMAT_DTMF" telephone-event/8000"),
+ // tsk_null);
+ }
+
+ /* QoS */
+ if(base->qos){
+ tmedia_qos_tline_t* ro_tline;
+ if(base->M.ro && (ro_tline = tmedia_qos_tline_from_sdp(base->M.ro))){
+ tmedia_qos_tline_set_ro(base->qos, ro_tline);
+ TSK_OBJECT_SAFE_FREE(ro_tline);
+ }
+ tmedia_qos_tline_to_sdp(base->qos, base->M.lo);
+ }
+ DONE:;
+ } // end-of-if(*updated)
+
+ return base->M.lo;
+}
+
+/**
+ * Applies the remote SDP media line ("m=") to this A/V session and updates the
+ * negotiation state: codecs, RTP profile (AVP/AVPF/SAVP(F)), SRTP (SDES and/or
+ * DTLS per RFC 5763/4145), RTCP-MUX (RFC 5761), bandwidth (RFC 3556/3890),
+ * remote SSRC and RFC 5939 capability configurations.
+ * @param self the A/V session; must have been prepared (rtp_manager created).
+ * @param m the remote media description. Referenced (not copied).
+ * @param updated [out] tsk_true when the negotiated codec list changed.
+ * @retval 0 on success, negative error code otherwise.
+ */
+int tdav_session_av_set_ro(tdav_session_av_t* self, const struct tsdp_header_M_s* m, tsk_bool_t *updated)
+{
+	tmedia_codecs_L_t* neg_codecs;
+	tsk_bool_t srtp_sdes_neg_ok = tsk_false, srtp_dtls_neg_ok = tsk_false;
+	tsk_bool_t is_srtp_remote_mandatory, is_srtp_dtls_remote_mandatory, is_srtp_sdes_remote_mandatory;
+	tsk_bool_t is_srtp_local_mandatory, is_srtp_dtls_local_enabled, is_srtp_sdes_local_enabled;
+	tmedia_session_t* base = TMEDIA_SESSION(self);
+	RTP_PROFILE_T profile_remote;
+	int32_t acfg_idx = -1;
+
+	if(!base || !m || !updated){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	if(!self->rtp_manager){
+		TSK_DEBUG_ERROR("RTP manager is null. Did you forget to prepare the session?");
+		return -1;
+	}
+
+	/* update remote offer */
+	TSK_OBJECT_SAFE_FREE(base->M.ro);
+	base->M.ro = tsk_object_ref((void*)m);
+
+	*updated = tsk_false;
+
+	// check if the RTP profile from remote party is supported or not
+	if((profile_remote = _sdp_profile_from_string(m->proto)) == RTP_PROFILE_NONE){
+		TSK_DEBUG_ERROR("%s not supported as RTP profile", m->proto);
+		return -2;
+	}
+	// check that all options in the profile are supported
+	if((is_srtp_remote_mandatory = (profile_remote & RTP_PROFILE_SECURE)) && (self->srtp_mode == tmedia_srtp_mode_none)){
+		TSK_DEBUG_ERROR("Remote party requesting secure transport (%s) but this option is not enabled", m->proto);
+		return -2;
+	}
+	if((is_srtp_sdes_remote_mandatory = (profile_remote & RTP_PROFILE_SECURE_SDES) == RTP_PROFILE_SECURE_SDES) && !(self->srtp_type & tmedia_srtp_type_sdes)){
+		TSK_DEBUG_ERROR("Remote party requesting SRTP-SDES (%s) but this option is not enabled", m->proto);
+		return -2;
+	}
+	if((is_srtp_dtls_remote_mandatory = (profile_remote & RTP_PROFILE_SECURE_DTLS) == RTP_PROFILE_SECURE_DTLS) && !(self->srtp_type & tmedia_srtp_type_dtls)){
+		// FIX: error message used to say "DTLS-DTLS"
+		TSK_DEBUG_ERROR("Remote party requesting SRTP-DTLS (%s) but this option is not enabled", m->proto);
+		return -2;
+	}
+	is_srtp_local_mandatory = (self->srtp_mode == tmedia_srtp_mode_mandatory) && (self->srtp_type != tmedia_srtp_type_none);
+	is_srtp_dtls_local_enabled = (self->srtp_mode != tmedia_srtp_mode_none) && (self->srtp_type & tmedia_srtp_type_dtls);
+	is_srtp_sdes_local_enabled = (self->srtp_mode != tmedia_srtp_mode_none) && (self->srtp_type & tmedia_srtp_type_sdes);
+
+	if (base->M.lo) {
+		if ((neg_codecs = tmedia_session_match_codec(base, m))) {
+			/* update negotiated codecs */
+			TSK_OBJECT_SAFE_FREE(base->neg_codecs);
+			base->neg_codecs = neg_codecs;
+			*updated = tsk_true;
+		}
+		else {
+			TSK_DEBUG_ERROR("Codecs mismatch");
+			return -1;
+		}
+		/* QoS */
+		if (base->qos) {
+			tmedia_qos_tline_t* ro_tline;
+			if (base->M.ro && (ro_tline = tmedia_qos_tline_from_sdp(base->M.ro))) {
+				tmedia_qos_tline_set_ro(base->qos, ro_tline);
+				TSK_OBJECT_SAFE_FREE(ro_tline);
+			}
+		}
+	}
+
+	/* AVPF: negotiate feedback profile from the remote "m=" proto when local mode is optional */
+	if(self->avpf_mode_set == tmedia_mode_optional && self->avpf_mode_neg != tmedia_mode_mandatory){
+		self->avpf_mode_neg = _sdp_str_contains(base->M.ro->proto, "AVPF") ? tmedia_mode_mandatory : tmedia_mode_none;
+	}
+
+	/* RFC 5939 - Session Description Protocol (SDP) Capability Negotiation */
+	{
+		sdp_acaps_xt acaps;
+		sdp_tcaps_xt tcaps;
+
+		_sdp_acfg_reset(&self->sdp_caps->acfg);
+
+		_sdp_acaps_reset(&acaps);
+		_sdp_tcaps_reset(&tcaps);
+		_sdp_pcfgs_reset(&self->sdp_caps->remote);
+
+		// session-level attributes
+		if(self->remote_sdp){
+			_sdp_pcfgs_from_sdp(self->remote_sdp, &acaps, &tcaps, &self->sdp_caps->remote, tsk_false);
+		}
+		// media-level attributes
+		_sdp_pcfgs_from_sdp(base->M.ro, &acaps, &tcaps, &self->sdp_caps->remote, tsk_false);
+	}
+
+	/* get connection associated to this media line
+	 * If the connection is global, then the manager will call tdav_session_audio_set() */
+	if(m->C && m->C->addr){
+		tsk_strupdate(&self->remote_ip, m->C->addr);
+		self->use_ipv6 = tsk_striequals(m->C->addrtype, "IP6");
+	}
+	/* set remote port */
+	self->remote_port = m->port;
+
+	/* RTCP-MUX (RFC 5761): muxing stays enabled only if both sides support it */
+	self->use_rtcpmux &= (tsdp_header_M_findA(m, "rtcp-mux") != tsk_null);
+	if(self->ice_ctx){
+		tnet_ice_ctx_set_rtcpmux(self->ice_ctx, self->use_rtcpmux);
+	}
+
+	// BANDWIDTH:
+	// http://tools.ietf.org/html/rfc3556
+	// https://tools.ietf.org/html/rfc3890
+	{
+		if (!TSK_LIST_IS_EMPTY(m->Bandwidths)) {
+			const tsk_list_item_t* itemB;
+			const tsdp_header_B_t* B;
+			int32_t unit_div;
+			tsk_list_foreach(itemB, m->Bandwidths) {
+				if(!(B = (const tsdp_header_B_t*)itemB->data)) {
+					continue;
+				}
+				TSK_DEBUG_INFO("Remote party requested bandwidth limitation at %u using 'b=%s' SDP attribute", B->bandwidth, B->bwtype);
+				// "AS" is expressed in kbps, "TIAS" in bps -> convert to kbps
+				unit_div = tsk_striequals(B->bwtype, "AS") ? 1 : (tsk_striequals(B->bwtype, "TIAS") ? 1024 : 0);
+				if (unit_div) {
+					TSK_DEBUG_INFO("Setting bandwidth_max_upload_kbps=%u according to remote party request", B->bandwidth);
+					self->bandwidth_max_upload_kbps = (B->bandwidth/unit_div);
+					break;
+				}
+			}
+		}
+	}
+
+	/* Remote SSRC */
+	{
+		// will be also updated based on received RTP packets
+		const tsdp_header_A_t* ssrcA = tsdp_header_M_findA(m, "ssrc");
+		if(ssrcA && ssrcA->value){
+			// FIX: require an actual match (sscanf may return 0 without reaching EOF)
+			if(sscanf(ssrcA->value, "%u %*s", &self->rtp_manager->rtp.ssrc.remote) == 1){
+				TSK_DEBUG_INFO("Remote SSRC = %u", self->rtp_manager->rtp.ssrc.remote);
+			}
+		}
+	}
+
+	/* RTCWeb Type: guess the remote implementation from the SDP session name ("s=") */
+	if(self->remote_sdp){
+		const tsdp_header_S_t* S = (const tsdp_header_S_t*)tsdp_message_get_header(self->remote_sdp, tsdp_htype_S);
+		if(S && !tsk_strnullORempty(S->value)){
+			struct rtcweb_type { const char* name; tmedia_rtcweb_type_t type; };
+			static const struct rtcweb_type rtcweb_types[] =
+			{
+				{ "firefox", tmedia_rtcweb_type_firefox },
+				{ "chrome", tmedia_rtcweb_type_chrome },
+				{ "bowser", tmedia_rtcweb_type_ericsson },
+				{ "doubango", tmedia_rtcweb_type_doubango },
+			};
+			static const int32_t rtcweb_types_count = sizeof(rtcweb_types)/sizeof(rtcweb_types[0]);
+			int32_t i;
+			for(i = 0; i < rtcweb_types_count; ++i){
+				if(_sdp_str_contains(S->value, rtcweb_types[i].name)){
+					trtp_manager_set_rtcweb_type_remote(self->rtp_manager, rtcweb_types[i].type);
+					break;
+				}
+			}
+		}
+	}
+
+	/* SRTP */
+#if HAVE_SRTP
+	// this is SRTP negotiation -> do not trust the remote profile
+	if(is_srtp_dtls_local_enabled || is_srtp_sdes_local_enabled){
+		int32_t i, j;
+		const sdp_acap_xt *acap;
+		int ret;
+		if(is_srtp_sdes_local_enabled){
+			const tsdp_header_A_t* A;
+			const char* cryptos[2] = { tsk_null };
+
+			/* 1. check crypto lines from the SDP */
+			i = 0;
+			while((A = tsdp_header_M_findA_at(m, "crypto", i))){
+				cryptos[i++] = A->value;
+				if(i >= (sizeof(cryptos)/sizeof(cryptos[0]))){
+					break;
+				}
+			}
+
+			/* 2. check crypto lines from the caps (RFC 5939)*/
+			if(!cryptos[0]){
+				tsk_size_t k = 0;
+				for(i = 0; (i < SDP_CAPS_COUNT_MAX && self->sdp_caps->remote[i].tag > 0); ++i){
+					j = 0;
+					while((acap = _sdp_acaps_find_by_field(&self->sdp_caps->remote[i].acaps, "crypto", j++))){
+						if(k < (sizeof(cryptos)/sizeof(cryptos[0]))){
+							// remove "crypto: "
+							if((cryptos[k] = strstr(acap->value, ":")) && ++cryptos[k]){
+								while(isspace(*cryptos[k])) ++cryptos[k];
+								++k;
+							}
+						}
+					}
+					if(k != 0){ // do not mix crypto lines from different pcfgs
+						acfg_idx = i;
+						break;
+					}
+				}
+			}
+
+			/* 3. match cryptos */
+			for(i = 0; i< sizeof(cryptos)/sizeof(cryptos[0]); ++i){
+				if(!cryptos[i]){
+					break;
+				}
+				if((ret = trtp_srtp_set_crypto_remote(self->rtp_manager, cryptos[i])) == 0){
+					srtp_sdes_neg_ok = tsk_true;
+					break;
+				}
+			}
+		} // end-of-sdes
+
+		if(!srtp_sdes_neg_ok && is_srtp_dtls_local_enabled){
+			// FIX: 'ret' must be initialized: it is read at "if (ret == 0)" below even
+			// when the setup-update branch is skipped (setup already negotiated).
+			int ret = 0;
+			const tsdp_header_A_t *setupA = tsk_null, *fpA = tsk_null, *connectionA = tsk_null;
+			const char* fingerprints[4] = { tsk_null };
+			const char* setups[4] = { tsk_null };
+			const char* connections[4] = { tsk_null };
+			const char* connection = tsk_null;
+			const char* setup = tsk_null;
+			int32_t attr_idx; // bounded index into the 4-slot attribute arrays
+
+			/* 1. check DTLS attributes from the SDP */
+
+			if(self->remote_sdp){
+				setupA = tsdp_message_get_headerA(self->remote_sdp, "setup");
+				fpA = tsdp_message_get_headerA(self->remote_sdp, "fingerprint");
+				connectionA = tsdp_message_get_headerA(self->remote_sdp, "connection");
+			}
+			if(!setupA) setupA = tsdp_header_M_findA(m, "setup");
+			if(!fpA) fpA = tsdp_header_M_findA(m, "fingerprint");
+			if(!connectionA) connectionA = tsdp_header_M_findA(m, "connection");
+
+			if(setupA) setups[0] = setupA->value;
+			if(fpA) fingerprints[0] = fpA->value;
+			if(connectionA) connections[0] = connectionA->value;
+
+			/* 2. check DTLS attributes from the caps (RFC 5939) */
+			if(!srtp_dtls_neg_ok && !fingerprints[0]){
+				tsk_size_t k_fp = 0, k_st = 0, k_conn = 0;
+				for(i = 0; (i < SDP_CAPS_COUNT_MAX && self->sdp_caps->remote[i].tag > 0); ++i){
+					// "fingerprint"
+					j = 0;
+					while((acap = _sdp_acaps_find_by_field(&self->sdp_caps->remote[i].acaps, "fingerprint", j++))){
+						if(k_fp < (sizeof(fingerprints)/sizeof(fingerprints[0]))){
+							if((fingerprints[k_fp] = strstr(acap->value, ":")) && ++fingerprints[k_fp]){
+								while(isspace(*fingerprints[k_fp])) ++fingerprints[k_fp];
+								++k_fp;
+							}
+						}
+					}
+					// "setup"
+					j = 0;
+					while((acap = _sdp_acaps_find_by_field(&self->sdp_caps->remote[i].acaps, "setup", j++))){
+						if(k_st < (sizeof(setups)/sizeof(setups[0]))){
+							if((setups[k_st] = strstr(acap->value, ":")) && ++setups[k_st]){
+								while(isspace(*setups[k_st])) ++setups[k_st];
+								++k_st;
+							}
+						}
+					}
+					// "connection"
+					j = 0;
+					while((acap = _sdp_acaps_find_by_field(&self->sdp_caps->remote[i].acaps, "connection", j++))){
+						if(k_conn < (sizeof(connections)/sizeof(connections[0]))){
+							if((connections[k_conn] = strstr(acap->value, ":")) && ++connections[k_conn]){
+								while(isspace(*connections[k_conn])) ++connections[k_conn];
+								++k_conn;
+							}
+						}
+					}
+
+					if(k_fp || k_st || k_conn){ // do not mix crypto lines from different pcfgs
+						acfg_idx = i;
+						break;
+					}
+				}
+			}
+
+			/* 3. setup DTLS connection using negotiated attributes */
+
+			if(!srtp_dtls_neg_ok && fingerprints[0]){
+				tnet_fingerprint_t fingerprint;
+				char hash[16];
+				// NOTE(review): 'acfg_idx = i' below stores an index into the local
+				// fingerprints[] array, not into sdp_caps->remote[] -- confirm intent.
+				for(i = 0; i < sizeof(fingerprints)/sizeof(fingerprints[0]) && !srtp_dtls_neg_ok; ++i){
+					if(!fingerprints[i]){
+						break;
+					}
+					if(sscanf(fingerprints[i], "%15s %255s", hash, fingerprint) >= 2){
+						if((ret = trtp_manager_set_dtls_remote_fingerprint(self->rtp_manager, &fingerprint, hash)) == 0){
+							acfg_idx = i;
+							srtp_dtls_neg_ok = tsk_true;
+							break;
+						}
+					}
+				}
+			}
+
+			// only accept sdp without fingerprints if certificate verification is OFF
+			if(!srtp_dtls_neg_ok && !fingerprints[0] && !TMEDIA_SESSION(self)->dtls.verify){
+				for(i = 0; (i < SDP_CAPS_COUNT_MAX && self->sdp_caps->remote[i].tag > 0); ++i){
+					if(self->sdp_caps->remote[i].tcap.tag > 0 && (self->sdp_caps->remote[i].tcap.profile & RTP_PROFILE_SECURE_DTLS) == RTP_PROFILE_SECURE_DTLS){
+						acfg_idx = i;
+						break;
+					}
+				}
+			}
+
+			// defaults
+			// FIX: clamp the index -- acfg_idx may exceed the 4-slot local arrays
+			// (SDP_CAPS_COUNT_MAX can be larger), which was an out-of-bounds read.
+			attr_idx = (acfg_idx >= 0 && acfg_idx < (int32_t)(sizeof(connections)/sizeof(connections[0]))) ? acfg_idx : 0;
+			if(!connection) connection = connections[attr_idx];
+			if(!setup) setup = setups[attr_idx];
+
+			if((!connection || !setup) && (is_srtp_dtls_remote_mandatory || is_srtp_local_mandatory || (acfg_idx != -1 && (self->sdp_caps->remote[acfg_idx].tcap.profile & RTP_PROFILE_SECURE_DTLS) == RTP_PROFILE_SECURE_DTLS))){
+				if(!connection) connection = (self->dtls.local.connection_new ? "existing" : "new");
+				if(!setup) setup = (self->dtls.local.setup == tnet_dtls_setup_active
+					? "passive"
+					: (self->dtls.local.setup == tnet_dtls_setup_passive ? "active" : (base->M.lo ? "passive" : "active")));
+			}
+
+			if (connection && setup) {
+				// update local setup according to remote setup
+				// do not update if local setup already negotiated
+				if (tnet_dtls_get_setup_from_string(setup) != tnet_dtls_setup_actpass || (self->dtls.local.setup == tnet_dtls_setup_none || self->dtls.local.setup == tnet_dtls_setup_actpass)) {
+					ret = _tdav_session_av_dtls_set_remote_setup(self,
+						tnet_dtls_get_setup_from_string(setup),
+						!tsk_striequals(connection, "existing"),
+						(!base->M.ro)
+						);
+				}
+				if (ret == 0) {
+					// pass new local values to the RTP manager
+					ret = trtp_manager_set_dtls_local_setup(self->rtp_manager, self->dtls.local.setup, self->dtls.local.connection_new);
+					srtp_dtls_neg_ok = (ret == 0);
+				}
+			}
+
+		}// end-of-dtls
+	}//end-of-if(srtp=optional|mandatory)
+
+	self->use_srtp = trtp_srtp_is_initialized(self->rtp_manager);
+
+	// activate the right SRTP type and disable others
+	trtp_manager_set_srtp_type_remote(self->rtp_manager,
+		srtp_sdes_neg_ok ? tmedia_srtp_type_sdes : (srtp_dtls_neg_ok ? tmedia_srtp_type_dtls : tmedia_srtp_type_none));
+
+#endif
+
+	// set actual config
+	if(acfg_idx == -1){
+		// none matched (means SRTP negotiation failed or not enabled -> try to negotiate AVP(F))
+		int32_t i;
+		for(i = 0; (i < SDP_CAPS_COUNT_MAX && self->sdp_caps->remote[i].tag > 0); ++i){
+			if(self->sdp_caps->remote[i].tcap.tag > 0){
+				if((self->sdp_caps->remote[i].tcap.profile & RTP_PROFILE_AVPF) == RTP_PROFILE_AVPF){
+					acfg_idx = i;
+					break;
+				}
+			}
+		}
+	}
+	if(acfg_idx != -1){
+		self->sdp_caps->acfg = self->sdp_caps->remote[acfg_idx];
+		if (self->avpf_mode_set == tmedia_mode_optional && self->avpf_mode_neg != tmedia_mode_mandatory) {
+			self->avpf_mode_neg = ((self->sdp_caps->acfg.tcap.profile & RTP_PROFILE_AVPF) == RTP_PROFILE_AVPF) ? tmedia_mode_mandatory : tmedia_mode_none;
+		}
+	}
+
+	if(!srtp_sdes_neg_ok && !srtp_dtls_neg_ok && (is_srtp_remote_mandatory || is_srtp_local_mandatory)){
+		TSK_DEBUG_ERROR("SRTP negotiation failed");
+		return -4;
+	}
+
+	return 0;
+}
+
+/* Returns the first negotiated codec that is a "real" media codec: DTMF, RED
+ * and ULPFEC entries are skipped, and the codec must expose a plugin with both
+ * an encoder and a decoder. Returns tsk_null when none qualifies. */
+const tmedia_codec_t* tdav_session_av_get_best_neg_codec(const tdav_session_av_t* self)
+{
+	const tsk_list_item_t* it;
+	if (!self) {
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return tsk_null;
+	}
+
+	tsk_list_foreach(it, TMEDIA_SESSION(self)->neg_codecs) {
+		const tmedia_codec_t* codec = TMEDIA_CODEC(it->data);
+		// exclude DTMF, RED and ULPFEC
+		if (TDAV_IS_DTMF_CODEC(it->data) || TDAV_IS_ULPFEC_CODEC(it->data) || TDAV_IS_RED_CODEC(it->data)) {
+			continue;
+		}
+		if (codec->plugin && codec->plugin->encode && codec->plugin->decode) {
+			return codec;
+		}
+	}
+	return tsk_null;
+}
+
+/* Scans the negotiated codec list for the RED (RFC 2198 redundancy) codec.
+ * Returns tsk_null when RED was not negotiated. */
+const tmedia_codec_t* tdav_session_av_get_red_codec(const tdav_session_av_t* self)
+{
+	const tsk_list_item_t* it;
+	if (!self) {
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return tsk_null;
+	}
+
+	tsk_list_foreach(it, TMEDIA_SESSION(self)->neg_codecs) {
+		if (TDAV_IS_RED_CODEC(it->data)) {
+			return TMEDIA_CODEC(it->data);
+		}
+	}
+	return tsk_null;
+}
+
+// Worker thread entry: delivers the last recorded error to the user's
+// onerror callback outside of the caller's context (avoids re-entrancy /
+// deadlocks when the error is raised from inside an RTP/DTLS callback).
+// 'usrdata' is a tdav_session_av_t* that was ref()ed by the producer side
+// (_tdav_session_av_raise_error_async); this thread owns that reference
+// and releases it before returning.
+static void* TSK_STDCALL _tdav_session_av_error_async_thread(void* usrdata)
+{
+	if(usrdata){
+		tdav_session_av_t* self = (tdav_session_av_t*)usrdata;
+		// lock so last_error.reason/is_fatal cannot be updated mid-callback
+		tsk_safeobj_lock(self);
+		if(TMEDIA_SESSION(self)->onerror_cb.fun){
+			TMEDIA_SESSION(self)->onerror_cb.fun(TMEDIA_SESSION(self)->onerror_cb.usrdata, TMEDIA_SESSION(self), self->last_error.reason, self->last_error.is_fatal);
+		}
+		tsk_safeobj_unlock(self);
+		tsk_object_unref(self); // see _tdav_session_av_raise_error_async()
+	}
+	return tsk_null;
+}
+
+// Records (is_fatal, reason) as the session's last error and spawns a
+// detached-style worker thread to invoke the user's onerror callback
+// asynchronously. Joins any previously spawned error thread first so that
+// at most one error thread exists at a time.
+// @retval 0 on success, non-zero if the thread could not be created.
+static int _tdav_session_av_raise_error_async(struct tdav_session_av_s* self, tsk_bool_t is_fatal, const char* reason)
+{
+	int ret;
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	tsk_safeobj_lock(self);
+
+	// keep the session alive until the worker thread has run
+	tsk_object_ref(self); // do not unref(), see _tdav_session_av_error_async_thread()
+
+	// wait for any previous error thread before reusing last_error.tid
+	if (self->last_error.tid[0]) {
+		tsk_thread_join(self->last_error.tid);
+	}
+
+	self->last_error.is_fatal = is_fatal;
+	tsk_strupdate(&self->last_error.reason, reason);
+	if ((ret = tsk_thread_create(self->last_error.tid, _tdav_session_av_error_async_thread, self)) != 0) {
+		// thread not started: drop the reference taken above
+		// (the 'goto bail' is redundant -- control falls through to bail anyway)
+		tsk_object_unref(self);
+		goto bail;
+	}
+
+bail:
+	tsk_safeobj_unlock(self);
+
+	return ret;
+}
+
+#if HAVE_SRTP
+// DTLS-SRTP event callback (registered with the RTP manager).
+// Failures are forwarded to the application via the async error path;
+// the "started" event (handshake done, SRTP keys installed) starts the
+// consumer/producer that were deferred until media could actually flow.
+// The session is ref()ed for the duration of the callback because events
+// may fire from the transport thread during teardown.
+static int _tdav_session_av_srtp_dtls_cb(const void* usrdata, enum trtp_srtp_dtls_event_type_e type, const char* reason)
+{
+	tdav_session_av_t* self = tsk_object_ref((tdav_session_av_t*)usrdata);
+
+	tsk_safeobj_lock(self);
+	switch(type){
+		case trtp_srtp_dtls_event_type_handshake_failed:
+		case trtp_srtp_dtls_event_type_fatal_error:
+		{
+			// raise asynchronously: we are inside a transport callback
+			if(TMEDIA_SESSION(self)->onerror_cb.fun){
+				static const tsk_bool_t __is_fatal = tsk_true;
+				_tdav_session_av_raise_error_async(self, __is_fatal, reason);
+			}
+			break;
+		}
+		case trtp_srtp_dtls_event_type_handshake_succeed:
+		{
+			// nothing to do: media starts on the "started" event below
+			break;
+		}
+		case trtp_srtp_dtls_event_type_started:
+		{
+			// start producer and consumer
+			if (self->rtp_manager && self->rtp_manager->is_started) {
+				if (self->consumer && !self->consumer->is_started) tmedia_consumer_start(self->consumer);
+				if (self->producer && !self->producer->is_started) tmedia_producer_start(self->producer);
+			}
+			break;
+		}
+	}
+	tsk_safeobj_unlock(self);
+	tsk_object_unref(self);
+
+	return 0;
+}
+#endif /* HAVE_SRTP */
+
+/* RED decoder callback: re-injects a de-encapsulated RTP packet into the
+ * RTP manager's normal packet callback, as if it had arrived on the wire. */
+static int _tdav_session_av_red_cb(const void* usrdata, const struct trtp_rtp_packet_s* packet)
+{
+	tdav_session_av_t* session = (tdav_session_av_t*)usrdata;
+	if (!session->rtp_manager || !session->rtp_manager->rtp.cb.fun) {
+		return 0;
+	}
+	return session->rtp_manager->rtp.cb.fun(session->rtp_manager->rtp.cb.usrdata, packet);
+}
+
+// Updates the local DTLS "a=setup" role from the remote one, following the
+// RFC 4145/5763 offer-answer rules: remote "active" <-> local "passive",
+// remote "passive" <-> local "active", remote "actpass" -> we pick a role.
+// @param setup remote setup role parsed from the SDP.
+// @param connection_new whether the remote asked for a new TLS connection.
+// @param is_ro_null tsk_true when we have not yet received a remote offer
+//        (i.e. we are offering and must answer "actpass").
+// @retval always 0.
+int _tdav_session_av_dtls_set_remote_setup(struct tdav_session_av_s* self, tnet_dtls_setup_t setup, tsk_bool_t connection_new, tsk_bool_t is_ro_null)
+{
+	if(self){
+		TSK_DEBUG_INFO("dtls.remote.setup=%s", TNET_DTLS_SETUP_NAMES[(int)setup]);
+		self->dtls.remote.setup = setup;
+		self->dtls.remote.connection_new = connection_new;
+		switch(self->dtls.remote.setup){
+			case tnet_dtls_setup_none:
+			default:
+				// remote did not state a role: offer both
+				self->dtls.local.setup = tnet_dtls_setup_actpass;
+				self->dtls.local.connection_new = tsk_true; // RTP transport always unprepared for reINVITE/UPDATE -> new connection
+				break;
+			case tnet_dtls_setup_active:
+				// remote connects -> we listen (or "actpass" when still offering)
+				self->dtls.local.setup = is_ro_null ? tnet_dtls_setup_actpass : tnet_dtls_setup_passive;
+				self->dtls.local.connection_new = tsk_true;
+				break;
+			case tnet_dtls_setup_passive:
+				// remote listens -> we connect (or "actpass" when still offering)
+				self->dtls.local.setup = is_ro_null ? tnet_dtls_setup_actpass : tnet_dtls_setup_active;
+				self->dtls.local.connection_new = tsk_true;
+				break;
+			case tnet_dtls_setup_actpass:
+				if (self->dtls.local.setup == tnet_dtls_setup_actpass || self->dtls.local.setup == tnet_dtls_setup_none) { // change local setup only if actpass or none
+					self->dtls.local.setup = (self->dtls.local.setup == tnet_dtls_setup_actpass || self->dtls.local.setup == tnet_dtls_setup_active)
+						? tnet_dtls_setup_active
+						: tnet_dtls_setup_passive;
+					self->dtls.local.connection_new = tsk_true;
+				}
+				break;
+		}
+	}
+	return 0;
+}
+
+const tmedia_codec_t* tdav_session_av_get_ulpfec_codec(const tdav_session_av_t* self)
+{
+ const tsk_list_item_t* item;
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ tsk_list_foreach(item, TMEDIA_SESSION(self)->neg_codecs){
+ if(TDAV_IS_ULPFEC_CODEC(item->data)){
+ return TMEDIA_CODEC(item->data);
+ }
+ }
+ return tsk_null;
+}
+
+// Releases every resource owned by the A/V session base: media endpoints,
+// RTP manager, SDP objects, NAT/ICE contexts and the async error state.
+// Destruction order matters: consumer/producer must go before the RTP
+// manager they feed from / into.
+int tdav_session_av_deinit(tdav_session_av_t* self)
+{
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* deinit self (rtp manager should be destroyed after the producer) */
+	TSK_OBJECT_SAFE_FREE(self->consumer);
+	TSK_OBJECT_SAFE_FREE(self->producer);
+	TSK_OBJECT_SAFE_FREE(self->rtp_manager);
+	TSK_OBJECT_SAFE_FREE(self->sdp_caps);
+	TSK_OBJECT_SAFE_FREE(self->remote_sdp);
+	TSK_OBJECT_SAFE_FREE(self->local_sdp);
+	TSK_FREE(self->remote_ip);
+	TSK_FREE(self->local_ip);
+
+	/* RED and ULPFEC codecs */
+	TSK_OBJECT_SAFE_FREE(self->red.codec);
+	TSK_OBJECT_SAFE_FREE(self->ulpfec.codec);
+
+	/* NAT Traversal context */
+	TSK_OBJECT_SAFE_FREE(self->natt_ctx);
+	TSK_OBJECT_SAFE_FREE(self->ice_ctx);
+
+	/* Last error: join the async error thread before freeing its reason string */
+	if(self->last_error.tid[0]){
+		tsk_thread_join(self->last_error.tid);
+	}
+	TSK_FREE(self->last_error.reason);
+
+	tsk_safeobj_deinit(self);
+
+	/* deinit base */
+	tmedia_session_deinit(TMEDIA_SESSION(self));
+
+	return 0;
+}
+
+
+
+
+
+
+
+
+
+/* Finds the index-th "a=<field>:..." attribute in 'sdp', which may be either
+ * a whole SDP message (session-level search) or a single "m=" line
+ * (media-level search); dispatch is done on the object's runtime type. */
+static const tsdp_header_A_t* _sdp_findA_at(const sdp_headerM_Or_Message* sdp, const char* field, tsk_size_t index)
+{
+	if (sdp) {
+		if (TSK_OBJECT_HEADER(sdp)->__def__ == tsdp_message_def_t) {
+			return tsdp_message_get_headerA_at((const tsdp_message_t*)sdp, field, index);
+		}
+		if (TSK_OBJECT_HEADER(sdp)->__def__ == tsdp_header_M_def_t) {
+			return tsdp_header_M_findA_at((const tsdp_header_M_t*)sdp, field, index);
+		}
+	}
+	TSK_DEBUG_ERROR("Invalid parameter");
+	return tsk_null;
+}
+
+/* Appends an "a=<field>:<value>" attribute to 'sdp', which may be either a
+ * whole SDP message (session-level) or a single "m=" line (media-level).
+ * @retval 0 on success, -1 on invalid/unknown object. */
+static int _sdp_add_headerA(sdp_headerM_Or_Message* sdp, const char* field, const char* value)
+{
+	if (!sdp || !field) {
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	if (TSK_OBJECT_HEADER(sdp)->__def__ == tsdp_message_def_t) {
+		return tsdp_message_add_headers((tsdp_message_t*)sdp,
+			TSDP_HEADER_A_VA_ARGS(field, value),
+			tsk_null);
+	}
+	if (TSK_OBJECT_HEADER(sdp)->__def__ == tsdp_header_M_def_t) {
+		return tsdp_header_M_add_headers((tsdp_header_M_t*)sdp,
+			TSDP_HEADER_A_VA_ARGS(field, value),
+			tsk_null);
+	}
+	TSK_DEBUG_ERROR("Invalid parameter");
+	return -1;
+}
+
+/* Maps an RTP profile name (e.g. "RTP/SAVPF") to its enum value via a
+ * case-insensitive lookup in the static RTP_PROFILES table.
+ * Returns RTP_PROFILE_NONE when the name is unknown. */
+static RTP_PROFILE_T _sdp_profile_from_string(const char* profile)
+{
+	int32_t idx;
+	for (idx = 0; idx < RTP_PROFILES_COUNT; ++idx) {
+		if (tsk_striequals(RTP_PROFILES[idx].name, profile)) {
+			return RTP_PROFILES[idx].type;
+		}
+	}
+	return RTP_PROFILE_NONE;
+}
+
+/* Maps an RTP profile enum value back to its SDP name (e.g. "RTP/SAVPF")
+ * via the static RTP_PROFILES table. Returns tsk_null when not found. */
+static const char* _sdp_profile_to_string(RTP_PROFILE_T profile)
+{
+	int32_t idx;
+	for (idx = 0; idx < RTP_PROFILES_COUNT; ++idx) {
+		if (RTP_PROFILES[idx].type == profile) {
+			return RTP_PROFILES[idx].name;
+		}
+	}
+	return tsk_null;
+}
+
+_SDP_DECLARE_INDEX_OF(acap);
+
+/* Returns the index-th acap entry whose value starts with "<field>:"
+ * (e.g. field="crypto" matches "crypto:1 AES_CM_..."), or tsk_null.
+ * 'index' selects among multiple matches (0 = first). */
+static const sdp_acap_xt* _sdp_acaps_find_by_field(const sdp_acap_xt (*acaps)[SDP_CAPS_COUNT_MAX], const char* field, int32_t index)
+{
+	int32_t i, j, k, size;
+
+	if(!acaps || !field){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return tsk_null;
+	}
+
+	i = 0, j = 0;
+	size = (int32_t)tsk_strlen(field);
+	// FIX: check the bound BEFORE reading (*acaps)[j].tag -- the original
+	// order dereferenced one element past the array when it was full.
+	while(j < SDP_CAPS_COUNT_MAX && (*acaps)[j].tag){
+		k = _sdp_str_index_of((*acaps)[j].value, field);
+		if(k == 0 && (*acaps)[j].value[size] == ':'){ // match at offset 0 followed by ':'
+			if(i == index){
+				return &(*acaps)[j];
+			}
+			++i;
+		}
+		++j;
+	}
+	return tsk_null;
+}
+
+// Parses every "a=acap:<tag> <field>:<value>" attribute (RFC 5939) from
+// 'sdp' (message- or media-level) into the 'acaps' array, optionally
+// appending to existing entries instead of resetting.
+// The stored 'value' pointers alias the tsdp header strings -- the SDP
+// object must outlive the acaps array.
+// @retval 0 on success, -1 on invalid parameter or full array.
+static int _sdp_acaps_from_sdp(const sdp_headerM_Or_Message* sdp, sdp_acap_xt (*acaps)[SDP_CAPS_COUNT_MAX], tsk_bool_t reset)
+{
+	tsk_size_t acaps_count, acaps_idx;
+	const tsdp_header_A_t* A;
+	int32_t tag, index, size;
+
+	if(!sdp || !acaps){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+
+	if(reset){
+		_sdp_acaps_reset(acaps);
+		acaps_count = 0;
+	}
+	else{
+		// NOTE(review): acaps_count is unsigned (tsk_size_t) compared to -1;
+		// this relies on the helper's -1 converting to SIZE_MAX -- confirm.
+		if((acaps_count = _sdp_acaps_indexof(acaps, 0)) == -1){
+			TSK_DEBUG_ERROR("No room to append items");
+			return -1;
+		}
+	}
+
+	acaps_idx = 0;
+	while((A = _sdp_findA_at(sdp, "acap", acaps_idx++))){
+		if (!(size = (int32_t)tsk_strlen(A->value))){
+			goto next;
+		}
+		// leading integer is the acap tag
+		if(sscanf(A->value, "%d", &tag) == EOF){
+			TSK_DEBUG_ERROR("sscanf(%s) failed", A->value);
+			break;
+		}
+		if(tag <= 0 || (tag + 1) > SDP_CAPS_COUNT_MAX){
+			TSK_DEBUG_WARN("Ignoring tag with value = %d", tag);
+			goto next;
+		}
+
+		// skip "<tag> " to reach the attribute payload
+		index = _sdp_integer_length(tag) + 1;/*SPACE*/
+		if(index >= size){
+			TSK_DEBUG_WARN("a=%s is empty", A->value);
+			goto next;
+		}
+
+		(*acaps)[acaps_count].tag = tag;
+		(*acaps)[acaps_count].value = &A->value[index]; // aliases the header string
+next:
+		if(++acaps_count >= SDP_CAPS_COUNT_MAX){
+			break;
+		}
+	}
+
+	return 0;
+}
+
+_SDP_DECLARE_INDEX_OF(tcap);
+
+/* Parses every "a=tcap:<tag> <profile> [<profile>...]" attribute (RFC 5939)
+ * from 'sdp' into 'tcaps'. A tcap line listing several profiles is split
+ * into consecutive entries with implicitly incremented tags.
+ * @retval 0 on success, -1 on invalid parameter or full array. */
+static int _sdp_tcaps_from_sdp(const sdp_headerM_Or_Message* sdp, sdp_tcap_xt (*tcaps)[SDP_CAPS_COUNT_MAX], tsk_bool_t reset)
+{
+	int32_t tcaps_count, tcaps_idx, profiles_count;
+	const tsdp_header_A_t* A;
+	int32_t tag, index, size, tag_fake;
+	char tcap[256];
+
+	if(!sdp || !tcaps){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(reset){
+		_sdp_tcaps_reset(tcaps);
+		tcaps_count = 0;
+	}
+	else{
+		if((tcaps_count = _sdp_tcaps_indexof(tcaps, 0)) == -1){
+			TSK_DEBUG_ERROR("No room to append items");
+			return -1;
+		}
+	}
+
+	profiles_count = 0;
+	tcaps_idx = 0;
+	while((A = _sdp_findA_at(sdp, "tcap", tcaps_idx++))){
+		// FIX: reset the parse offset for EACH "a=tcap" header; it used to
+		// carry over from the previous header, reading past the new value.
+		index = 0;
+		if (!(size = (int32_t)tsk_strlen(A->value))){
+			goto next;
+		}
+		if(sscanf(&A->value[index], "%d", &tag) == EOF || (_sdp_integer_length(tag) + 1 >= size)){
+			TSK_DEBUG_ERROR("sscanf(%s) failed", A->value);
+			break;
+		}
+		if(tag <= 0 || (tag + 1) > SDP_CAPS_COUNT_MAX){
+			TSK_DEBUG_WARN("Ignoring tag with value = %d", tag);
+			goto next;
+		}
+
+		index += _sdp_integer_length(tag) + 1/*SPACE*/;
+
+		profiles_count = 0;
+		tag_fake = tag;
+		// FIX: pass the array (decays to char*), not its address (char(*)[256])
+		while(sscanf(&A->value[index], "%255s", tcap) != EOF){
+			// FIX: also bound the write index, not only the synthetic tag
+			if(tag_fake < SDP_CAPS_COUNT_MAX && (tcaps_count + profiles_count) < SDP_CAPS_COUNT_MAX){
+				(*tcaps)[tcaps_count + profiles_count].tag = tag_fake;
+				(*tcaps)[tcaps_count + profiles_count].profile = _sdp_profile_from_string(tcap); // split profiles
+			}
+			if ((index += (int32_t)tsk_strlen(tcap) + 1/*SPACE*/) >= size){
+				break;
+			}
+			++tag_fake;
+			++profiles_count;
+		}
+next:
+		if(++tcaps_count >= SDP_CAPS_COUNT_MAX){
+			break;
+		}
+	}
+
+	return 0;
+}
+
+// Serializes an actual configuration (RFC 5939 "a=acfg") into 'sdp':
+// "a=acfg:<tag> [t=<tcap-tag>] [a=<acap-tag>[,|][...]]".
+// @retval 0 on success, -1 on invalid parameter.
+static int _sdp_acfg_to_sdp(sdp_headerM_Or_Message* sdp, const sdp_acfg_xt *acfg)
+{
+	int32_t i_a_caps;
+	char *acfg_str = tsk_null;
+
+	if(!sdp || !acfg || acfg->tag <= 0){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	// acfg: tag
+	tsk_strcat_2(&acfg_str, "%d", acfg->tag);
+	// acfg: t=
+	if(acfg_str && acfg->tcap.tag > 0){
+		tsk_strcat_2(&acfg_str, " t=%d", acfg->tcap.tag);
+	}
+	// acfg: a= (first tag after "a=", following tags joined by ',' or '|',
+	// optional tags wrapped in '[...]')
+	for (i_a_caps = 0; acfg_str && i_a_caps < SDP_CAPS_COUNT_MAX; ++i_a_caps){
+		if(acfg->acaps[i_a_caps].tag <= 0){
+			break;
+		}
+		if(i_a_caps == 0){
+			tsk_strcat_2(&acfg_str, " a=%d", acfg->acaps[i_a_caps].tag);
+		}
+		else{
+			tsk_strcat_2(&acfg_str, "%s%s%d%s", // e.g. |2 or ,6 or ,[2]
+				acfg->acaps[i_a_caps].or ? "|" : ",",
+				acfg->acaps[i_a_caps].optional ? "[" : "",
+				acfg->acaps[i_a_caps].tag,
+				acfg->acaps[i_a_caps].optional ? "]" : ""
+				);
+		}
+	}
+
+	// a=acfg:
+	if(acfg_str){
+		_sdp_add_headerA(sdp, "acfg", acfg_str);
+		TSK_FREE(acfg_str);
+	}
+
+	return 0;
+}
+
+_SDP_DECLARE_INDEX_OF(pcfg);
+
+// Parses RFC 5939 potential configurations: first collects "a=tcap" and
+// "a=acap" into 'tcaps'/'acaps', then resolves every "a=pcfg:<tag>
+// [t=<n>] [a=<n>[,|[...]]]" line into fully-resolved 'pcfgs' entries
+// (tcap/acap references are copied by value from the lookup tables).
+// @retval 0 on success, negative on error (propagated from the helpers).
+static int _sdp_pcfgs_from_sdp(const sdp_headerM_Or_Message* sdp, sdp_acap_xt (*acaps)[SDP_CAPS_COUNT_MAX], sdp_tcap_xt (*tcaps)[SDP_CAPS_COUNT_MAX], sdp_pcfg_xt (*pcfgs)[SDP_CAPS_COUNT_MAX], tsk_bool_t reset)
+{
+	tsk_size_t pcfgs_count, pcfgs_idx;
+	const tsdp_header_A_t* A;
+	int32_t tag, index = 0, size, t, a_tag, indexof;
+	sdp_tcap_xt* tcap_curr;
+	int ret;
+	char pcfg[256], a[256];
+
+	if(!sdp || !acaps || !tcaps || !pcfgs){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	// build the capability lookup tables first
+	if((ret = _sdp_tcaps_from_sdp(sdp, tcaps, reset))){
+		return ret;
+	}
+	if((ret = _sdp_acaps_from_sdp(sdp, acaps, reset))){
+		return ret;
+	}
+
+	if(reset){
+		_sdp_pcfgs_reset(pcfgs);
+		pcfgs_count = 0;
+	}
+	else{
+		// NOTE(review): pcfgs_count is unsigned compared to -1 (relies on wrap)
+		if((pcfgs_count = _sdp_pcfgs_indexof(pcfgs, 0)) == -1){
+			TSK_DEBUG_ERROR("No room to append items");
+			return -1;
+		}
+	}
+
+	pcfgs_idx = 0;
+	tcap_curr = tsk_null;
+	while((A = _sdp_findA_at(sdp, "pcfg", pcfgs_idx++))){
+		if (!(size = (int32_t)tsk_strlen(A->value))){
+			goto next_A;
+		}
+		if(sscanf(A->value, "%d", &tag) == EOF || (_sdp_integer_length(tag) + 1 >= size)){
+			TSK_DEBUG_ERROR("sscanf(%s) failed", A->value);
+			break;
+		}
+		if(tag <= 0 || (tag + 1) > SDP_CAPS_COUNT_MAX){
+			TSK_DEBUG_WARN("Ignoring tag with value = %d", tag);
+			goto next_A;
+		}
+
+		(*pcfgs)[pcfgs_count].tag = tag;
+
+		// skip "<tag> " then walk the space-separated parameter tokens
+		index = _sdp_integer_length(tag) + 1/*SPACE*/;
+
+		// NOTE(review): '&pcfg' is char(*)[256] where sscanf expects char*;
+		// same address in practice, but 'pcfg' (array decay) would be correct.
+		while(sscanf(&A->value[index], "%255s", &pcfg) != EOF){
+			if(_sdp_str_starts_with(&A->value[index], "t=") && sscanf(pcfg, "t=%d", &t) != EOF){
+				if(t <= 0 || t + 1 >= SDP_CAPS_COUNT_MAX){
+					TSK_DEBUG_ERROR("t = %d ignored", t);
+					goto next_pcfg;
+				}
+				// tcap is something like a=tcap:1 RTP/SAVPF RTP/SAVP RTP/AVPF
+				// tcap [2] is "RTP/SAVP" -> not indexed by tag
+				tcap_curr = &(*pcfgs)[pcfgs_count].tcap;
+				if((indexof = _sdp_tcaps_indexof(tcaps, t)) == -1){
+					TSK_DEBUG_ERROR("Failed to find 'tcap' with tag=%d", t);
+					goto next_pcfg;
+				}
+				*tcap_curr = (*tcaps)[indexof]; // copy by value
+			}
+			else{
+				if(_sdp_str_starts_with(&A->value[index], "a=") && sscanf(pcfg, "a=%255s", a) != EOF){
+					// a= value is a ',' / '|' separated tag list, '[]' marks optional
+					char a_copy[sizeof(a)], *pch, *saveptr;
+					tsk_size_t pcfg_acfgs_count = 0;
+					sdp_acap_xt* acap;
+					memcpy(a_copy, a, sizeof(a)); // keep separators: strtok destroys 'a'
+
+					pch = tsk_strtok_r (a, ",[]|", &saveptr);
+					while(pch){
+						a_tag = atoi(pch);
+						if(a_tag <= 0 || a_tag + 1 >= SDP_CAPS_COUNT_MAX){
+							TSK_DEBUG_ERROR("a = %d ignored", a_tag);
+							goto next_a;
+						}
+						if((indexof = _sdp_acaps_indexof(acaps, a_tag)) == -1){
+							TSK_DEBUG_ERROR("Failed to find 'acap' with tag=%d", a_tag);
+							goto next_a;
+						}
+						acap = &(*pcfgs)[pcfgs_count].acaps[pcfg_acfgs_count++];
+						*acap = (*acaps)[indexof];
+						// recover the separator that preceded this token from the copy
+						acap->optional = (pch != a && a_copy[pch - a - 1] == '[') ? 1 : 0;
+						acap->or = (pch != a && a_copy[pch - a - 1] == '|') ? 1 : 0;
+next_a:
+						pch = tsk_strtok_r(tsk_null, ",[]|", &saveptr);
+					}
+				}
+				tcap_curr = tsk_null;
+			}
+next_pcfg:
+			if ((index += (int32_t)tsk_strlen(pcfg) + 1/*SPACE*/) >= size){
+				break;
+			}
+		}
+next_A:
+		if(++pcfgs_count >= SDP_CAPS_COUNT_MAX){
+			break;
+		}
+	}
+
+	return ret;
+}
+
+// Serializes potential configurations (RFC 5939) into 'sdp': one compacted
+// "a=tcap" line, one "a=acap" line per distinct attribute capability
+// (de-duplicated via 'serialized_acaps'), and one "a=pcfg" line per entry.
+// @retval 0 on success, -1 on invalid parameter.
+static int _sdp_pcfgs_to_sdp(sdp_headerM_Or_Message* sdp, const sdp_pcfg_xt (*pcfgs)[SDP_CAPS_COUNT_MAX])
+{
+	int32_t i_pcfgs, i_a_caps, i_serialized_acaps;
+	char *pcfg = tsk_null, *acap = tsk_null, *tcap = tsk_null;
+	sdp_acaps_xt serialized_acaps; /* to avoid duplication */
+
+	if(!sdp || !pcfgs){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	// compact(a=tcap:)
+	// tcap:1 RTP/AVP
+	// tcap:2 RTP/SAVP
+	// tcap:3 RTP/SAVPF
+	// will be compacted as
+	// tcap:1 RTP/AVP RTP/SAVP RTP/SAVPF
+	for (i_pcfgs = 0; i_pcfgs < SDP_CAPS_COUNT_MAX; ++i_pcfgs){
+		if((*pcfgs)[i_pcfgs].tag <= 0 || (*pcfgs)[i_pcfgs].tcap.tag <= 0){
+			break;
+		}
+		if(!tcap){
+			tsk_sprintf(&tcap, "1 %s", _sdp_profile_to_string((*pcfgs)[i_pcfgs].tcap.profile));
+		}
+		else{
+			tsk_strcat_2(&tcap, " %s", _sdp_profile_to_string((*pcfgs)[i_pcfgs].tcap.profile));
+		}
+	}
+	if(tcap){
+		_sdp_add_headerA(sdp, "tcap", tcap);
+		TSK_FREE(tcap);
+	}
+
+	_sdp_acaps_reset(&serialized_acaps);
+	i_serialized_acaps = 0;
+
+	for (i_pcfgs = 0; i_pcfgs < SDP_CAPS_COUNT_MAX; ++i_pcfgs){
+		if((*pcfgs)[i_pcfgs].tag <= 0){
+			break;
+		}
+		// pcfg: tag
+		tsk_strcat_2(&pcfg, "%d", (*pcfgs)[i_pcfgs].tag);
+		// pcfg: t=
+		if((*pcfgs)[i_pcfgs].tcap.tag > 0){
+			tsk_strcat_2(&pcfg, " t=%d", (*pcfgs)[i_pcfgs].tcap.tag);
+		}
+
+		// pcfg: a= (first tag after "a=", later tags joined by ',' or '|',
+		// optional tags wrapped in '[...]')
+		for (i_a_caps = 0; i_a_caps < SDP_CAPS_COUNT_MAX; ++i_a_caps){
+			if((*pcfgs)[i_pcfgs].acaps[i_a_caps].tag <= 0){
+				break;
+			}
+			if(i_a_caps == 0){
+				tsk_strcat_2(&pcfg, " a=%d", (*pcfgs)[i_pcfgs].acaps[i_a_caps].tag);
+			}
+			else{
+				tsk_strcat_2(&pcfg, "%s%s%d%s", // e.g. |2 or ,6 or ,[2]
+					(*pcfgs)[i_pcfgs].acaps[i_a_caps].or ? "|" : ",",
+					(*pcfgs)[i_pcfgs].acaps[i_a_caps].optional ? "[" : "",
+					(*pcfgs)[i_pcfgs].acaps[i_a_caps].tag,
+					(*pcfgs)[i_pcfgs].acaps[i_a_caps].optional ? "]" : ""
+					);
+			}
+			// a=acap: emit once per distinct tag across all pcfgs
+			if(_sdp_acaps_indexof(&serialized_acaps, (*pcfgs)[i_pcfgs].acaps[i_a_caps].tag) == -1){
+				tsk_sprintf(&acap, "%d %s", (*pcfgs)[i_pcfgs].acaps[i_a_caps].tag, (*pcfgs)[i_pcfgs].acaps[i_a_caps].value);
+				if(acap){
+					_sdp_add_headerA(sdp, "acap", acap);
+					TSK_FREE(acap);
+					serialized_acaps[i_serialized_acaps++].tag = (*pcfgs)[i_pcfgs].acaps[i_a_caps].tag;
+				}
+			}
+		}
+
+		// a=pcfg:
+		if(pcfg){
+			_sdp_add_headerA(sdp, "pcfg", pcfg);
+			TSK_FREE(pcfg);
+		}
+	}
+	return 0;
+}
+
+// Promotes a chosen potential configuration (RFC 5939) to actual SDP lines:
+// rewrites the "m=" proto to the pcfg's transport profile and re-emits each
+// acap value "field:X value..." as a real "a=field:value..." attribute.
+// @retval 0 on success, -1 on invalid parameter.
+static int _sdp_pcfg_ensure(sdp_headerM_Or_Message* sdp, const sdp_pcfg_xt* pcfg)
+{
+	int32_t i, n;
+	char field[256];
+
+	if(!sdp || !pcfg || pcfg->tag <=0){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	// only a media line has a proto to overwrite
+	if(TSK_OBJECT_HEADER(sdp)->__def__ == tsdp_header_M_def_t && pcfg->tcap.profile != RTP_PROFILE_NONE){
+		tsk_strupdate(&((tsdp_header_M_t*)sdp)->proto, _sdp_profile_to_string(pcfg->tcap.profile));
+	}
+
+	for(i = 0; i < SDP_CAPS_COUNT_MAX && pcfg->acaps[i].tag > 0; ++i){
+		// first token is "field:X"; field[n-2]='\0' strips ":X" to keep "field",
+		// and &value[n + 1] skips past the token and the following space.
+		// NOTE(review): the -2 / +1 offsets assume a single character after the
+		// colon in the first token -- confirm against the acap value format.
+		if (sscanf(pcfg->acaps[i].value, "%255s%*s", field) != EOF && (n = (int32_t)tsk_strlen(field)) > 2){
+			field[n - 2] = '\0';
+			_sdp_add_headerA(sdp, field, &pcfg->acaps[i].value[n + 1/*SPACE*/]);
+		}
+	}
+
+	return 0;
+}
+
+/* Appends the used entries (tag > 0) of 'pcfgs_src' to the first free slots
+ * of 'pcfgs_dst', stopping when either array is exhausted.
+ * @retval 0 on success, -1 on invalid parameter. */
+static int _sdp_pcfgs_cat(const sdp_pcfg_xt (*pcfgs_src)[SDP_CAPS_COUNT_MAX], sdp_pcfg_xt (*pcfgs_dst)[SDP_CAPS_COUNT_MAX])
+{
+	int32_t dst_idx, src_idx;
+	if (!pcfgs_src || !pcfgs_dst) {
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	/* locate the first free destination slot */
+	for (dst_idx = 0; dst_idx < SDP_CAPS_COUNT_MAX && (*pcfgs_dst)[dst_idx].tag > 0; ++dst_idx) {
+	}
+
+	/* copy source entries until a free/empty slot or either bound is hit */
+	for (src_idx = 0;
+	     dst_idx < SDP_CAPS_COUNT_MAX && src_idx < SDP_CAPS_COUNT_MAX && (*pcfgs_src)[src_idx].tag > 0;
+	     ++src_idx, ++dst_idx) {
+		(*pcfgs_dst)[dst_idx] = (*pcfgs_src)[src_idx];
+	}
+
+	return 0;
+}
+
+
+
+
+
+// tdav_sdp_caps_t object plumbing (tinySAK object model): the struct holds
+// only plain-old-data capability arrays, so the constructor and destructor
+// have nothing to allocate or release.
+static tsk_object_t* tdav_sdp_caps_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_sdp_caps_t *caps = self;
+	if(caps){
+		// nothing to initialize: tsk_object_new zero-fills the object
+	}
+	return self;
+}
+static tsk_object_t* tdav_sdp_caps_dtor(tsk_object_t * self)
+{
+	tdav_sdp_caps_t *caps = self;
+	if(caps){
+		// nothing to free: all members are inline arrays / scalars
+	}
+	return self;
+}
+// object definition: size, ctor, dtor, no comparator
+static const tsk_object_def_t tdav_sdp_caps_def_s =
+{
+	sizeof(tdav_sdp_caps_t),
+	tdav_sdp_caps_ctor,
+	tdav_sdp_caps_dtor,
+	tsk_null,
+};
+
+// Allocates a new reference-counted tdav_sdp_caps_t (release with unref).
+static tdav_sdp_caps_t* tdav_sdp_caps_create()
+{
+	return tsk_object_new(&tdav_sdp_caps_def_s);
+}
diff --git a/tinyDAV/src/tdav_win32.c b/tinyDAV/src/tdav_win32.c
new file mode 100644
index 0000000..789fc77
--- /dev/null
+++ b/tinyDAV/src/tdav_win32.c
@@ -0,0 +1,234 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+/**@file tdav_win32.c
+ * @brief tinyDAV WIN32 helper functions.
+ *
+ */
+#include "tinydav/tdav_win32.h"
+
+#if TDAV_UNDER_WINDOWS
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <windows.h>
+#if !TDAV_UNDER_WINDOWS_RT
+#include <Shlwapi.h> /* PathRemoveFileSpec */
+#endif
+
+/* https://msdn.microsoft.com/en-us/library/windows/desktop/ms724834%28v=vs.85%29.aspx
+Version Number Description
+6.2 Windows 8 / Windows Server 2012
+6.1 Windows 7 / Windows 2008 R2
+6.0 Windows Vista / Windows 2008
+5.2 Windows 2003
+5.1 Windows XP
+5.0 Windows 2000
+*/
+// Cached OS version numbers; (DWORD)-1 (i.e. ULONG_MAX) means "not yet filled by tdav_win32_init()".
+static DWORD dwMajorVersion = -1;
+static DWORD dwMinorVersion = -1;
+
+#if (TDAV_UNDER_WINDOWS_RT || TDAV_UNDER_WINDOWS_CE)
+/* Module handle of the image containing this code.
+ * Not retrievable on Windows RT/CE: always returns NULL there (callers such as
+ * GetModuleFileName() then resolve to the current executable). */
+const HMODULE GetCurrentModule()
+{
+    return NULL;
+}
+#else
+/* Returns the handle of the module (DLL or EXE) that contains this function,
+ * by asking for the module owning this function's own address. */
+const HMODULE GetCurrentModule()
+{
+    HMODULE hm = {0};
+    GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)GetCurrentModule, &hm);
+    return hm;
+}
+#endif /* !(TDAV_UNDER_WINDOWS_RT || TDAV_UNDER_WINDOWS_CE) */
+
+/**
+ * One-time process-wide initialization for tinyDAV on Windows:
+ * initializes COM (multithreaded), raises the multimedia timer resolution to
+ * 1ms (undone by tdav_win32_deinit()) and caches the OS version numbers.
+ * @return 0 always (failures are only logged).
+ */
+int tdav_win32_init()
+{
+#if !TDAV_UNDER_WINDOWS_RT
+    MMRESULT result;
+
+#if !TDAV_UNDER_WINDOWS_CE
+    CoInitializeEx(NULL, COINIT_MULTITHREADED);
+#endif
+
+    // Timers accuracy
+    result = timeBeginPeriod(1);
+    if (result) {
+        TSK_DEBUG_ERROR("timeBeginPeriod(1) returned result=%u", result);
+    }
+
+    // Get OS version once; (DWORD)-1 marks "not yet retrieved".
+    // NOTE(review): GetVersionEx() is deprecated and lies on Win8.1+ without an
+    // application manifest -- confirm the manifest declares supported OS versions.
+    if(dwMajorVersion == -1 || dwMinorVersion == -1){
+        OSVERSIONINFO osvi;
+        ZeroMemory(&osvi, sizeof(OSVERSIONINFO));
+        osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+        GetVersionEx(&osvi);
+        dwMajorVersion = osvi.dwMajorVersion;
+        dwMinorVersion = osvi.dwMinorVersion;
+        // DWORD is unsigned long: %lu is the matching specifier (the original %ld
+        // had a signedness mismatch with the arguments).
+#if TDAV_UNDER_WINDOWS_CE && (BUILD_TYPE_GE && SIN_CITY)
+        TSK_DEBUG_INFO("Windows dwMajorVersion=%lu, dwMinorVersion=%lu\n", dwMajorVersion, dwMinorVersion);
+#else
+        fprintf(stdout, "Windows dwMajorVersion=%lu, dwMinorVersion=%lu\n", dwMajorVersion, dwMinorVersion);
+#endif
+    }
+#endif
+
+    return 0;
+}
+
+/**
+ * Copies the cached Windows version numbers into the caller's variables.
+ * Valid only after tdav_win32_init(); before that both values are (DWORD)-1.
+ * Either output pointer may be NULL to skip it.
+ * @return 0 always.
+ */
+int tdav_win32_get_osversion(unsigned long* version_major, unsigned long* version_minor)
+{
+    if (version_major) {
+        *version_major = dwMajorVersion;
+    }
+    if (version_minor) {
+        *version_minor = dwMinorVersion;
+    }
+    return 0;
+}
+
+/* True when the cached OS version is at least 6.2 (Windows 8 / Server 2012).
+ * Requires tdav_win32_init() to have populated the cached version numbers. */
+tsk_bool_t tdav_win32_is_win8_or_later()
+{
+    if (dwMajorVersion == -1 || dwMinorVersion == -1) {
+        TSK_DEBUG_ERROR("Version numbers are invalid");
+        return tsk_false;
+    }
+    if (dwMajorVersion > 6) {
+        return tsk_true;
+    }
+    return (dwMajorVersion == 6 && dwMinorVersion >= 2) ? tsk_true : tsk_false;
+}
+
+/* True when the cached OS version is at least 6.1 (Windows 7 / Server 2008 R2).
+ * Requires tdav_win32_init() to have populated the cached version numbers. */
+tsk_bool_t tdav_win32_is_win7_or_later()
+{
+    if (dwMajorVersion == -1 || dwMinorVersion == -1) {
+        TSK_DEBUG_ERROR("Version numbers are invalid");
+        return tsk_false;
+    }
+    return ( (dwMajorVersion > 6) || ( (dwMajorVersion == 6) && (dwMinorVersion >= 1) ) );
+}
+
+/* True when the cached OS version is at least 6.0 (Windows Vista / Server 2008).
+ * Requires tdav_win32_init() to have populated the cached version numbers. */
+tsk_bool_t tdav_win32_is_winvista_or_later()
+{
+    if (dwMajorVersion == -1 || dwMinorVersion == -1) {
+        TSK_DEBUG_ERROR("Version numbers are invalid");
+        return tsk_false;
+    }
+    return (dwMajorVersion >= 6);
+}
+
+/* True when the cached OS version is at least 5.1 (Windows XP).
+ * Requires tdav_win32_init() to have populated the cached version numbers. */
+tsk_bool_t tdav_win32_is_winxp_or_later()
+{
+    if (dwMajorVersion == -1 || dwMinorVersion == -1) {
+        TSK_DEBUG_ERROR("Version numbers are invalid");
+        return tsk_false;
+    }
+    if (dwMajorVersion > 5) {
+        return tsk_true;
+    }
+    return (dwMajorVersion == 5 && dwMinorVersion >= 1) ? tsk_true : tsk_false;
+}
+
+/**
+ * Returns the directory containing the current module (DLL or EXE), with the
+ * trailing file name removed. Computed once and cached in a static buffer.
+ * NOTE(review): the lazy initialization of the static cache is not thread-safe;
+ * presumably first called from a single thread -- confirm.
+ * @return cached directory path, or tsk_null on Windows RT (not supported there).
+ */
+const char* tdav_get_current_directory_const()
+{
+#if TDAV_UNDER_WINDOWS_RT
+    TSK_DEBUG_ERROR("Not supported");
+    return tsk_null;
+#else
+    static char CURRENT_DIR_PATH[MAX_PATH] = { 0 };
+    static DWORD CURRENT_DIR_PATH_LEN = 0;
+    if (CURRENT_DIR_PATH_LEN == 0) {
+        // NULL HMODULE will get the path to the executable not the DLL. When running the code in Internet Explorer this is a BIG issue as the path is where IE.exe is installed.
+#if TDAV_UNDER_WINDOWS_CE
+        static wchar_t TMP_CURRENT_DIR_PATH[MAX_PATH] = { 0 };
+        // GetModuleFileName() takes the buffer size in TCHARs (wchar_t here), NOT bytes:
+        // the original passed sizeof() (bytes), claiming twice the real capacity and
+        // risking a buffer overflow for long paths.
+        if ((CURRENT_DIR_PATH_LEN = GetModuleFileName(GetCurrentModule(), TMP_CURRENT_DIR_PATH, sizeof(TMP_CURRENT_DIR_PATH) / sizeof(TMP_CURRENT_DIR_PATH[0])))) {
+            // wcstombs() size is in bytes; reserve one for the terminating NUL.
+            if ((CURRENT_DIR_PATH_LEN = wcstombs(CURRENT_DIR_PATH, TMP_CURRENT_DIR_PATH, sizeof(CURRENT_DIR_PATH) - 1))) {
+                int idx = tsk_strLastIndexOf(CURRENT_DIR_PATH, CURRENT_DIR_PATH_LEN, "\\");
+                if (idx > -1) {
+                    CURRENT_DIR_PATH[idx] = '\0'; // strip "\filename.ext"
+                    CURRENT_DIR_PATH_LEN = idx;
+                }
+            }
+        }
+#else
+        if ((CURRENT_DIR_PATH_LEN = GetModuleFileNameA(GetCurrentModule(), CURRENT_DIR_PATH, sizeof(CURRENT_DIR_PATH)))) {
+            if (!PathRemoveFileSpecA(CURRENT_DIR_PATH)) {
+                TSK_DEBUG_ERROR("PathRemoveFileSpec(%s) failed: %x", CURRENT_DIR_PATH, GetLastError());
+                memset(CURRENT_DIR_PATH, 0, sizeof(CURRENT_DIR_PATH));
+                CURRENT_DIR_PATH_LEN = 0;
+            }
+        }
+#endif /* TDAV_UNDER_WINDOWS_CE */
+        if (!CURRENT_DIR_PATH_LEN) {
+            TSK_DEBUG_ERROR("GetModuleFileNameA() failed: %x", GetLastError());
+        }
+    }
+    return CURRENT_DIR_PATH;
+#endif /* TDAV_UNDER_WINDOWS_RT */
+}
+
+/**
+ * Logs a human-readable description of a Windows error code.
+ * @param func name of the failing function, used as log prefix
+ * @param hr HRESULT (or Win32 error code) to translate
+ * NOTE(review): the static wide buffer on the RT/CE path makes this non-reentrant -- confirm acceptable.
+ */
+TINYDAV_API void tdav_win32_print_error(const char* func, HRESULT hr)
+{
+    CHAR message[1024] = {0};
+
+#if (TDAV_UNDER_WINDOWS_RT || TDAV_UNDER_WINDOWS_CE)
+#if !defined(WC_ERR_INVALID_CHARS)
+#define WC_ERR_INVALID_CHARS 0
+#endif
+    // FormatMessageA not allowed on the Store
+    static WCHAR wBuff[1024] = {0};
+    FormatMessageW(
+        FORMAT_MESSAGE_FROM_SYSTEM,
+        tsk_null,
+        hr,
+        0,
+        wBuff,
+        (sizeof(wBuff) / sizeof(wBuff[0])) - 1, // nSize is in WCHARs, not bytes
+        tsk_null);
+    WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, wBuff, (int)wcslen(wBuff), message, sizeof(message) - 1, NULL, NULL);
+#else
+    // Do NOT pass FORMAT_MESSAGE_ALLOCATE_BUFFER: with that flag FormatMessageA treats
+    // the buffer argument as an LPSTR* to receive a LocalAlloc()'d pointer, which would
+    // corrupt the fixed-size stack buffer used here (and leak the allocation).
+    FormatMessageA(
+        FORMAT_MESSAGE_FROM_SYSTEM,
+        tsk_null,
+        hr,
+        0,
+        message,
+        sizeof(message) - 1,
+        tsk_null);
+#endif
+
+    TSK_DEBUG_ERROR("%s(): %s", func, message);
+}
+
+/**
+ * Process-wide cleanup: restores the multimedia timer resolution raised by
+ * tdav_win32_init().
+ * NOTE(review): tdav_win32_init() calls timeBeginPeriod(1) whenever
+ * !TDAV_UNDER_WINDOWS_RT (which includes CE), but this guard also excludes CE,
+ * so on Windows CE the matching timeEndPeriod(1) is never issued -- confirm intended.
+ * @return 0 always.
+ */
+int tdav_win32_deinit()
+{
+#if !(TDAV_UNDER_WINDOWS_RT || TDAV_UNDER_WINDOWS_CE)
+    MMRESULT result;
+
+    // Timers accuracy
+    result = timeEndPeriod(1);
+    if(result){
+        TSK_DEBUG_ERROR("timeEndPeriod(1) returned result=%u", result);
+    }
+#endif
+
+    return 0;
+}
+
+#endif /* TDAV_UNDER_WINDOWS */ \ No newline at end of file
diff --git a/tinyDAV/src/video/directx/tdav_producer_screencast_d3d9.cxx b/tinyDAV/src/video/directx/tdav_producer_screencast_d3d9.cxx
new file mode 100644
index 0000000..7efd1d6
--- /dev/null
+++ b/tinyDAV/src/video/directx/tdav_producer_screencast_d3d9.cxx
@@ -0,0 +1,185 @@
+/* Copyright (C) 2015 Mamadou DIOP.
+* Copyright (C) 2015 Doubango Telecom.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/video/directx/tdav_producer_screencast_d3d9.h"
+
+#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT
+
+#include <windows.h>
+#if TDAV_UNDER_WINDOWS_CE
+ // Direct3D Mobile (D3DM) was removed from Windows CE in version 7.
+ // Only include that header if running version 5 or 6. (When this
+ // class's implementation is complete, we'll need to revisit how
+ // this entire file is compiled.)
+# if _WIN32_WCE >= 0x0500 && _WIN32_WCE < 0x0700
+# include <D3dm.h>
+# endif
+#else
+# include <d3d9.h>
+#endif
+
+#ifdef _MSC_VER
+# if TDAV_UNDER_WINDOWS_CE
+# pragma comment(lib, "D3dm")
+# pragma comment(lib, "D3dmguid")
+# else
+# pragma comment(lib, "d3d9")
+# endif
+#endif
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_timer.h"
+#include "tsk_time.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#define D3D9_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[D3D9 Producer] " FMT, ##__VA_ARGS__)
+#define D3D9_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[D3D9 Producer] " FMT, ##__VA_ARGS__)
+#define D3D9_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[D3D9 Producer] " FMT, ##__VA_ARGS__)
+#define D3D9_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[D3D9 Producer] " FMT, ##__VA_ARGS__)
+
+// Private context for the (stub) Direct3D9 screen-capture producer.
+typedef struct tdav_producer_screencast_d3d9_s
+{
+    TMEDIA_DECLARE_PRODUCER;
+
+    HWND hwnd_preview; // optional window for local preview rendering
+    HWND hwnd_src; // window to capture -- presumably NULL means whole screen, TODO confirm
+
+    tsk_thread_handle_t* tid[1]; // grab-thread handle
+
+    void* p_buff_src; // must use VirtualAlloc()
+    tsk_size_t n_buff_src;
+    void* p_buff_neg; // must use VirtualAlloc()
+    tsk_size_t n_buff_neg;
+
+    tsk_bool_t b_started;
+    tsk_bool_t b_paused;
+    tsk_bool_t b_muted;
+
+    RECT rcScreen; // bounds of the capture source
+
+    TSK_DECLARE_SAFEOBJ;
+}
+tdav_producer_screencast_d3d9_t;
+
+/* ============ Media Producer Interface ================= */
+// TODO: all producer callbacks below are unimplemented stubs -- each logs an error and fails.
+static int _tdav_producer_screencast_d3d9_set(tmedia_producer_t *p_self, const tmedia_param_t* pc_param)
+{
+    D3D9_DEBUG_ERROR("Not implemented");
+    return -1;
+}
+
+
+// TODO: stub -- negotiation of size/fps from the codec is not implemented yet.
+static int _tdav_producer_screencast_d3d9_prepare(tmedia_producer_t* p_self, const tmedia_codec_t* pc_codec)
+{
+    D3D9_DEBUG_ERROR("Not implemented");
+    return -1;
+}
+
+// TODO: stub -- capture start is not implemented yet.
+static int _tdav_producer_screencast_d3d9_start(tmedia_producer_t* p_self)
+{
+    D3D9_DEBUG_ERROR("Not implemented");
+    return -1;
+}
+
+// TODO: stub -- pause is not implemented yet.
+static int _tdav_producer_screencast_d3d9_pause(tmedia_producer_t* p_self)
+{
+    D3D9_DEBUG_ERROR("Not implemented");
+    return -1;
+}
+
+// TODO: stub -- capture stop is not implemented yet (also invoked from the dtor when b_started).
+static int _tdav_producer_screencast_d3d9_stop(tmedia_producer_t* p_self)
+{
+    D3D9_DEBUG_ERROR("Not implemented");
+    return -1;
+}
+
+//
+// d3d9 screencast producer object definition
+//
+/* constructor: initializes the base producer and the producer's default video
+ * parameters (15 fps @ CIF 352x288, BGR24 chroma) plus its lock. */
+static tsk_object_t* _tdav_producer_screencast_d3d9_ctor(tsk_object_t *self, va_list * app)
+{
+    tdav_producer_screencast_d3d9_t *p_d3d9 = (tdav_producer_screencast_d3d9_t *)self;
+    if (p_d3d9) {
+        /* init base */
+        tmedia_producer_init(TMEDIA_PRODUCER(p_d3d9));
+        TMEDIA_PRODUCER(p_d3d9)->video.chroma = tmedia_chroma_bgr24; // RGB24 on x86 (little endians) stored as BGR24
+        /* init self with default values */
+        TMEDIA_PRODUCER(p_d3d9)->video.fps = 15;
+        TMEDIA_PRODUCER(p_d3d9)->video.width = 352;
+        TMEDIA_PRODUCER(p_d3d9)->video.height = 288;
+
+        tsk_safeobj_init(p_d3d9);
+    }
+    return self;
+}
+/* destructor: stops capture if still running, then releases the base producer,
+ * the VirtualAlloc()'d buffers and the lock. */
+static tsk_object_t* _tdav_producer_screencast_d3d9_dtor(tsk_object_t * self)
+{
+    tdav_producer_screencast_d3d9_t *p_d3d9 = (tdav_producer_screencast_d3d9_t *)self;
+    if (p_d3d9) {
+        /* stop */
+        if (p_d3d9->b_started) {
+            _tdav_producer_screencast_d3d9_stop((tmedia_producer_t*)p_d3d9);
+        }
+
+        /* deinit base */
+        tmedia_producer_deinit(TMEDIA_PRODUCER(p_d3d9));
+        /* deinit self: buffers were allocated with VirtualAlloc() -> VirtualFree(MEM_RELEASE) */
+        if (p_d3d9->p_buff_neg) {
+            VirtualFree(p_d3d9->p_buff_neg, 0, MEM_RELEASE);
+            p_d3d9->p_buff_neg = NULL;
+        }
+        if (p_d3d9->p_buff_src) {
+            VirtualFree(p_d3d9->p_buff_src, 0, MEM_RELEASE);
+            p_d3d9->p_buff_src = NULL;
+        }
+        tsk_safeobj_deinit(p_d3d9);
+
+        TSK_DEBUG_INFO("*** d3d9 Screencast producer destroyed ***");
+    }
+
+    return self;
+}
+/* object definition: size, ctor, dtor, no comparator */
+static const tsk_object_def_t tdav_producer_screencast_d3d9_def_s =
+{
+    sizeof(tdav_producer_screencast_d3d9_t),
+    _tdav_producer_screencast_d3d9_ctor,
+    _tdav_producer_screencast_d3d9_dtor,
+    tsk_null,
+};
+/* plugin definition*/
+/* plugin definition: registers the (stub) D3D9 screencast producer for the
+ * BFCP video (screen-share) media type. */
+static const tmedia_producer_plugin_def_t tdav_producer_screencast_d3d9_plugin_def_s =
+{
+    &tdav_producer_screencast_d3d9_def_s,
+    tmedia_bfcp_video, // screen-sharing stream, not the main camera stream
+    "Microsoft Direct3D screencast producer",
+
+    _tdav_producer_screencast_d3d9_set,
+    _tdav_producer_screencast_d3d9_prepare,
+    _tdav_producer_screencast_d3d9_start,
+    _tdav_producer_screencast_d3d9_pause,
+    _tdav_producer_screencast_d3d9_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_screencast_d3d9_plugin_def_t = &tdav_producer_screencast_d3d9_plugin_def_s;
+
+#endif /* TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT */
diff --git a/tinyDAV/src/video/directx/tdav_producer_screencast_ddraw.cxx b/tinyDAV/src/video/directx/tdav_producer_screencast_ddraw.cxx
new file mode 100644
index 0000000..13507db
--- /dev/null
+++ b/tinyDAV/src/video/directx/tdav_producer_screencast_ddraw.cxx
@@ -0,0 +1,1542 @@
+/* Copyright (C) 2015 Mamadou DIOP.
+* Copyright (C) 2015 Doubango Telecom.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/video/directx/tdav_producer_screencast_ddraw.h"
+
+#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT
+
+#include <windows.h>
+#include <ddraw.h>
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_timer.h"
+#include "tsk_time.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#if defined(_MSC_VER)
+# define DDRAW_HAVE_RGB32_TO_I420 1
+# if !TDAV_UNDER_WINDOWS_CE
+# define DDRAW_HAVE_RGB32_TO_I420_INTRIN 1
+# include <intrin.h>
+# endif /* TDAV_UNDER_WINDOWS_CE */
+# if !defined(_M_X64) /*|| _MSC_VER <= 1500*/ // https://msdn.microsoft.com/en-us/library/4ks26t93.aspx: Inline assembly is not supported on the ARM and x64 processors (1500 = VS2008)
+# define DDRAW_HAVE_RGB32_TO_I420_ASM 1
+# endif
+#endif /* _MSC_VER */
+
+#if !defined(DDRAW_MEM_ALIGNMENT)
+# define DDRAW_MEM_ALIGNMENT 16 // SSE = 16, AVX = 32. Should be 16.
+#endif /* DDRAW_MEM_ALIGNMENT */
+
+#if !defined(DDRAW_IS_ALIGNED)
+# define DDRAW_IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a) - 1)))
+#endif /* DDRAW_IS_ALIGNED */
+
+#if !defined(DDRAW_HIGH_PRIO_MEMCPY)
+# define DDRAW_HIGH_PRIO_MEMCPY 0 // BOOL
+#endif /* DDRAW_HIGH_PRIO_MEMCPY */
+
+#if !defined(DDRAW_CPU_MONITOR)
+# define DDRAW_CPU_MONITOR 0 // BOOL
+#endif /* DDRAW_CPU_MONITOR */
+
+#if !defined(DDRAW_CPU_THROTTLING)
+# define DDRAW_CPU_THROTTLING 0 // BOOL
+#endif /* DDRAW_CPU_THROTTLING */
+
+#if (DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING) && !defined(DDRAW_CPU_SCHEDULE_TIMEOUT)
+# define DDRAW_CPU_SCHEDULE_TIMEOUT 800 // millis
+#endif /* DDRAW_CPU_MONITOR */
+
+#if defined(DDRAW_CPU_THROTTLING) && !defined(DDRAW_CPU_THROTTLING_FPS_MIN)
+# define DDRAW_CPU_THROTTLING_FPS_MIN 1 // frames per second
+#endif /* DDRAW_CPU_THROTTLING_FPS_MIN */
+
+#if defined(DDRAW_CPU_THROTTLING) && !defined(DDRAW_CPU_THROTTLING_THRESHOLD)
+# define DDRAW_CPU_THROTTLING_THRESHOLD 70 // percent
+#endif /* DDRAW_CPU_THROTTLING_THRESHOLD */
+
+#if defined(DDRAW_CPU_THROTTLING) && !defined(DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN)
+# define DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN 5 // percent
+#endif /* DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN */
+
+#if !defined(DDRAW_MT)
+# define DDRAW_MT 1 // BOOL: Multi-threading
+#endif /* DDRAW_MT */
+
+#if defined (DDRAW_MT) && !defined(DDRAW_MT_COUNT)
+# define DDRAW_MT_COUNT 3 // Number of buffers to use
+#endif /* DDRAW_MT_COUNT */
+
+#if defined(DDRAW_MT_COUNT)
+# define DDRAW_MT_EVENT_SHUTDOWN_INDEX DDRAW_MT_COUNT
+#endif
+
+#if !defined(DDRAW_MEM_SURFACE_DIRECT_ACCESS)
+# define DDRAW_MEM_SURFACE_DIRECT_ACCESS 0 // direct access to "ddsd.lpSurface" is very slow even if the memory is correctly aligned: to be investigated
+#endif /* DDRAW_MEM_SURFACE_DIRECT_ACCESS */
+
+#if !defined(DDRAW_PREVIEW)
+# if TDAV_UNDER_WINDOWS_CE && (BUILD_TYPE_GE || SIN_CITY)
+# define DDRAW_PREVIEW 0 // Do not waste time displaying the preview on "WEC7 + (GE | SINCITY)"
+# else
+# define DDRAW_PREVIEW 1
+# endif
+#endif
+
+#define DDRAW_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[DDRAW Producer] " FMT, ##__VA_ARGS__)
+#define DDRAW_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[DDRAW Producer] " FMT, ##__VA_ARGS__)
+#define DDRAW_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[DDRAW Producer] " FMT, ##__VA_ARGS__)
+#define DDRAW_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[DDRAW Producer] " FMT, ##__VA_ARGS__)
+
+#define DDRAW_SAFE_RELEASE(pp) if ((pp) && *(pp)) (*(pp))->Release(), *(pp) = NULL
+#define DDRAW_CHECK_HR(x) { HRESULT __hr__ = (x); if (FAILED(__hr__)) { DDRAW_DEBUG_ERROR("Operation Failed (%08x)", __hr__); goto bail; } }
+
+// Bundles a DirectDraw interface pointer with the ddraw.dll handle it was created from,
+// so both can be released/unloaded together.
+typedef struct DDrawModule {
+    LPDIRECTDRAW lpDD;
+    HMODULE hDLL;
+}DDrawModule;
+typedef struct DDrawModule FAR *LPDDrawModule;
+// Releases the DirectDraw interface then unloads the DLL.
+// NOTE(review): multi-statement macro not wrapped in do { } while (0); unsafe after a
+// braceless "if" -- verify call sites.
+#define DDrawModuleSafeFree(module) DDRAW_SAFE_RELEASE(&module.lpDD); if (module.hDLL) { FreeLibrary(module.hDLL), module.hDLL = NULL; }
+
+// Private context for the DirectDraw screen-capture producer.
+typedef struct tdav_producer_screencast_ddraw_s
+{
+    TMEDIA_DECLARE_PRODUCER;
+
+    HWND hwnd_preview; // optional local-preview window
+    HWND hwnd_src; // capture source window
+#if DDRAW_PREVIEW
+    BITMAPINFO bi_preview; // bitmap header used to blit the preview
+#endif /* DDRAW_PREVIEW */
+
+#if DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING
+    tsk_timer_manager_handle_t *p_timer_mgr;
+    struct {
+        tsk_timer_id_t id_timer;
+        int fps_target; // fps currently targeted by the throttling logic
+    } cpu;
+#endif /* DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING */
+
+#if DDRAW_MT
+    // Multi-threaded pipeline state: the grab thread fills one of #DDRAW_MT_COUNT
+    // YUV buffers and signals the matching event; the encode thread consumes them.
+    struct{
+        tsk_thread_handle_t* tid[1]; // encode-thread handle
+        void* p_buff_yuv_aligned_array[DDRAW_MT_COUNT];
+        BOOL b_flags_array[DDRAW_MT_COUNT]; // TRUE when the matching buffer holds a pending frame
+        HANDLE h_events[DDRAW_MT_COUNT + 1]; // #DDRAW_MT_COUNT events for each buffer plus #1 for the shutdown/stop
+    } mt;
+#endif /* DDRAW_MT */
+
+    DDrawModule ddrawModule; // loaded ddraw.dll + IDirectDraw instance
+    IDirectDrawSurface* p_surf_primary; // primary (desktop) surface being captured
+
+    tsk_thread_handle_t* tid[1]; // grab-thread handle
+
+    void* p_buff_rgb_aligned; // RGB snapshot of the screen (DDRAW_MEM_ALIGNMENT-aligned)
+    tsk_size_t n_buff_rgb;
+    tsk_size_t n_buff_rgb_bitscount; // bits per pixel of the RGB buffer
+
+    void* p_buff_yuv_aligned; // I420 output buffer (single-threaded path)
+    tsk_size_t n_buff_yuv;
+
+    BOOL b_have_rgb32_conv; // support for RGB32 -> I420 and primary screen format is RGB32
+
+    tsk_bool_t b_started;
+    tsk_bool_t b_paused;
+    tsk_bool_t b_muted;
+
+    TSK_DECLARE_SAFEOBJ;
+}
+tdav_producer_screencast_ddraw_t;
+
+static BOOL _tdav_producer_screencast_have_ssse3();
+static tmedia_chroma_t _tdav_producer_screencast_get_chroma(const DDPIXELFORMAT* pixelFormat);
+static void* TSK_STDCALL _tdav_producer_screencast_grap_thread(void *arg);
+#if DDRAW_MT
+static void* TSK_STDCALL _tdav_producer_screencast_mt_encode_thread(void *arg);
+#endif /* DDRAW_MT */
+static int _tdav_producer_screencast_timer_cb(const void* arg, tsk_timer_id_t timer_id);
+static int _tdav_producer_screencast_grab(tdav_producer_screencast_ddraw_t* p_self);
+static HRESULT _tdav_producer_screencast_create_module(LPDDrawModule lpModule);
+static HRESULT _tdav_producer_screencast_alloc_rgb_buff(tdav_producer_screencast_ddraw_t* p_self, DWORD w, DWORD h, DWORD bitsCount);
+static HRESULT _tdav_producer_screencast_alloc_yuv_buff(tdav_producer_screencast_ddraw_t* p_self, DWORD w, DWORD h);
+
+#if DDRAW_HAVE_RGB32_TO_I420_INTRIN || DDRAW_HAVE_RGB32_TO_I420_ASM
+static __declspec(align(DDRAW_MEM_ALIGNMENT)) const int8_t kYCoeffs[16] = {
+ 13, 65, 33, 0,
+ 13, 65, 33, 0,
+ 13, 65, 33, 0,
+ 13, 65, 33, 0,
+ };
+ static __declspec(align(DDRAW_MEM_ALIGNMENT)) const int8_t kUCoeffs[16] = {
+ 112, -74, -38, 0,
+ 112, -74, -38, 0,
+ 112, -74, -38, 0,
+ 112, -74, -38, 0,
+ };
+ static __declspec(align(DDRAW_MEM_ALIGNMENT)) const int8_t kVCoeffs[16] = {
+ -18, -94, 112, 0,
+ -18, -94, 112, 0,
+ -18, -94, 112, 0,
+ -18, -94, 112, 0,
+ };
+ static __declspec(align(DDRAW_MEM_ALIGNMENT)) const int32_t kRGBAShuffleDuplicate[4] = { 0x03020100, 0x0b0a0908, 0x03020100, 0x0b0a0908 }; // RGBA(X) || RGBA(X + 2) || RGBA(X) || RGBA(X + 2) = 2U || 2V
+ static __declspec(align(DDRAW_MEM_ALIGNMENT)) const uint16_t kY16[8] = {
+ 16, 16, 16, 16,
+ 16, 16, 16, 16
+ };
+ static __declspec(align(DDRAW_MEM_ALIGNMENT)) const uint16_t kUV128[8] = {
+ 128, 128, 128, 128,
+ 128, 128, 128, 128
+ };
+#endif /* DDRAW_HAVE_RGB32_TO_I420_INTRIN || DDRAW_HAVE_RGB32_TO_I420_ASM */
+
+// public function used to check that we can use DDRAW plugin before loading it
+// public function used to check that we can use DDRAW plugin before loading it.
+// Probes DirectDraw by creating the primary surface and checking that its pixel
+// format maps to a supported chroma. The result is cached in function statics.
+// NOTE(review): the static guard is not protected by a lock -- presumably only
+// called during single-threaded plugin registration; confirm.
+tsk_bool_t tdav_producer_screencast_ddraw_plugin_is_supported()
+{
+    static tsk_bool_t __checked = tsk_false; // static guard to avoid checking more than once
+    static tsk_bool_t __supported = tsk_false;
+
+    HRESULT hr = DD_OK;
+    DDSURFACEDESC ddsd;
+    DDPIXELFORMAT DDPixelFormat;
+    LPDIRECTDRAWSURFACE lpDDS = NULL;
+    DDrawModule ddrawModule = { 0 };
+
+    if (__checked) {
+        goto bail;
+    }
+
+    __checked = tsk_true;
+
+    // DDRAW_CHECK_HR jumps to "bail" on failure; "bail" releases the probe resources.
+    DDRAW_CHECK_HR(hr = _tdav_producer_screencast_create_module(&ddrawModule));
+    DDRAW_CHECK_HR(hr = ddrawModule.lpDD->SetCooperativeLevel(NULL, DDSCL_NORMAL));
+
+    ZeroMemory(&ddsd, sizeof(ddsd));
+    ddsd.dwSize = sizeof(ddsd);
+    ddsd.dwFlags = DDSD_CAPS;
+    ddsd.ddsCaps.dwCaps = DDSCAPS_PRIMARYSURFACE;
+
+    DDRAW_CHECK_HR(hr = ddrawModule.lpDD->CreateSurface(&ddsd, &lpDDS, NULL));
+
+    ZeroMemory(&DDPixelFormat, sizeof(DDPixelFormat));
+    DDPixelFormat.dwSize = sizeof(DDPixelFormat);
+    DDRAW_CHECK_HR(hr = lpDDS->GetPixelFormat(&DDPixelFormat));
+    DDRAW_DEBUG_INFO("dwRGBBitCount:%d, dwRBitMask:%x, dwGBitMask:%x, dwBBitMask:%x, dwRGBAlphaBitMask:%x",
+        DDPixelFormat.dwRGBBitCount, DDPixelFormat.dwRBitMask, DDPixelFormat.dwGBitMask, DDPixelFormat.dwBBitMask, DDPixelFormat.dwRGBAlphaBitMask);
+    if (_tdav_producer_screencast_get_chroma(&DDPixelFormat) == tmedia_chroma_none) {
+        DDRAW_CHECK_HR(hr = DDERR_INVALIDCAPS); // unsupported pixel format
+    }
+
+    __supported = SUCCEEDED(hr);
+
+bail:
+    DDRAW_SAFE_RELEASE(&lpDDS);
+    DDrawModuleSafeFree(ddrawModule);
+    return __supported;
+}
+
+// Runtime CPUID check for SSSE3 (needed by the RGB32->I420 converters).
+// Result is cached in function statics after the first call.
+static BOOL _tdav_producer_screencast_have_ssse3()
+{
+    static BOOL __checked = FALSE; // static guard to avoid checking more than once
+    static BOOL __supported = FALSE;
+
+    if (__checked) {
+        return __supported;
+    }
+    __checked = TRUE;
+
+#ifndef BIT
+# define BIT(n) (1<<n)
+#endif /*BIT*/
+#if DDRAW_HAVE_RGB32_TO_I420_ASM
+    // Inline-asm CPUID (x86 only): leaf 0 gives the max supported leaf, leaf 1
+    // reports SSSE3 in ECX bit 9.
+    #define cpuid(func, func2, a, b, c, d)\
+        __asm mov eax, func\
+        __asm mov ecx, func2\
+        __asm cpuid\
+        __asm mov a, eax\
+        __asm mov b, ebx\
+        __asm mov c, ecx\
+        __asm mov d, edx
+
+#define HAS_MMX 0x01
+#define HAS_SSE 0x02
+#define HAS_SSE2 0x04
+#define HAS_SSE3 0x08
+#define HAS_SSSE3 0x10
+#define HAS_SSE4_1 0x20
+#define HAS_AVX 0x40
+#define HAS_AVX2 0x80
+
+    unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
+    cpuid(0, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+    if (reg_eax < 1) {
+        DDRAW_DEBUG_ERROR("reg_eax < 1");
+        return FALSE;
+    }
+    cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+    __supported = (reg_ecx & BIT(9)) ? TRUE : FALSE; // ECX bit 9 = SSSE3
+#elif DDRAW_HAVE_RGB32_TO_I420_INTRIN
+    int cpu_info[4] = { 0 }, num_ids;
+    __cpuid(cpu_info, 0);
+    num_ids = cpu_info[0];
+    // NOTE(review): the 0x80000000 query result is immediately overwritten and
+    // never used -- looks vestigial; confirm before removing.
+    __cpuid(cpu_info, 0x80000000);
+    if (num_ids > 0) {
+        __cpuid(cpu_info, 0x00000001);
+        __supported = (cpu_info[2] & BIT(9)) ? TRUE : FALSE; // ECX bit 9 = SSSE3
+    }
+#endif /* DDRAW_HAVE_RGB32_TO_I420_ASM */
+
+    DDRAW_DEBUG_INFO("SSSE3 supported = %s", __supported ? "YES" : "NO");
+
+    return __supported;
+}
+
+#if DDRAW_HAVE_RGB32_TO_I420_INTRIN
+
+#define DDRAW_COPY16_INTRIN(dst, src) \
+ _mm_store_si128((__m128i*)dst, _mm_load_si128((__m128i*)src))
+#define DDRAW_COPY64_INTRIN(dst, src) \
+ _mm_store_si128((__m128i*)dst, _mm_load_si128((__m128i*)src)); \
+ _mm_store_si128((__m128i*)&dst[16], _mm_load_si128((__m128i*)&src[16])); \
+ _mm_store_si128((__m128i*)&dst[32], _mm_load_si128((__m128i*)&src[32])); \
+ _mm_store_si128((__m128i*)&dst[48], _mm_load_si128((__m128i*)&src[48]))
+#define DDRAW_COPY128_INTRIN(dst, src) \
+ DDRAW_COPY64_INTRIN(dst, src); \
+ _mm_store_si128((__m128i*)&dst[64], _mm_load_si128((__m128i*)&src[64])); \
+ _mm_store_si128((__m128i*)&dst[80], _mm_load_si128((__m128i*)&src[80])); \
+ _mm_store_si128((__m128i*)&dst[96], _mm_load_si128((__m128i*)&src[96])); \
+ _mm_store_si128((__m128i*)&dst[112], _mm_load_si128((__m128i*)&src[112]))
+
+// SSSE3 RGB32 (BGRA/RGBA, 4 bytes per sample) -> planar I420 converter.
+// Layout of yuvPtr: Y plane (samplesCount bytes), then U plane (samplesCount/4),
+// then V plane (samplesCount/4).
+// Assumptions (from the load/store patterns below -- TODO confirm at call sites):
+// both buffers 16-byte aligned (movdqa-style loads/stores), width a multiple of 16,
+// height even, and width a power of two (see the "i & (width - 1)" line-skip test).
+static void _tdav_producer_screencast_rgb32_to_yuv420_intrin_ssse3(uint8_t *yuvPtr, const uint8_t *rgbPtr, int width, int height)
+{
+    // rgbPtr contains (samplesCount * 4) bytes (4 bytes per RGBA sample)
+    // yPtr contains samplesCount bytes
+    const int samplesCount = (width * height); // "width" and "height" are in samples
+    const uint8_t *rgbPtr_;
+    uint8_t* yPtr_ = yuvPtr, *uPtr_ = (yPtr_ + samplesCount), *vPtr_ = uPtr_ + (samplesCount >> 2);
+    __m128i mmRgb0, mmRgb1, mmRgb2, mmRgb3, mmY0, mmY1, mmY;
+    __m128i mmRgbU0, mmRgbU1, mmRgbV0, mmRgbV1;
+
+    // Convert 16 RGBA samples to 16 Y samples
+    rgbPtr_ = rgbPtr;
+    /* const */__m128i yCoeffs = _mm_load_si128((__m128i*)kYCoeffs);
+    /* const */__m128i y16 = _mm_load_si128((__m128i*)kY16);
+    for(int i = 0; i < samplesCount; i += 16)
+    {
+        // load 16 RGBA samples
+        _mm_store_si128(&mmRgb0, _mm_load_si128((__m128i*)rgbPtr_)); // 4 RGBA samples
+        _mm_store_si128(&mmRgb1, _mm_load_si128((__m128i*)&rgbPtr_[16])); // 4 RGBA samples
+        _mm_store_si128(&mmRgb2, _mm_load_si128((__m128i*)&rgbPtr_[32])); // 4 RGBA samples
+        _mm_store_si128(&mmRgb3, _mm_load_si128((__m128i*)&rgbPtr_[48])); // 4 RGBA samples
+
+        _mm_store_si128(&mmRgb0, _mm_maddubs_epi16(mmRgb0/*unsigned*/, yCoeffs/*signed*/)); // mmRgb0 = ((yCoeffs[j] * mmRgb0[j]) + (yCoeffs[j + 1] * mmRgb0[j + 1]))
+        _mm_store_si128(&mmRgb1, _mm_maddubs_epi16(mmRgb1/*unsigned*/, yCoeffs/*signed*/));
+        _mm_store_si128(&mmRgb2, _mm_maddubs_epi16(mmRgb2/*unsigned*/, yCoeffs/*signed*/));
+        _mm_store_si128(&mmRgb3, _mm_maddubs_epi16(mmRgb3/*unsigned*/, yCoeffs/*signed*/));
+
+        _mm_store_si128(&mmY0, _mm_hadd_epi16(mmRgb0, mmRgb1)); // horizontal add
+        _mm_store_si128(&mmY1, _mm_hadd_epi16(mmRgb2, mmRgb3));
+
+        _mm_store_si128(&mmY0, _mm_srai_epi16(mmY0, 7)); // >> 7
+        _mm_store_si128(&mmY1, _mm_srai_epi16(mmY1, 7));
+
+        _mm_store_si128(&mmY0, _mm_add_epi16(mmY0, y16)); // + 16
+        _mm_store_si128(&mmY1, _mm_add_epi16(mmY1, y16));
+
+        _mm_store_si128(&mmY, _mm_packus_epi16(mmY0, mmY1)); // Saturate(I16 -> U8)
+
+        _mm_store_si128((__m128i*)yPtr_, mmY);
+
+        rgbPtr_ += 64; // 16samples * 4bytes
+        yPtr_ += 16; // 16samples * 1byte
+    }
+
+    // U+V planes: 4:2:0 subsampling -- every pair of source columns contributes one
+    // chroma sample, and every other source row is skipped entirely (see line skip below).
+    /* const */__m128i uCoeffs = _mm_load_si128((__m128i*)kUCoeffs);
+    /* const */__m128i vCoeffs = _mm_load_si128((__m128i*)kVCoeffs);
+    /* const */__m128i rgbaShuffleDuplicate = _mm_load_si128((__m128i*)kRGBAShuffleDuplicate);
+    /* const */__m128i uv128 = _mm_load_si128((__m128i*)kUV128);
+    rgbPtr_ = rgbPtr;
+    for(int i = 0; i < samplesCount; )
+    {
+        // load 16 RGBA samples
+        _mm_store_si128(&mmRgb0, _mm_load_si128((__m128i*)rgbPtr_)); // 4 RGBA samples
+        _mm_store_si128(&mmRgb1, _mm_load_si128((__m128i*)&rgbPtr_[16])); // 4 RGBA samples
+        _mm_store_si128(&mmRgb2, _mm_load_si128((__m128i*)&rgbPtr_[32])); // 4 RGBA samples
+        _mm_store_si128(&mmRgb3, _mm_load_si128((__m128i*)&rgbPtr_[48])); // 4 RGBA samples
+
+        _mm_store_si128(&mmRgb0, _mm_shuffle_epi8(mmRgb0, rgbaShuffleDuplicate));
+        _mm_store_si128(&mmRgb1, _mm_shuffle_epi8(mmRgb1, rgbaShuffleDuplicate));
+        _mm_store_si128(&mmRgb2, _mm_shuffle_epi8(mmRgb2, rgbaShuffleDuplicate));
+        _mm_store_si128(&mmRgb3, _mm_shuffle_epi8(mmRgb3, rgbaShuffleDuplicate));
+
+        _mm_store_si128(&mmRgbU0, _mm_unpacklo_epi64(mmRgb0, mmRgb1));
+        _mm_store_si128(&mmRgbV0, _mm_unpackhi_epi64(mmRgb0, mmRgb1)); // same as mmRgbU0: Use _mm_store_si128??
+        _mm_store_si128(&mmRgbU1, _mm_unpacklo_epi64(mmRgb2, mmRgb3));
+        _mm_store_si128(&mmRgbV1, _mm_unpackhi_epi64(mmRgb2, mmRgb3)); // same as mmRgbU0: Use _mm_store_si128??
+
+        _mm_store_si128(&mmRgbU0, _mm_maddubs_epi16(mmRgbU0/*unsigned*/, uCoeffs/*signed*/));
+        _mm_store_si128(&mmRgbV0, _mm_maddubs_epi16(mmRgbV0/*unsigned*/, vCoeffs/*signed*/));
+        _mm_store_si128(&mmRgbU1, _mm_maddubs_epi16(mmRgbU1/*unsigned*/, uCoeffs/*signed*/));
+        _mm_store_si128(&mmRgbV1, _mm_maddubs_epi16(mmRgbV1/*unsigned*/, vCoeffs/*signed*/));
+
+        _mm_store_si128(&mmY0, _mm_hadd_epi16(mmRgbU0, mmRgbU1)); // horizontal add
+        _mm_store_si128(&mmY1, _mm_hadd_epi16(mmRgbV0, mmRgbV1));
+
+        _mm_store_si128(&mmY0, _mm_srai_epi16(mmY0, 8)); // >> 8
+        _mm_store_si128(&mmY1, _mm_srai_epi16(mmY1, 8));
+
+        _mm_store_si128(&mmY0, _mm_add_epi16(mmY0, uv128)); // + 128
+        _mm_store_si128(&mmY1, _mm_add_epi16(mmY1, uv128));
+
+        // Y contains 8 samples for U then 8 samples for V
+        _mm_store_si128(&mmY, _mm_packus_epi16(mmY0, mmY1)); // Saturate(I16 -> U8)
+        _mm_storel_pi((__m64*)uPtr_, _mm_load_ps((float*)&mmY));
+        _mm_storeh_pi((__m64*)vPtr_, _mm_load_ps((float*)&mmY));
+
+        uPtr_ += 8; // 8samples * 1byte
+        vPtr_ += 8; // 8samples * 1byte
+
+        // move to next 16 samples
+        i += 16;
+        rgbPtr_ += 64; // 16samples * 4bytes
+
+        // NOTE(review): "i & (width - 1)" equals "i % width" only when width is a
+        // power of two -- confirm callers guarantee this.
+        if (/*i % width == 0*/ !(i & (width - 1)))
+        {
+            // skip next line
+            i += width;
+            rgbPtr_ += (width * 4);
+        }
+    }
+}
+#endif /* DDRAW_HAVE_RGB32_TO_I420_INTRIN */
+
+#if DDRAW_HAVE_RGB32_TO_I420_ASM
+
+// __asm keyword must be duplicated in macro: https://msdn.microsoft.com/en-us/library/aa293825(v=vs.60).aspx
+#define DDRAW_COPY16_ASM(dst, src) \
+ __asm { \
+ __asm mov eax, dword ptr [src] \
+ __asm mov ecx, dword ptr [dst] \
+ \
+ __asm movdqa xmm0, xmmword ptr [eax] \
+ __asm movdqa xmmword ptr [ecx], xmm0 \
+ }
+#define DDRAW_COPY64_ASM(dst, src) \
+ __asm { \
+ __asm mov eax, dword ptr [src] \
+ __asm mov ecx, dword ptr [dst] \
+ \
+ __asm movdqa xmm0, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm1, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm2, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm3, xmmword ptr [eax] \
+ \
+ __asm movdqa xmmword ptr [ecx], xmm0 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm1 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm2 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm3 \
+ }
+#define DDRAW_COPY128_ASM(dst, src) \
+ __asm { \
+ __asm mov eax, dword ptr [src] \
+ __asm mov ecx, dword ptr [dst] \
+ \
+ __asm movdqa xmm0, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm1, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm2, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm3, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm4, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm5, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm6, xmmword ptr [eax] \
+ __asm add eax, dword ptr 16 \
+ __asm movdqa xmm7, xmmword ptr [eax] \
+ \
+ __asm movdqa xmmword ptr [ecx], xmm0 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm1 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm2 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm3 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm4 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm5 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm6 \
+ __asm add ecx, dword ptr 16 \
+ __asm movdqa xmmword ptr [ecx], xmm7 \
+ }
+
// Converts one RGB32 (BGRA, 4 bytes/pixel) frame to planar I420 (YUV420p) using SSSE3.
// Layout of the destination: Y plane (width*height bytes), then U plane (w*h/4), then V plane (w*h/4).
// Naked function: no prologue/epilogue is generated; arguments are read directly off the stack
// ([esp + 12 + N] accounts for the three pushed registers). cdecl x86 (32-bit) only.
// Requires: rgbPtr/yuvPtr 16-byte aligned (movdqa), width a multiple of 16 (see b_have_rgb32_conv checks).
__declspec(naked) __declspec(align(DDRAW_MEM_ALIGNMENT))
static void _tdav_producer_screencast_rgb32_to_yuv420_asm_ssse3(uint8_t *yuvPtr, const uint8_t *rgbPtr, int width, int height)
{
    __asm {
        push esi
        push edi
        push ebx
        /*** Y Samples ***/
        mov edx, [esp + 12 + 4] // yuvPtr
        mov eax, [esp + 12 + 8] // rgbPtr
        mov ecx, [esp + 12 + 12] // width
        imul ecx, [esp + 12 + 16] // (width * height) = samplesCount

        movdqa xmm7, kYCoeffs // yCoeffs
        movdqa xmm6, kY16 // y16
        /* loopY start */
loopY:
        // load 16 RGBA samples
        movdqa xmm0, [eax] // mmRgb0
        movdqa xmm1, [eax + 16] // mmRgb1
        movdqa xmm2, [eax + 32] // mmRgb2
        movdqa xmm3, [eax + 48] // mmRgb3
        lea eax, [eax + 64] // rgbPtr_ += 64
        // (yCoeffs[0] * mmRgbX[0]) + (yCoeffs[1] * mmRgbX[1])
        pmaddubsw xmm0, xmm7
        pmaddubsw xmm1, xmm7
        pmaddubsw xmm2, xmm7
        pmaddubsw xmm3, xmm7
        // horizontal add
        phaddw xmm0, xmm1
        phaddw xmm2, xmm3
        // >> 7
        psraw xmm0, 7
        psraw xmm2, 7
        // + 16
        paddw xmm0, xmm6
        paddw xmm2, xmm6
        // Saturate(I16 -> U8) - Packs
        packuswb xmm0, xmm2
        // Copy to yuvPtr
        movdqa [edx], xmm0
        lea edx, [edx + 16] // yPtr_ += 16
        sub ecx, 16 // samplesCount -= 16
        jnz loopY // goto loop if (samplesCount != 0)

        //==================================//
        //=========== UV Samples ===========//
        //==================================//
        // U and V are subsampled 2x2: every other pixel of every other row contributes.
        mov esi, [esp + 12 + 4] // yuvPtr
        mov eax, [esp + 12 + 8] // rgbPtr
        mov ecx, [esp + 12 + 12] // width
        imul ecx, [esp + 12 + 16] // (width * height) = samplesCount
        mov edx, ecx
        shr edx, 2 // edx = samplesCount / 4
        add esi, ecx // [[esi = uPtr_]] (U plane starts right after the Y plane)
        mov edi, esi // edi = uPtr_
        add edi, edx // [[edi = uPtr_ + edx = uPtr_ + (samplesCount / 4) = vPtr_]]
        xor edx, edx // edx = 0 = i
        mov ebx, [esp + 12 + 12] // ebx = width
        sub ebx, 1 // ebx = (width - 1)

        movdqa xmm7, kUCoeffs // uCoeffs
        movdqa xmm6, kVCoeffs // vCoeffs
        movdqa xmm5, kRGBAShuffleDuplicate // rgbaShuffleDuplicate
        movdqa xmm4, kUV128 // uv128

        /* loopUV start */
loopUV:
        // load 16 RGBA samples
        movdqa xmm0, [eax] // mmRgb0
        movdqa xmm1, [eax + 16] // mmRgb1
        movdqa xmm2, [eax + 32] // mmRgb2
        movdqa xmm3, [eax + 48] // mmRgb3
        lea eax, [eax + 64] // rgbPtr_ += 64

        // keep every other pixel (horizontal 2x subsampling)
        pshufb xmm0, xmm5
        pshufb xmm1, xmm5
        pshufb xmm2, xmm5
        pshufb xmm3, xmm5

        punpcklqdq xmm0, xmm1 // mmRgbU0
        punpcklqdq xmm2, xmm3 // mmRgbU1
        movdqa xmm1, xmm0 // mmRgbV0
        movdqa xmm3, xmm2 // mmRgbV1

        pmaddubsw xmm0, xmm7 // mmRgbU0
        pmaddubsw xmm1, xmm6 // mmRgbV0
        pmaddubsw xmm2, xmm7 // mmRgbU1
        pmaddubsw xmm3, xmm6 // mmRgbV1

        phaddw xmm0, xmm2 // mmY0
        phaddw xmm1, xmm3 // mmY1

        psraw xmm0, 8
        psraw xmm1, 8

        paddw xmm0, xmm4
        paddw xmm1, xmm4

        packuswb xmm0, xmm1
        movlps [esi], xmm0 // low 8 bytes -> U plane
        movhps [edi], xmm0 // high 8 bytes -> V plane

        lea esi, [esi + 8]
        lea edi, [edi + 8]

        add edx, 16 // i += 16;
        push edx // save edx
        and edx, ebx // edx = (edx & ebx) = (edx & (width - 1))
        // NOTE(review): (i & (width - 1)) equals (i % width) only when width is a
        // power of two; the prepare() checks only guarantee width % 16 == 0 — confirm.
        cmp edx, 0 // (i % width) == 0 ?
        pop edx // restore edx
        jne loopUV_NextLine

        // loopUV_EndOfLine: ((i % width) == 0) -> skip the odd source row (vertical 2x subsampling)
        add ebx, 1// change ebx value from width-1 to width
        add edx, ebx // i += width
        lea eax, [eax + 4 * ebx]// rgbPtr_ += (width * 4);
        sub ebx, 1// change back ebx value to width - 1
loopUV_NextLine:
        cmp edx, ecx
        jl loopUV

        pop ebx
        pop edi
        pop esi
        ret
    }
}
+#endif /* DDRAW_HAVE_RGB32_TO_I420_ASM */
+
+/* ============ Media Producer Interface ================= */
+static int _tdav_producer_screencast_ddraw_set(tmedia_producer_t *p_self, const tmedia_param_t* pc_param)
+{
+ int ret = 0;
+ tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)p_self;
+
+ if (!p_ddraw || !pc_param) {
+ DDRAW_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (pc_param->value_type == tmedia_pvt_int64) {
+ if (tsk_striequals(pc_param->key, "local-hwnd") || tsk_striequals(pc_param->key, "preview-hwnd")) {
+ p_ddraw->hwnd_preview = (HWND)*((int64_t*)pc_param->value);
+ }
+ else if (tsk_striequals(pc_param->key, "src-hwnd")) {
+ p_ddraw->hwnd_src = (HWND)*((int64_t*)pc_param->value);
+ }
+ }
+ else if (pc_param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(pc_param->key, "mute")) {
+ p_ddraw->b_muted = (TSK_TO_INT32((uint8_t*)pc_param->value) != 0);
+ }
+ }
+
+ return ret;
+}
+
+
+static int _tdav_producer_screencast_ddraw_prepare(tmedia_producer_t* p_self, const tmedia_codec_t* pc_codec)
+{
+ tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)p_self;
+ int ret = 0;
+ HRESULT hr = DD_OK;
+#if 0
+ DDPIXELFORMAT DDPixelFormat;
+#endif
+ DDSURFACEDESC ddsd;
+
+ if (!p_ddraw || !pc_codec) {
+ DDRAW_DEBUG_ERROR("Invalid parameter");
+ DDRAW_CHECK_HR(hr = E_INVALIDARG);
+ }
+
+ tsk_safeobj_lock(p_ddraw);
+
+ // check support for DirectDraw again
+ if (!tdav_producer_screencast_ddraw_plugin_is_supported()) {
+ DDRAW_CHECK_HR(hr = E_FAIL);
+ }
+
+ TMEDIA_PRODUCER(p_ddraw)->video.fps = TMEDIA_CODEC_VIDEO(pc_codec)->out.fps;
+ TMEDIA_PRODUCER(p_ddraw)->video.width = TMEDIA_CODEC_VIDEO(pc_codec)->out.width;
+ TMEDIA_PRODUCER(p_ddraw)->video.height = TMEDIA_CODEC_VIDEO(pc_codec)->out.height;
+
+ // Hack the codec to avoid flipping
+ TMEDIA_CODEC_VIDEO(pc_codec)->out.flip = tsk_false;
+
+ DDRAW_DEBUG_INFO("Prepare with fps:%d, width:%d; height:%d", TMEDIA_PRODUCER(p_ddraw)->video.fps, TMEDIA_PRODUCER(p_ddraw)->video.width, TMEDIA_PRODUCER(p_ddraw)->video.height);
+
+ if (!p_ddraw->ddrawModule.lpDD || !p_ddraw->ddrawModule.hDLL) {
+ DDRAW_CHECK_HR(hr = _tdav_producer_screencast_create_module(&p_ddraw->ddrawModule));
+ }
+ DDRAW_CHECK_HR(hr = p_ddraw->ddrawModule.lpDD->SetCooperativeLevel(NULL, DDSCL_NORMAL));
+
+ if (!p_ddraw->p_surf_primary) {
+ ZeroMemory(&ddsd, sizeof(ddsd));
+ ddsd.dwSize = sizeof(ddsd);
+ ddsd.dwFlags = DDSD_CAPS;
+ ddsd.ddsCaps.dwCaps = DDSCAPS_PRIMARYSURFACE;
+
+ DDRAW_CHECK_HR(hr = p_ddraw->ddrawModule.lpDD->CreateSurface(&ddsd, &p_ddraw->p_surf_primary, NULL));
+ }
+#if 0
+ ZeroMemory(&DDPixelFormat, sizeof(DDPixelFormat));
+ DDPixelFormat.dwSize = sizeof(DDPixelFormat);
+ DDRAW_CHECK_HR(hr = DDRAW_VTBL(p_ddraw->p_surf_primary)->GetPixelFormat(p_ddraw->p_surf_primary, &DDPixelFormat));
+ DDRAW_DEBUG_INFO("dwRGBBitCount:%d, dwRBitMask:%x, dwGBitMask:%x, dwBBitMask:%x, dwRGBAlphaBitMask:%x",
+ DDPixelFormat.dwRGBBitCount, DDPixelFormat.dwRBitMask, DDPixelFormat.dwGBitMask, DDPixelFormat.dwBBitMask, DDPixelFormat.dwRGBAlphaBitMask);
+ if ((TMEDIA_PRODUCER(p_ddraw)->video.chroma = _tdav_producer_screencast_get_chroma(&DDPixelFormat)) == tmedia_chroma_none) {
+ DDRAW_CHECK_HR(hr = DDERR_INVALIDCAPS);
+ }
+#else
+ ZeroMemory(&ddsd, sizeof(ddsd));
+ ddsd.dwSize = sizeof(ddsd);
+ ddsd.dwFlags = DDSD_HEIGHT | DDSD_WIDTH | DDSD_PITCH | DDSD_PIXELFORMAT;
+ DDRAW_CHECK_HR(hr = p_ddraw->p_surf_primary->GetSurfaceDesc(&ddsd));
+ DDRAW_DEBUG_INFO("Prepare with neg. width:%d, height:%d, pitch=%ld", ddsd.dwWidth, ddsd.dwHeight, ddsd.lPitch);
+ TMEDIA_PRODUCER(p_ddraw)->video.width = ddsd.dwWidth;
+ TMEDIA_PRODUCER(p_ddraw)->video.height = ddsd.dwHeight;
+ p_ddraw->n_buff_rgb_bitscount = ddsd.ddpfPixelFormat.dwRGBBitCount;
+ DDRAW_DEBUG_INFO("Prepare with dwRGBBitCount:%d, dwRBitMask:%x, dwGBitMask:%x, dwBBitMask:%x, dwRGBAlphaBitMask:%x",
+ ddsd.ddpfPixelFormat.dwRGBBitCount, ddsd.ddpfPixelFormat.dwRBitMask, ddsd.ddpfPixelFormat.dwGBitMask, ddsd.ddpfPixelFormat.dwBBitMask, ddsd.ddpfPixelFormat.dwRGBAlphaBitMask);
+ if ((TMEDIA_PRODUCER(p_ddraw)->video.chroma = _tdav_producer_screencast_get_chroma(&ddsd.ddpfPixelFormat)) == tmedia_chroma_none) {
+ DDRAW_CHECK_HR(hr = DDERR_INVALIDCAPS);
+ }
+#endif
+ // allocate RGB buffer
+ DDRAW_CHECK_HR(hr = _tdav_producer_screencast_alloc_rgb_buff(p_ddraw, ddsd.dwWidth, ddsd.dwHeight, ddsd.ddpfPixelFormat.dwRGBBitCount));
+
+ // Check if we can use built-in chroma conversion
+#if DDRAW_HAVE_RGB32_TO_I420_INTRIN || DDRAW_HAVE_RGB32_TO_I420_ASM
+ p_ddraw->b_have_rgb32_conv =
+ _tdav_producer_screencast_have_ssse3() // SSSE3 supported
+ && DDRAW_IS_ALIGNED(TMEDIA_PRODUCER(p_ddraw)->video.width, DDRAW_MEM_ALIGNMENT) // width multiple of 16
+ /* && DDRAW_IS_ALIGNED(TMEDIA_PRODUCER(p_ddraw)->video.height, DDRAW_MEM_ALIGNMENT) // height multiple of 16 */
+ && TMEDIA_PRODUCER(p_ddraw)->video.chroma == tmedia_chroma_rgb32; // Primary screen RGB32
+ if (p_ddraw->b_have_rgb32_conv) {
+ TMEDIA_PRODUCER(p_ddraw)->video.chroma = tmedia_chroma_yuv420p;
+ }
+#endif
+ DDRAW_DEBUG_INFO("RGB32 -> I420 conversion supported: %s", p_ddraw->b_have_rgb32_conv ? "YES" : "NO");
+
+ // allocate YUV buffer
+ if (p_ddraw->b_have_rgb32_conv) {
+ DDRAW_CHECK_HR(hr = _tdav_producer_screencast_alloc_yuv_buff(p_ddraw, (DWORD)TMEDIA_PRODUCER(p_ddraw)->video.width, (DWORD)TMEDIA_PRODUCER(p_ddraw)->video.height));
+ }
+
+ // BitmapInfo for preview
+#if DDRAW_PREVIEW
+ ZeroMemory(&p_ddraw->bi_preview, sizeof(p_ddraw->bi_preview));
+ p_ddraw->bi_preview.bmiHeader.biSize = (DWORD)sizeof(BITMAPINFOHEADER);
+ p_ddraw->bi_preview.bmiHeader.biCompression = BI_RGB;
+ p_ddraw->bi_preview.bmiHeader.biPlanes = 1;
+ p_ddraw->bi_preview.bmiHeader.biWidth = ddsd.dwWidth;
+ p_ddraw->bi_preview.bmiHeader.biHeight = ddsd.dwHeight;
+ p_ddraw->bi_preview.bmiHeader.biBitCount = (WORD)ddsd.ddpfPixelFormat.dwRGBBitCount;
+ p_ddraw->bi_preview.bmiHeader.biSizeImage = (p_ddraw->bi_preview.bmiHeader.biWidth * p_ddraw->bi_preview.bmiHeader.biHeight * (p_ddraw->bi_preview.bmiHeader.biBitCount >> 3));
+#endif /* DDRAW_PREVIEW */
+
+#if DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING
+ if (!p_ddraw->p_timer_mgr) {
+ p_ddraw->p_timer_mgr = tsk_timer_manager_create();
+ }
+#endif /* DDRAW_CPU_MONITOR ||DDRAW_CPU_THROTTLING */
+
+#if DDRAW_CPU_THROTTLING
+ p_ddraw->cpu.fps_target = (TMEDIA_PRODUCER(p_ddraw)->video.fps + DDRAW_CPU_THROTTLING_FPS_MIN) >> 1; // start with minimum fps and increase the value based on the fps
+#endif /* DDRAW_CPU_THROTTLING */
+
+bail:
+ tsk_safeobj_unlock(p_ddraw);
+ return SUCCEEDED(hr) ? 0 : -1;
+}
+
// Starts the producer: creates the notification events (MT mode), spawns the grabber
// thread (and the encoder thread in MT mode), raises thread priorities when requested,
// and schedules the CPU-monitor timer. On any failure the partially-started threads are
// joined before returning. Returns 0 on success, non-zero otherwise.
static int _tdav_producer_screencast_ddraw_start(tmedia_producer_t* p_self)
{
    tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)p_self;
    int ret = 0;

    if (!p_ddraw) {
        DDRAW_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    ret = tsk_safeobj_lock(p_ddraw);

    // starting always clears a previous pause
    p_ddraw->b_paused = tsk_false;

    if (p_ddraw->b_started) {
        DDRAW_DEBUG_INFO("Already started");
        goto bail;
    }

    // set before spawning threads: both thread loops poll b_started
    p_ddraw->b_started = tsk_true;

    // Create notify events (must be done here before starting the grabber thread)
#if DDRAW_MT
    for (int i = 0; i < sizeof(p_ddraw->mt.h_events) / sizeof(p_ddraw->mt.h_events[0]); ++i) {
        if (!p_ddraw->mt.h_events[i] && !(p_ddraw->mt.h_events[i] = CreateEvent(NULL, FALSE, FALSE, NULL))) {
            DDRAW_DEBUG_ERROR("Failed to create event at %d", i);
            ret = -1;
            goto bail;
        }
    }
#endif /* DDRAW_MT */

    ret = tsk_thread_create(&p_ddraw->tid[0], _tdav_producer_screencast_grap_thread, p_ddraw);
    if (ret != 0) {
        DDRAW_DEBUG_ERROR("Failed to create thread");
        goto bail;
    }
    //BOOL okSetTA = CeSetThreadAffinity((HANDLE)p_ddraw->tid[0], 0x01);
#if DDRAW_MT
    ret = tsk_thread_create(&p_ddraw->mt.tid[0], _tdav_producer_screencast_mt_encode_thread, p_ddraw);
    if (ret != 0) {
        DDRAW_DEBUG_ERROR("Failed to create thread");
        goto bail;
    }
    //okSetTA = CeSetThreadAffinity((HANDLE)p_ddraw->mt.tid[0], 0x02);
#endif /* DDRAW_MT */
#if DDRAW_HIGH_PRIO_MEMCPY
    if (p_ddraw->tid[0]) {
        tsk_thread_set_priority(p_ddraw->tid[0], TSK_THREAD_PRIORITY_TIME_CRITICAL);
    }
#if DDRAW_MT
    if (p_ddraw->mt.tid[0]) {
        tsk_thread_set_priority(p_ddraw->mt.tid[0], TSK_THREAD_PRIORITY_TIME_CRITICAL);
    }
#endif /* DDRAW_MT */
#endif /* DDRAW_HIGH_PRIO_MEMCPY */
#if DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING
    ret = tsk_timer_manager_start(p_ddraw->p_timer_mgr);
    if (ret == 0) {
        p_ddraw->cpu.id_timer = tsk_timer_manager_schedule(p_ddraw->p_timer_mgr, DDRAW_CPU_SCHEDULE_TIMEOUT, _tdav_producer_screencast_timer_cb, p_ddraw);
    }
    else {
        // CPU monitoring is best-effort only; capture still works without it
        ret = 0; // not fatal error
        DDRAW_DEBUG_WARN("Failed to start CPU timer");
    }
#endif /* DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING */

bail:
    // rollback on failure: clearing b_started makes the thread loops exit, then join
    if (ret) {
        p_ddraw->b_started = tsk_false;
        if (p_ddraw->tid[0]) {
            tsk_thread_join(&(p_ddraw->tid[0]));
        }
#if DDRAW_MT
        if (p_ddraw->mt.tid[0]) {
            tsk_thread_join(&(p_ddraw->mt.tid[0]));
        }
#endif /* DDRAW_MT */
    }
    ret = tsk_safeobj_unlock(p_ddraw);

    return ret;
}
+
+static int _tdav_producer_screencast_ddraw_pause(tmedia_producer_t* p_self)
+{
+ tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)p_self;
+
+ if (!p_ddraw) {
+ DDRAW_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_ddraw);
+
+ p_ddraw->b_paused = tsk_true;
+ goto bail;
+
+bail:
+ tsk_safeobj_unlock(p_ddraw);
+
+ return 0;
+}
+
// Stops the producer: clears the running flags, stops the CPU timer, joins the grabber
// thread, then (MT mode) wakes and joins the encoder thread and destroys the events.
// Idempotent: calling on an already-stopped producer is a no-op. Returns 0, or -1 on
// invalid argument.
static int _tdav_producer_screencast_ddraw_stop(tmedia_producer_t* p_self)
{
    tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)p_self;

    if (!p_ddraw) {
        DDRAW_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    tsk_safeobj_lock(p_ddraw);

    if (!p_ddraw->b_started) {
        DDRAW_DEBUG_INFO("Already stopped");
        goto bail;
    }

    // thread loops poll b_started and will exit
    p_ddraw->b_started = tsk_false;
    p_ddraw->b_paused = tsk_false;

#if DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING
    if (p_ddraw->p_timer_mgr) {
        tsk_timer_manager_stop(p_ddraw->p_timer_mgr);
    }
#endif /* DDRAW_CPU_MONITOR ||DDRAW_CPU_THROTTLING */

    // stop grabber thread
    if (p_ddraw->tid[0]) {
        tsk_thread_join(&(p_ddraw->tid[0]));
    }

#if DDRAW_MT
    // the encoder thread may be blocked in WaitForMultipleObjects: signal the
    // shutdown event to wake it before joining
    if (p_ddraw->mt.h_events[DDRAW_MT_EVENT_SHUTDOWN_INDEX]){
        SetEvent(p_ddraw->mt.h_events[DDRAW_MT_EVENT_SHUTDOWN_INDEX]);
    }
    if (p_ddraw->mt.tid[0]) {
        tsk_thread_join(&(p_ddraw->mt.tid[0]));
    }
    for (int i = 0; i < sizeof(p_ddraw->mt.h_events) / sizeof(p_ddraw->mt.h_events[0]); ++i) {
        if (p_ddraw->mt.h_events[i]) {
            CloseHandle(p_ddraw->mt.h_events[i]);
            p_ddraw->mt.h_events[i] = NULL;
        }
    }
#endif

bail:
    tsk_safeobj_unlock(p_ddraw);

    return 0;
}
+
// Grabs one frame from the primary surface: locks the surface, copies (or directly
// references) the RGB pixels, optionally renders a preview, converts RGB32 -> I420
// when supported, and hands the result to the encode callback (directly, or via the
// MT encoder thread). Re-queries the surface description on every grab so a display
// mode change (resolution / bit depth) is picked up and buffers are re-allocated.
// Returns 0 on success, -1 on failure.
static int _tdav_producer_screencast_grab(tdav_producer_screencast_ddraw_t* p_self)
{
    int ret = 0;
    HRESULT hr = S_OK;
    DDSURFACEDESC ddsd;
    DWORD nSizeWithoutPadding, nRowLengthInBytes, lockFlags;
    tmedia_producer_t* p_base = TMEDIA_PRODUCER(p_self);
    LPVOID lpBuffToSend, lpBuffYUV;
    BOOL bDirectMemSurfAccess = DDRAW_MEM_SURFACE_DIRECT_ACCESS;
#if DDRAW_MT
    INT iMtFreeBuffIndex = -1;
#endif
    //--uint64_t timeStart, timeEnd;

    //--timeStart = tsk_time_now();

    if (!p_self) {
        DDRAW_CHECK_HR(hr = E_INVALIDARG);
    }

    if (!p_self->b_started) {
#if defined(E_ILLEGAL_METHOD_CALL)
        DDRAW_CHECK_HR(hr = E_ILLEGAL_METHOD_CALL);
#else
        DDRAW_CHECK_HR(hr = E_FAIL);
#endif
    }

#if DDRAW_MT
    // all MT buffers busy? skip this frame instead of blocking the grabber
    {
        INT iIndex = 0;
        for (; (iIndex < DDRAW_MT_COUNT) && (p_self->mt.b_flags_array[iIndex] == TRUE); ++iIndex);
        if (iIndex == DDRAW_MT_COUNT) {
            goto bail;
        }
    }
#endif /* DDRAW_MT */

    // restore the surface if it was lost (e.g. display mode change, ALT+TAB on exclusive apps)
    if (p_self->p_surf_primary->IsLost() == DDERR_SURFACELOST) {
        DDRAW_CHECK_HR(hr = p_self->p_surf_primary->Restore());
    }

    ddsd.dwSize = sizeof(ddsd);
    ddsd.dwFlags = DDSD_HEIGHT | DDSD_WIDTH | DDSD_PITCH | DDSD_PIXELFORMAT;
    lockFlags = DDLOCK_READONLY |
#if TDAV_UNDER_WINDOWS_CE
        // This flag has a slightly different name under Windows CE vs. Desktop, but it's the same behavior.
        DDLOCK_WAITNOTBUSY;
#else
        DDLOCK_WAIT;
#endif
    DDRAW_CHECK_HR(hr = p_self->p_surf_primary->Lock(NULL, &ddsd, lockFlags, NULL));
    // make sure surface size and number of bits per pixel haven't changed
    if (TMEDIA_PRODUCER(p_self)->video.width != ddsd.dwWidth || TMEDIA_PRODUCER(p_self)->video.height != ddsd.dwHeight || p_self->n_buff_rgb_bitscount != ddsd.ddpfPixelFormat.dwRGBBitCount) {
        tsk_size_t n_buff_rgb_new;
        tmedia_chroma_t chroma_new;
        DDRAW_DEBUG_WARN("surface has changed: width %d<>%d or height %d<>%d or rgb_bits_count %d<>%d",
            p_base->video.width, ddsd.dwWidth,
            p_base->video.height, ddsd.dwHeight,
            p_self->n_buff_rgb_bitscount, ddsd.ddpfPixelFormat.dwRGBBitCount);
        if ((chroma_new = _tdav_producer_screencast_get_chroma(&ddsd.ddpfPixelFormat)) == tmedia_chroma_none) {
            DDRAW_CHECK_HR(hr = DDERR_INVALIDCAPS);
        }
        // allocate RGB buffer (only grows; a smaller surface reuses the old buffer)
        n_buff_rgb_new = (ddsd.dwWidth * ddsd.dwHeight * (ddsd.ddpfPixelFormat.dwRGBBitCount >> 3));
        if (p_self->n_buff_rgb < n_buff_rgb_new) {
            hr = _tdav_producer_screencast_alloc_rgb_buff(p_self, ddsd.dwWidth, ddsd.dwHeight, ddsd.ddpfPixelFormat.dwRGBBitCount);
            if (FAILED(hr)) {
                p_self->p_surf_primary->Unlock(NULL); // unlock before going to bail
                DDRAW_CHECK_HR(hr);
            }
        }
        p_base->video.width = ddsd.dwWidth;
        p_base->video.height = ddsd.dwHeight;
        p_base->video.chroma = chroma_new;
        p_self->n_buff_rgb_bitscount = ddsd.ddpfPixelFormat.dwRGBBitCount;
        // Check if we can use built-in chroma conversion (same criteria as prepare())
#if DDRAW_HAVE_RGB32_TO_I420_INTRIN || DDRAW_HAVE_RGB32_TO_I420_ASM
        p_self->b_have_rgb32_conv =
            _tdav_producer_screencast_have_ssse3() // SSSE3 supported
            && DDRAW_IS_ALIGNED(p_base->video.width, DDRAW_MEM_ALIGNMENT) // width multiple of 16
            /* && DDRAW_IS_ALIGNED(p_base->video.height, DDRAW_MEM_ALIGNMENT) // height multiple of 16 */
            && p_base->video.chroma == tmedia_chroma_rgb32; // Primary screen RGB32
        if (p_self->b_have_rgb32_conv) {
            p_base->video.chroma = tmedia_chroma_yuv420p;
        }
#endif
        DDRAW_DEBUG_INFO("RGB32 -> I420 conversion supported: %s", p_self->b_have_rgb32_conv ? "YES" : "NO");
        // allocate YUV buffer
        if (p_self->b_have_rgb32_conv) {
            hr = _tdav_producer_screencast_alloc_yuv_buff(p_self, (DWORD)p_base->video.width, (DWORD)p_base->video.height);
            if (FAILED(hr)) {
                p_self->p_surf_primary->Unlock(NULL); // unlock before going to bail
                DDRAW_CHECK_HR(hr);
            }
        }
        // preview
#if DDRAW_PREVIEW
        p_self->bi_preview.bmiHeader.biWidth = ddsd.dwWidth;
        p_self->bi_preview.bmiHeader.biHeight = ddsd.dwHeight;
        p_self->bi_preview.bmiHeader.biBitCount = (WORD)ddsd.ddpfPixelFormat.dwRGBBitCount;
        p_self->bi_preview.bmiHeader.biSizeImage = (p_self->bi_preview.bmiHeader.biWidth * p_self->bi_preview.bmiHeader.biHeight * (p_self->bi_preview.bmiHeader.biBitCount >> 3));
#endif /* DDRAW_PREVIEW */
    }
    nRowLengthInBytes = ddsd.dwWidth * (ddsd.ddpfPixelFormat.dwRGBBitCount >> 3);
    nSizeWithoutPadding = ddsd.dwHeight * nRowLengthInBytes;

    // init lpBuffToSend: use the surface memory in place when it is contiguous
    // (pitch == row length) and correctly aligned; otherwise copy into our own buffer
    if (DDRAW_MEM_SURFACE_DIRECT_ACCESS && ddsd.lPitch == nRowLengthInBytes && (!p_self->b_have_rgb32_conv || DDRAW_IS_ALIGNED(ddsd.lpSurface, DDRAW_MEM_ALIGNMENT))) {
        // no padding
        lpBuffToSend = ddsd.lpSurface;
        bDirectMemSurfAccess = TRUE;
    }
    else {
        // with padding or copy requested
        UINT8 *pSurfBuff = (UINT8 *)ddsd.lpSurface, *pNegBuff = (UINT8 *)p_self->p_buff_rgb_aligned;
        DWORD y;
        bDirectMemSurfAccess = FALSE;
        //--timeStart = tsk_time_now();
        if (ddsd.lPitch == nRowLengthInBytes) {
            // copy without padding: whole frame in one pass, widest SIMD copy that fits
            const UINT8* src = pSurfBuff;
            UINT8* dst = (UINT8*)p_self->p_buff_rgb_aligned;
            if (DDRAW_IS_ALIGNED(src, 16) && (nSizeWithoutPadding & 15) == 0) {
#if DDRAW_HAVE_RGB32_TO_I420_INTRIN || DDRAW_HAVE_RGB32_TO_I420_ASM
                if ((nSizeWithoutPadding & 127) == 0) {
                    for (DWORD i = 0; i < nSizeWithoutPadding; i += 128, src += 128, dst += 128) {
#if defined(DDRAW_COPY128_ASM)
                        DDRAW_COPY128_ASM(dst, src);
#else
                        DDRAW_COPY128_INTRIN(dst, src);
#endif /* DDRAW_COPY128_ASM */
                    }
                }
                else if((nSizeWithoutPadding & 63) == 0) {
                    for (DWORD i = 0; i < nSizeWithoutPadding; i += 64, src += 64, dst += 64) {
#if defined(DDRAW_COPY64_ASM)
                        DDRAW_COPY64_ASM(dst, src);
#else
                        DDRAW_COPY64_INTRIN(dst, src);
#endif /* DDRAW_COPY64_ASM */
                    }
                }
                else { // (nSizeWithoutPadding & 15) == 0
                    for (DWORD i = 0; i < nSizeWithoutPadding; i += 16, src += 16, dst += 16) {
#if defined(DDRAW_COPY16_ASM)
                        DDRAW_COPY16_ASM(dst, src);
#else
                        DDRAW_COPY16_INTRIN(dst, src);
#endif /* DDRAW_COPY16_ASM */
                    }
                }
#else // neither ASM nor INTRINSIC support
                CopyMemory(dst, src, nSizeWithoutPadding);
#endif /* DDRAW_HAVE_RGB32_TO_I420_INTRIN || DDRAW_HAVE_RGB32_TO_I420_ASM */
            }
            else { // not 16bytes aligned
                CopyMemory(dst, src, nSizeWithoutPadding);
            }
        }
        else {
            // copy with padding: row by row, skipping the pitch padding
            for (y = 0; y < ddsd.dwHeight; ++y) {
                CopyMemory(pNegBuff, pSurfBuff, nRowLengthInBytes);
                pSurfBuff += ddsd.lPitch;
                pNegBuff += nRowLengthInBytes;
            }
        }
        lpBuffToSend = p_self->p_buff_rgb_aligned;
        //--timeEnd = tsk_time_now();
        //--DDRAW_DEBUG_INFO("Mem copy: start=%llu, end=%llu, duration=%llu", timeStart, timeEnd, (timeEnd - timeStart));
    }
    if (!bDirectMemSurfAccess) {
        // surface buffer no longer needed, unlock
        DDRAW_CHECK_HR(hr = p_self->p_surf_primary->Unlock(NULL));
    }
    // display preview
#if DDRAW_PREVIEW
    if (p_self->hwnd_preview) {
        HWND hWnd; // copy for thread-safeness
        HDC hDC = GetDC((hWnd = p_self->hwnd_preview));
        if (hDC) {
            RECT rcPreview;
            if (GetWindowRect(hWnd, &rcPreview)) {
                LONG nPreviewWidth = (rcPreview.right - rcPreview.left);
                LONG nPreviewHeight = (rcPreview.bottom - rcPreview.top);
                StretchDIBits(
                    hDC,
                    0, 0, nPreviewWidth, nPreviewHeight,
                    0, 0, p_self->bi_preview.bmiHeader.biWidth, p_self->bi_preview.bmiHeader.biHeight,
                    lpBuffToSend,
                    &p_self->bi_preview,
                    DIB_RGB_COLORS,
                    SRCCOPY);
            }
            ReleaseDC(hWnd, hDC);
        }
    }
#endif /* DDRAW_PREVIEW */

    // check we have a free buffer
#if DDRAW_MT
    {
        for (INT iIndex = 0; iIndex < DDRAW_MT_COUNT; ++iIndex) {
            if (p_self->mt.b_flags_array[iIndex] != TRUE) {
                iMtFreeBuffIndex = iIndex;
                lpBuffYUV = p_self->mt.p_buff_yuv_aligned_array[iIndex];
                break;
            }
        }
        if (iMtFreeBuffIndex < 0) {
            lpBuffToSend = NULL; // do not waste time converting or encoding
            lpBuffYUV = NULL;
        }
    }
#else
    lpBuffYUV = p_self->p_buff_yuv_aligned;
#endif /* DDRAW_MT */

    //--timeStart = tsk_time_now();
    if (lpBuffToSend && (lpBuffYUV || !p_self->b_have_rgb32_conv)) {
        if (p_self->b_have_rgb32_conv) {
            // Convert from RGB32 to I420
#if DDRAW_HAVE_RGB32_TO_I420_ASM
            _tdav_producer_screencast_rgb32_to_yuv420_asm_ssse3((uint8_t*)lpBuffYUV, (const uint8_t*)lpBuffToSend, (int)p_base->video.width, (int)p_base->video.height);
#elif DDRAW_HAVE_RGB32_TO_I420_INTRIN
            _tdav_producer_screencast_rgb32_to_yuv420_intrin_ssse3((uint8_t*)lpBuffYUV, (const uint8_t*)lpBuffToSend, (int)p_base->video.width, (int)p_base->video.height);
#else
            DDRAW_CHECK_HR(hr = E_NOTIMPL); // never called
#endif
#if DDRAW_MT
            // mark the buffer busy, then wake the encoder thread
            p_self->mt.b_flags_array[iMtFreeBuffIndex] = TRUE;
            if (!SetEvent(p_self->mt.h_events[iMtFreeBuffIndex])) {
                DDRAW_CHECK_HR(hr = E_FAIL);
            }
#else
            p_base->enc_cb.callback(p_base->enc_cb.callback_data, lpBuffYUV, p_self->n_buff_yuv);
#endif
        }
        else {
            // Send RGB32 buffer to the encode callback and let conversion be done by libyuv
            // do not multi-thread as we cannot perform chroma conversion and encoding in parallel
            p_base->enc_cb.callback(p_base->enc_cb.callback_data, lpBuffToSend, nSizeWithoutPadding);
        }
    }
    //--timeEnd = tsk_time_now();
    //--DDRAW_DEBUG_INFO("Encode callback: start=%llu, end=%llu, duration=%llu", timeStart, timeEnd, (timeEnd - timeStart));

    if (bDirectMemSurfAccess) {
        // surface buffer was used in preview and encode callback, unlock now
        DDRAW_CHECK_HR(hr = p_self->p_surf_primary->Unlock(NULL));
    }

bail:
    // a lost surface is not fatal: restore it and report success so the grabber retries
    if (hr == DDERR_SURFACELOST) {
        /*hr = */p_self->p_surf_primary->Restore();
        hr = S_OK;
    }

    //--timeEnd = tsk_time_now();
    //--DDRAW_DEBUG_INFO("Grab and encode duration=%llu", (timeEnd - timeStart));

    return SUCCEEDED(hr) ? 0 : -1;
}
+
+static tmedia_chroma_t _tdav_producer_screencast_get_chroma(const DDPIXELFORMAT* pixelFormat)
+{
+ HRESULT hr = DD_OK;
+ if (pixelFormat->dwFlags != DDPF_RGB) {
+ DDRAW_DEBUG_ERROR("dwFlags(%d) != DDPF_RGB", pixelFormat->dwFlags);
+ DDRAW_CHECK_HR(hr = DDERR_INVALIDCAPS);
+ }
+ switch (pixelFormat->dwRGBBitCount) {
+ case 32: // RGB32
+ case 24: // RGB24
+ // pixels must be aligned for fast copy
+ if (pixelFormat->dwRBitMask != 0xff0000 || pixelFormat->dwGBitMask != 0xff00 || pixelFormat->dwBBitMask != 0xff || pixelFormat->dwRGBAlphaBitMask != 0) {
+ DDRAW_DEBUG_ERROR("Pixels not aligned");
+ }
+ return pixelFormat->dwRGBBitCount == 24 ? tmedia_chroma_bgr24 : tmedia_chroma_rgb32;
+ case 16: // RGB565
+ // pixels must be aligned for fast copy
+ if (pixelFormat->dwRBitMask != 0xF800 || pixelFormat->dwGBitMask != 0x7E0 || pixelFormat->dwBBitMask != 0x1F) {
+ DDRAW_DEBUG_ERROR("Pixels not aligned");
+ }
+ return tmedia_chroma_rgb565le;
+ default:
+ DDRAW_DEBUG_ERROR("dwRGBBitCount(%d) != 24 and 32", pixelFormat->dwRGBBitCount);
+ DDRAW_CHECK_HR(hr = DDERR_INVALIDCAPS);
+ break;
+ }
+
+bail:
+ return tmedia_chroma_none;
+}
+
// Loads ddraw.dll at runtime and creates the IDirectDraw object via DirectDrawCreate,
// storing both in lpModule. Idempotent: already-loaded DLL / already-created object
// are reused. Avoids a link-time dependency on ddraw.lib. Returns S_OK or an HRESULT
// error code.
static HRESULT _tdav_producer_screencast_create_module(LPDDrawModule lpModule)
{
    typedef HRESULT (WINAPI *pDirectDrawCreateFunc)(_In_ GUID FAR *lpGUID,
        _Out_ LPDIRECTDRAW FAR *lplpDD,
        _In_ IUnknown FAR *pUnkOuter);
    HRESULT hr = S_OK;
    pDirectDrawCreateFunc DirectDrawCreate_ = NULL;

    if (!lpModule) {
        DDRAW_CHECK_HR(hr = E_INVALIDARG);
    }

    if (!lpModule->hDLL && !(lpModule->hDLL = LoadLibrary(TEXT("ddraw.dll")))) {
        DDRAW_DEBUG_ERROR("Failed to load ddraw.dll: %d", GetLastError());
        DDRAW_CHECK_HR(hr = E_FAIL);
    }
    if (!lpModule->lpDD) {
        // "GetProcAddressA" is missing but "GetProcAddressW" exists on CE,
        // so the symbol name must be a wide string there
#if TDAV_UNDER_WINDOWS_CE
#	define DirectDrawCreateName TEXT("DirectDrawCreate")
#else
#	define DirectDrawCreateName "DirectDrawCreate"
#endif
        if (!(DirectDrawCreate_ = (pDirectDrawCreateFunc)GetProcAddress(lpModule->hDLL, DirectDrawCreateName))) {
            DDRAW_DEBUG_ERROR("Failed to find DirectDrawCreate in ddraw.dll: %d", GetLastError());
            DDRAW_CHECK_HR(hr = E_FAIL);
        }
        DDRAW_CHECK_HR(hr = DirectDrawCreate_(NULL, &lpModule->lpDD, NULL));
    }

bail:
    return hr;
}
+
// Grows (never shrinks) the aligned RGB staging buffer to hold a w*h frame at the
// given bit depth. Updates p_ddraw->n_buff_rgb on success, zeroes it on failure.
// Returns S_OK or DDERR_OUTOFMEMORY.
static HRESULT _tdav_producer_screencast_alloc_rgb_buff(tdav_producer_screencast_ddraw_t* p_ddraw, DWORD w, DWORD h, DWORD bitsCount)
{
    HRESULT hr = S_OK;
    DWORD n_buff_rgb_new = (w * h * (bitsCount >> 3));

    if (p_ddraw->n_buff_rgb < n_buff_rgb_new) {
        // NOTE(review): the result overwrites the pointer directly; if
        // tsk_realloc_aligned follows realloc semantics (returns NULL and keeps the
        // old block on failure) the previous buffer leaks here — confirm against tsk_memory.
        p_ddraw->p_buff_rgb_aligned = tsk_realloc_aligned(p_ddraw->p_buff_rgb_aligned, n_buff_rgb_new, DDRAW_MEM_ALIGNMENT);
        if (!p_ddraw->p_buff_rgb_aligned) {
            p_ddraw->n_buff_rgb = 0;
            DDRAW_CHECK_HR(hr = DDERR_OUTOFMEMORY);
        }
        p_ddraw->n_buff_rgb = n_buff_rgb_new;
    }

bail:
    return hr;
}
+
// (Re)allocates the aligned I420 output buffer(s) for a w*h frame: one buffer in
// single-threaded mode, one per MT slot otherwise. I420 size is w*h*3/2 bytes.
// Sets p_ddraw->n_buff_yuv, zeroing it on allocation failure.
// Returns S_OK or DDERR_OUTOFMEMORY.
static HRESULT _tdav_producer_screencast_alloc_yuv_buff(tdav_producer_screencast_ddraw_t* p_ddraw, DWORD w, DWORD h)
{
    HRESULT hr = S_OK;
    void** pp_buff_yuv_aligned;
    int n_buff_yuv_aligned_count;

#if DDRAW_MT
    pp_buff_yuv_aligned = p_ddraw->mt.p_buff_yuv_aligned_array;
    n_buff_yuv_aligned_count = sizeof(p_ddraw->mt.p_buff_yuv_aligned_array)/sizeof(p_ddraw->mt.p_buff_yuv_aligned_array[0]);
#else
    pp_buff_yuv_aligned = &p_ddraw->p_buff_yuv_aligned;
    n_buff_yuv_aligned_count = 1;
#endif /* DDRAW_MT */

    // Y plane (w*h) + U plane (w*h/4) + V plane (w*h/4)
    p_ddraw->n_buff_yuv = (w * h * 3) >> 1;
    for (int i = 0; i < n_buff_yuv_aligned_count; ++i) {
        pp_buff_yuv_aligned[i] = tsk_realloc_aligned(pp_buff_yuv_aligned[i], p_ddraw->n_buff_yuv, DDRAW_MEM_ALIGNMENT);
        if (!pp_buff_yuv_aligned[i]) {
            p_ddraw->n_buff_yuv = 0;
            DDRAW_CHECK_HR(hr = DDERR_OUTOFMEMORY);
        }
    }

bail:
    return hr;
}
+
+static void* TSK_STDCALL _tdav_producer_screencast_grap_thread(void *arg)
+{
+ tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)arg;
+ tmedia_producer_t* p_base = TMEDIA_PRODUCER(arg);
+ int ret = 0;
+
+ // FPS manager
+ uint64_t TimeNow, TimeLastFrame = 0;
+ uint64_t TimeFrameDuration = (1000 / p_base->video.fps);
+
+ DDRAW_DEBUG_INFO("Grab thread -- START");
+
+ while (ret == 0 && p_ddraw->b_started) {
+#if DDRAW_CPU_THROTTLING
+ TimeFrameDuration = (1000 / p_ddraw->cpu.fps_target);
+#endif /* DDRAW_CPU_THROTTLING */
+ TimeNow = tsk_time_now();
+ if ((TimeNow - TimeLastFrame) > TimeFrameDuration) {
+ if (!p_ddraw->b_muted && !p_ddraw->b_paused) {
+ if (ret = _tdav_producer_screencast_grab(p_ddraw)) {
+ goto next;
+ }
+ }
+ TimeLastFrame = TimeNow;
+ }
+ else {
+ tsk_thread_sleep(1);
+#if 0
+ DDRAW_DEBUG_INFO("Skip frame");
+#endif
+ }
+ next:
+ ;
+ }
+ DDRAW_DEBUG_INFO("Grab thread -- STOP");
+ return tsk_null;
+}
+
+#if DDRAW_MT
+static void* TSK_STDCALL _tdav_producer_screencast_mt_encode_thread(void *arg)
+{
+ tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)arg;
+ tmedia_producer_t* p_base = TMEDIA_PRODUCER(arg);
+ DWORD dwEvent, dwIndex;
+ int ret = 0;
+ DWORD events_count = sizeof(p_ddraw->mt.h_events) / sizeof(p_ddraw->mt.h_events[0]);
+
+ DDRAW_DEBUG_INFO("Encode MT thread -- START");
+
+ while (ret == 0 && p_ddraw->b_started) {
+ dwEvent = WaitForMultipleObjects(events_count, p_ddraw->mt.h_events, FALSE, INFINITE);
+ if (!p_ddraw->b_started) {
+ break;
+ }
+ if (dwEvent < WAIT_OBJECT_0 || dwEvent >(WAIT_OBJECT_0 + events_count)) {
+ DDRAW_DEBUG_ERROR("Invalid dwEvent(%d)", dwEvent);
+ break;
+ }
+ dwIndex = (dwEvent - WAIT_OBJECT_0);
+ if (p_ddraw->mt.b_flags_array[dwIndex] != TRUE) {
+ // must never happen
+ DDRAW_DEBUG_ERROR("Invalid b_flags_array(%d)", dwIndex);
+ break;
+ }
+
+ p_base->enc_cb.callback(p_base->enc_cb.callback_data, p_ddraw->mt.p_buff_yuv_aligned_array[dwIndex], p_ddraw->n_buff_yuv);
+ p_ddraw->mt.b_flags_array[dwIndex] = FALSE;
+ }
+ DDRAW_DEBUG_INFO("Encode MT -- STOP");
+ return tsk_null;
+}
+#endif /* DDRAW_MT */
+
+#if DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING
+static unsigned long long FileTimeToInt64(const FILETIME & ft)
+{
+ return (((unsigned long long)(ft.dwHighDateTime))<<32) | ((unsigned long long)ft.dwLowDateTime);
+}
+static BOOL GetCpuPercents(unsigned long long* PercentIdle, unsigned long long* PercentUsage)
+{
+ static unsigned long long _prevTicks = 0;
+ static unsigned long long _prevIdleTime = 0;
+ unsigned long long ticks, idleTime;
+ BOOL bSaveValues = FALSE, bSet = FALSE;
+#if TDAV_UNDER_WINDOWS_CE
+ bSaveValues = TRUE;
+ ticks = GetTickCount();
+ idleTime = GetIdleTime();
+#else
+ {
+ FILETIME _idleTime, _kernelTime, _userTime;
+ if (GetSystemTimes(&_idleTime, &_kernelTime, &_userTime)) {
+ idleTime = FileTimeToInt64(_idleTime);
+ ticks = FileTimeToInt64(_kernelTime) + FileTimeToInt64(_userTime);
+ bSaveValues = TRUE;
+ }
+ }
+#endif
+ if (_prevTicks > 0) {
+ *PercentIdle = ((100 * (idleTime - _prevIdleTime)) / (ticks - _prevTicks));
+ *PercentUsage = 100 - *PercentIdle;
+ bSet = TRUE;
+ }
+ if (bSaveValues) {
+ _prevTicks = ticks;
+ _prevIdleTime = idleTime;
+ }
+
+ return bSet;
+}
+
// Periodic timer callback: samples CPU usage and, when throttling is enabled, lowers
// the target fps while usage is above the threshold and slowly raises it back toward
// the negotiated fps when usage drops. Re-schedules itself while the producer runs.
// Always returns 0 (one-shot timers: a non-rescheduled timer simply stops firing).
static int _tdav_producer_screencast_timer_cb(const void* arg, tsk_timer_id_t timer_id)
{
    tdav_producer_screencast_ddraw_t* p_ddraw = (tdav_producer_screencast_ddraw_t*)arg;
    int ret = 0;

    if (!p_ddraw->b_started) {
        return 0;
    }

    if (p_ddraw->cpu.id_timer == timer_id) {
        unsigned long long PercentIdle, PercentUsage;
        if (GetCpuPercents(&PercentIdle, &PercentUsage) == TRUE) {
            TSK_DEBUG_INFO("\n\n****\n\nCPU Usage = %lld\n\n***", PercentUsage);
#if DDRAW_CPU_THROTTLING
            {
                if ((PercentUsage + DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN) > DDRAW_CPU_THROTTLING_THRESHOLD) {
                    // above threshold: scale fps down proportionally to the overshoot
                    unsigned long long NewTargetPercentUsage = TSK_CLAMP(DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN, DDRAW_CPU_THROTTLING_THRESHOLD - DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN, INT_MAX);
                    int NewTargetFps = (int)((NewTargetPercentUsage * p_ddraw->cpu.fps_target) / PercentUsage);
                    NewTargetFps = TSK_CLAMP(DDRAW_CPU_THROTTLING_FPS_MIN, NewTargetFps, TMEDIA_PRODUCER(p_ddraw)->video.fps);
                    TSK_DEBUG_INFO("\n\n****\n\nCPU throttling = (%lld+%d)>%d, NewTargetPercentUsage=%lld, NewTargetFps=%d\n\n***",
                        PercentUsage, DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN, DDRAW_CPU_THROTTLING_THRESHOLD, NewTargetPercentUsage, NewTargetFps);
                    p_ddraw->cpu.fps_target = NewTargetFps;
                }
                else if (PercentUsage < DDRAW_CPU_THROTTLING_THRESHOLD) {
                    // below threshold: recover one fps per tick toward the negotiated value
                    if ((p_ddraw->cpu.fps_target + DDRAW_CPU_THROTTLING_THRESHOLD_MARGIN) < TMEDIA_PRODUCER(p_ddraw)->video.fps) { // not honoring the negotiated fps yet?
                        p_ddraw->cpu.fps_target += 1; // TODO: this is ok only if the timer timeout is set to 1s or less
                    }
                }
            }
#endif /* DDRAW_CPU_THROTTLING */
        }

        // re-arm the timer for the next sample
        if (p_ddraw->b_started) {
            p_ddraw->cpu.id_timer = tsk_timer_manager_schedule(p_ddraw->p_timer_mgr, DDRAW_CPU_SCHEDULE_TIMEOUT, _tdav_producer_screencast_timer_cb, p_ddraw);
        }
    }
    return 0;
}
+
+#endif /* DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING */
+
+//
+// ddraw screencast producer object definition
+//
+/* constructor */
+static tsk_object_t* _tdav_producer_screencast_ddraw_ctor(tsk_object_t *self, va_list * app)
+{
+ tdav_producer_screencast_ddraw_t *p_ddraw = (tdav_producer_screencast_ddraw_t *)self;
+ if (p_ddraw) {
+ /* init base */
+ tmedia_producer_init(TMEDIA_PRODUCER(p_ddraw));
+ TMEDIA_PRODUCER(p_ddraw)->video.chroma = tmedia_chroma_bgr24; // RGB24 on x86 (little endians) stored as BGR24
+ /* init self with default values*/
+ TMEDIA_PRODUCER(p_ddraw)->video.fps = 15;
+ TMEDIA_PRODUCER(p_ddraw)->video.width = 352;
+ TMEDIA_PRODUCER(p_ddraw)->video.height = 288;
+
+ tsk_safeobj_init(p_ddraw);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* _tdav_producer_screencast_ddraw_dtor(tsk_object_t * self)
+{
+ tdav_producer_screencast_ddraw_t *p_ddraw = (tdav_producer_screencast_ddraw_t *)self;
+ if (p_ddraw) {
+ /* stop */
+ if (p_ddraw->b_started) {
+ _tdav_producer_screencast_ddraw_stop((tmedia_producer_t*)p_ddraw);
+ }
+
+ /* deinit base */
+ tmedia_producer_deinit(TMEDIA_PRODUCER(p_ddraw));
+ /* deinit self */
+#if DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING
+ if (p_ddraw->p_timer_mgr) {
+ tsk_timer_manager_destroy(&p_ddraw->p_timer_mgr);
+ }
+#endif /* DDRAW_CPU_MONITOR || DDRAW_CPU_THROTTLING */
+#if DDRAW_MT
+ for (int i = 0; i < sizeof(p_ddraw->mt.p_buff_yuv_aligned_array) / sizeof(p_ddraw->mt.p_buff_yuv_aligned_array[0]); ++i) {
+ TSK_FREE_ALIGNED(p_ddraw->mt.p_buff_yuv_aligned_array[i]);
+ }
+ for (int i = 0; i < sizeof(p_ddraw->mt.h_events) / sizeof(p_ddraw->mt.h_events[0]); ++i) {
+ if (p_ddraw->mt.h_events[i]) {
+ CloseHandle(p_ddraw->mt.h_events[i]);
+ p_ddraw->mt.h_events[i] = NULL;
+ }
+ }
+#endif /* DDRAW_MT */
+ TSK_FREE_ALIGNED(p_ddraw->p_buff_rgb_aligned);
+ TSK_FREE_ALIGNED(p_ddraw->p_buff_yuv_aligned);
+ DDRAW_SAFE_RELEASE(&p_ddraw->p_surf_primary);
+ DDrawModuleSafeFree(p_ddraw->ddrawModule);
+ tsk_safeobj_deinit(p_ddraw);
+
+ DDRAW_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
// tsk_object runtime-type descriptor: object size, ctor, dtor, no comparator.
static const tsk_object_def_t tdav_producer_screencast_ddraw_def_s =
{
    sizeof(tdav_producer_screencast_ddraw_t),
    _tdav_producer_screencast_ddraw_ctor,
    _tdav_producer_screencast_ddraw_dtor,
    tsk_null,
};
/* plugin definition*/
// Media-producer plugin descriptor registered with tinyMEDIA: BFCP video
// (screen-sharing) producer with the set/prepare/start/pause/stop entry points above.
static const tmedia_producer_plugin_def_t tdav_producer_screencast_ddraw_plugin_def_s =
{
    &tdav_producer_screencast_ddraw_def_s,
    tmedia_bfcp_video,
    "Microsoft DirectDraw screencast producer",

    _tdav_producer_screencast_ddraw_set,
    _tdav_producer_screencast_ddraw_prepare,
    _tdav_producer_screencast_ddraw_start,
    _tdav_producer_screencast_ddraw_pause,
    _tdav_producer_screencast_ddraw_stop
};
// Exported handle used by the plugin registry.
const tmedia_producer_plugin_def_t *tdav_producer_screencast_ddraw_plugin_def_t = &tdav_producer_screencast_ddraw_plugin_def_s;
+
+#endif /* TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT */
diff --git a/tinyDAV/src/video/gdi/tdav_consumer_video_gdi.c b/tinyDAV/src/video/gdi/tdav_consumer_video_gdi.c
new file mode 100644
index 0000000..8a81b66
--- /dev/null
+++ b/tinyDAV/src/video/gdi/tdav_consumer_video_gdi.c
@@ -0,0 +1,544 @@
+/* Copyright (C) 2014 Mamadou DIOP
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/video/gdi/tdav_consumer_video_gdi.h"
+
+#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT
+
+#include <windows.h>
+
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#define CHECK_HR(x) { HRESULT __hr__ = (x); if (FAILED(__hr__)) { TSK_DEBUG_ERROR("Operation Failed (%08x)", __hr__); goto bail; } }
+
+static HRESULT HookWindow(struct tdav_consumer_video_gdi_s *p_gdi, HWND hWnd, BOOL bFullScreenWindow);
+static HRESULT UnhookWindow(struct tdav_consumer_video_gdi_s *p_gdi, BOOL bFullScreenWindow);
+static HRESULT SetFullscreen(struct tdav_consumer_video_gdi_s *p_gdi, BOOL bFullScreen);
+static HWND CreateFullScreenWindow(struct tdav_consumer_video_gdi_s *p_gdi);
+static LRESULT CALLBACK WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam);
+
+typedef struct tdav_consumer_video_gdi_s
+{
+ TMEDIA_DECLARE_CONSUMER;
+
+ BOOL bStarted, bPrepared, bPaused, bFullScreen, bWindowHooked, bWindowHookedFullScreen;
+ HWND hWindow;
+ WNDPROC wndProc;
+ HWND hWindowFullScreen;
+ WNDPROC wndProcFullScreen;
+ BITMAPINFO bitmapInfo;
+ void* pBuffer;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_consumer_video_gdi_t;
+
+
+
+/* ============ Media Consumer Interface ================= */
+// Applies a run-time parameter to the GDI video consumer.
+// Supported keys: "remote-hwnd" (int64, HWND to render into) and
+// "fullscreen" (int32 boolean). Returns 0 on success, -1 on failure.
+// FIX: removed the unused local "ret" and the redundant trailing CHECK_HR(hr)
+// (hr is either S_OK here or control already jumped to "bail").
+static int tdav_consumer_video_gdi_set(tmedia_consumer_t *self, const tmedia_param_t* param)
+{
+    tdav_consumer_video_gdi_t* p_gdi = (tdav_consumer_video_gdi_t*)self;
+    HRESULT hr = S_OK;
+
+    if (!self || !param) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        CHECK_HR(hr = E_POINTER);
+    }
+
+    if (param->value_type == tmedia_pvt_int64) {
+        if (tsk_striequals(param->key, "remote-hwnd")) {
+            HWND hWnd = ((HWND)*((int64_t*)param->value));
+            if (hWnd != p_gdi->hWindow) {
+                tsk_safeobj_lock(p_gdi); // block consumer thread
+                UnhookWindow(p_gdi, FALSE);
+                p_gdi->hWindow = hWnd;
+                tsk_safeobj_unlock(p_gdi); // unblock consumer thread
+            }
+        }
+    }
+    else if (param->value_type == tmedia_pvt_int32) {
+        if (tsk_striequals(param->key, "fullscreen")) {
+            BOOL bFullScreen = !!*((int32_t*)param->value);
+            TSK_DEBUG_INFO("[GDI video consumer] Full Screen = %d", bFullScreen);
+            CHECK_HR(hr = SetFullscreen(p_gdi, bFullScreen));
+        }
+    }
+
+bail:
+    return SUCCEEDED(hr) ? 0 : -1;
+}
+
+
+// Configures the consumer from the negotiated codec: FPS, input size and the
+// BITMAPINFO used by StretchDIBits() (top-down RGB24, hence negative height).
+// Returns 0 on success, -1 on invalid arguments.
+static int tdav_consumer_video_gdi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+    tdav_consumer_video_gdi_t* p_gdi = (tdav_consumer_video_gdi_t*)self;
+
+    // FIX: was "!codec && codec->plugin", which both mis-tested the plugin
+    // pointer (never rejected a NULL plugin) and dereferenced "codec" when NULL.
+    if (!p_gdi || !codec || !codec->plugin) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    TMEDIA_CONSUMER(p_gdi)->video.fps = TMEDIA_CODEC_VIDEO(codec)->in.fps;
+    TMEDIA_CONSUMER(p_gdi)->video.in.width = TMEDIA_CODEC_VIDEO(codec)->in.width;
+    TMEDIA_CONSUMER(p_gdi)->video.in.height = TMEDIA_CODEC_VIDEO(codec)->in.height;
+
+    // keep the application-provided display size if it was already set
+    if (!TMEDIA_CONSUMER(p_gdi)->video.display.width) {
+        TMEDIA_CONSUMER(p_gdi)->video.display.width = TMEDIA_CONSUMER(p_gdi)->video.in.width;
+    }
+    if (!TMEDIA_CONSUMER(p_gdi)->video.display.height) {
+        TMEDIA_CONSUMER(p_gdi)->video.display.height = TMEDIA_CONSUMER(p_gdi)->video.in.height;
+    }
+
+    ZeroMemory(&p_gdi->bitmapInfo, sizeof(p_gdi->bitmapInfo));
+    p_gdi->bitmapInfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+    p_gdi->bitmapInfo.bmiHeader.biPlanes = 1;
+    p_gdi->bitmapInfo.bmiHeader.biBitCount = 24; // RGB24
+    p_gdi->bitmapInfo.bmiHeader.biCompression = BI_RGB;
+    p_gdi->bitmapInfo.bmiHeader.biWidth = (LONG)TMEDIA_CONSUMER(p_gdi)->video.in.width;
+    // negative height => top-down DIB (first scanline is the top of the image)
+    p_gdi->bitmapInfo.bmiHeader.biHeight = (LONG)(TMEDIA_CONSUMER(p_gdi)->video.in.height * -1);
+    p_gdi->bitmapInfo.bmiHeader.biSizeImage = (DWORD)(TMEDIA_CONSUMER(p_gdi)->video.in.width * abs((int)TMEDIA_CONSUMER(p_gdi)->video.in.height) *
+        (p_gdi->bitmapInfo.bmiHeader.biBitCount >> 3));
+
+    return 0;
+}
+
+// Moves the consumer to the "started" state, clearing any pending pause.
+static int tdav_consumer_video_gdi_start(tmedia_consumer_t* self)
+{
+    tdav_consumer_video_gdi_t* p_cons = (tdav_consumer_video_gdi_t*)self;
+
+    if (!p_cons) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_cons);
+    p_cons->bStarted = TRUE;
+    p_cons->bPaused = FALSE;
+    tsk_safeobj_unlock(p_cons);
+
+    return 0;
+}
+
+// Consumes one decoded RGB24 frame: reallocates the local copy buffer when
+// the input size changes, subclasses the target window on first use and
+// triggers a repaint through InvalidateRect(). The frame itself is drawn by
+// WndProc() on WM_PAINT. Returns 0 on success, negative on error.
+static int tdav_consumer_video_gdi_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+    tdav_consumer_video_gdi_t* p_gdi = (tdav_consumer_video_gdi_t*)self;
+    int ret = 0;
+    HWND* p_Window;
+    BOOL *p_bWindowHooked, bInputSizeChanged; // FIX: typo "bImputSizeChanged"
+
+    if (!p_gdi) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_gdi);
+
+    if (!p_gdi->bStarted || p_gdi->bPaused) {
+        TSK_DEBUG_INFO("GDI consumer stopped or paused");
+        goto bail;
+    }
+
+    bInputSizeChanged = (size != p_gdi->bitmapInfo.bmiHeader.biSizeImage)
+        || (TMEDIA_CONSUMER(p_gdi)->video.in.width != p_gdi->bitmapInfo.bmiHeader.biWidth)
+        || (TMEDIA_CONSUMER(p_gdi)->video.in.height != TSK_ABS(p_gdi->bitmapInfo.bmiHeader.biHeight));
+
+    if (bInputSizeChanged) {
+        tsk_size_t xNewSize = TMEDIA_CONSUMER(p_gdi)->video.in.width * TMEDIA_CONSUMER(p_gdi)->video.in.height * (p_gdi->bitmapInfo.bmiHeader.biBitCount >> 3);
+        void* pNewBuffer;
+        TSK_DEBUG_INFO("GDI input size changed: %u->%u", p_gdi->bitmapInfo.bmiHeader.biSizeImage, size);
+        if (xNewSize != size) {
+            TSK_DEBUG_ERROR("GDI consumer: chroma issue?");
+            ret = -1;
+            goto bail;
+        }
+        // FIX: do not assign the realloc result straight into pBuffer
+        // (would lose the old pointer on failure)
+        if (!(pNewBuffer = tsk_realloc(p_gdi->pBuffer, xNewSize))) {
+            TSK_DEBUG_ERROR("Failed to allocate buffer with size = %u", (unsigned)xNewSize);
+            ret = -2;
+            goto bail;
+        }
+        p_gdi->pBuffer = pNewBuffer;
+        p_gdi->bitmapInfo.bmiHeader.biWidth = (LONG)TMEDIA_CONSUMER(p_gdi)->video.in.width;
+        p_gdi->bitmapInfo.bmiHeader.biHeight = (LONG)TMEDIA_CONSUMER(p_gdi)->video.in.height * -1;
+        p_gdi->bitmapInfo.bmiHeader.biSizeImage = (DWORD)xNewSize;
+    }
+
+    p_Window = p_gdi->bFullScreen ? &p_gdi->hWindowFullScreen : &p_gdi->hWindow;
+    p_bWindowHooked = p_gdi->bFullScreen ? &p_gdi->bWindowHookedFullScreen : &p_gdi->bWindowHooked;
+
+    if (*p_Window) {
+        if (!*p_bWindowHooked) {
+            // Do not hook "hWnd" as it could be the fullscreen handle which is always hooked.
+            HRESULT hr = HookWindow(p_gdi, *p_Window, p_gdi->bFullScreen);
+            if (FAILED(hr)) {
+                // FIX: propagate hook failure instead of returning success
+                TSK_DEBUG_ERROR("HookWindow failed (%08x)", hr);
+                ret = -3;
+                goto bail;
+            }
+        }
+        if (!p_gdi->pBuffer) {
+            p_gdi->pBuffer = tsk_realloc(p_gdi->pBuffer, p_gdi->bitmapInfo.bmiHeader.biSizeImage);
+        }
+        if (p_gdi->pBuffer) {
+            memcpy(p_gdi->pBuffer, buffer, size);
+            InvalidateRect(*p_Window, NULL, TRUE);
+        }
+    }
+
+bail:
+    tsk_safeobj_unlock(p_gdi);
+    return ret;
+}
+
+// Pauses rendering; consume() drops frames while bPaused is set.
+static int tdav_consumer_video_gdi_pause(tmedia_consumer_t* self)
+{
+    tdav_consumer_video_gdi_t* p_cons = (tdav_consumer_video_gdi_t*)self;
+
+    if (!p_cons) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_cons);
+    p_cons->bPaused = TRUE;
+    tsk_safeobj_unlock(p_cons);
+
+    return 0;
+}
+
+// Stops rendering: leaves fullscreen mode and unhooks both windows so their
+// original window procedures are restored.
+static int tdav_consumer_video_gdi_stop(tmedia_consumer_t* self)
+{
+    tdav_consumer_video_gdi_t* p_cons = (tdav_consumer_video_gdi_t*)self;
+
+    if (!p_cons) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_cons);
+    p_cons->bStarted = FALSE;
+    p_cons->bPaused = FALSE;
+    SetFullscreen(p_cons, FALSE);
+    UnhookWindow(p_cons, TRUE);   // fullscreen window
+    UnhookWindow(p_cons, FALSE);  // remote window
+    tsk_safeobj_unlock(p_cons);
+
+    return 0;
+}
+
+
+// Window procedure installed on both the remote and the fullscreen window.
+// Fetches the owning consumer from the "Self" window property (attached by
+// HookWindow()/CreateFullScreenWindow()) and, on WM_PAINT, stretches the last
+// received RGB24 frame into the client area, letterboxed on a black
+// background. A key press leaves fullscreen. Everything falls through to
+// DefWindowProc() except WM_ERASEBKGND.
+static LRESULT CALLBACK WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
+{
+ switch(uMsg)
+ {
+ case WM_CREATE:
+ case WM_SIZE:
+ case WM_MOVE:
+ {
+ struct tdav_consumer_video_gdi_s* p_gdi = ((struct tdav_consumer_video_gdi_s*)GetProp(hWnd, TEXT("Self")));
+ if (p_gdi) {
+ // NOTE(review): intentionally empty -- geometry changes are absorbed by the
+ // MM_ISOTROPIC mapping in the WM_PAINT handler; confirm nothing else is needed.
+ }
+ break;
+ }
+
+ case WM_PAINT:
+ {
+ struct tdav_consumer_video_gdi_s* p_gdi = ((struct tdav_consumer_video_gdi_s*)GetProp(hWnd, TEXT("Self")));
+ if (p_gdi) {
+ tsk_safeobj_lock(p_gdi);
+
+ if (p_gdi->bStarted && !p_gdi->bPaused && p_gdi->pBuffer) {
+ PAINTSTRUCT ps;
+ HDC hdc;
+ RECT rc, logical_rect;
+ int height, width, i, x, y;
+ HDC dc_mem, all_dc[2];
+ HBITMAP bmp_mem;
+ HGDIOBJ bmp_old;
+ POINT logical_area;
+ HBRUSH brush;
+
+ if (!(hdc = BeginPaint(hWnd, &ps))) {
+ goto paint_done;
+ }
+
+ if (!GetClientRect(hWnd, &rc)) {
+ EndPaint(hWnd, &ps);
+ goto paint_done;
+ }
+
+ // frame dimensions (biHeight is negative for top-down DIBs)
+ height = abs(p_gdi->bitmapInfo.bmiHeader.biHeight);
+ width = p_gdi->bitmapInfo.bmiHeader.biWidth;
+
+ // double-buffer: draw into a memory DC, blit once at the end
+ dc_mem = CreateCompatibleDC(ps.hdc);
+ SetStretchBltMode(dc_mem, HALFTONE);
+
+ // Set the map mode so that the ratio will be maintained for us.
+ all_dc[0] = ps.hdc, all_dc[1] = dc_mem;
+ for (i = 0; i < sizeof(all_dc)/sizeof(all_dc[0]); ++i) {
+#if !TDAV_UNDER_WINDOWS_CE
+ SetMapMode(all_dc[i], MM_ISOTROPIC);
+ SetWindowExtEx(all_dc[i], width, height, NULL);
+ SetViewportExtEx(all_dc[i], rc.right, rc.bottom, NULL);
+#endif
+ }
+
+ bmp_mem = CreateCompatibleBitmap(ps.hdc, rc.right, rc.bottom);
+ bmp_old = SelectObject(dc_mem, bmp_mem);
+
+ logical_area.x = rc.right, logical_area.y = rc.bottom;
+#if !TDAV_UNDER_WINDOWS_CE
+ DPtoLP(ps.hdc, &logical_area, 1);
+#endif
+
+ // black background fills the letterbox bars
+ brush = CreateSolidBrush(RGB(0, 0, 0));
+ logical_rect.left = 0, logical_rect.top = 0, logical_rect.right = logical_area.x, logical_rect.bottom = logical_area.y;
+ FillRect(dc_mem, &logical_rect, brush);
+ DeleteObject(brush);
+
+ // center the frame inside the logical area
+ x = (logical_area.x / 2) - (width / 2);
+ y = (logical_area.y / 2) - (height / 2);
+
+ StretchDIBits(dc_mem, x, y, width, height,
+ 0, 0, width, height, p_gdi->pBuffer, &p_gdi->bitmapInfo, DIB_RGB_COLORS, SRCCOPY);
+
+ BitBlt(ps.hdc, 0, 0, logical_area.x, logical_area.y,
+ dc_mem, 0, 0, SRCCOPY);
+
+ // Cleanup.
+ SelectObject(dc_mem, bmp_old);
+ DeleteObject(bmp_mem);
+ DeleteDC(dc_mem);
+
+ EndPaint(hWnd, &ps);
+ }
+paint_done:
+ tsk_safeobj_unlock(p_gdi);
+ }
+ break;
+ }
+
+ case WM_ERASEBKGND:
+ {
+ return TRUE; // avoid background erasing.
+ }
+
+ case WM_CHAR:
+ case WM_KEYUP:
+ {
+ struct tdav_consumer_video_gdi_s* p_gdi = ((struct tdav_consumer_video_gdi_s*)GetProp(hWnd, TEXT("Self")));
+ if (p_gdi) {
+ // any key press leaves fullscreen mode
+ SetFullscreen(p_gdi, FALSE);
+ }
+
+ break;
+ }
+ }
+
+ return DefWindowProc(hWnd, uMsg, wParam, lParam);
+}
+
+// Subclasses "hWnd" with our WndProc and records the previous window
+// procedure so UnhookWindow() can restore it. Also attaches "p_gdi" to the
+// window via the "Self" property so WndProc can find the consumer back.
+// Returns S_OK on success, an HRESULT error code otherwise.
+static HRESULT HookWindow(struct tdav_consumer_video_gdi_s *p_gdi, HWND hWnd, BOOL bFullScreenWindow)
+{
+    HRESULT hr = S_OK;
+    HWND* p_Window = bFullScreenWindow ? &p_gdi->hWindowFullScreen : &p_gdi->hWindow;
+    WNDPROC* p_wndProc = bFullScreenWindow ? &p_gdi->wndProcFullScreen : &p_gdi->wndProc;
+    BOOL* p_bWindowHooked = bFullScreenWindow ? &p_gdi->bWindowHookedFullScreen : &p_gdi->bWindowHooked;
+
+    tsk_safeobj_lock(p_gdi);
+
+    CHECK_HR(hr = UnhookWindow(p_gdi, bFullScreenWindow));
+
+    if ((*p_Window = hWnd)) {
+#if TDAV_UNDER_WINDOWS_CE
+        *p_wndProc = (WNDPROC)SetWindowLong(hWnd, GWL_WNDPROC, (LONG)WndProc);
+#else
+        *p_wndProc = (WNDPROC)SetWindowLongPtr(hWnd, GWLP_WNDPROC, (LONG_PTR)WndProc);
+#endif
+        if (!*p_wndProc) {
+            // FIX: error message named a non-existent "HookWindowLongPtr"
+            TSK_DEBUG_ERROR("SetWindowLongPtr() failed with errcode=%d", GetLastError());
+            CHECK_HR(hr = E_FAIL);
+        }
+        *p_bWindowHooked = TRUE;
+        SetProp(*p_Window, TEXT("Self"), p_gdi);
+    }
+bail:
+    tsk_safeobj_unlock(p_gdi);
+    return hr; // FIX: was "return S_OK", which swallowed every failure
+}
+
+// Restores the original window procedure (if one was saved) and clears +
+// repaints the window so the last video frame does not linger on screen.
+static HRESULT UnhookWindow(struct tdav_consumer_video_gdi_s *p_gdi, BOOL bFullScreenWindow)
+{
+    HWND* phWnd;
+    WNDPROC* pProc;
+    BOOL* pHooked;
+
+    if (bFullScreenWindow) {
+        phWnd = &p_gdi->hWindowFullScreen;
+        pProc = &p_gdi->wndProcFullScreen;
+        pHooked = &p_gdi->bWindowHookedFullScreen;
+    }
+    else {
+        phWnd = &p_gdi->hWindow;
+        pProc = &p_gdi->wndProc;
+        pHooked = &p_gdi->bWindowHooked;
+    }
+
+    tsk_safeobj_lock(p_gdi);
+    if (*phWnd && *pProc) {
+#if TDAV_UNDER_WINDOWS_CE
+        SetWindowLong(*phWnd, GWL_WNDPROC, (LONG)*pProc);
+#else
+        SetWindowLongPtr(*phWnd, GWLP_WNDPROC, (LONG_PTR)*pProc);
+#endif
+        *pProc = NULL;
+    }
+    if (*phWnd) {
+        // blank the frame buffer, then force a repaint of the now-black frame
+        if (p_gdi->pBuffer) {
+            memset(p_gdi->pBuffer, 0, p_gdi->bitmapInfo.bmiHeader.biSizeImage);
+        }
+        InvalidateRect(*phWnd, NULL, FALSE);
+    }
+    *pHooked = FALSE;
+    tsk_safeobj_unlock(p_gdi);
+    return S_OK;
+}
+
+// Enters or leaves fullscreen rendering. The dedicated fullscreen window is
+// created lazily on first entry; leaving only hides (never destroys) it.
+static HRESULT SetFullscreen(struct tdav_consumer_video_gdi_s *p_gdi, BOOL bFullScreen)
+{
+    HRESULT hr = S_OK;
+
+    if (!p_gdi) {
+        CHECK_HR(hr = E_POINTER);
+    }
+
+    if (p_gdi->bFullScreen == bFullScreen) {
+        goto bail; // nothing to do
+    }
+
+    tsk_safeobj_lock(p_gdi);
+    if (bFullScreen) {
+        HWND hWnd = CreateFullScreenWindow(p_gdi);
+        if (hWnd) {
+#if TDAV_UNDER_WINDOWS_CE
+            ShowWindow(hWnd, SW_SHOWNORMAL);
+#else
+            ShowWindow(hWnd, SW_SHOWDEFAULT);
+#endif
+            UpdateWindow(hWnd);
+            HookWindow(p_gdi, hWnd, TRUE);
+        }
+    }
+    else if (p_gdi->hWindowFullScreen) {
+        ShowWindow(p_gdi->hWindowFullScreen, SW_HIDE);
+        UnhookWindow(p_gdi, TRUE);
+    }
+    p_gdi->bFullScreen = bFullScreen;
+    tsk_safeobj_unlock(p_gdi);
+
+bail:
+    return hr;
+}
+
+// Lazily creates the borderless topmost window used for fullscreen rendering
+// and attaches the consumer to it via the "Self" property. Returns the window
+// handle, or NULL on failure.
+static HWND CreateFullScreenWindow(struct tdav_consumer_video_gdi_s *p_gdi)
+{
+    // FIX: removed the unused local "hr"
+    if (!p_gdi) {
+        return NULL;
+    }
+
+    if (!p_gdi->hWindowFullScreen) {
+        WNDCLASS wc = {0};
+
+        wc.lpfnWndProc = WndProc;
+        wc.hInstance = GetModuleHandle(NULL);
+        wc.hCursor = LoadCursor(NULL, IDC_ARROW);
+        wc.lpszClassName = L"WindowClass";
+        RegisterClass(&wc);
+        p_gdi->hWindowFullScreen = CreateWindowEx(
+            0,
+            wc.lpszClassName,
+            L"Doubango's Video Consumer Fullscreen",
+            WS_EX_TOPMOST | WS_POPUP,
+            0, 0,
+            GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN),
+            NULL,
+            NULL,
+            GetModuleHandle(NULL),
+            NULL);
+
+        // FIX: check the handle before using it (was calling SetProp(NULL, ...))
+        if (p_gdi->hWindowFullScreen) {
+            SetProp(p_gdi->hWindowFullScreen, TEXT("Self"), p_gdi);
+        }
+        else {
+            TSK_DEBUG_ERROR("CreateWindowEx() failed with errcode=%d", GetLastError());
+        }
+    }
+    return p_gdi->hWindowFullScreen;
+}
+
+//
+// GDI video consumer object definition
+//
+/* constructor: initializes the base consumer and default display settings */
+static tsk_object_t* tdav_consumer_video_gdi_ctor(tsk_object_t * self, va_list * app)
+{
+    tdav_consumer_video_gdi_t *p_cons = (tdav_consumer_video_gdi_t *)self;
+    if (p_cons) {
+        tmedia_consumer_t* p_base = TMEDIA_CONSUMER(p_cons);
+        /* init base */
+        tmedia_consumer_init(p_base);
+        p_base->video.display.chroma = tmedia_chroma_bgr24; // GDI renders BGR24
+        /* init self: defaults, overridden by prepare() */
+        p_base->video.fps = 15;
+        p_base->video.display.width = 352;  // CIF
+        p_base->video.display.height = 288; // CIF
+        p_base->video.display.auto_resize = tsk_true;
+        tsk_safeobj_init(p_cons);
+    }
+    return self;
+}
+/* destructor: stops rendering, unhooks windows, then frees resources */
+static tsk_object_t* tdav_consumer_video_gdi_dtor(tsk_object_t * self)
+{
+    tdav_consumer_video_gdi_t *p_cons = (tdav_consumer_video_gdi_t *)self;
+    if (p_cons) {
+        /* stop (also restores the hooked window procedures) */
+        tdav_consumer_video_gdi_stop((tmedia_consumer_t*)self);
+
+        /* deinit base */
+        tmedia_consumer_deinit(TMEDIA_CONSUMER(p_cons));
+        /* deinit self */
+        TSK_FREE(p_cons->pBuffer);
+        tsk_safeobj_deinit(p_cons);
+    }
+
+    return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_video_gdi_def_s =
+{
+ sizeof(tdav_consumer_video_gdi_t),
+ tdav_consumer_video_gdi_ctor,
+ tdav_consumer_video_gdi_dtor,
+ tsk_null, /* no comparator */
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_video_gdi_plugin_def_s =
+{
+ &tdav_consumer_video_gdi_def_s,
+
+ tmedia_video,
+ "Microsoft GDI consumer (using custom source)",
+
+ tdav_consumer_video_gdi_set,
+ tdav_consumer_video_gdi_prepare,
+ tdav_consumer_video_gdi_start,
+ tdav_consumer_video_gdi_consume,
+ tdav_consumer_video_gdi_pause,
+ tdav_consumer_video_gdi_stop
+};
+/* exported descriptor used to register the plugin */
+const tmedia_consumer_plugin_def_t *tdav_consumer_video_gdi_plugin_def_t = &tdav_consumer_video_gdi_plugin_def_s;
+
+#endif /* TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT */
+
diff --git a/tinyDAV/src/video/gdi/tdav_producer_screencast_gdi.c b/tinyDAV/src/video/gdi/tdav_producer_screencast_gdi.c
new file mode 100644
index 0000000..799aafc
--- /dev/null
+++ b/tinyDAV/src/video/gdi/tdav_producer_screencast_gdi.c
@@ -0,0 +1,534 @@
+/* Copyright (C) 2014-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/video/gdi/tdav_producer_screencast_gdi.h"
+
+#if TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT
+
+#include <windows.h>
+
+#define RESIZER_DO_NOT_INCLUDE_HEADER
+#include "..\..\..\..\plugins\pluginDirectShow\internals\Resizer.cxx"
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_timer.h"
+#include "tsk_time.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#if TDAV_UNDER_WINDOWS_CE
+static const BOOL bitmapBuffSrcOwnMemory = FALSE;
+#else
+static const BOOL bitmapBuffSrcOwnMemory = TRUE;
+#endif /* TDAV_UNDER_WINDOWS_CE */
+
+#if !defined(kMaxFrameRate)
+# define kMaxFrameRate 4 // FIXME
+#endif /* kMaxFrameRate */
+
+// https://social.msdn.microsoft.com/forums/windowsdesktop/en-us/2cbe4674-e744-41d6-bc61-3c8e381aa942/how-to-make-bitblt-faster-for-copying-screen
+#if !defined(HIGH_PRIO_BITBLIT)
+# define HIGH_PRIO_BITBLIT 0
+#endif /* HIGH_PRIO_BITBLIT */
+
+typedef struct tdav_producer_screencast_gdi_s
+{
+ TMEDIA_DECLARE_PRODUCER;
+
+ HWND hwnd_preview;
+ HWND hwnd_src;
+
+ BITMAPINFO bitmapInfoSrc;
+ BITMAPINFO bitmapInfoNeg;
+
+ tsk_thread_handle_t* tid[1];
+
+ void* p_buff_src; // must use VirtualAlloc()
+ tsk_size_t n_buff_src;
+ void* p_buff_neg; // must use VirtualAlloc()
+ tsk_size_t n_buff_neg;
+
+ tsk_bool_t b_started;
+ tsk_bool_t b_paused;
+ tsk_bool_t b_muted;
+
+ RECT rcScreen;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_producer_screencast_gdi_t;
+
+static void* TSK_STDCALL _tdav_producer_screencast_record_thread(void *arg);
+static int _tdav_producer_screencast_grab(tdav_producer_screencast_gdi_t* p_self);
+
+
+/* ============ Media Producer Interface ================= */
+// Applies a run-time parameter: "local-hwnd"/"preview-hwnd" (preview window),
+// "src-hwnd" (window to capture; 0 means the whole screen) and "mute".
+static int _tdav_producer_screencast_gdi_set(tmedia_producer_t *p_self, const tmedia_param_t* pc_param)
+{
+    tdav_producer_screencast_gdi_t* p_prod = (tdav_producer_screencast_gdi_t*)p_self;
+
+    if (!p_prod || !pc_param) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    switch (pc_param->value_type) {
+        case tmedia_pvt_int64:
+        {
+            if (tsk_striequals(pc_param->key, "local-hwnd") || tsk_striequals(pc_param->key, "preview-hwnd")) {
+                p_prod->hwnd_preview = (HWND)*((int64_t*)pc_param->value);
+            }
+            else if (tsk_striequals(pc_param->key, "src-hwnd")) {
+                p_prod->hwnd_src = (HWND)*((int64_t*)pc_param->value);
+            }
+            break;
+        }
+        case tmedia_pvt_int32:
+        {
+            if (tsk_striequals(pc_param->key, "mute")) {
+                p_prod->b_muted = (TSK_TO_INT32((uint8_t*)pc_param->value) != 0);
+            }
+            break;
+        }
+        default:
+            break;
+    }
+
+    return 0;
+}
+
+// Configures the producer from the negotiated codec: FPS and output size,
+// fills the two BITMAPINFO headers ("Src" = captured area, "Neg" = negotiated
+// output, both RGB24), (re)allocates the output buffer with VirtualAlloc()
+// and caches the full-screen dimensions. Returns 0 on success, negative on
+// error.
+static int _tdav_producer_screencast_gdi_prepare(tmedia_producer_t* p_self, const tmedia_codec_t* pc_codec)
+{
+ tdav_producer_screencast_gdi_t* p_gdi = (tdav_producer_screencast_gdi_t*)p_self;
+ int ret = 0;
+
+ if (!p_gdi || !pc_codec) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_gdi);
+
+#if METROPOLIS /*= G2J.COM */
+ TMEDIA_PRODUCER(p_gdi)->video.fps = TSK_MIN(TMEDIA_CODEC_VIDEO(pc_codec)->out.fps, kMaxFrameRate);
+#else
+ TMEDIA_PRODUCER(p_gdi)->video.fps = TMEDIA_CODEC_VIDEO(pc_codec)->out.fps;
+#endif
+ TMEDIA_PRODUCER(p_gdi)->video.width = TMEDIA_CODEC_VIDEO(pc_codec)->out.width;
+ TMEDIA_PRODUCER(p_gdi)->video.height = TMEDIA_CODEC_VIDEO(pc_codec)->out.height;
+
+ TSK_DEBUG_INFO("[GDI screencast] fps:%d, width:%d; height:%d", TMEDIA_PRODUCER(p_gdi)->video.fps, TMEDIA_PRODUCER(p_gdi)->video.width, TMEDIA_PRODUCER(p_gdi)->video.height);
+
+ // both headers share the same defaults; Src's width/height/size are
+ // overwritten on every grab to match the captured area
+ p_gdi->bitmapInfoNeg.bmiHeader.biSize = p_gdi->bitmapInfoSrc.bmiHeader.biSize = (DWORD)sizeof(BITMAPINFOHEADER);
+ p_gdi->bitmapInfoNeg.bmiHeader.biWidth = p_gdi->bitmapInfoSrc.bmiHeader.biWidth = (LONG)TMEDIA_PRODUCER(p_gdi)->video.width;
+ p_gdi->bitmapInfoNeg.bmiHeader.biHeight = p_gdi->bitmapInfoSrc.bmiHeader.biHeight = (LONG)TMEDIA_PRODUCER(p_gdi)->video.height;
+ p_gdi->bitmapInfoNeg.bmiHeader.biPlanes = p_gdi->bitmapInfoSrc.bmiHeader.biPlanes = 1;
+ p_gdi->bitmapInfoNeg.bmiHeader.biBitCount = p_gdi->bitmapInfoSrc.bmiHeader.biBitCount = 24;
+ p_gdi->bitmapInfoNeg.bmiHeader.biCompression = p_gdi->bitmapInfoSrc.bmiHeader.biCompression = BI_RGB;
+ p_gdi->bitmapInfoNeg.bmiHeader.biSizeImage = (p_gdi->bitmapInfoNeg.bmiHeader.biWidth * p_gdi->bitmapInfoNeg.bmiHeader.biHeight * (p_gdi->bitmapInfoNeg.bmiHeader.biBitCount >> 3));
+
+ // grow the negotiated-size buffer if needed (VirtualAlloc required -- see struct comment)
+ if (p_gdi->n_buff_neg < p_gdi->bitmapInfoNeg.bmiHeader.biSizeImage) {
+ if (p_gdi->p_buff_neg) VirtualFree(p_gdi->p_buff_neg, 0, MEM_RELEASE);
+ if (!(p_gdi->p_buff_neg = VirtualAlloc(NULL, p_gdi->bitmapInfoNeg.bmiHeader.biSizeImage, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE))) {
+ p_gdi->n_buff_neg = 0;
+ ret = -3;
+ goto bail;
+ }
+ p_gdi->n_buff_neg = p_gdi->bitmapInfoNeg.bmiHeader.biSizeImage;
+ }
+
+ /* Get screen size */ {
+ HDC hDC;
+ hDC = CreateDC(TEXT("DISPLAY"), NULL, NULL, NULL);
+ if (!hDC) {
+ TSK_DEBUG_ERROR("CreateDC failed");
+ ret = -4;
+ goto bail;
+ }
+
+ // Get the dimensions of the main desktop window
+ p_gdi->rcScreen.left = p_gdi->rcScreen.top = 0;
+ p_gdi->rcScreen.right = GetDeviceCaps(hDC, HORZRES);
+ p_gdi->rcScreen.bottom = GetDeviceCaps(hDC, VERTRES);
+
+ // Release the device context
+ DeleteDC(hDC);
+ }
+
+bail:
+ tsk_safeobj_unlock(p_gdi);
+ return ret;
+}
+
+// Starts the producer: spawns the record thread (optionally bumped to
+// time-critical priority when HIGH_PRIO_BITBLIT is enabled). Returns 0 on
+// success, negative on failure (b_started is rolled back).
+static int _tdav_producer_screencast_gdi_start(tmedia_producer_t* p_self)
+{
+    tdav_producer_screencast_gdi_t* p_gdi = (tdav_producer_screencast_gdi_t*)p_self;
+    int ret = 0;
+
+    if (!p_gdi) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_gdi);
+
+    p_gdi->b_paused = tsk_false;
+
+    if (p_gdi->b_started) {
+        TSK_DEBUG_INFO("GDI screencast producer already started");
+        goto bail;
+    }
+
+    p_gdi->b_started = tsk_true;
+
+    // FIX: check thread creation instead of ignoring the result (previously
+    // "ret" could never become nonzero, making the bail rollback dead code)
+    if ((ret = tsk_thread_create(&p_gdi->tid[0], _tdav_producer_screencast_record_thread, p_gdi))) {
+        TSK_DEBUG_ERROR("Failed to create record thread (%d)", ret);
+        goto bail;
+    }
+#if HIGH_PRIO_BITBLIT
+    if (p_gdi->tid[0]) {
+        tsk_thread_set_priority(p_gdi->tid[0], TSK_THREAD_PRIORITY_TIME_CRITICAL);
+    }
+#endif
+
+bail:
+    if (ret) {
+        p_gdi->b_started = tsk_false;
+    }
+    tsk_safeobj_unlock(p_gdi);
+
+    return ret;
+}
+
+// Pauses the producer; the record thread keeps running but skips grabbing.
+static int _tdav_producer_screencast_gdi_pause(tmedia_producer_t* p_self)
+{
+    tdav_producer_screencast_gdi_t* p_prod = (tdav_producer_screencast_gdi_t*)p_self;
+
+    if (!p_prod) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_prod);
+    p_prod->b_paused = tsk_true;
+    tsk_safeobj_unlock(p_prod);
+
+    return 0;
+}
+
+// Stops the producer: clears b_started (which makes the record thread leave
+// its loop) and joins the thread.
+static int _tdav_producer_screencast_gdi_stop(tmedia_producer_t* p_self)
+{
+    tdav_producer_screencast_gdi_t* p_prod = (tdav_producer_screencast_gdi_t*)p_self;
+
+    if (!p_prod) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    tsk_safeobj_lock(p_prod);
+
+    if (p_prod->b_started) {
+        p_prod->b_started = tsk_false;
+        p_prod->b_paused = tsk_false;
+
+        // stop thread
+        if (p_prod->tid[0]) {
+            tsk_thread_join(&(p_prod->tid[0]));
+        }
+    }
+    else {
+        TSK_DEBUG_INFO("GDI screencast producer already stopped");
+    }
+
+    tsk_safeobj_unlock(p_prod);
+
+    return 0;
+}
+
+// Grabs a single frame from "hwnd_src" (or the whole screen when NULL):
+// BitBlts the source DC into a compatible memory bitmap, retrieves the bits
+// as RGB24, resizes them into the negotiated output buffer, optionally draws
+// a preview into "hwnd_preview" and pushes the frame to the encode callback.
+// Returns 0 on success, negative on error.
+static int _tdav_producer_screencast_grab(tdav_producer_screencast_gdi_t* p_self)
+{
+    int ret = 0;
+    HDC hSrcDC = NULL, hMemDC = NULL;
+    // FIX: initialize -- the early "goto bail" paths (ret=-2/-5/-6) used to
+    // evaluate "if (hBitmap)" on an uninitialized handle (undefined behavior)
+    HBITMAP hBitmap = NULL, hOldBitmap = NULL;
+    int nWidth, nHeight;
+    RECT rcSrc;
+
+    if (!p_self) {
+        TSK_DEBUG_ERROR("Invalid parameter");
+        return -1;
+    }
+
+    //--tsk_safeobj_lock(p_self);
+
+    if (!p_self->b_started) {
+        TSK_DEBUG_ERROR("producer not started yet");
+        ret = -2;
+        goto bail;
+    }
+
+    if (!TMEDIA_PRODUCER(p_self)->enc_cb.callback) {
+        goto bail; // nobody to deliver the frame to
+    }
+
+    hSrcDC = GetDC(p_self->hwnd_src);
+    if (!hSrcDC) {
+        TSK_DEBUG_ERROR("GetDC(%x) failed", (int64_t)p_self->hwnd_src);
+        ret = -5;
+        goto bail;
+    }
+    hMemDC = CreateCompatibleDC(hSrcDC);
+    if (!hMemDC) {
+        TSK_DEBUG_ERROR("CreateCompatibleDC(%x) failed", (int64_t)hSrcDC);
+        ret = -6;
+        goto bail;
+    }
+
+    // get points of rectangle to grab
+    if (p_self->hwnd_src) {
+        GetWindowRect(p_self->hwnd_src, &rcSrc);
+    }
+    else {
+        rcSrc.left = rcSrc.top = 0;
+        rcSrc.right = GetDeviceCaps(hSrcDC, HORZRES);
+        rcSrc.bottom = GetDeviceCaps(hSrcDC, VERTRES);
+    }
+
+    nWidth = rcSrc.right - rcSrc.left;
+    nHeight = rcSrc.bottom - rcSrc.top;
+
+    p_self->bitmapInfoSrc.bmiHeader.biWidth = nWidth;
+    p_self->bitmapInfoSrc.bmiHeader.biHeight = nHeight;
+    p_self->bitmapInfoSrc.bmiHeader.biSizeImage = nWidth * nHeight * (p_self->bitmapInfoSrc.bmiHeader.biBitCount >> 3);
+
+    // create a bitmap compatible with the screen DC
+#if TDAV_UNDER_WINDOWS_CE
+    {
+        // on WinCE the DIB section owns the pixel memory
+        // (bitmapBuffSrcOwnMemory == FALSE), so p_buff_src just aliases it
+        void* pvBits = NULL;
+        hBitmap = CreateDIBSection(hSrcDC, &p_self->bitmapInfoSrc, DIB_RGB_COLORS, &pvBits, NULL, 0);
+        if (!hBitmap || !pvBits) {
+            TSK_DEBUG_ERROR("Failed to create bitmap(%dx%d)", nWidth, nHeight);
+            goto bail;
+        }
+        p_self->p_buff_src = pvBits;
+        p_self->n_buff_src = p_self->bitmapInfoSrc.bmiHeader.biSizeImage;
+    }
+#else
+    hBitmap = CreateCompatibleBitmap(hSrcDC, nWidth, nHeight);
+    if (!hBitmap) {
+        TSK_DEBUG_ERROR("Failed to create bitmap(%dx%d)", nWidth, nHeight);
+        goto bail;
+    }
+
+    // grow the source buffer if the captured area got bigger
+    if (p_self->n_buff_src < p_self->bitmapInfoSrc.bmiHeader.biSizeImage) {
+        if (p_self->p_buff_src) VirtualFree(p_self->p_buff_src, 0, MEM_RELEASE);
+        if (!(p_self->p_buff_src = VirtualAlloc(NULL, p_self->bitmapInfoSrc.bmiHeader.biSizeImage, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE))) {
+            p_self->n_buff_src = 0;
+            ret = -3;
+            goto bail;
+        }
+        p_self->n_buff_src = p_self->bitmapInfoSrc.bmiHeader.biSizeImage;
+    }
+#endif /* TDAV_UNDER_WINDOWS_CE */
+
+    // select new bitmap into memory DC
+    hOldBitmap = (HBITMAP) SelectObject(hMemDC, hBitmap);
+
+    // bitblt screen DC to memory DC
+    BitBlt(hMemDC, 0, 0, nWidth, nHeight, hSrcDC, 0, 0, SRCCOPY);
+
+    // select old bitmap back into memory DC and get handle to
+    // bitmap of the screen
+    hBitmap = (HBITMAP) SelectObject(hMemDC, hOldBitmap);
+
+    // Copy the bitmap data into the provided BYTE buffer
+#if TDAV_UNDER_WINDOWS_CE
+    // memory already retrieved using "CreateDIBSection"
+#else
+    GetDIBits(hSrcDC, hBitmap, 0, nHeight, p_self->p_buff_src, &p_self->bitmapInfoSrc, DIB_RGB_COLORS);
+#endif
+
+    // resize the captured frame into the negotiated output dimensions
+    ResizeRGB(&p_self->bitmapInfoSrc.bmiHeader,
+        (const unsigned char *) p_self->p_buff_src,
+        &p_self->bitmapInfoNeg.bmiHeader,
+        (unsigned char *) p_self->p_buff_neg,
+        p_self->bitmapInfoNeg.bmiHeader.biWidth,
+        p_self->bitmapInfoNeg.bmiHeader.biHeight);
+
+    // preview
+    if (p_self->hwnd_preview) {
+        HDC hDC = GetDC(p_self->hwnd_preview);
+        if (hDC) {
+            RECT rcPreview = {0};
+            if (GetWindowRect(p_self->hwnd_preview, &rcPreview)) {
+                LONG nPreviewWidth = (rcPreview.right - rcPreview.left);
+                LONG nPreviewHeight = (rcPreview.bottom - rcPreview.top);
+
+                SetStretchBltMode(hDC, COLORONCOLOR);
+#if 0 // preview(neg)
+                StretchDIBits(
+                    hDC,
+                    0, 0, nPreviewWidth, nPreviewHeight,
+                    0, 0, p_self->bitmapInfoNeg.bmiHeader.biWidth, p_self->bitmapInfoNeg.bmiHeader.biHeight,
+                    p_self->p_buff_neg,
+                    &p_self->bitmapInfoNeg,
+                    DIB_RGB_COLORS,
+                    SRCCOPY);
+#else // preview(src)
+                StretchDIBits(
+                    hDC,
+                    0, 0, nPreviewWidth, nPreviewHeight,
+                    0, 0, p_self->bitmapInfoSrc.bmiHeader.biWidth, p_self->bitmapInfoSrc.bmiHeader.biHeight,
+                    p_self->p_buff_src,
+                    &p_self->bitmapInfoSrc,
+                    DIB_RGB_COLORS,
+                    SRCCOPY);
+#endif
+            }
+            ReleaseDC(p_self->hwnd_preview, hDC);
+        }
+    }
+
+    // encode and send data
+    TMEDIA_PRODUCER(p_self)->enc_cb.callback(TMEDIA_PRODUCER(p_self)->enc_cb.callback_data, p_self->p_buff_neg, p_self->bitmapInfoNeg.bmiHeader.biSizeImage);
+
+bail:
+    //--tsk_safeobj_unlock(p_self);
+
+    if (hSrcDC) {
+        ReleaseDC(p_self->hwnd_src, hSrcDC);
+    }
+    if (hMemDC) {
+        DeleteDC(hMemDC);
+    }
+
+    if (hBitmap) {
+        DeleteObject(hBitmap);
+        if (!bitmapBuffSrcOwnMemory) {
+            // DIB-section pixel memory dies with the bitmap: drop the alias
+            p_self->p_buff_src = NULL;
+            p_self->n_buff_src = 0;
+        }
+    }
+
+    return ret;
+}
+
+// Frame pump: sleeps ~1ms between checks and grabs a frame whenever at least
+// one frame duration has elapsed; exits when b_started is cleared or a grab
+// fails. While muted/paused the timer still advances so no burst occurs on
+// resume.
+static void* TSK_STDCALL _tdav_producer_screencast_record_thread(void *arg)
+{
+    tdav_producer_screencast_gdi_t* p_gdi = (tdav_producer_screencast_gdi_t*)arg;
+    int ret = 0;
+
+    // FPS manager (video.fps is non-zero: defaulted to 15 in the ctor and
+    // overwritten from the negotiated codec in prepare())
+    uint64_t TimeNow, TimeLastFrame = 0;
+    const uint64_t TimeFrameDuration = (1000 / TMEDIA_PRODUCER(p_gdi)->video.fps);
+
+    TSK_DEBUG_INFO("_tdav_producer_screencast_record_thread -- START");
+
+    while (ret == 0 && p_gdi->b_started) {
+        TimeNow = tsk_time_now();
+        if ((TimeNow - TimeLastFrame) >= TimeFrameDuration) {
+            if (!p_gdi->b_muted && !p_gdi->b_paused) {
+                // FIX: parenthesized the assignment-as-condition to make the
+                // intent explicit (silences -Wparentheses)
+                if ((ret = _tdav_producer_screencast_grab(p_gdi))) {
+                    goto next;
+                }
+            }
+            TimeLastFrame = TimeNow;
+        }
+        else {
+            tsk_thread_sleep(1);
+#if 0
+            TSK_DEBUG_INFO("[GDI screencast] Skip frame");
+#endif
+        }
+    next:
+        ;
+    }
+    TSK_DEBUG_INFO("_tdav_producer_screencast_record_thread -- STOP");
+    return tsk_null;
+}
+
+//
+// GDI screencast producer object definition
+//
+/* constructor: initializes the base producer and default capture settings */
+static tsk_object_t* _tdav_producer_screencast_gdi_ctor(tsk_object_t *self, va_list * app)
+{
+    tdav_producer_screencast_gdi_t *p_prod = (tdav_producer_screencast_gdi_t *)self;
+    if (p_prod) {
+        tmedia_producer_t* p_base = TMEDIA_PRODUCER(p_prod);
+        /* init base */
+        tmedia_producer_init(p_base);
+        p_base->video.chroma = tmedia_chroma_bgr24; // RGB24 on x86 (little endian) is stored as BGR24
+        /* init self: defaults, overridden by prepare() */
+        p_base->video.fps = 15;
+        p_base->video.width = 352;  // CIF
+        p_base->video.height = 288; // CIF
+
+        tsk_safeobj_init(p_prod);
+    }
+    return self;
+}
+/* destructor: joins the record thread, then frees the VirtualAlloc buffers */
+static tsk_object_t* _tdav_producer_screencast_gdi_dtor(tsk_object_t * self)
+{
+    tdav_producer_screencast_gdi_t *p_prod = (tdav_producer_screencast_gdi_t *)self;
+    if (p_prod) {
+        /* stop the record thread first */
+        if (p_prod->b_started) {
+            _tdav_producer_screencast_gdi_stop((tmedia_producer_t*)p_prod);
+        }
+
+        /* deinit base */
+        tmedia_producer_deinit(TMEDIA_PRODUCER(p_prod));
+        /* deinit self */
+        if (p_prod->p_buff_neg) {
+            VirtualFree(p_prod->p_buff_neg, 0, MEM_RELEASE);
+            p_prod->p_buff_neg = NULL;
+        }
+        if (p_prod->p_buff_src) {
+            // on WinCE the memory belongs to the DIB section, not to us
+            if (bitmapBuffSrcOwnMemory) {
+                VirtualFree(p_prod->p_buff_src, 0, MEM_RELEASE);
+            }
+            p_prod->p_buff_src = NULL;
+        }
+        tsk_safeobj_deinit(p_prod);
+
+        TSK_DEBUG_INFO("*** GDI Screencast producer destroyed ***");
+    }
+
+    return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_screencast_gdi_def_s =
+{
+ sizeof(tdav_producer_screencast_gdi_t),
+ _tdav_producer_screencast_gdi_ctor,
+ _tdav_producer_screencast_gdi_dtor,
+ tsk_null, /* no comparator */
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_screencast_gdi_plugin_def_s =
+{
+ &tdav_producer_screencast_gdi_def_s,
+ tmedia_bfcp_video, /* registered as BFCP (screen-share) video */
+ "Microsoft GDI screencast producer",
+
+ _tdav_producer_screencast_gdi_set,
+ _tdav_producer_screencast_gdi_prepare,
+ _tdav_producer_screencast_gdi_start,
+ _tdav_producer_screencast_gdi_pause,
+ _tdav_producer_screencast_gdi_stop
+};
+/* exported descriptor used to register the plugin */
+const tmedia_producer_plugin_def_t *tdav_producer_screencast_gdi_plugin_def_t = &tdav_producer_screencast_gdi_plugin_def_s;
+
+#endif /* TDAV_UNDER_WINDOWS && !TDAV_UNDER_WINDOWS_RT */
diff --git a/tinyDAV/src/video/jb/tdav_video_frame.c b/tinyDAV/src/video/jb/tdav_video_frame.c
new file mode 100644
index 0000000..fc7cbc3
--- /dev/null
+++ b/tinyDAV/src/video/jb/tdav_video_frame.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012 Doubango Telecom <http://www.doubango.org>
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_video_frame.c
+ * @brief Video Frame
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
+ */
+#include "tinydav/video/jb/tdav_video_frame.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+/* Constructor: allocates the (sorted) RTP packet list and the lock guarding
+   the frame. Returns tsk_null on allocation failure so tsk_object_new() fails. */
+static tsk_object_t* tdav_video_frame_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_video_frame_t *frame = self;
+	if (frame) {
+		if (!(frame->pkts = tsk_list_create())) {
+			/* fixed garbled message: was "Faile to list" */
+			TSK_DEBUG_ERROR("Failed to create list");
+			return tsk_null;
+		}
+		tsk_safeobj_init(frame);
+	}
+	return self;
+}
+/* Destructor: releases the packet list (un-refs every queued RTP packet)
+   then destroys the lock. */
+static tsk_object_t* tdav_video_frame_dtor(tsk_object_t * self)
+{
+	tdav_video_frame_t *frame = self;
+	if(frame){
+		TSK_OBJECT_SAFE_FREE(frame->pkts);
+
+		tsk_safeobj_deinit(frame);
+	}
+
+	return self;
+}
+/* Comparator used by tsk_list_push_ascending_data() to keep frames ordered by
+   RTP timestamp (oldest first).
+   NOTE(review): the uint32 subtraction cast to int gives the wrong sign when
+   the RTP timestamp wraps around 2^32 -- probably acceptable for a jitter
+   buffer that is flushed regularly, but worth confirming. */
+static int tdav_video_frame_cmp(const tsk_object_t *_p1, const tsk_object_t *_p2)
+{
+	const tdav_video_frame_t *p1 = _p1;
+	const tdav_video_frame_t *p2 = _p2;
+
+	if(p1 && p2){
+		return (int)(p1->timestamp - p2->timestamp);
+	}
+	else if(!p1 && !p2) return 0;
+	else return -1;
+}
+/* tinySAK object definition for a video frame (ctor/dtor/timestamp comparator) */
+static const tsk_object_def_t tdav_video_frame_def_s =
+{
+	sizeof(tdav_video_frame_t),
+	tdav_video_frame_ctor,
+	tdav_video_frame_dtor,
+	tdav_video_frame_cmp,
+};
+const tsk_object_def_t *tdav_video_frame_def_t = &tdav_video_frame_def_s;
+
+
+/* Creates a new video frame seeded with its first RTP packet.
+   The frame's identity (payload type, timestamp, SSRC) is copied from the
+   packet header; the packet itself is referenced (tsk_object_ref) and its
+   ownership moved into the frame's list (the list un-refs it on free).
+   Returns tsk_null on invalid input or allocation failure. */
+tdav_video_frame_t* tdav_video_frame_create(trtp_rtp_packet_t* rtp_pkt)
+{
+	tdav_video_frame_t* frame;
+	if(!rtp_pkt || !rtp_pkt->header){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return tsk_null;
+	}
+
+	if((frame = tsk_object_new(tdav_video_frame_def_t))){
+		/* take our own reference: tsk_list_push_ascending_data() steals it */
+		rtp_pkt = tsk_object_ref(rtp_pkt);
+		frame->payload_type = rtp_pkt->header->payload_type;
+		frame->timestamp = rtp_pkt->header->timestamp;
+		frame->highest_seq_num = rtp_pkt->header->seq_num;
+		frame->ssrc = rtp_pkt->header->ssrc;
+		tsk_list_push_ascending_data(frame->pkts, (void**)&rtp_pkt);
+	}
+	return frame;
+}
+
+/* Adds an RTP packet to an existing frame.
+   The packet must carry the same timestamp and payload type as the frame
+   (SSRC check intentionally disabled, see #if 0). Duplicated seq-nums are
+   detected and dropped; otherwise the packet is referenced and inserted in
+   ascending order. Returns 0 on success, negative on mismatch/invalid input. */
+int tdav_video_frame_put(tdav_video_frame_t* self, trtp_rtp_packet_t* rtp_pkt)
+{
+	if(!self || !rtp_pkt || !rtp_pkt->header){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	if(self->timestamp != rtp_pkt->header->timestamp){
+		TSK_DEBUG_ERROR("Timestamp mismatch");
+		return -2;
+	}
+	if(self->payload_type != rtp_pkt->header->payload_type){
+		TSK_DEBUG_ERROR("Payload Type mismatch");
+		return -2;
+	}
+#if 0
+	if(self->ssrc != rtp_pkt->header->ssrc){
+		TSK_DEBUG_ERROR("SSRC mismatch");
+		return -2;
+	}
+#endif
+
+	/* reference taken unconditionally; if the packet turns out to be a
+	   duplicate the extra reference is simply never pushed into the list.
+	   NOTE(review): in the duplicate branch that reference is never released
+	   here -- verify whether the caller's unref balances it. */
+	rtp_pkt = tsk_object_ref(rtp_pkt);
+	self->highest_seq_num = TSK_MAX(self->highest_seq_num, rtp_pkt->header->seq_num);
+	tsk_list_lock(self->pkts);
+	if (tdav_video_frame_find_by_seq_num(self, rtp_pkt->header->seq_num)) {
+		TSK_DEBUG_INFO("JB: Packet with seq_num=%hu duplicated", rtp_pkt->header->seq_num);
+	}
+	else {
+		tsk_list_push_ascending_data(self->pkts, (void**)&rtp_pkt);
+	}
+	tsk_list_unlock(self->pkts);
+
+	return 0;
+}
+
+/* Linear search for the packet with the given RTP sequence number.
+   Returns a BORROWED pointer (no new reference) or tsk_null if not found.
+   NOTE(review): the list lock is released before returning, so the pointer is
+   only safe while the caller prevents concurrent removal from the frame --
+   confirm callers hold the frame/jb lock. */
+const trtp_rtp_packet_t* tdav_video_frame_find_by_seq_num(const tdav_video_frame_t* self, uint16_t seq_num)
+{
+	const tsk_list_item_t *item;
+	const trtp_rtp_packet_t* pkt;
+	const trtp_rtp_packet_t* ret;
+
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return tsk_null;
+	}
+
+	ret = tsk_null;
+
+	tsk_list_lock(self->pkts);
+	tsk_list_foreach(item, self->pkts){
+		if(!(pkt = item->data) || !pkt->header){
+			continue;
+		}
+		if(pkt->header->seq_num == seq_num){
+			ret = pkt;
+			break;
+		}
+	}
+	tsk_list_unlock(self->pkts);
+
+	return ret;
+}
+
+// Serializes the frame: concatenates the payloads of all queued RTP packets
+// (already sorted by seq-num) into one contiguous buffer.
+// @buffer_ptr pointer to the destination buffer (realloc'ed in place if needed)
+// @buffer_size the actual buffer size. Could be enlarged if too small to fit
+// @retval number of copied bytes (0 on error; buffer may have been resized)
+tsk_size_t tdav_video_frame_write(struct tdav_video_frame_s* self, void** buffer_ptr, tsk_size_t* buffer_size)
+{
+	const tsk_list_item_t *item;
+	const trtp_rtp_packet_t* pkt;
+	tsk_size_t ret_size = 0;
+	int32_t last_seq_num = -1; // guard against duplicated packets (-1 never equals a uint16)
+	
+	if(!self || !buffer_ptr || !buffer_size){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return 0;
+	}
+	
+	tsk_list_lock(self->pkts);
+	tsk_list_foreach(item, self->pkts){
+		// skip empty/duplicated packets; list is ascending so duplicates are adjacent
+		if(!(pkt = item->data) || !pkt->payload.size || !pkt->header || pkt->header->seq_num == last_seq_num){
+			continue;
+		}
+		// grow the destination exactly as much as needed for this payload
+		if((ret_size + pkt->payload.size) > *buffer_size){
+			if(!(*buffer_ptr = tsk_realloc(*buffer_ptr, (ret_size + pkt->payload.size)))){
+				TSK_DEBUG_ERROR("Failed to resize the buffer");
+				*buffer_size = 0;
+				goto bail;
+			}
+			*buffer_size = (ret_size + pkt->payload.size);
+		}
+		// payload may live in owned memory (data) or borrowed memory (data_const)
+		memcpy(&((uint8_t*)*buffer_ptr)[ret_size], (pkt->payload.data ? pkt->payload.data : pkt->payload.data_const), pkt->payload.size);
+		ret_size += pkt->payload.size;
+		last_seq_num = pkt->header->seq_num;
+	}
+	
+bail:
+	tsk_list_unlock(self->pkts);
+	
+	return ret_size;
+}
+
+
+/**
+ Checks if the frame is complete (no gap/loss) or not.
+ IMPORTANT: This function assume that the RTP packets use the marker bit to signal end of sequences.
+ *@param self The frame with all rtp packets to check
+ *@param last_seq_num_with_mark The last seq num value of the packet with the mark bit set. Use negative value to ignore.
+ *@param missing_seq_num_start A missing seq num if any. This value is set only if the function returns False.
+ *@param missing_seq_num_count Number of missing seq nums starting at missing_seq_num_start.
+ *@return True if the frame is complete and False otherwise. If False is returned then, missing_seq_num is set.
+
+ NOTE(review): packets are walked in ascending seq-num order and each is expected
+ to equal (last_seq_num_with_mark + i); a 16-bit seq-num wrap inside the frame
+ would be reported as a gap -- confirm whether wrap can occur mid-frame here.
+ NOTE(review): pkt->header is dereferenced without a null check, unlike the
+ sibling functions in this file -- presumably guaranteed by the put() path.
+ */
+tsk_bool_t tdav_video_frame_is_complete(const tdav_video_frame_t* self, int32_t last_seq_num_with_mark, int32_t* missing_seq_num_start, int32_t* missing_seq_num_count)
+{
+	const trtp_rtp_packet_t* pkt;
+	const tsk_list_item_t *item;
+	uint16_t i;
+	tsk_bool_t is_complete = tsk_false;
+
+	if (!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return tsk_false;
+	}
+
+	i = 0;
+	tsk_list_lock(self->pkts);
+	tsk_list_foreach (item, self->pkts) {
+		if (!(pkt = item->data)){
+			continue;
+		}
+		/* gap detected: i-th packet after the last marked one must be exactly i seq-nums later */
+		if (last_seq_num_with_mark >= 0 && pkt->header->seq_num != (last_seq_num_with_mark + ++i)) {
+			if (missing_seq_num_start) *missing_seq_num_start = (last_seq_num_with_mark + i);
+			if (missing_seq_num_count) *missing_seq_num_count = pkt->header->seq_num - (*missing_seq_num_start);
+			break;
+		}
+		/* last packet of the frame must carry the RTP marker bit, otherwise the tail is missing */
+		if (item == self->pkts->tail) {
+			if(!(is_complete = (pkt->header->marker))){
+				if (missing_seq_num_start) *missing_seq_num_start = (pkt->header->seq_num + 1);
+				if (missing_seq_num_count) *missing_seq_num_count = 1;
+			}
+		}
+	}
+	tsk_list_unlock(self->pkts);
+
+	return is_complete;
+}
diff --git a/tinyDAV/src/video/jb/tdav_video_jb.c b/tinyDAV/src/video/jb/tdav_video_jb.c
new file mode 100644
index 0000000..4540fcf
--- /dev/null
+++ b/tinyDAV/src/video/jb/tdav_video_jb.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (C) 2011-2015 Mamadou DIOP
+ * Copyright (C) 2011-2015 Doubango Telecom <http://www.doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_video_jb.c
+ * @brief Video Jitter Buffer
+ */
+#include "tinydav/video/jb/tdav_video_jb.h"
+#include "tinydav/video/jb/tdav_video_frame.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_thread.h"
+#include "tsk_condwait.h"
+#include "tsk_debug.h"
+
+#if TSK_UNDER_WINDOWS
+# include <windows.h>
+#endif
+
+#define TDAV_VIDEO_JB_DISABLE 0
+
+// default frame rate
+// the corret fps will be computed using the RTP timestamps
+#define TDAV_VIDEO_JB_FPS TDAV_VIDEO_JB_FPS_MAX
+#define TDAV_VIDEO_JB_FPS_MIN 10
+#define TDAV_VIDEO_JB_FPS_MAX 120
+// Number of correct consecutive video frames to receive before computing the FPS
+#define TDAV_VIDEO_JB_FPS_PROB (TDAV_VIDEO_JB_FPS << 1)
+// Maximum gap allowed (used to detect seqnum wrpping)
+#define TDAV_VIDEO_JB_MAX_DROPOUT 0xFD9B
+
+#define TDAV_VIDEO_JB_TAIL_MAX_LOG2 1
+#if TDAV_UNDER_MOBILE /* to avoid too high memory usage */
+# define TDAV_VIDEO_JB_TAIL_MAX (TDAV_VIDEO_JB_FPS_MIN << TDAV_VIDEO_JB_TAIL_MAX_LOG2)
+#else
+# define TDAV_VIDEO_JB_TAIL_MAX (TDAV_VIDEO_JB_FPS_MAX << TDAV_VIDEO_JB_TAIL_MAX_LOG2)
+#endif
+
+#define TDAV_VIDEO_JB_RATE 90 /* KHz */
+
+#define TDAV_VIDEO_JB_LATENCY_MIN 2 /* Must be > 0 */
+#define TDAV_VIDEO_JB_LATENCY_MAX 15 /* Default, will be updated using fps */
+
+static int _tdav_video_jb_set_defaults(struct tdav_video_jb_s* self);
+static const tdav_video_frame_t* _tdav_video_jb_get_frame(struct tdav_video_jb_s* self, uint32_t timestamp, uint8_t pt, tsk_bool_t *pt_matched);
+static void* TSK_STDCALL _tdav_video_jb_decode_thread_func(void *arg);
+
+typedef struct tdav_video_jb_s
+{
+	TSK_DECLARE_OBJECT;
+
+	tsk_bool_t started;
+	int32_t fps;                 // estimated frame rate (clamped to [FPS_MIN, FPS_MAX])
+	int32_t fps_prob;            // countdown of consecutive good frames before re-estimating fps
+	int32_t avg_duration;        // running average inter-frame duration in ms
+	int32_t rate;                // RTP clock rate, in Khz
+	uint32_t last_timestamp;     // RTP timestamp of the most recently created frame
+	int32_t conseq_frame_drop;   // consecutive drops; triggers a full flush when >= tail_max
+	int32_t tail_max;            // maximum number of buffered frames
+	tdav_video_frames_L_t *frames;   // frames sorted by RTP timestamp
+	int64_t frames_count;            // cached length of "frames"
+
+	tsk_size_t latency_min;      // frames to accumulate before decoding starts
+	tsk_size_t latency_max;      // decode anyway once this many frames are pending
+
+	uint32_t decode_last_timestamp;          // timestamp of the last frame handed to the decoder
+	int32_t decode_last_seq_num_with_mark;   // -1 = unset
+	uint64_t decode_last_time;
+	tsk_thread_handle_t* decode_thread[1];
+	tsk_condwait_handle_t* decode_thread_cond;
+
+	uint16_t seq_nums[0xFF];     // last seq-num seen, indexed by RTP payload type (7-bit)
+	tdav_video_jb_cb_f callback;
+	const void* callback_data;
+
+	// to avoid locking use different cb_data
+	tdav_video_jb_cb_data_xt cb_data_rtp;
+	tdav_video_jb_cb_data_xt cb_data_fdd;
+	tdav_video_jb_cb_data_xt cb_data_any;
+
+	struct{
+		void* ptr;
+		tsk_size_t size;
+	} buffer;
+
+	TSK_DECLARE_SAFEOBJ;
+}
+tdav_video_jb_t;
+
+
+/* Constructor: allocates the frame list and the condition variable used to
+   pace/wake the decode thread; tags the per-type callback payloads once so
+   the hot paths never have to set them. */
+static tsk_object_t* tdav_video_jb_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_video_jb_t *jb = self;
+	if(jb){
+		if(!(jb->frames = tsk_list_create())){
+			TSK_DEBUG_ERROR("Failed to create list");
+			return tsk_null;
+		}
+		if(!(jb->decode_thread_cond = tsk_condwait_create())){
+			TSK_DEBUG_ERROR("Failed to create condition var");
+			return tsk_null;
+		}
+		jb->cb_data_fdd.type = tdav_video_jb_cb_data_type_fdd;
+		jb->cb_data_rtp.type = tdav_video_jb_cb_data_type_rtp;
+
+		tsk_safeobj_init(jb);
+	}
+	return self;
+}
+/* Destructor: stops the decode thread first (joins it), then frees the frame
+   list, the condwait, the scratch buffer and the lock. */
+static tsk_object_t* tdav_video_jb_dtor(tsk_object_t * self)
+{
+	tdav_video_jb_t *jb = self;
+	if(jb){
+		if(jb->started){
+			tdav_video_jb_stop(jb);
+		}
+		TSK_OBJECT_SAFE_FREE(jb->frames);
+		if(jb->decode_thread_cond){
+			tsk_condwait_destroy(&jb->decode_thread_cond);
+		}
+		TSK_SAFE_FREE(jb->buffer.ptr);
+		tsk_safeobj_deinit(jb);
+	}
+
+	return self;
+}
+/* tinySAK object definition for the jitter buffer (no comparator) */
+static const tsk_object_def_t tdav_video_jb_def_s =
+{
+	sizeof(tdav_video_jb_t),
+	tdav_video_jb_ctor,
+	tdav_video_jb_dtor,
+	tsk_null,
+};
+
+/* Public factory: allocates a jitter buffer and applies the default tuning
+   (fps, tail, latency...). Returns tsk_null on failure. */
+tdav_video_jb_t* tdav_video_jb_create()
+{
+	tdav_video_jb_t* jb;
+
+	if ((jb = tsk_object_new(&tdav_video_jb_def_s))) {
+		if (_tdav_video_jb_set_defaults(jb) != 0) {
+			TSK_OBJECT_SAFE_FREE(jb);
+		}
+	}
+	return jb;
+}
+
+/* Restarts the fps estimation: probe counter back to max, running average and
+   last-timestamp cleared. Called whenever a loss/drop makes the timing data
+   unreliable. (Multi-statement macro: all call sites use braced blocks, so the
+   bare { } expansion is safe here.) */
+#define tdav_video_jb_reset_fps_prob(self) {\
+(self)->fps_prob = TDAV_VIDEO_JB_FPS_PROB; \
+(self)->last_timestamp = 0; \
+(self)->avg_duration = 0; \
+}
+/* NOTE(review): this macro is never used and references fields (tail_prob,
+   tail_min) and constants that do not exist in tdav_video_jb_t -- it only
+   compiles because it is never expanded. Candidate for removal. */
+#define tdav_video_jb_reset_tail_min_prob(self) {\
+(self)->tail_prob = TDAV_VIDEO_JB_TAIL_MIN_PROB; \
+(self)->tail_min = TDAV_VIDEO_JB_TAIL_MIN_MAX; \
+}
+
+/* Registers the single consumer callback; the same user data is mirrored into
+   the three pre-typed callback payloads so each event path stays lock-free. */
+int tdav_video_jb_set_callback(tdav_video_jb_t* self, tdav_video_jb_cb_f callback, const void* usr_data)
+{
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	self->callback = callback;
+	self->cb_data_any.usr_data = usr_data;
+	self->cb_data_fdd.usr_data = usr_data;
+	self->cb_data_rtp.usr_data = usr_data;
+	return 0;
+}
+
+/* Starts the jitter buffer: spawns the decode thread (idempotent if already
+   started). Returns 0 on success, negative on failure.
+   Fixes vs. previous revision: on thread-creation failure the code used to
+   fall through and call tsk_thread_set_priority() on a NULL handle, returned
+   the priority result instead of the create error, and left "started" true. */
+int tdav_video_jb_start(tdav_video_jb_t* self)
+{
+	int ret = 0;
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	if(self->started){
+		return 0;
+	}
+
+	// must be set before creating the thread: the thread func loops on "started"
+	self->started = tsk_true;
+
+	if(!self->decode_thread[0]){
+		ret = tsk_thread_create(&self->decode_thread[0], _tdav_video_jb_decode_thread_func, self);
+		if(ret != 0 || !self->decode_thread[0]){
+			TSK_DEBUG_ERROR("Failed to create new thread");
+			self->started = tsk_false; // rollback so a later start() can retry
+			return ret ? ret : -2;
+		}
+		// a priority failure is not fatal: keep the jitter buffer running
+		if((ret = tsk_thread_set_priority(self->decode_thread[0], TSK_THREAD_PRIORITY_TIME_CRITICAL)) != 0){
+			TSK_DEBUG_INFO("Failed to raise decode thread priority (ret=%d), continuing", ret);
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+/* Feeds one incoming RTP packet into the jitter buffer.
+   - drops packets whose frame was already decoded (timestamp in the past);
+   - detects loss/late/duplicate via the per-payload-type seq-num tracker and
+     notifies the session (NACK) on loss;
+   - appends the packet to its frame if one exists, otherwise creates a new
+     frame and updates the fps estimation / tail management.
+   Returns 0 (also on soft drops), -1 on invalid input. */
+int tdav_video_jb_put(tdav_video_jb_t* self, trtp_rtp_packet_t* rtp_pkt)
+{
+#if TDAV_VIDEO_JB_DISABLE
+	/* pass-through mode: hand the packet straight to the consumer */
+	self->cb_data_rtp.rtp.pkt = rtp_pkt;
+	self->callback(&self->cb_data_rtp);
+#else
+	const tdav_video_frame_t* old_frame;
+	tsk_bool_t pt_matched = tsk_false, is_frame_late_or_dup = tsk_false, is_restarted = tsk_false;
+	uint16_t* seq_num;
+
+	if(!self || !rtp_pkt || !rtp_pkt->header){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	if(!self->started){
+		TSK_DEBUG_INFO("Video jitter buffer not started");
+		return 0;
+	}
+
+	/* sequence numbers are tracked per payload type (7-bit PT indexes seq_nums) */
+	seq_num = &self->seq_nums[rtp_pkt->header->payload_type];
+
+	tsk_safeobj_lock(self);
+
+	//TSK_DEBUG_INFO("receive seqnum=%u", rtp_pkt->header->seq_num);
+
+	/* packet older than the last decoded frame: drop unless the gap is so large
+	   that it must be a timestamp restart rather than a late packet */
+	if(self->decode_last_timestamp && (self->decode_last_timestamp > rtp_pkt->header->timestamp)){
+		if((self->decode_last_timestamp - rtp_pkt->header->timestamp) < TDAV_VIDEO_JB_MAX_DROPOUT){
+			TSK_DEBUG_INFO("--------Frame already Decoded [seqnum=%u]------------", rtp_pkt->header->seq_num);
+			tsk_safeobj_unlock(self);
+			return 0;
+		}
+	}
+
+	old_frame = _tdav_video_jb_get_frame(self, rtp_pkt->header->timestamp, rtp_pkt->header->payload_type, &pt_matched);
+
+	/* non-consecutive seq-num (the 0xFFFF case is excluded to tolerate 16-bit wrap) */
+	if((*seq_num && *seq_num != 0xFFFF) && (*seq_num + 1) != rtp_pkt->header->seq_num){
+		int32_t diff = ((int32_t)rtp_pkt->header->seq_num - (int32_t)*seq_num);
+		tsk_bool_t is_frame_loss = (diff > 0);
+		is_restarted = (TSK_ABS(diff) > TDAV_VIDEO_JB_MAX_DROPOUT);
+		is_frame_late_or_dup = !is_frame_loss;
+		tdav_video_jb_reset_fps_prob(self);
+		TSK_DEBUG_INFO("Packet %s (from JB) [%hu - %hu]", is_frame_loss ? "loss" : "late/duplicated/nack", *seq_num, rtp_pkt->header->seq_num);
+
+		/* genuine loss: ask the session to NACK the missing range */
+		if(is_frame_loss && !is_restarted){
+			if(self->callback){
+				self->cb_data_any.type = tdav_video_jb_cb_data_type_fl;
+				self->cb_data_any.ssrc = rtp_pkt->header->ssrc;
+				self->cb_data_any.fl.seq_num = (*seq_num + 1);
+				self->cb_data_any.fl.count = diff - 1;
+				self->callback(&self->cb_data_any);
+			}
+		}
+	}
+
+	if(!old_frame){
+		tdav_video_frame_t* new_frame;
+		if(pt_matched){
+			// if we have a frame with the same payload type but without this timestamp this means that we moved to a new frame
+			// this happens if the frame is waiting to be decoded or the marker is lost
+		}
+		if((new_frame = tdav_video_frame_create(rtp_pkt))){
+			// compute avg frame duration
+			if(self->last_timestamp && self->last_timestamp < rtp_pkt->header->timestamp){
+				uint32_t duration = (rtp_pkt->header->timestamp - self->last_timestamp)/self->rate;
+				self->avg_duration = self->avg_duration ? ((self->avg_duration + duration) >> 1) : duration;
+				--self->fps_prob;
+			}
+			self->last_timestamp = rtp_pkt->header->timestamp;
+
+			tsk_list_lock(self->frames);
+			if(self->frames_count >= self->tail_max){
+				/* buffer full: drop the oldest frame, or flush everything if we
+				   have been dropping continuously for too long */
+				if(++self->conseq_frame_drop >= self->tail_max){
+					TSK_DEBUG_ERROR("Too many frames dropped and fps=%d", self->fps);
+					tsk_list_clear_items(self->frames);
+					self->conseq_frame_drop = 0;
+					self->frames_count = 1;
+					if(self->callback){
+						self->cb_data_any.type = tdav_video_jb_cb_data_type_tmfr;
+						self->cb_data_any.ssrc = rtp_pkt->header->ssrc;
+						self->callback(&self->cb_data_any);
+					}
+				}
+				else{
+					TSK_DEBUG_INFO("Dropping video frame because frames_count(%lld)>=tail_max(%d)", self->frames_count, self->tail_max);
+					tsk_list_remove_first_item(self->frames);
+				}
+				tdav_video_jb_reset_fps_prob(self);
+			}
+			else{
+				++self->frames_count;
+			}
+			tsk_list_push_ascending_data(self->frames, (void**)&new_frame);
+			tsk_list_unlock(self->frames);
+		}
+		if(self->fps_prob <= 0 && self->avg_duration){
+			// compute FPS using timestamp values
+			int32_t fps_new = (1000 / self->avg_duration);
+			int32_t fps_old = self->fps;
+			self->fps = TSK_CLAMP(TDAV_VIDEO_JB_FPS_MIN, fps_new, TDAV_VIDEO_JB_FPS_MAX);
+			self->tail_max = (self->fps << TDAV_VIDEO_JB_TAIL_MAX_LOG2); // maximum delay = 2 seconds
+			self->latency_max = self->fps; // maximum = 1 second
+			TSK_DEBUG_INFO("According to rtp-timestamps ...FPS = %d (clipped to %d) tail_max=%d, latency_max=%u", fps_new, self->fps, self->tail_max, (unsigned)self->latency_max);
+			tdav_video_jb_reset_fps_prob(self);
+			if(self->callback && (fps_old != self->fps)){
+				self->cb_data_any.type = tdav_video_jb_cb_data_type_fps_changed;
+				self->cb_data_any.ssrc = rtp_pkt->header->ssrc;
+				self->cb_data_any.fps.new = self->fps; // clipped value
+				self->cb_data_any.fps.old = fps_old;
+				self->callback(&self->cb_data_any);
+			}
+		}
+	}
+	else{
+		/* frame already known: just append the packet */
+		tdav_video_frame_put((tdav_video_frame_t*)old_frame, rtp_pkt);
+	}
+
+	tsk_safeobj_unlock(self);
+
+	/* advance the tracker only for in-order/loss packets (not late/duplicate),
+	   or unconditionally after a detected stream restart */
+	if(!is_frame_late_or_dup || is_restarted){
+		*seq_num = rtp_pkt->header->seq_num;
+	}
+#endif
+
+	return 0;
+}
+
+/* Stops the jitter buffer: wakes and joins the decode thread, discards all
+   pending frames and restores the default tuning for the next start(). */
+int tdav_video_jb_stop(tdav_video_jb_t* self)
+{
+	int ret;
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+	if(!self->started){
+		return 0;
+	}
+
+	TSK_DEBUG_INFO("tdav_video_jb_stop()");
+
+	self->started = tsk_false;
+
+	/* wake the decode thread out of its timed wait so it can observe "started == false" */
+	ret = tsk_condwait_broadcast(self->decode_thread_cond);
+
+	if (self->decode_thread[0]) {
+		ret = tsk_thread_join(&self->decode_thread[0]);
+	}
+
+	// clear pending frames
+	tsk_list_lock(self->frames);
+	tsk_list_clear_items(self->frames);
+	self->frames_count = 0;
+	tsk_list_unlock(self->frames);
+
+	// reset default values to make sure next start will be called with right defaults
+	// do not call this function in start to avoid overriding values defined between prepare() and start()
+	_tdav_video_jb_set_defaults(self);
+
+	return ret;
+}
+
+/* Applies the compile-time default tuning. Called from create() and again from
+   stop() (not from start(), see comment there) so each session begins clean. */
+static int _tdav_video_jb_set_defaults(struct tdav_video_jb_s* self)
+{
+	if (!self) {
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	self->fps = TDAV_VIDEO_JB_FPS;
+	self->fps_prob = TDAV_VIDEO_JB_FPS_PROB;
+	self->tail_max = TDAV_VIDEO_JB_TAIL_MAX;
+	self->avg_duration = 0;
+	self->rate = TDAV_VIDEO_JB_RATE;
+	self->conseq_frame_drop = 0;
+	self->frames_count = 0;
+	self->decode_last_timestamp = 0;
+	self->decode_last_seq_num_with_mark = -1; // -1 = unset
+	self->decode_last_time = 0;
+
+	self->latency_min = TDAV_VIDEO_JB_LATENCY_MIN;
+	self->latency_max = TDAV_VIDEO_JB_LATENCY_MAX;
+
+	return 0;
+}
+
+/* Looks up the buffered frame matching (payload type, RTP timestamp).
+   *pt_matched is set to true if ANY frame with the same payload type exists,
+   even when no timestamp matches (used by put() to detect frame transitions).
+   Returns a borrowed pointer or tsk_null.
+   NOTE(review): "self" and "pt_matched" are dereferenced without null checks;
+   the only caller (jb_put) guarantees both -- keep in mind if reused. */
+static const tdav_video_frame_t* _tdav_video_jb_get_frame(tdav_video_jb_t* self, uint32_t timestamp, uint8_t pt, tsk_bool_t *pt_matched)
+{
+	const tdav_video_frame_t* ret = tsk_null;
+	const tsk_list_item_t *item;
+
+	*pt_matched = tsk_false;
+
+	tsk_list_lock(self->frames);
+	tsk_list_foreach(item, self->frames){
+		if(TDAV_VIDEO_FRAME(item->data)->payload_type == pt){
+			if(!(*pt_matched)) *pt_matched = tsk_true;
+			if(TDAV_VIDEO_FRAME(item->data)->timestamp == timestamp){
+				ret = item->data;
+				break;
+			}
+		}
+
+	}
+	tsk_list_unlock(self->frames);
+
+	return ret;
+}
+
+/* Decode pacing thread. Loops while the jitter buffer is started:
+   - sleeps (condwait, so stop() can interrupt) for roughly one frame period;
+   - once at least latency_min frames are buffered (or latency_max ticks passed
+     with pending frames), checks the oldest frame for completeness;
+   - if packets are missing and we can still afford to wait, requests a NACK
+     (once per distinct gap) and postpones; otherwise pops the frame and hands
+     its packets to the consumer callback in seq-num order;
+   - recomputes the next sleep so the output rate tracks jb->fps, draining with
+     zero delay when the backlog exceeds latency_max. */
+static void* TSK_STDCALL _tdav_video_jb_decode_thread_func(void *arg)
+{
+	tdav_video_jb_t* jb = (tdav_video_jb_t*)arg;
+	//uint64_t delay;
+	int32_t missing_seq_num_start = 0, prev_missing_seq_num_start = 0;
+	int32_t missing_seq_num_count = 0, prev_lasted_missing_seq_num_count = 0;
+	const tdav_video_frame_t* frame;
+	tsk_list_item_t* item;
+	uint64_t next_decode_duration = 0, now, _now, latency = 0;
+	//uint64_t x_decode_duration = (1000 / jb->fps); // expected
+	//uint64_t x_decode_time = tsk_time_now();//expected
+	tsk_bool_t postpone, cleaning_delay = tsk_false;
+#if 0
+	static const uint64_t __toomuch_delay_to_be_valid = 10000; // guard against systems with buggy "tsk_time_now()" -Won't say Windows ...but :)-
+#endif
+
+	jb->decode_last_seq_num_with_mark = -1; // -1 -> unset
+	jb->decode_last_time = tsk_time_now();
+
+	(void)(now);
+	//(void)(delay);
+
+	TSK_DEBUG_INFO("Video jitter buffer thread - ENTER");
+
+	while(jb->started){
+		now = tsk_time_now();
+		if (next_decode_duration > 0) {
+			/* condwait (not sleep) so tdav_video_jb_stop() can wake us immediately */
+			tsk_condwait_timedwait(jb->decode_thread_cond, next_decode_duration);
+		}
+
+		if(!jb->started){
+			break;
+		}
+
+		// TSK_DEBUG_INFO("Frames count = %d", jb->frames_count);
+
+		// the second condition (jb->frames_count > 0 && latency >= jb->latency_max) is required to make sure we'll process the pending pkts even if the remote party stops sending frames. GE issue: device stops sending frames when it enters in "frame freeze" mode which means #"latency_min" frames won't be displayed.
+		if (jb->frames_count >= (int64_t)jb->latency_min || (jb->frames_count > 0 && latency >= jb->latency_max)) {
+			item = tsk_null;
+			postpone = tsk_false;
+			latency = 0;
+
+			tsk_safeobj_lock(jb); // against get_frame()
+			tsk_list_lock(jb->frames); // against put()
+
+			// is it still acceptable to wait for missing packets?
+			if (jb->frames_count < (int64_t)jb->latency_max) {
+				frame = (const tdav_video_frame_t*)jb->frames->head->data;
+				if (!tdav_video_frame_is_complete(frame, jb->decode_last_seq_num_with_mark, &missing_seq_num_start, &missing_seq_num_count)) {
+					TSK_DEBUG_INFO("Time to decode frame...but some RTP packets are missing (missing_seq_num_start=%d, missing_seq_num_count=%d, last_seq_num_with_mark=%d). Postpone :(", missing_seq_num_start, missing_seq_num_count, jb->decode_last_seq_num_with_mark);
+					// signal to the session that a sequence number is missing (will send a NACK)
+					// the missing seqnum has been already requested in jb_put() and here we request it again only ONE time
+					if (jb->callback && frame) {
+						if(prev_missing_seq_num_start != missing_seq_num_start || prev_lasted_missing_seq_num_count != missing_seq_num_count){ // guard to request it only once
+							jb->cb_data_any.type = tdav_video_jb_cb_data_type_fl;
+							jb->cb_data_any.ssrc = frame->ssrc;
+							jb->cb_data_any.fl.seq_num = prev_missing_seq_num_start = missing_seq_num_start;
+							jb->cb_data_any.fl.count = prev_lasted_missing_seq_num_count = missing_seq_num_count;
+							jb->callback(&jb->cb_data_any);
+						}
+						postpone = tsk_true;
+					}
+				}
+			}
+			else {
+				TSK_DEBUG_INFO("frames_count(%lld)>=latency_max(%u)...decoding video frame even if pkts are missing :(", jb->frames_count, (unsigned)jb->latency_max);
+				jb->decode_last_seq_num_with_mark = -1; // unset()
+				// postpone is equal to "tsk_false" which means the pending frame will be displayed in all cases
+			}
+			if (!postpone) {
+				item = tsk_list_pop_first_item(jb->frames);
+				--jb->frames_count;
+			}
+			tsk_list_unlock(jb->frames);
+			tsk_safeobj_unlock(jb);
+
+			if (item) {
+				jb->decode_last_timestamp = ((const tdav_video_frame_t*)item->data)->timestamp;
+				if(jb->callback){
+					trtp_rtp_packet_t* pkt;
+					const tsk_list_item_t* _item = item; // save memory address as "tsk_list_foreach() will change it for each loop"
+					int32_t last_seq_num = -1; // guard against duplicated packets
+					frame = _item->data;
+					tsk_list_foreach(_item, frame->pkts){
+						if(!(pkt = _item->data) || !pkt->payload.size || !pkt->header || pkt->header->seq_num == last_seq_num || !jb->started){
+							TSK_DEBUG_ERROR("Skipping invalid rtp packet (do not decode!)");
+							continue;
+						}
+						jb->cb_data_rtp.rtp.pkt = pkt;
+						jb->callback(&jb->cb_data_rtp);
+						if(pkt->header->marker){
+							jb->decode_last_seq_num_with_mark = pkt->header->seq_num;
+						}
+					}
+				}
+
+				TSK_OBJECT_SAFE_FREE(item); // frees the frame and un-refs its packets
+			}
+		}
+		else {
+			if (jb->frames_count > 0) { // there are pending frames but we cannot display them yet -> increase latency
+				latency++;
+			}
+		}
+
+#if 1
+		/* backlog too large: spin with zero sleep until half the backlog is drained */
+		if (cleaning_delay || jb->frames_count > (int64_t)jb->latency_max){
+			//x_decode_time = now;
+			next_decode_duration = 0;
+			cleaning_delay = ((jb->frames_count << 1) > (int64_t)jb->latency_max); // cleanup up2 half
+		}
+		else{
+			/* nominal pacing: one frame period minus the time spent this iteration */
+			next_decode_duration = (1000 / jb->fps);
+			_now = tsk_time_now();
+			if (_now > now) {
+				if ((_now - now) > next_decode_duration){
+					next_decode_duration = 0;
+				}
+				else {
+					next_decode_duration -= (_now - now);
+				}
+			}
+			//delay = ( (now > x_decode_time) ? (now - x_decode_time) : (x_decode_duration >> 1)/* do not use zero to avoid endless loop when there is no frame to display */ );
+			//next_decode_duration = (delay > x_decode_duration) ? 0 : (x_decode_duration - delay);
+			//x_decode_duration = (1000 / jb->fps);
+			//x_decode_time += x_decode_duration;
+		}
+		//delay = /*(now - x_decode_time);*/(now > x_decode_time) ? (now - x_decode_time) : ( (jb->frames_count >= jb->latency_max) ? 0 : (x_decode_duration >> 1) )/* do not use zero to avoid endless loop when there is no frame to display */;
+		// delay = (jb->frames_count > jb->latency_max) ? 0 : ( (now > x_decode_time) ? (now - x_decode_time) : (x_decode_duration >> 1)/* do not use zero to avoid endless loop when there is no frame to display */ );
+		// comparison used as guard against time wrapping
+		/*if(delay > __toomuch_delay_to_be_valid){
+			TSK_DEBUG_INFO("Too much delay (%llu) in video jb. Reseting...", delay);
+			x_decode_time = now;
+			next_decode_duration = 0;
+		}
+		else*/{
+			//next_decode_duration = (delay > x_decode_duration) ? 0 : (x_decode_duration - delay);
+			//x_decode_duration = (1000 / jb->fps);
+			//x_decode_time += x_decode_duration;
+		}
+
+
+		//TSK_DEBUG_INFO("next_decode_timeout=%llu, delay = %llu", next_decode_duration, delay);
+#else
+		next_decode_duration = (1000 / jb->fps);
+#endif
+	}
+
+	TSK_DEBUG_INFO("Video jitter buffer thread - EXIT");
+
+	return tsk_null;
+}
diff --git a/tinyDAV/src/video/mf/tdav_consumer_video_mf.cxx b/tinyDAV/src/video/mf/tdav_consumer_video_mf.cxx
new file mode 100644
index 0000000..b5048b2
--- /dev/null
+++ b/tinyDAV/src/video/mf/tdav_consumer_video_mf.cxx
@@ -0,0 +1,185 @@
+/*Copyright (C) 2013 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_consumer_video_mf.cxx
+ * @brief Microsoft Media Foundation video consumer (source).
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/ms694197(v=vs.85).aspx
+ */
+#include "tinydav/video/mf/tdav_consumer_video_mf.h"
+
+#if HAVE_MF
+
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+/* Consumer context: currently nothing beyond the base consumer (stub plugin). */
+typedef struct tdav_consumer_video_mf_s
+{
+	TMEDIA_DECLARE_CONSUMER;
+}
+tdav_consumer_video_mf_t;
+
+/* ============ Media Producer Interface ================= */
+int tdav_consumer_video_mf_set(tmedia_consumer_t *self, const tmedia_param_t* param)
+{
+ int ret = 0;
+
+ if(!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return ret;
+}
+
+
+/* Copies the negotiated codec's input fps/size into the consumer and falls
+   back to the input size for the display size when none was forced. */
+int tdav_consumer_video_mf_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+	tdav_consumer_video_mf_t* consumer = (tdav_consumer_video_mf_t*)self;
+
+	if(!consumer || !codec || !codec->plugin){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+#if 0
+	TMEDIA_CONSUMER(consumer)->decoder.codec_id = (tmedia_codec_id_t)(tmedia_codec_id_h264_bp | tmedia_codec_id_h264_mp);
+#endif
+
+	TMEDIA_CONSUMER(consumer)->video.fps = TMEDIA_CODEC_VIDEO(codec)->in.fps;
+	TMEDIA_CONSUMER(consumer)->video.in.width = TMEDIA_CODEC_VIDEO(codec)->in.width;
+	TMEDIA_CONSUMER(consumer)->video.in.height = TMEDIA_CODEC_VIDEO(codec)->in.height;
+
+	if(!TMEDIA_CONSUMER(consumer)->video.display.width){
+		TMEDIA_CONSUMER(consumer)->video.display.width = TMEDIA_CONSUMER(consumer)->video.in.width;
+	}
+	if(!TMEDIA_CONSUMER(consumer)->video.display.height){
+		TMEDIA_CONSUMER(consumer)->video.display.height = TMEDIA_CONSUMER(consumer)->video.in.height;
+	}
+
+	return 0;
+}
+
+/* start: stub -- no rendering pipeline is created yet. */
+int tdav_consumer_video_mf_start(tmedia_consumer_t* self)
+{
+	tdav_consumer_video_mf_t* consumer = (tdav_consumer_video_mf_t*)self;
+
+	if(!consumer){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* consume: stub -- decoded frames are silently discarded. */
+int tdav_consumer_video_mf_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+	tdav_consumer_video_mf_t* consumer = (tdav_consumer_video_mf_t*)self;
+
+	return 0;
+}
+
+/* pause: stub. */
+int tdav_consumer_video_mf_pause(tmedia_consumer_t* self)
+{
+	tdav_consumer_video_mf_t* consumer = (tdav_consumer_video_mf_t*)self;
+
+	if(!consumer){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* stop: stub (also invoked unconditionally from the destructor). */
+int tdav_consumer_video_mf_stop(tmedia_consumer_t* self)
+{
+	tdav_consumer_video_mf_t* consumer = (tdav_consumer_video_mf_t*)self;
+
+	TSK_DEBUG_INFO("tdav_consumer_video_mf_stop");
+
+	if(!self){
+		TSK_DEBUG_ERROR("Invalid parameter");
+		return -1;
+	}
+
+	return 0;
+}
+
+
+//
+// Windows Media Foundation video consumer object definition
+//
+/* constructor */
+/* constructor: base init plus CIF/15fps defaults; yuv420p chroma is requested
+   so no conversion is needed before display */
+static tsk_object_t* tdav_consumer_video_mf_ctor(tsk_object_t * self, va_list * app)
+{
+	tdav_consumer_video_mf_t *consumer = (tdav_consumer_video_mf_t *)self;
+	if(consumer){
+		/* init base */
+		tmedia_consumer_init(TMEDIA_CONSUMER(consumer));
+		TMEDIA_CONSUMER(consumer)->video.display.chroma = tmedia_chroma_yuv420p; // To avoid chroma conversion
+
+		/* init self: defaults overwritten by prepare() once the codec is known */
+		TMEDIA_CONSUMER(consumer)->video.fps = 15;
+		TMEDIA_CONSUMER(consumer)->video.display.width = 352;
+		TMEDIA_CONSUMER(consumer)->video.display.height = 288;
+		TMEDIA_CONSUMER(consumer)->video.display.auto_resize = tsk_true;
+	}
+	return self;
+}
+/* destructor */
+/* destructor: stop() is called unconditionally (it is a stub and safe to call
+   even if the consumer was never started) */
+static tsk_object_t* tdav_consumer_video_mf_dtor(tsk_object_t * self)
+{
+	tdav_consumer_video_mf_t *consumer = (tdav_consumer_video_mf_t *)self;
+	if(consumer){
+
+		/* stop */
+		//if(consumer->started){
+		tdav_consumer_video_mf_stop((tmedia_consumer_t*)self);
+		//}
+
+		/* deinit base */
+		tmedia_consumer_deinit(TMEDIA_CONSUMER(consumer));
+		/* deinit self */
+	}
+
+	return self;
+}
+/* tinySAK object definition */
+static const tsk_object_def_t tdav_consumer_video_mf_def_s =
+{
+	sizeof(tdav_consumer_video_mf_t),
+	tdav_consumer_video_mf_ctor,
+	tdav_consumer_video_mf_dtor,
+	tsk_null,
+};
+/* Consumer plugin definition: slot order is set/prepare/start/consume/pause/stop */
+static const tmedia_consumer_plugin_def_t tdav_consumer_video_mf_plugin_def_s =
+{
+	&tdav_consumer_video_mf_def_s,
+
+	tmedia_video,
+	"Microsoft Windows Media Foundation consumer (Video)",
+
+	tdav_consumer_video_mf_set,
+	tdav_consumer_video_mf_prepare,
+	tdav_consumer_video_mf_start,
+	tdav_consumer_video_mf_consume,
+	tdav_consumer_video_mf_pause,
+	tdav_consumer_video_mf_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_video_mf_plugin_def_t = &tdav_consumer_video_mf_plugin_def_s;
+
+#endif /*HAVE_MF */
diff --git a/tinyDAV/src/video/mf/tdav_producer_video_mf.cxx b/tinyDAV/src/video/mf/tdav_producer_video_mf.cxx
new file mode 100644
index 0000000..be899a0
--- /dev/null
+++ b/tinyDAV/src/video/mf/tdav_producer_video_mf.cxx
@@ -0,0 +1,855 @@
+/*Copyright (C) 2013 Doubango Telecom <http://www.doubango.org>
+* Copyright (C) Microsoft Corporation. All rights reserved.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_producer_video_mf.cxx
+ * @brief Microsoft Media Foundation video producer (source).
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/ms694197(v=vs.85).aspx
+ */
+#include "tinydav/video/mf/tdav_producer_video_mf.h"
+
+#if HAVE_MF
+
+#include "tsk_semaphore.h"
+#include "tsk_thread.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#undef _WIN32_WINNT /* project-level value is 0x0501 (XP) */
+#define _WIN32_WINNT _WIN32_WINNT_WIN7 /* must be re-defined before including the Media Foundation headers */
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "mfplat.lib")
+# pragma comment(lib, "mf.lib")
+# pragma comment(lib, "mfuuid.lib")
+# pragma comment(lib, "shlwapi.lib")
+#endif
+
+#include <new>
+#include <mfapi.h>
+#include <mfidl.h>
+#include <Mferror.h>
+#include <shlwapi.h>
+
+#undef SafeRelease
+#define SafeRelease(ppT) \
+{ \
+ if (*ppT) \
+ { \
+ (*ppT)->Release(); \
+ *ppT = NULL; \
+ } \
+}
+
+#undef CHECK_HR
+#define CHECK_HR(x) if (FAILED(x)) { TSK_DEBUG_ERROR("Operation Failed"); goto bail; }
+
+
+typedef struct VideoSubTypeGuidPair
+{
+ tmedia_chroma_t chroma;
+ const GUID fourcc;
+}
+VideoSubTypeGuidPair;
+static const VideoSubTypeGuidPair g_VideoSubTypeGuidPairs[] =
+{
+
+ { tmedia_chroma_nv12, MFVideoFormat_NV12 }, // 0
+ { tmedia_chroma_rgb24, MFVideoFormat_RGB24 },
+ { tmedia_chroma_rgb32, MFVideoFormat_RGB32 },
+ // to be continued
+};
+
+// MFVideoFormat_NV12, MFVideoFormat_YUY2, MFVideoFormat_UYVY,
+// MFVideoFormat_RGB32, MFVideoFormat_RGB24, MFVideoFormat_IYUV
+#undef DEFAULT_SUBTYPE_INDEX
+#define DEFAULT_SUBTYPE_INDEX 0
+
+//
+// DeviceList [Declaration]
+//
+// Holds the list of video-capture device activation objects returned by
+// MFEnumDeviceSources(). Owns the IMFActivate references until Clear().
+class DeviceList
+{
+ UINT32 m_cDevices; // number of entries in m_ppDevices
+ IMFActivate **m_ppDevices; // CoTaskMemAlloc'ed array owned by this object
+
+public:
+ DeviceList() : m_ppDevices(NULL), m_cDevices(0)
+ {
+
+ }
+ ~DeviceList()
+ {
+ Clear();
+ }
+
+ UINT32 Count() const { return m_cDevices; }
+
+ void Clear();
+ HRESULT EnumerateDevices();
+ // The Get* methods AddRef the returned activation object; callers must Release it.
+ HRESULT GetDeviceAtIndex(UINT32 index, IMFActivate **ppActivate);
+ HRESULT GetDeviceBest(IMFActivate **ppActivate);
+ HRESULT GetDeviceName(UINT32 index, WCHAR **ppszName);
+};
+
+
+//
+// SampleGrabberCB [Declaration]
+// http://msdn.microsoft.com/en-us/library/windows/desktop/hh184779(v=vs.85).aspx
+//
+// Sample-grabber sink callback: receives raw video samples from the media
+// session and forwards them to the wrapped tinyDAV producer (see
+// OnProcessSample). Reference-counted COM object; create via CreateInstance().
+class SampleGrabberCB : public IMFSampleGrabberSinkCallback
+{
+ long m_cRef; // COM reference count (starts at 1)
+ const struct tdav_producer_video_mf_s* m_pWrappedProducer; // not owned; must outlive this callback
+
+ SampleGrabberCB(const struct tdav_producer_video_mf_s* pcWrappedProducer) : m_cRef(1), m_pWrappedProducer(pcWrappedProducer) {}
+
+public:
+ static HRESULT CreateInstance(const struct tdav_producer_video_mf_s* pcWrappedProducer, SampleGrabberCB **ppCB);
+
+ // IUnknown methods
+ STDMETHODIMP QueryInterface(REFIID iid, void** ppv);
+ STDMETHODIMP_(ULONG) AddRef();
+ STDMETHODIMP_(ULONG) Release();
+
+ // IMFClockStateSink methods (no-ops in this implementation)
+ STDMETHODIMP OnClockStart(MFTIME hnsSystemTime, LONGLONG llClockStartOffset);
+ STDMETHODIMP OnClockStop(MFTIME hnsSystemTime);
+ STDMETHODIMP OnClockPause(MFTIME hnsSystemTime);
+ STDMETHODIMP OnClockRestart(MFTIME hnsSystemTime);
+ STDMETHODIMP OnClockSetRate(MFTIME hnsSystemTime, float flRate);
+
+ // IMFSampleGrabberSinkCallback methods
+ STDMETHODIMP OnSetPresentationClock(IMFPresentationClock* pClock);
+ STDMETHODIMP OnProcessSample(REFGUID guidMajorMediaType, DWORD dwSampleFlags,
+ LONGLONG llSampleTime, LONGLONG llSampleDuration, const BYTE * pSampleBuffer,
+ DWORD dwSampleSize);
+ STDMETHODIMP OnShutdown();
+};
+
+
+//
+// tdav_producer_video_mf_t
+//
+// Media Foundation video producer context. All COM members are created in
+// prepare() and released in the destructor.
+typedef struct tdav_producer_video_mf_s
+{
+ TMEDIA_DECLARE_PRODUCER;
+
+ bool bStarted; // also read by RunSessionThread as a stop flag
+ tsk_thread_handle_t* ppTread[1]; // session event-pump thread (sic: "Tread")
+
+ DeviceList* pDeviceList; // enumerated capture devices (heap-allocated)
+
+ IMFMediaSession *pSession;
+ IMFMediaSource *pSource; // activated capture device
+ SampleGrabberCB *pCallback; // receives samples, forwards to enc_cb
+ IMFActivate *pSinkActivate; // sample-grabber sink activation
+ IMFTopology *pTopology; // source -> grabber-sink topology
+ IMFMediaType *pType; // media type requested from the grabber
+}
+tdav_producer_video_mf_t;
+
+//
+// Forward declarations for global functions
+//
+static HRESULT CreateTopology(
+ IMFMediaSource *pSource,
+ IMFActivate *pSinkActivate,
+ IMFTopology **ppTopo);
+static HRESULT AddSourceNode(
+ IMFTopology *pTopology,
+ IMFMediaSource *pSource,
+ IMFPresentationDescriptor *pPD,
+ IMFStreamDescriptor *pSD,
+ IMFTopologyNode **ppNode);
+static HRESULT AddOutputNode(
+ IMFTopology *pTopology,
+ IMFActivate *pActivate,
+ DWORD dwId,
+ IMFTopologyNode **ppNode);
+static HRESULT RunSession(
+ IMFMediaSession *pSession,
+ IMFTopology *pTopology);
+static HRESULT StopSession(
+ IMFMediaSession *pSession,
+ IMFMediaSource *pSource);
+static HRESULT PauseSession(
+ IMFMediaSession *pSession);
+static void* TSK_STDCALL RunSessionThread(void *pArg);
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_video_mf_set(tmedia_producer_t *self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_producer_video_mf_t* pSelf = (tdav_producer_video_mf_t*)self;
+
+ if(!pSelf || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return ret;
+}
+
+/* Prepares the producer from the negotiated codec: enumerates capture
+ * devices, activates the best one as an IMFMediaSource, builds the
+ * sample-grabber media type/sink, the media session and the topology.
+ * Returns 0 on success, -1 on failure. A missing camera is NOT an error
+ * (one-way video is a valid use-case). */
+static int tdav_producer_video_mf_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_video_mf_t* pSelf = (tdav_producer_video_mf_t*)self;
+
+ if(!pSelf || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+#if 0
+ TMEDIA_PRODUCER(producer)->encoder.codec_id = (tmedia_codec_id_t)(tmedia_codec_id_h264_bp | tmedia_codec_id_h264_mp);
+#else
+ // Raw capture: advertise the chroma matching the sample-grabber subtype below.
+ TMEDIA_PRODUCER(pSelf)->video.chroma = g_VideoSubTypeGuidPairs[DEFAULT_SUBTYPE_INDEX].chroma;
+#endif
+
+ // Adopt the codec's negotiated output parameters.
+ TMEDIA_PRODUCER(pSelf)->video.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps;
+ TMEDIA_PRODUCER(pSelf)->video.width = TMEDIA_CODEC_VIDEO(codec)->out.width;
+ TMEDIA_PRODUCER(pSelf)->video.height = TMEDIA_CODEC_VIDEO(codec)->out.height;
+
+ // MFVideoFormat_NV12, MFVideoFormat_YUY2, MFVideoFormat_UYVY,
+// MFVideoFormat_RGB32, MFVideoFormat_RGB24, MFVideoFormat_IYUV
+#undef DEFAULT_SUBTYPE
+
+ HRESULT hr = S_OK;
+
+ // create device list object
+ if(!pSelf->pDeviceList && !(pSelf->pDeviceList = new DeviceList())){
+ TSK_DEBUG_ERROR("Failed to create device list");
+ hr = E_OUTOFMEMORY;
+ goto bail;
+ }
+ // enumerate devices
+ hr = pSelf->pDeviceList->EnumerateDevices();
+ if(!SUCCEEDED(hr)){
+ goto bail;
+ }
+
+ // check if we have at least one MF video source connected to the PC
+ if(pSelf->pDeviceList->Count() == 0){
+ TSK_DEBUG_WARN("No MF video source could be found...no video will be sent");
+ // do not break the negotiation as one-way video connection is a valid use-case
+ }
+ else{
+ IMFActivate* pActivate = NULL;
+ // Get best MF video source
+ hr = pSelf->pDeviceList->GetDeviceBest(&pActivate);
+ if(!SUCCEEDED(hr) || !pActivate){
+ TSK_DEBUG_ERROR("Failed to get best MF video source");
+ if(!pActivate){
+ hr = E_OUTOFMEMORY;
+ }
+ goto bail;
+ }
+
+ // Create the media source for the device.
+ hr = pActivate->ActivateObject(
+ __uuidof(IMFMediaSource),
+ (void**)&pSelf->pSource
+ );
+ SafeRelease(&pActivate);
+ if(!SUCCEEDED(hr)){
+ TSK_DEBUG_ERROR("ActivateObject(MF video source) failed");
+ goto bail;
+ }
+
+ // Configure the media type that the Sample Grabber will receive.
+ // Setting the major and subtype is usually enough for the topology loader
+ // to resolve the topology.
+
+ CHECK_HR(hr = MFCreateMediaType(&pSelf->pType));
+ CHECK_HR(hr = pSelf->pType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video));
+#if 0
+ // NOTE(review): this disabled line is missing a closing parenthesis and
+ // would not compile if the branch were ever enabled.
+ CHECK_HR(hr = pSelf->pType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
+#else
+ CHECK_HR(hr = pSelf->pType->SetGUID(MF_MT_SUBTYPE, g_VideoSubTypeGuidPairs[DEFAULT_SUBTYPE_INDEX].fourcc));
+#endif
+ CHECK_HR(hr = MFSetAttributeSize(pSelf->pType, MF_MT_FRAME_SIZE, TMEDIA_PRODUCER(pSelf)->video.width, TMEDIA_PRODUCER(pSelf)->video.height));
+ // CHECK_HR(hr = pSelf->pType->SetUINT32(MF_MT_DEFAULT_STRIDE, 1280));
+ CHECK_HR(hr = pSelf->pType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1));
+ CHECK_HR(hr = pSelf->pType->SetUINT32(MF_MT_FIXED_SIZE_SAMPLES, 1));
+ // CHECK_HR(hr = pSelf->pType->SetUINT32(MF_MT_AVG_BITRATE, 147456000));
+ //CHECK_HR(hr = MFSetAttributeRatio(pSelf->pType, MF_MT_FRAME_RATE, (UINT32)30000, 1001));
+ CHECK_HR(hr = MFSetAttributeRatio(pSelf->pType, MF_MT_PIXEL_ASPECT_RATIO, 1, 1));
+ CHECK_HR(hr = pSelf->pType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive));
+
+ // Create the sample grabber sink.
+ CHECK_HR(hr = SampleGrabberCB::CreateInstance(pSelf, &pSelf->pCallback));
+ CHECK_HR(hr = MFCreateSampleGrabberSinkActivate(pSelf->pType, pSelf->pCallback, &pSelf->pSinkActivate));
+
+ // To run as fast as possible, set this attribute (requires Windows 7):
+ CHECK_HR(hr = pSelf->pSinkActivate->SetUINT32(MF_SAMPLEGRABBERSINK_IGNORE_CLOCK, TRUE));
+
+ // Create the Media Session.
+ CHECK_HR(hr = MFCreateMediaSession(NULL, &pSelf->pSession));
+
+ // Create the topology.
+ CHECK_HR(hr = CreateTopology(pSelf->pSource, pSelf->pSinkActivate, &pSelf->pTopology));
+ }
+
+bail:
+ return SUCCEEDED(hr) ? 0 : -1;
+}
+
+/* Starts the producer: runs the media session (built in prepare()) and
+ * spawns the event-pump thread (RunSessionThread). Idempotent: returns 0
+ * immediately if already started. */
+static int tdav_producer_video_mf_start(tmedia_producer_t* self)
+{
+ tdav_producer_video_mf_t* pSelf = (tdav_producer_video_mf_t*)self;
+
+ if(!pSelf){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(pSelf->bStarted){
+ return 0;
+ }
+
+ HRESULT hr = S_OK;
+
+ // Run the media session.
+ CHECK_HR(hr = RunSession(pSelf->pSession, pSelf->pTopology));
+
+ // Start asynchronous watcher thread.
+ // bStarted is set BEFORE the thread is created because the thread's loop
+ // condition reads it; it is rolled back on failure.
+ pSelf->bStarted = true;
+ int ret = tsk_thread_create(&pSelf->ppTread[0], RunSessionThread, pSelf);
+ if(ret != 0){
+ TSK_DEBUG_ERROR("Failed to create thread");
+ hr = E_FAIL;
+ pSelf->bStarted = false;
+ StopSession(pSelf->pSession, pSelf->pSource);
+ goto bail;
+ }
+
+bail:
+ return SUCCEEDED(hr) ? 0 : -1;
+}
+
+/* Pauses the media session.
+ * Returns 0 on success, -1 on failure. */
+static int tdav_producer_video_mf_pause(tmedia_producer_t* self)
+{
+ tdav_producer_video_mf_t* pSelf = (tdav_producer_video_mf_t*)self;
+
+ if(!pSelf){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // BUGFIX: the HRESULT was previously stored and then ignored (function
+ // always returned 0); propagate the failure to the caller instead.
+ HRESULT hr = PauseSession(pSelf->pSession);
+
+ return SUCCEEDED(hr) ? 0 : -1;
+}
+
+/* Stops the producer: clears the started flag, shuts the session down to
+ * wake the event-pump thread, joins it, then shuts the source down to
+ * release the camera. Always returns 0 (best-effort teardown). */
+static int tdav_producer_video_mf_stop(tmedia_producer_t* self)
+{
+ tdav_producer_video_mf_t* pSelf = (tdav_producer_video_mf_t*)self;
+
+ if(!pSelf){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+
+ // for the thread
+ pSelf->bStarted = false;
+ hr = StopSession(pSelf->pSession, NULL); // stop session to wakeup the asynchronous thread
+ if(pSelf->ppTread[0]){
+ tsk_thread_join(&pSelf->ppTread[0]);
+ }
+ hr = StopSession(NULL, pSelf->pSource); // stop source to release the camera
+
+ return 0;
+}
+
+//
+// Windows Media Foundation video producer object definition
+//
+/* Constructor: initializes the base producer and sets default video
+ * parameters (CIF @ 15 fps); overridden later by prepare(). */
+static tsk_object_t* tdav_producer_video_mf_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_video_mf_t *pSelf = (tdav_producer_video_mf_t *)self;
+ if(pSelf){
+ /* init base */
+ tmedia_producer_init(TMEDIA_PRODUCER(pSelf));
+#if 0 // H.264
+ TMEDIA_PRODUCER(pSelf)->video.chroma = tmedia_chroma_yuv420p; // To avoid chroma conversion
+#endif
+ /* init self with default values (CIF @ 15 fps) */
+ TMEDIA_PRODUCER(pSelf)->video.fps = 15;
+ TMEDIA_PRODUCER(pSelf)->video.width = 352;
+ TMEDIA_PRODUCER(pSelf)->video.height = 288;
+
+ // BUGFIX: removed a copy-pasted '#if TDAV_UNDER_WINDOWS_PHONE' block that
+ // assigned 'pSelf->videoCapturePhone' via C++/CX 'ref new'. The struct
+ // declared in this file has no such member, so that branch could never
+ // compile when TDAV_UNDER_WINDOWS_PHONE is defined.
+ }
+ return self;
+}
+/* Destructor: stops the producer, deinitializes the base class, then
+ * shuts down and releases all Media Foundation objects. Shutdown order
+ * (source before session) matches StopSession(). */
+static tsk_object_t* tdav_producer_video_mf_dtor(tsk_object_t * self)
+{
+ tdav_producer_video_mf_t *pSelf = (tdav_producer_video_mf_t *)self;
+ if(pSelf){
+ /* stop (safe even if never started) */
+ //if(pSelf->started){
+ tdav_producer_video_mf_stop((tmedia_producer_t*)self);
+ //}
+
+ /* deinit base */
+ tmedia_producer_deinit(TMEDIA_PRODUCER(pSelf));
+ /* deinit self */
+ if(pSelf->pDeviceList){
+ delete pSelf->pDeviceList, pSelf->pDeviceList = NULL;
+ }
+ if(pSelf->pSource){
+ pSelf->pSource->Shutdown();
+ }
+ if(pSelf->pSession){
+ pSelf->pSession->Shutdown();
+ }
+
+ /* release all COM references (SafeRelease is NULL-safe) */
+ SafeRelease(&pSelf->pSession);
+ SafeRelease(&pSelf->pSource);
+ SafeRelease(&pSelf->pCallback);
+ SafeRelease(&pSelf->pSinkActivate);
+ SafeRelease(&pSelf->pTopology);
+ SafeRelease(&pSelf->pType);
+ }
+
+ return self;
+}
+/* object definition (size, constructor, destructor, comparator) */
+static const tsk_object_def_t tdav_producer_video_mf_def_s =
+{
+ sizeof(tdav_producer_video_mf_t),
+ tdav_producer_video_mf_ctor,
+ tdav_producer_video_mf_dtor,
+ tsk_null, /* no comparator */
+};
+/* plugin definition: producer vtable (set/prepare/start/pause/stop) */
+static const tmedia_producer_plugin_def_t tdav_producer_video_mf_plugin_def_s =
+{
+ &tdav_producer_video_mf_def_s,
+
+ tmedia_video,
+ "Microsoft Windows Media Foundation producer (Video)",
+
+ tdav_producer_video_mf_set,
+ tdav_producer_video_mf_prepare,
+ tdav_producer_video_mf_start,
+ tdav_producer_video_mf_pause,
+ tdav_producer_video_mf_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_video_mf_plugin_def_t = &tdav_producer_video_mf_plugin_def_s;
+
+
+
+
+//
+// DeviceList [Implementation]
+//
+// Releases every enumerated device activation object, frees the device
+// array (allocated by MFEnumDeviceSources) and resets the count.
+void DeviceList::Clear()
+{
+ UINT32 i;
+ for (i = 0; i < m_cDevices; i++) {
+ if (m_ppDevices[i]) {
+ m_ppDevices[i]->Release();
+ m_ppDevices[i] = NULL;
+ }
+ }
+ CoTaskMemFree(m_ppDevices); // NULL-safe per the Win32 documentation
+ m_ppDevices = NULL;
+ m_cDevices = 0;
+}
+
+// Enumerates all video-capture devices (MF_DEVSOURCE_..._VIDCAP) into
+// m_ppDevices/m_cDevices. Any previous enumeration is released first.
+HRESULT DeviceList::EnumerateDevices()
+{
+ HRESULT hr = S_OK;
+ IMFAttributes *pAttributes = NULL;
+
+ Clear();
+
+ // Initialize an attribute store. We will use this to
+ // specify the enumeration parameters.
+
+ hr = MFCreateAttributes(&pAttributes, 1);
+
+ // Ask for source type = video capture devices
+ if (SUCCEEDED(hr))
+ {
+ hr = pAttributes->SetGUID(
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID
+ );
+ }
+
+ // Enumerate devices. On success the returned array is owned by this
+ // object and freed in Clear().
+ if (SUCCEEDED(hr))
+ {
+ hr = MFEnumDeviceSources(pAttributes, &m_ppDevices, &m_cDevices);
+ }
+
+ SafeRelease(&pAttributes);
+
+ return hr;
+}
+
+
+// Returns the activation object at 'index' with an added reference;
+// the caller must Release it. E_INVALIDARG if out of range.
+HRESULT DeviceList::GetDeviceAtIndex(UINT32 index, IMFActivate **ppActivate)
+{
+ if (index >= Count())
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppActivate = m_ppDevices[index];
+ (*ppActivate)->AddRef();
+
+ return S_OK;
+}
+
+// Returns the "best" device. No ranking is implemented yet.
+HRESULT DeviceList::GetDeviceBest(IMFActivate **ppActivate)
+{
+ // for now we just get the default video source device (index = 0)
+ return GetDeviceAtIndex(0, ppActivate);
+}
+
+// Returns the friendly name of the device at 'index'. Per the MF
+// documentation for GetAllocatedString, *ppszName is allocated for the
+// caller, who must free it with CoTaskMemFree. E_INVALIDARG if out of range.
+HRESULT DeviceList::GetDeviceName(UINT32 index, WCHAR **ppszName)
+{
+ if (index >= Count()) {
+ return E_INVALIDARG;
+ }
+ return m_ppDevices[index]->GetAllocatedString(
+ MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME,
+ ppszName,
+ NULL);
+}
+
+
+//
+// SampleGrabberCB [implementation]
+// http://msdn.microsoft.com/en-us/library/windows/desktop/hh184779(v=vs.85).aspx
+//
+
+// Create a new instance of the object.
+// Returns E_POINTER for a NULL output parameter and E_OUTOFMEMORY when
+// allocation fails; on success *ppCB holds a reference the caller owns.
+HRESULT SampleGrabberCB::CreateInstance(const struct tdav_producer_video_mf_s* pcWrappedProducer, SampleGrabberCB **ppCB)
+{
+ if (ppCB == NULL)
+ {
+ return E_POINTER;
+ }
+
+ *ppCB = new (std::nothrow) SampleGrabberCB(pcWrappedProducer);
+
+ // BUGFIX: the original tested 'ppCB == NULL' (the out-parameter itself)
+ // instead of '*ppCB == NULL', so an allocation failure was never detected.
+ if (*ppCB == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+}
+
+// Standard IUnknown implementation using shlwapi's QISearch table.
+STDMETHODIMP SampleGrabberCB::QueryInterface(REFIID riid, void** ppv)
+{
+ static const QITAB qit[] =
+ {
+ QITABENT(SampleGrabberCB, IMFSampleGrabberSinkCallback),
+ QITABENT(SampleGrabberCB, IMFClockStateSink),
+ { 0 }
+ };
+ return QISearch(this, qit, riid, ppv);
+}
+
+STDMETHODIMP_(ULONG) SampleGrabberCB::AddRef()
+{
+ return InterlockedIncrement(&m_cRef);
+}
+
+// Deletes the object when the last reference is dropped.
+STDMETHODIMP_(ULONG) SampleGrabberCB::Release()
+{
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+ if (cRef == 0)
+ {
+ delete this;
+ }
+ return cRef;
+
+}
+
+// IMFClockStateSink methods.
+
+// In this implementation the IMFClockStateSink methods do not perform any
+// actions. They could be used to track the state of the sample grabber sink.
+
+STDMETHODIMP SampleGrabberCB::OnClockStart(MFTIME hnsSystemTime, LONGLONG llClockStartOffset)
+{
+ return S_OK;
+}
+
+STDMETHODIMP SampleGrabberCB::OnClockStop(MFTIME hnsSystemTime)
+{
+ return S_OK;
+}
+
+STDMETHODIMP SampleGrabberCB::OnClockPause(MFTIME hnsSystemTime)
+{
+ return S_OK;
+}
+
+STDMETHODIMP SampleGrabberCB::OnClockRestart(MFTIME hnsSystemTime)
+{
+ return S_OK;
+}
+
+STDMETHODIMP SampleGrabberCB::OnClockSetRate(MFTIME hnsSystemTime, float flRate)
+{
+ return S_OK;
+}
+
+// IMFSampleGrabberSink methods.
+
+STDMETHODIMP SampleGrabberCB::OnSetPresentationClock(IMFPresentationClock* pClock)
+{
+ return S_OK;
+}
+
+// Forwards each captured sample's raw bytes to the wrapped producer's
+// encoder callback. NOTE(review): invoked by Media Foundation, presumably
+// on an MF worker thread — the callback is expected to be thread-safe;
+// confirm against the tinyMEDIA producer contract.
+STDMETHODIMP SampleGrabberCB::OnProcessSample(REFGUID guidMajorMediaType, DWORD dwSampleFlags,
+ LONGLONG llSampleTime, LONGLONG llSampleDuration, const BYTE * pSampleBuffer,
+ DWORD dwSampleSize)
+{
+ if(m_pWrappedProducer && TMEDIA_PRODUCER(m_pWrappedProducer)->enc_cb.callback)
+ {
+ TMEDIA_PRODUCER(m_pWrappedProducer)->enc_cb.callback(TMEDIA_PRODUCER(m_pWrappedProducer)->enc_cb.callback_data, pSampleBuffer, dwSampleSize);
+ }
+
+ return S_OK;
+}
+
+STDMETHODIMP SampleGrabberCB::OnShutdown()
+{
+ return S_OK;
+}
+
+
+//
+// Global functions
+//
+
+// Create the topology: connects the first selected VIDEO stream of the
+// source to the sample-grabber sink; all other streams are deselected.
+// On success *ppTopo receives an AddRef'ed topology; on failure *ppTopo
+// is left untouched and the HRESULT describes the error.
+static HRESULT CreateTopology(IMFMediaSource *pSource, IMFActivate *pSinkActivate, IMFTopology **ppTopo)
+{
+ IMFTopology *pTopology = NULL;
+ IMFPresentationDescriptor *pPD = NULL;
+ IMFStreamDescriptor *pSD = NULL;
+ IMFMediaTypeHandler *pHandler = NULL;
+ IMFTopologyNode *pNode1 = NULL;
+ IMFTopologyNode *pNode2 = NULL;
+
+ HRESULT hr = S_OK;
+ DWORD cStreams = 0;
+
+ CHECK_HR(hr = MFCreateTopology(&pTopology));
+ CHECK_HR(hr = pSource->CreatePresentationDescriptor(&pPD));
+ CHECK_HR(hr = pPD->GetStreamDescriptorCount(&cStreams));
+
+ for (DWORD i = 0; i < cStreams; i++)
+ {
+ // Look for the first selected video stream and connect it to the sink.
+
+ BOOL fSelected = FALSE;
+ GUID majorType;
+
+ CHECK_HR(hr = pPD->GetStreamDescriptorByIndex(i, &fSelected, &pSD));
+ CHECK_HR(hr = pSD->GetMediaTypeHandler(&pHandler));
+ CHECK_HR(hr = pHandler->GetMajorType(&majorType));
+
+ if (majorType == MFMediaType_Video && fSelected)
+ {
+ CHECK_HR(hr = AddSourceNode(pTopology, pSource, pPD, pSD, &pNode1));
+ CHECK_HR(hr = AddOutputNode(pTopology, pSinkActivate, 0, &pNode2));
+ CHECK_HR(hr = pNode1->ConnectOutput(0, pNode2, 0));
+ break;
+ }
+ else
+ {
+ CHECK_HR(hr = pPD->DeselectStream(i));
+ }
+ SafeRelease(&pSD);
+ SafeRelease(&pHandler);
+ }
+
+ // Transfer the topology to the caller (extra AddRef balances the
+ // unconditional SafeRelease below).
+ *ppTopo = pTopology;
+ (*ppTopo)->AddRef();
+
+bail:
+ SafeRelease(&pTopology);
+ SafeRelease(&pNode1);
+ SafeRelease(&pNode2);
+ SafeRelease(&pPD);
+ SafeRelease(&pSD);
+ SafeRelease(&pHandler);
+ return hr;
+}
+
+// Add a source node to a topology. On success *ppNode receives an
+// AddRef'ed node owned by the caller.
+static HRESULT AddSourceNode(
+ IMFTopology *pTopology, // Topology.
+ IMFMediaSource *pSource, // Media source.
+ IMFPresentationDescriptor *pPD, // Presentation descriptor.
+ IMFStreamDescriptor *pSD, // Stream descriptor.
+ IMFTopologyNode **ppNode) // Receives the node pointer.
+{
+ IMFTopologyNode *pNode = NULL;
+
+ HRESULT hr = S_OK;
+ CHECK_HR(hr = MFCreateTopologyNode(MF_TOPOLOGY_SOURCESTREAM_NODE, &pNode));
+ CHECK_HR(hr = pNode->SetUnknown(MF_TOPONODE_SOURCE, pSource));
+ CHECK_HR(hr = pNode->SetUnknown(MF_TOPONODE_PRESENTATION_DESCRIPTOR, pPD));
+ CHECK_HR(hr = pNode->SetUnknown(MF_TOPONODE_STREAM_DESCRIPTOR, pSD));
+ CHECK_HR(hr = pTopology->AddNode(pNode));
+
+ // Return the pointer to the caller (extra AddRef balances SafeRelease below).
+ *ppNode = pNode;
+ (*ppNode)->AddRef();
+
+bail:
+ SafeRelease(&pNode);
+ return hr;
+}
+
+// Add an output (sink) node to a topology. On success *ppNode receives an
+// AddRef'ed node owned by the caller.
+static HRESULT AddOutputNode(
+ IMFTopology *pTopology, // Topology.
+ IMFActivate *pActivate, // Media sink activation object.
+ DWORD dwId, // Identifier of the stream sink.
+ IMFTopologyNode **ppNode) // Receives the node pointer.
+{
+ IMFTopologyNode *pNode = NULL;
+
+ HRESULT hr = S_OK;
+ CHECK_HR(hr = MFCreateTopologyNode(MF_TOPOLOGY_OUTPUT_NODE, &pNode));
+ CHECK_HR(hr = pNode->SetObject(pActivate));
+ CHECK_HR(hr = pNode->SetUINT32(MF_TOPONODE_STREAMID, dwId));
+ CHECK_HR(hr = pNode->SetUINT32(MF_TOPONODE_NOSHUTDOWN_ON_REMOVE, FALSE));
+ CHECK_HR(hr = pTopology->AddNode(pNode));
+
+ // Return the pointer to the caller (extra AddRef balances SafeRelease below).
+ *ppNode = pNode;
+ (*ppNode)->AddRef();
+
+bail:
+ SafeRelease(&pNode);
+ return hr;
+}
+
+// Run session: sets the topology on the session, starts it, then peeks
+// (non-blocking) at the first queued event to surface an immediate failure.
+// MF_E_NO_EVENTS_AVAILABLE is treated as success.
+static HRESULT RunSession(IMFMediaSession *pSession, IMFTopology *pTopology)
+{
+ IMFMediaEvent *pEvent = NULL;
+
+ PROPVARIANT var;
+ PropVariantInit(&var);
+
+ MediaEventType met;
+ HRESULT hrStatus = S_OK;
+ HRESULT hr = S_OK;
+ CHECK_HR(hr = pSession->SetTopology(0, pTopology));
+ CHECK_HR(hr = pSession->Start(&GUID_NULL, &var));
+
+ // Check first event (do not block; the event pump thread handles the rest)
+ hr = pSession->GetEvent(MF_EVENT_FLAG_NO_WAIT, &pEvent);
+ if(hr == MF_E_NO_EVENTS_AVAILABLE){
+ hr = S_OK;
+ goto bail;
+ }
+ CHECK_HR(hr = pEvent->GetStatus(&hrStatus));
+ if (FAILED(hrStatus))
+ {
+ CHECK_HR(hr = pEvent->GetType(&met));
+ TSK_DEBUG_ERROR("Session error: 0x%x (event id: %d)\n", hrStatus, met);
+ hr = hrStatus;
+ goto bail;
+ }
+
+bail:
+ SafeRelease(&pEvent);
+ return hr;
+}
+
+// Stop session: shuts down the source and/or the session. Either argument
+// may be NULL to shut down only the other one (the producer's stop() uses
+// this to wake the event thread before releasing the camera).
+static HRESULT StopSession(
+ IMFMediaSession *pSession,
+ IMFMediaSource *pSource)
+{
+ // MUST be source then session
+ if(pSource){
+ pSource->Shutdown();
+ }
+ if(pSession){
+ pSession->Shutdown();
+ }
+ return S_OK;
+}
+
+// Pause session: thin wrapper around IMFMediaSession::Pause with a NULL guard.
+static HRESULT PauseSession(
+ IMFMediaSession *pSession)
+{
+ if(!pSession){
+ return E_INVALIDARG;
+ }
+ return pSession->Pause();
+}
+
+// Run session async thread: pumps media session events until the session
+// ends, an error is reported, or the producer clears bStarted (stop()
+// shuts the session down, which unblocks the blocking GetEvent call).
+static void* TSK_STDCALL RunSessionThread(void *pArg)
+{
+ tdav_producer_video_mf_t *pSelf = (tdav_producer_video_mf_t *)pArg;
+ HRESULT hrStatus = S_OK;
+ HRESULT hr = S_OK;
+ IMFMediaEvent *pEvent = NULL;
+ MediaEventType met;
+
+ TSK_DEBUG_INFO("RunSessionThread - ENTER");
+
+ while(pSelf->bStarted){
+ CHECK_HR(hr = pSelf->pSession->GetEvent(0, &pEvent));
+ CHECK_HR(hr = pEvent->GetStatus(&hrStatus));
+ CHECK_HR(hr = pEvent->GetType(&met));
+
+ if (FAILED(hrStatus))
+ {
+ TSK_DEBUG_ERROR("Session error: 0x%x (event id: %d)\n", hrStatus, met);
+ hr = hrStatus;
+ goto bail;
+ }
+ if (met == MESessionEnded)
+ {
+ break;
+ }
+ SafeRelease(&pEvent);
+ }
+
+bail:
+ // BUGFIX: the last event leaked when the loop exited via 'break'
+ // (MESessionEnded) or 'goto bail' (error), because the in-loop
+ // SafeRelease was skipped. SafeRelease is NULL-safe.
+ SafeRelease(&pEvent);
+
+ TSK_DEBUG_INFO("RunSessionThread - EXIT");
+
+ return NULL;
+}
+
+#endif /*HAVE_MF */
diff --git a/tinyDAV/src/video/tdav_consumer_video.c b/tinyDAV/src/video/tdav_consumer_video.c
new file mode 100644
index 0000000..b7adeca
--- /dev/null
+++ b/tinyDAV/src/video/tdav_consumer_video.c
@@ -0,0 +1,207 @@
+/*
+* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_video.c
+ * @brief Base class for all Video consumers.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
+ */
+#include "tinydav/video/tdav_consumer_video.h"
+#include "tinymedia/tmedia_jitterbuffer.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_debug.h"
+
+#define TDAV_VIDEO_DEFAULT_WIDTH 176
+#define TDAV_VIDEO_DEFAULT_HEIGHT 144
+#define TDAV_VIDEO_DEFAULT_FPS 15
+#define TDAV_VIDEO_DEFAULT_AUTORESIZE tsk_true
+
+/** Initialize video consumer: base init, default video parameters
+ * (QCIF @ 15 fps, updated later by prepare() from the codec), the video
+ * jitter buffer and the safe-object lock.
+ * Returns 0 on success, negative on failure.
+ * NOTE: jitter-buffer creation failure is tolerated (self->jitterbuffer
+ * stays NULL) — callers of the jitter-buffer helpers must handle that. */
+int tdav_consumer_video_init(tdav_consumer_video_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if((ret = tmedia_consumer_init(TMEDIA_CONSUMER(self)))){
+ return ret;
+ }
+
+ /* self (should be update by prepare() by using the codec's info)*/
+ TMEDIA_CONSUMER(self)->video.fps = TDAV_VIDEO_DEFAULT_FPS;
+ TMEDIA_CONSUMER(self)->video.display.width = TDAV_VIDEO_DEFAULT_WIDTH;
+ TMEDIA_CONSUMER(self)->video.display.height = TDAV_VIDEO_DEFAULT_HEIGHT;
+ TMEDIA_CONSUMER(self)->video.display.auto_resize = TDAV_VIDEO_DEFAULT_AUTORESIZE;
+
+ /* self:jitterbuffer (creation failure is not fatal) */
+ if(!self->jitterbuffer && !(self->jitterbuffer = tmedia_jitterbuffer_create(tmedia_video))){
+ // -- TSK_DEBUG_WARN("Failed to create video jitter buffer");
+ }
+ if(self->jitterbuffer){
+ tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+ }
+
+ tsk_safeobj_init(self);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two consumers (by pointer identity).
+* @param consumer1 The first consumer to compare.
+* @param consumer2 The second consumer to compare.
+* @retval Returns an integral value indicating the relationship between the two consumers:
+* <0 : @a consumer1 less than @a consumer2.<br>
+* 0 : @a consumer1 identical to @a consumer2.<br>
+* >0 : @a consumer1 greater than @a consumer2.<br>
+*/
+int tdav_consumer_video_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(consumer1, consumer2, &ret);
+ return ret;
+}
+
+/* Sets a consumer parameter. No parameter is currently handled; only the
+ * 'self' argument is validated. */
+int tdav_consumer_video_set(tdav_consumer_video_t* self, const tmedia_param_t* param)
+{
+ if(self){
+ return 0;
+ }
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+}
+
+/* Pushes an RTP video payload into the jitter buffer, lazily opening the
+ * buffer on first use with the 90 kHz RTP video clock.
+ * Returns 0 on success, negative on failure.
+ * NOTE(review): frame_duration divides by video.fps — assumes fps is
+ * non-zero (a default is set in init()); confirm no caller zeroes it. */
+int tdav_consumer_video_put(tdav_consumer_video_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ const trtp_rtp_header_t* rtp_hdr = TRTP_RTP_HEADER(proto_hdr);
+ int ret;
+
+ if(!self || !data || !self->jitterbuffer || !rtp_hdr){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+
+ if(!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ uint32_t frame_duration = (1000 / TMEDIA_CONSUMER(self)->video.fps);
+ static uint32_t rate = 90000; /* RTP video clock rate */
+ static uint32_t channels = 1;
+ if((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return ret;
+ }
+ }
+ ret = tmedia_jitterbuffer_put(TMEDIA_JITTER_BUFFER(self->jitterbuffer), (void*)data, data_size, proto_hdr);
+
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* get data from the jitter buffer (consumers should always have a ptime of 20ms) */
+/* Pulls decoded data from the jitter buffer into 'out_data', lazily opening
+ * the buffer on first use with the 90 kHz RTP video clock.
+ * Returns the number of bytes copied, or 0 on error. */
+tsk_size_t tdav_consumer_video_get(tdav_consumer_video_t* self, void* out_data, tsk_size_t out_size)
+{
+ tsk_size_t ret_size = 0;
+ // BUGFIX: the guard was '!self && self->jitterbuffer', which dereferences
+ // a NULL 'self' and never rejects a missing jitter buffer. It must be
+ // '!self || !self->jitterbuffer' (matching tdav_consumer_video_put()).
+ if(!self || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ tsk_safeobj_lock(self);
+
+ if(!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ int ret;
+ uint32_t frame_duration = (1000 / TMEDIA_CONSUMER(self)->video.fps);
+ static uint32_t rate = 90000; /* RTP video clock rate */
+ static uint32_t channels = 1; /* fixed typo: was 'channles' */
+ if((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return 0;
+ }
+ }
+ ret_size = tmedia_jitterbuffer_get(TMEDIA_JITTER_BUFFER(self->jitterbuffer), out_data, out_size);
+
+ tsk_safeobj_unlock(self);
+
+ return ret_size;
+}
+
+/* Advances the jitter buffer's clock. Returns the jitter buffer's tick
+ * result, or 0 on invalid parameter (kept for backward compatibility). */
+int tdav_consumer_video_tick(tdav_consumer_video_t* self)
+{
+ // BUGFIX: also guard 'self->jitterbuffer' — its creation may fail in
+ // tdav_consumer_video_init() and leave it NULL (see init), and the
+ // previous code would then pass NULL to tmedia_jitterbuffer_tick().
+ if(!self || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ return tmedia_jitterbuffer_tick(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+}
+
+/** Reset jitterbuffer (drops buffered data) under the consumer's lock.
+ * NOTE(review): self->jitterbuffer is not NULL-checked here although its
+ * creation may fail in init(); confirm tmedia_jitterbuffer_reset tolerates
+ * a NULL object, or add a guard as in tdav_consumer_video_put(). */
+int tdav_consumer_video_reset(tdav_consumer_video_t* self){
+ int ret;
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+ ret = tmedia_jitterbuffer_reset(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* tsk_safeobj_lock(self); */
+/* tsk_safeobj_unlock(self); */
+
+/** DeInitialize video consumer: base deinit (failure ignored, teardown is
+ * best-effort), release the jitter buffer and the safe-object lock. */
+int tdav_consumer_video_deinit(tdav_consumer_video_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base (do not abort on failure: continue releasing self) */
+ if((ret = tmedia_consumer_deinit(TMEDIA_CONSUMER(self)))){
+ /* return ret; */
+ }
+
+ /* self */
+ TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
+
+ tsk_safeobj_deinit(self);
+
+ return 0;
+} \ No newline at end of file
diff --git a/tinyDAV/src/video/tdav_converter_video.cxx b/tinyDAV/src/video/tdav_converter_video.cxx
new file mode 100644
index 0000000..2195d79
--- /dev/null
+++ b/tinyDAV/src/video/tdav_converter_video.cxx
@@ -0,0 +1,832 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+/**@file tdav_converter_video.cxx
+* @brief Video converter.
+*
+* @author Mamadou Diop <diopmamadou(at)doubango.org>
+* @author Alex Vishnev (Added support for rotation)
+*/
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+// FIXME: FFmpeg implementation do not support "scale_rotated_frames" option
+
+#if HAVE_LIBYUV
+
+#include <libyuv/libyuv.h>
+
+using namespace libyuv;
+
+/** LibYUV-based video converter context.
+* Conversion always goes through I420: either the source or the destination
+* chroma must be I420 (enforced by tdav_converter_video_libyuv_init()). */
+typedef struct tdav_converter_video_libyuv_s
+{
+ TMEDIA_DECLARE_CONVERTER_VIDEO;
+
+ enum FourCC srcFormat; // source pixel format (libyuv FourCC)
+ enum FourCC dstFormat; // destination pixel format (libyuv FourCC)
+
+ tsk_bool_t toI420; // set when dstFormat == FOURCC_I420
+ tsk_bool_t fromI420; // set when srcFormat == FOURCC_I420
+
+ // scratch buffers, lazily (re)allocated by process() and freed by the dtor
+ struct{
+  uint8* ptr;
+  int size;
+ }chroma; // chroma-conversion output
+ struct{
+  uint8* ptr;
+  int size;
+ }rotate; // rotation output
+ struct{
+  uint8* ptr;
+  int size;
+ }scale; // scaling output
+ struct{
+  uint8* ptr;
+  int size;
+ }mirror; // horizontal-mirror output
+}
+tdav_converter_video_libyuv_t;
+
+#define TDAV_CONVERTER_VIDEO_LIBYUV(self) ((tdav_converter_video_libyuv_t*)(self))
+#define LIBYUV_INPUT_BUFFER_PADDING_SIZE 32
+
+// A "variable size" chroma is one whose frame size cannot be derived from
+// (width x height) alone, i.e. compressed formats such as MJPEG.
+static inline tsk_bool_t _tdav_converter_video_libyuv_is_chroma_varsize(tmedia_chroma_t chroma)
+{
+ switch (chroma) {
+  case tmedia_chroma_mjpeg:
+   return tsk_true;
+  default:
+   return tsk_false;
+ }
+}
+
+// Returns the size (in bytes) of one frame with the given chroma and
+// dimensions. Zero for variable-size (compressed) or unknown chromas.
+static inline tsk_size_t _tdav_converter_video_libyuv_get_size(tmedia_chroma_t chroma, tsk_size_t w, tsk_size_t h)
+{
+ const tsk_size_t pixels = (w * h);
+ switch (chroma){
+  case tmedia_chroma_rgb24:
+  case tmedia_chroma_bgr24:
+   return (pixels * 3); // 24 bpp
+  case tmedia_chroma_rgb565le:
+  case tmedia_chroma_yuv422p:
+  case tmedia_chroma_uyvy422:
+  case tmedia_chroma_yuyv422:
+   return (pixels << 1); // 16 bpp
+  case tmedia_chroma_rgb32:
+   return (pixels << 2); // 32 bpp
+  case tmedia_chroma_nv21:
+  case tmedia_chroma_nv12:
+  case tmedia_chroma_yuv420p:
+   return ((pixels * 3) >> 1); // 12 bpp
+  case tmedia_chroma_mjpeg:
+   return 0; // compressed: size is data-dependent
+  default:
+   TSK_DEBUG_ERROR("Invalid chroma %d", (int)chroma);
+   return 0;
+ }
+}
+
+// Maps a Doubango chroma identifier to the equivalent libyuv FourCC.
+// Returns FOURCC_ANY when there is no mapping.
+static inline enum FourCC _tdav_converter_video_libyuv_get_pixfmt(tmedia_chroma_t chroma)
+{
+ switch (chroma){
+  // planar/packed YUV
+  case tmedia_chroma_yuv420p:
+   return FOURCC_I420;
+  case tmedia_chroma_yuv422p:
+   return FOURCC_I422;
+  case tmedia_chroma_uyvy422:
+   return FOURCC_UYVY;
+  case tmedia_chroma_yuyv422:
+   return FOURCC_YUY2;
+  case tmedia_chroma_nv21:
+   return FOURCC_NV21;
+  case tmedia_chroma_nv12:
+   return FOURCC_NV12;
+  // RGB
+  case tmedia_chroma_rgb24:
+  case tmedia_chroma_bgr24:
+   return FOURCC_24BG;
+  case tmedia_chroma_rgb565le:
+   return FOURCC_RGBP;
+  case tmedia_chroma_rgb32:
+   return FOURCC_ARGB;
+  // compressed
+  case tmedia_chroma_mjpeg:
+   return FOURCC_MJPG;
+  default:
+   TSK_DEBUG_ERROR("Invalid chroma %d", (int)chroma);
+   return FOURCC_ANY;
+ }
+}
+
+// Validates the source/destination chromas and caches the matching FourCC
+// formats. LibYUV conversion is only supported when one side is I420.
+static int tdav_converter_video_libyuv_init(tmedia_converter_video_t* self, tsk_size_t srcWidth, tsk_size_t srcHeight, tmedia_chroma_t srcChroma, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t dstChroma)
+{
+ tdav_converter_video_libyuv_t* converter = TDAV_CONVERTER_VIDEO_LIBYUV(self);
+
+ TSK_DEBUG_INFO("Initializing new LibYUV Video Converter src=(%dx%d@%d) dst=(%dx%d@%d)", (int)srcWidth, (int)srcHeight, (int)srcChroma, (int)dstWidth, (int)dstHeight, (int)dstChroma);
+
+ converter->srcFormat = _tdav_converter_video_libyuv_get_pixfmt(srcChroma);
+ if (converter->srcFormat == FOURCC_ANY){
+  TSK_DEBUG_ERROR("Invalid source chroma");
+  return -2;
+ }
+ converter->dstFormat = _tdav_converter_video_libyuv_get_pixfmt(dstChroma);
+ if (converter->dstFormat == FOURCC_ANY){
+  TSK_DEBUG_ERROR("Invalid destination chroma");
+  return -3;
+ }
+
+ converter->toI420 = (converter->dstFormat == FOURCC_I420);
+ converter->fromI420 = (converter->srcFormat == FOURCC_I420);
+ if (!converter->toI420 && !converter->fromI420){
+  TSK_DEBUG_ERROR("LIBYUV only support from/to I420");
+  return -1;
+ }
+ return 0;
+}
+
+/** Converts one video frame using libyuv.
+* Pipeline when toI420: convert -> (mirror) -> (rotate) -> (scale/pad/crop).
+* Pipeline when fromI420: (mirror) -> (scale) -> convert.
+* @param _self the video converter.
+* @param buffer the input frame.
+* @param buffer_size size (in bytes) of the input frame.
+* @param output destination buffer, reallocated on demand.
+* @param output_max_size capacity of @a output, updated when reallocated.
+* @retval size (in bytes) of the converted frame, zero on error.
+*/
+static tsk_size_t tdav_converter_video_libyuv_process(tmedia_converter_video_t* _self, const void* buffer, tsk_size_t buffer_size, void** output, tsk_size_t* output_max_size)
+{
+// Grows "buff" to at least "new_size" bytes; on OOM, returns 0 from the enclosing function.
+#define RESIZE_BUFFER(buff, curr_size, new_size) \
+ if((int)(curr_size) < (new_size)){ \
+  if(!((buff) = (uint8*)tsk_realloc((buff), (new_size)))){ \
+   (curr_size) = 0; \
+   TSK_DEBUG_ERROR("Failed to allocate buffer"); \
+   return 0; \
+  } \
+  (curr_size) = (new_size); \
+ }
+ static const int crop_x = 0;
+ static const int crop_y = 0;
+
+ int ret;
+ tdav_converter_video_libyuv_t* self = TDAV_CONVERTER_VIDEO_LIBYUV(_self);
+ tsk_bool_t scale = ((_self->dstWidth != _self->srcWidth) || (_self->dstHeight != _self->srcHeight));
+ int s, ls, src_y_stride, src_u_stride, src_v_stride, dst_y_stride, dst_u_stride, dst_v_stride;
+ int src_w, src_h, dst_w, dst_h;
+ uint8 *dst_y, *dst_u, *dst_v, *src_y, *src_u, *src_v;
+
+ RotationMode rotation = kRotate0;
+
+ switch (_self->rotation){
+ case 90: rotation = kRotate90; break;
+ case 180: rotation = kRotate180; break;
+ case 270: rotation = kRotate270; break;
+ }
+ //rotation = kRotate0;
+
+ // not square and rotation=270/90 -> requires scaling unless disabled
+ if ((rotation == kRotate90 || rotation == kRotate270) && _self->scale_rotated_frames){
+  scale |= (_self->dstWidth != _self->dstHeight) && (rotation == kRotate90 || rotation == kRotate270);
+ }
+
+ src_w = (int)_self->srcWidth, src_h = (int)_self->srcHeight;
+
+ if (self->toI420) {
+  tsk_size_t x_in_size;
+  // check input size (compressed chromas use the caller-provided size as-is)
+  x_in_size = _tdav_converter_video_libyuv_is_chroma_varsize(_self->srcChroma) ? buffer_size : _tdav_converter_video_libyuv_get_size(_self->srcChroma, src_w, src_h);
+  if (x_in_size > buffer_size) { // Ignore any extra data. For example, "CVPixelBufferGetDataSize()" will return size padded with 8 extra bytes for RGB32.
+   TSK_DEBUG_ERROR("Invalid input size: %u>%u", (unsigned)x_in_size, (unsigned)buffer_size);
+   return 0;
+  }
+
+  dst_w = src_w, dst_h = src_h; // because no scaling when converting to I420
+  ls = src_w * src_h;
+  s = ((ls * 3) >> 1);
+  // write into the scratch buffer when more steps follow, directly into *output otherwise
+  if (scale || rotation != kRotate0){
+   RESIZE_BUFFER(self->chroma.ptr, self->chroma.size, s);
+   dst_y = self->chroma.ptr;
+  }
+  else{
+   RESIZE_BUFFER((*output), (*output_max_size), s);
+   dst_y = (uint8*)*output;
+  }
+  dst_u = (dst_y + ls);
+  dst_v = dst_u + (ls >> 2);
+  src_y_stride = dst_y_stride = src_w;
+  src_u_stride = src_v_stride = dst_u_stride = dst_v_stride = ((dst_y_stride + 1) >> 1);
+
+  // convert to I420 without scaling or rotation
+  ret = ConvertToI420(
+   (const uint8*)buffer, (int)x_in_size,
+   dst_y, dst_y_stride,
+   dst_u, dst_u_stride,
+   dst_v, dst_v_stride,
+   crop_x, crop_y,
+   (int)_self->srcWidth, (int)(_self->flip ? (_self->srcHeight * -1) : _self->srcHeight), // vertical flip
+   (int)_self->srcWidth, (int)_self->srcHeight,
+   kRotate0,
+   (uint32)self->srcFormat);
+  // mirror: horizontal flip (front camera video)
+  // NOTE(review): overwrites "ret" from ConvertToI420 before it is checked below — confirm intended
+  if (_self->mirror) {
+   RESIZE_BUFFER(self->mirror.ptr, self->mirror.size, s);
+   ret = I420Mirror(
+    dst_y, dst_y_stride,
+    dst_u, dst_u_stride,
+    dst_v, dst_v_stride,
+    self->mirror.ptr, dst_y_stride,
+    (self->mirror.ptr + ls), dst_u_stride,
+    (self->mirror.ptr + ls + (ls >> 2)), dst_v_stride,
+    (int)_self->srcWidth, (int)_self->srcHeight);
+   memcpy(dst_y, self->mirror.ptr, s);
+  }
+
+  if (ret){
+   TSK_DEBUG_ERROR("ConvertToI420 failed with error code = %d, in_size:%u", ret, x_in_size);
+   return 0;
+  }
+
+  // rotate
+  if (rotation != kRotate0){
+   // 90/270 swap the output dimensions
+   dst_w = (int)((rotation == kRotate90 || rotation == kRotate270) ? _self->srcHeight : _self->srcWidth);
+   dst_h = (int)((rotation == kRotate90 || rotation == kRotate270) ? _self->srcWidth : _self->srcHeight);
+
+   src_y = dst_y, src_u = dst_u, src_v = dst_v;
+   src_y_stride = src_y_stride, src_u_stride = src_u_stride, src_v_stride = src_v_stride; // no-op self-assignments: strides unchanged (kept for symmetry)
+   dst_y_stride = dst_w;
+   dst_u_stride = dst_v_stride = ((dst_y_stride + 1) >> 1);
+
+   if (scale){
+    RESIZE_BUFFER(self->rotate.ptr, self->rotate.size, s);
+    dst_y = self->rotate.ptr;
+   }
+   else{// last step
+    RESIZE_BUFFER((*output), (*output_max_size), s);
+    dst_y = (uint8*)*output;
+   }
+
+   dst_u = (dst_y + ls);
+   dst_v = dst_u + (ls >> 2);
+   ret = I420Rotate(
+    src_y, src_y_stride,
+    src_u, src_u_stride,
+    src_v, src_v_stride,
+    dst_y, dst_y_stride,
+    dst_u, dst_u_stride,
+    dst_v, dst_v_stride,
+    (int)_self->srcWidth, (int)_self->srcHeight, rotation);
+   if (ret){
+    TSK_DEBUG_ERROR("I420Rotate failed with error code = %d", ret);
+    return 0;
+   }
+
+   // scale to fit ratio, pad, crop then copy
+   if ((rotation == kRotate90 || rotation == kRotate270) && _self->scale_rotated_frames){
+    int iwidth = (int)_self->srcHeight;
+    int iheight = (int)_self->srcWidth;
+
+    src_y = dst_y, src_u = dst_u, src_v = dst_v;
+    src_w = dst_w, src_h = dst_h;
+    src_y_stride = dst_y_stride, src_u_stride = dst_u_stride, src_v_stride = dst_v_stride;
+
+    if (_self->dstWidth != _self->dstHeight) {
+     // center-crop the rotated frame to the destination aspect ratio
+     if (iwidth * _self->srcHeight > iheight * _self->srcWidth) {
+      iwidth = (int)((iheight * _self->srcWidth / _self->srcHeight) & ~1);
+      int iwidth_offset = (int)((_self->srcHeight - iwidth) >> 1);
+      src_y += iwidth_offset;
+      src_u += iwidth_offset >> 1;
+      src_v += iwidth_offset >> 1;
+     }
+     else if (iwidth * _self->srcHeight < iheight * _self->srcWidth) {
+      iheight = (int)(iwidth * _self->srcHeight / _self->srcWidth);
+      int iheight_offset = (int)((_self->srcWidth - iheight) >> 2);
+      iheight_offset <<= 1; // keep the offset even (chroma rows)
+      src_y += iheight_offset * src_y_stride;
+      src_u += (iheight_offset >> 1) * src_u_stride;
+      src_v += (iheight_offset >> 1) * src_v_stride;
+     }
+
+     src_w = iwidth, src_h = iheight;
+     src_y_stride = src_w;
+     src_u_stride = src_v_stride = ((src_y_stride + 1) >> 1);
+
+     dst_w = (int)_self->dstWidth;
+     dst_h = (int)_self->dstHeight;
+     ls = dst_w * dst_h;
+     s = ((ls * 3) >> 1);
+     RESIZE_BUFFER((*output), (*output_max_size), s);
+     dst_y_stride = dst_w;
+     dst_u_stride = dst_v_stride = ((dst_y_stride + 1) >> 1);
+     uint8* dst_y = (uint8*)*output;
+     uint8* dst_u = (dst_y + ls);
+     uint8* dst_v = dst_u + (ls >> 2);
+
+     ret = I420Scale(
+      src_y, src_y_stride,
+      src_u, src_u_stride,
+      src_v, src_v_stride,
+      src_w, src_h,
+      dst_y, dst_y_stride,
+      dst_u, dst_u_stride,
+      dst_v, dst_v_stride,
+      dst_w, dst_h,
+      kFilterBox);
+     if (ret){
+      TSK_DEBUG_ERROR("I420Scale failed with error code = %d", ret);
+      return 0;
+     }
+     return s;
+    }
+   }
+  }
+
+  // scale
+  if (scale){
+   src_w = dst_w, src_h = dst_h;
+   dst_w = (int)(((rotation == kRotate90 || rotation == kRotate270) && !_self->scale_rotated_frames) ? _self->dstHeight : _self->dstWidth);
+   dst_h = (int)(((rotation == kRotate90 || rotation == kRotate270) && !_self->scale_rotated_frames) ? _self->dstWidth : _self->dstHeight);
+   src_y = dst_y, src_u = dst_u, src_v = dst_v;
+   src_y_stride = dst_y_stride, src_u_stride = dst_u_stride, src_v_stride = dst_v_stride;
+   dst_y_stride = dst_w;
+   dst_u_stride = dst_v_stride = ((dst_y_stride + 1) >> 1);
+
+   ls = dst_w * dst_h;
+   s = ((ls * 3) >> 1);
+   RESIZE_BUFFER((*output), (*output_max_size), s);
+   dst_y = (uint8*)*output;
+   dst_u = (dst_y + ls);
+   dst_v = dst_u + (ls >> 2);
+
+   ret = I420Scale(
+    src_y, src_y_stride,
+    src_u, src_u_stride,
+    src_v, src_v_stride,
+    src_w, src_h,
+    dst_y, dst_y_stride,
+    dst_u, dst_u_stride,
+    dst_v, dst_v_stride,
+    dst_w, dst_h,
+    kFilterNone);
+   if (ret){
+    TSK_DEBUG_ERROR("I420Scale failed with error code = %d", ret);
+    return 0;
+   }
+  }
+
+  return ((dst_w * dst_h * 3) >> 1); // I420 size of the final frame
+ }
+ else if (self->fromI420){
+  static const int dst_sample_stride = 0;
+
+  dst_w = (int)_self->dstWidth, dst_h = (int)_self->dstHeight;
+  src_y = (uint8*)buffer;
+  src_u = (src_y + (src_w * src_h));
+  src_v = (src_u + ((src_w * src_h) >> 2));
+  src_y_stride = src_w;
+  src_u_stride = src_v_stride = ((src_y_stride + 1) >> 1);
+
+  // mirror: horizontal flip (front camera video)
+  if ((_self->mirror)) {
+   ls = src_w * src_h;
+   s = ((ls * 3) >> 1);
+   // NOTE(review): security check "s < buffer_size" silently skips mirroring
+   // for exact-size buffers (should arguably be "<=") — confirm intended
+   if (s < (int)buffer_size) { // security check
+    RESIZE_BUFFER(self->mirror.ptr, self->mirror.size, s);
+    ret = I420Mirror(
+     src_y, src_y_stride,
+     src_u, src_u_stride,
+     src_v, src_v_stride,
+     self->mirror.ptr, src_y_stride,
+     (self->mirror.ptr + ls), src_u_stride,
+     (self->mirror.ptr + ls + (ls >> 2)), src_v_stride,
+     src_w, src_h);
+    memcpy(src_y, self->mirror.ptr, s); // NOTE(review): writes back into the caller's (const) input buffer
+   }
+  }
+
+  if (scale){
+   ls = dst_w * dst_h;
+   s = ((ls * 3) >> 1);
+
+   RESIZE_BUFFER(self->scale.ptr, self->scale.size, s);
+   dst_y = self->scale.ptr;
+   dst_u = (dst_y + (dst_w * dst_h));
+   dst_v = (dst_u + ((dst_w * dst_h) >> 2));
+   dst_y_stride = dst_w;
+   dst_u_stride = dst_v_stride = ((dst_y_stride + 1) >> 1);
+
+   ret = I420Scale(
+    src_y, src_y_stride,
+    src_u, src_u_stride,
+    src_v, src_v_stride,
+    src_w, src_h,
+    dst_y, dst_y_stride,
+    dst_u, dst_u_stride,
+    dst_v, dst_v_stride,
+    dst_w, dst_h,
+    kFilterNone);
+
+   if (ret){
+    TSK_DEBUG_ERROR("I420Scale failed with error code = %d", ret);
+    return 0;
+   }
+
+   // the scaled frame becomes the conversion source
+   src_y = dst_y;
+   src_u = (dst_y + ls);
+   src_v = (dst_u + (ls >> 2));
+   src_y_stride = dst_y_stride;
+   src_u_stride = src_v_stride = ((src_y_stride + 1) >> 1);
+  }
+
+  // NOTE(review): output size computed from SRC dimensions even when scaling
+  // changed them to dst dimensions — looks wrong when srcWxH != dstWxH; confirm
+  s = (int)_tdav_converter_video_libyuv_get_size(_self->dstChroma, _self->srcWidth, _self->srcHeight);
+  RESIZE_BUFFER((*output), (*output_max_size), s);
+
+  ret = ConvertFromI420(
+   src_y, src_y_stride,
+   src_u, src_u_stride,
+   src_v, src_v_stride,
+   (uint8*)*output, dst_sample_stride,
+   (int)_self->dstWidth, (_self->flip ? ((int)_self->dstHeight * -1) : (int)_self->dstHeight), // vertical flip
+   (uint32)self->dstFormat);
+  if (ret){
+   TSK_DEBUG_ERROR("ConvertFromI420 failed with error code = %d", ret);
+   return 0;
+  }
+
+  return s;
+ }
+
+ // Must be from/to I420
+ TSK_DEBUG_ERROR("Not expected code called");
+ return 0;
+}
+
+// Object constructor: nothing to initialize (tsk_object_new() zero-fills the struct).
+static tsk_object_t* tdav_converter_video_libyuv_ctor(tsk_object_t * self, va_list * app)
+{
+ return self;
+}
+// Object destructor: releases every scratch buffer owned by the converter.
+static tsk_object_t* tdav_converter_video_libyuv_dtor(tsk_object_t * self)
+{
+ tdav_converter_video_libyuv_t *converter = (tdav_converter_video_libyuv_t *)self;
+ if (converter){
+  TSK_FREE(converter->mirror.ptr);
+  TSK_FREE(converter->scale.ptr);
+  TSK_FREE(converter->rotate.ptr);
+  TSK_FREE(converter->chroma.ptr);
+ }
+ return self;
+}
+/** tsk_object definition (meta-class) for the LibYUV converter */
+static const tsk_object_def_t tdav_converter_video_libyuv_def_s =
+{
+ sizeof(tdav_converter_video_libyuv_t),
+ tdav_converter_video_libyuv_ctor,
+ tdav_converter_video_libyuv_dtor,
+ tsk_null, // no comparator
+};
+const tsk_object_def_t *tdav_converter_video_libyuv_def_t = &tdav_converter_video_libyuv_def_s;
+/** plugin definition registered with the media framework */
+static const tmedia_converter_video_plugin_def_t tdav_converter_video_libyuv_plugin_def_s =
+{
+ &tdav_converter_video_libyuv_def_s,
+
+ tdav_converter_video_libyuv_init,
+ tdav_converter_video_libyuv_process
+};
+const tmedia_converter_video_plugin_def_t *tdav_converter_video_libyuv_plugin_def_t = &tdav_converter_video_libyuv_plugin_def_s;
+
+#endif /* HAVE_LIBYUV */
+
+#if HAVE_FFMPEG || HAVE_SWSSCALE
+
+#ifndef INT64_C
+# define INT64_C(c) (c ## LL)
+# define UINT64_C(c) (c ## ULL)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#include <libswscale/swscale.h>
+#include <libavcodec/avcodec.h>
+#ifdef __cplusplus
+}
+#endif
+
+/** FFmpeg/swscale-based video converter context. */
+typedef struct tdav_converter_video_ffmpeg_s
+{
+ TMEDIA_DECLARE_CONVERTER_VIDEO;
+
+ struct SwsContext *context; // chroma-conversion/scaling context (created once)
+
+ enum PixelFormat srcFormat; // source pixel format
+ enum PixelFormat dstFormat; // destination pixel format
+
+ AVFrame* srcFrame; // wraps the caller's input buffer
+ AVFrame* dstFrame; // wraps the output buffer
+
+ // 90-degree rotation resources (lazily allocated by process())
+ struct {
+  struct SwsContext *context;
+  AVFrame* frame;
+  uint8_t* buffer;
+  tsk_size_t buffer_size;
+ } rot;
+}
+tdav_converter_video_ffmpeg_t;
+
+#define TDAV_CONVERTER_VIDEO_FFMPEG(self) ((tdav_converter_video_ffmpeg_t*)(self))
+
+// use a macro for performance reasons (called (15x3) times per second)
+// Rotates one plane 90 degrees clockwise by walking the source column-wise.
+#define _tdav_converter_video_ffmpeg_rotate90(srcw, srch, srcdata, dstdata) \
+{ \
+ register int i,j; \
+ register int newx = 0; \
+ for (i = 0; i < (int)(srcw); i ++ ){ \
+  for( j = (int)srch-1; j >=0; j -- ){ \
+   (dstdata)[newx++] = (srcdata)[j * (srcw) + i]; \
+  } \
+ } \
+}
+
+// Flips a YUV420P frame vertically in place: each plane pointer is moved to
+// its last row and the line sizes are negated (no pixels are copied).
+#define _tdav_converter_video_ffmpeg_flip(frame, height) \
+ frame->data[0] += frame->linesize[0] * (height -1); \
+ frame->data[1] += frame->linesize[1] * ((height -1)>>1); \
+ frame->data[2] += frame->linesize[2] * ((height -1)>>1); \
+ \
+ frame->linesize[0] *= -1; \
+ frame->linesize[1] *= -1; \
+ frame->linesize[2] *= -1;
+
+
+// Maps a Doubango chroma identifier to the equivalent FFmpeg pixel format.
+// Returns PIX_FMT_NONE when there is no mapping.
+static inline enum PixelFormat _tdav_converter_video_ffmpeg_get_pixfmt(tmedia_chroma_t chroma)
+{
+ switch(chroma){
+  // planar/packed YUV
+  case tmedia_chroma_yuv420p:
+   return PIX_FMT_YUV420P;
+  case tmedia_chroma_yuv422p:
+   return PIX_FMT_YUV422P;
+  case tmedia_chroma_uyvy422:
+   return PIX_FMT_UYVY422;
+  case tmedia_chroma_yuyv422:
+   return PIX_FMT_YUYV422;
+  case tmedia_chroma_nv21:
+   return PIX_FMT_NV21;
+  case tmedia_chroma_nv12:
+   return PIX_FMT_NV12;
+  // RGB
+  case tmedia_chroma_rgb24:
+   return PIX_FMT_RGB24;
+  case tmedia_chroma_bgr24:
+   return PIX_FMT_BGR24;
+  case tmedia_chroma_rgb32:
+   return PIX_FMT_RGB32;
+  case tmedia_chroma_rgb565le:
+   return PIX_FMT_RGB565LE;
+  case tmedia_chroma_rgb565be:
+   return PIX_FMT_RGB565BE;
+  default:
+   TSK_DEBUG_ERROR("Invalid chroma %d", (int)chroma);
+   return PIX_FMT_NONE;
+ }
+}
+
+
+/** Initializes the FFmpeg (swscale) video converter.
+* Validates and caches the source/destination pixel formats.
+* @retval zero on success, negative error code otherwise. */
+static int tdav_converter_video_ffmpeg_init(tmedia_converter_video_t* self, tsk_size_t srcWidth, tsk_size_t srcHeight, tmedia_chroma_t srcChroma, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t dstChroma)
+{
+ // FIX: cast "tsk_size_t" arguments to "int" to match the "%d" format
+ // specifiers (undefined behavior otherwise; the libyuv version already casts)
+ TSK_DEBUG_INFO("Initializing new FFmpeg Video Converter src=(%dx%d@%d) dst=(%dx%d@%d)", (int)srcWidth, (int)srcHeight, (int)srcChroma, (int)dstWidth, (int)dstHeight, (int)dstChroma);
+
+ if((TDAV_CONVERTER_VIDEO_FFMPEG(self)->srcFormat = _tdav_converter_video_ffmpeg_get_pixfmt(srcChroma)) == PIX_FMT_NONE){
+  TSK_DEBUG_ERROR("Invalid source chroma");
+  return -2;
+ }
+ if((TDAV_CONVERTER_VIDEO_FFMPEG(self)->dstFormat = _tdav_converter_video_ffmpeg_get_pixfmt(dstChroma)) == PIX_FMT_NONE){
+  TSK_DEBUG_ERROR("Invalid destination chroma");
+  return -3;
+ }
+
+ return 0;
+}
+
+/** Converts (and optionally rotates/flips) one video frame using swscale.
+* @param _self the video converter.
+* @param buffer the input frame (srcWidth x srcHeight in srcFormat).
+* @param buffer_size size (in bytes) of the input frame (unused here).
+* @param output destination buffer, reallocated on demand.
+* @param output_max_size capacity of @a output, updated when reallocated.
+* @retval size (in bytes) of the converted frame, zero on error.
+*/
+static tsk_size_t tdav_converter_video_ffmpeg_process(tmedia_converter_video_t* _self, const void* buffer, tsk_size_t buffer_size, void** output, tsk_size_t* output_max_size)
+{
+ int ret, size;
+ tsk_bool_t _rotate = tsk_false;
+ tdav_converter_video_ffmpeg_t* self = TDAV_CONVERTER_VIDEO_FFMPEG(_self);
+
+ if (!self || !buffer || !output){
+  TSK_DEBUG_ERROR("Invalid parameter");
+  return 0;
+ }
+
+ /* Pictures (lazily allocated, freed by the dtor) */
+ if (!self->srcFrame){
+  if (!(self->srcFrame = avcodec_alloc_frame())){
+   TSK_DEBUG_ERROR("Failed to create picture");
+   return 0;
+  }
+ }
+ if (!self->dstFrame){
+  if (!(self->dstFrame = avcodec_alloc_frame())){
+   TSK_DEBUG_ERROR("Failed to create picture");
+   return 0;
+  }
+ }
+
+ size = avpicture_get_size(self->dstFormat, (int)_self->dstWidth, (int)_self->dstHeight);
+ if ((int)*output_max_size < size){
+  if (!(*output = tsk_realloc(*output, (size + FF_INPUT_BUFFER_PADDING_SIZE)))){
+   *output_max_size = 0;
+   TSK_DEBUG_ERROR("Failed to allocate buffer");
+   return 0;
+  }
+  *output_max_size = size;
+ }
+
+ /* Wrap the source buffer */
+ ret = avpicture_fill((AVPicture *)self->srcFrame, (uint8_t*)buffer, self->srcFormat, (int)_self->srcWidth, (int)_self->srcHeight);
+ /* Wrap the destination buffer */
+ ret = avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, self->dstFormat, (int)_self->dstWidth, (int)_self->dstHeight);
+
+ /* === performs conversion === */
+ /* Context (created once, freed by the dtor) */
+ if (!self->context) {
+  self->context = sws_getContext(
+   (int)_self->srcWidth, (int)_self->srcHeight, self->srcFormat,
+   (int)_self->dstWidth, (int)_self->dstHeight, self->dstFormat,
+   SWS_FAST_BILINEAR, NULL, NULL, NULL);
+
+  if (!self->context) {
+   TSK_DEBUG_ERROR("Failed to create context");
+   return 0;
+  }
+ }
+
+ /* FIXME: For now only 90-degree rotation is supported; this is why we always use libyuv on mobile devices */
+ _rotate = (PIX_FMT_YUV420P == self->dstFormat) && _self->rotation == 90;
+
+ // if no rotation then flip while scaling, otherwise do it after rotation
+ if (!_rotate && _self->flip) {
+  _tdav_converter_video_ffmpeg_flip(self->dstFrame, _self->dstHeight);
+ }
+
+ // chroma conversion and scaling
+ ret = sws_scale(self->context, (const uint8_t* const*)self->srcFrame->data, self->srcFrame->linesize, 0, (int)_self->srcHeight,
+  self->dstFrame->data, self->dstFrame->linesize);
+ if (ret < 0){
+  TSK_FREE(*output);
+  // FIX: reset the recorded capacity along with the freed buffer, otherwise a
+  // subsequent call would skip the realloc and write through a NULL pointer
+  *output_max_size = 0;
+  return 0;
+ }
+
+ // Rotation
+ if (_rotate){
+  // because we rotated 90, width = original height and height = original width
+  int w = (int)_self->dstHeight;
+  int h = (int)_self->dstWidth;
+
+  // allocate rotation frame if not already done
+  if (!(self->rot.frame) && !(self->rot.frame = avcodec_alloc_frame())){
+   TSK_DEBUG_ERROR("failed to allocate rotation frame");
+   TSK_FREE(*output);
+   *output_max_size = 0; // FIX: keep capacity consistent with the freed buffer
+   return(0);
+  }
+
+  // allocate rotation temporary buffer
+  size = avpicture_get_size(self->dstFormat, w, h);
+  if (self->rot.buffer_size != size){
+   // FIX: do not overwrite "rot.buffer" with the av_realloc() result directly:
+   // on failure av_realloc() returns NULL and the original block would leak
+   uint8_t* rot_buffer = (uint8_t *)av_realloc(self->rot.buffer, size);
+   if (!rot_buffer){
+    TSK_DEBUG_ERROR("failed to allocate new buffer for the frame");
+    self->rot.buffer_size = 0;
+    return(0);
+   }
+   self->rot.buffer = rot_buffer;
+   self->rot.buffer_size = size;
+  }
+
+  //wrap
+  avpicture_fill((AVPicture *)self->rot.frame, self->rot.buffer, self->dstFormat, w, h);
+  // rotate: the Y plane, then the two half-resolution chroma planes
+  _tdav_converter_video_ffmpeg_rotate90(_self->dstWidth, _self->dstHeight, self->dstFrame->data[0], self->rot.frame->data[0]);
+  _tdav_converter_video_ffmpeg_rotate90((_self->dstWidth >> 1), (_self->dstHeight >> 1), self->dstFrame->data[1], self->rot.frame->data[1]);
+  _tdav_converter_video_ffmpeg_rotate90((_self->dstWidth >> 1), (_self->dstHeight >> 1), self->dstFrame->data[2], self->rot.frame->data[2]);
+  // flip
+  if (_self->flip){
+   _tdav_converter_video_ffmpeg_flip(self->rot.frame, h);
+  }
+
+  {
+   static const int y_shift = 1;
+   static const int x_shift = 1;
+   int r_size, r_w, r_h, left_band, top_band;
+   int pad = ((int)_self->dstWidth - w) > ((int)_self->dstHeight - h) ? ((int)_self->dstWidth - w) : ((int)_self->dstHeight - h);
+   if (pad < 0){
+    pad = 0;
+   }
+   // FIX: removed the statement-with-no-effect "r_size;"
+   r_w = w + pad;
+   r_h = h + pad;
+   left_band = (int)((r_w - _self->dstWidth) / 2);
+   top_band = (int)((r_h - _self->dstHeight) / 3);
+
+   if (!self->rot.context){
+    if (!(self->rot.context = sws_getContext(w, h, self->dstFormat, r_w, r_h, self->dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL))){
+     TSK_DEBUG_ERROR("Failed to create context");
+     TSK_FREE(*output);
+     *output_max_size = 0; // FIX: keep capacity consistent with the freed buffer
+     return 0;
+    }
+   }
+
+   r_size = avpicture_get_size(self->dstFormat, r_w, r_h);
+   if ((int)*output_max_size < r_size){
+    if (!(*output = tsk_realloc(*output, (r_size + FF_INPUT_BUFFER_PADDING_SIZE)))){
+     *output_max_size = 0;
+     TSK_DEBUG_ERROR("Failed to allocate buffer");
+     return 0;
+    }
+    *output_max_size = r_size;
+   }
+
+   // re-wrap
+   avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, self->dstFormat, r_w, r_h);
+
+   // pad
+   sws_scale(self->rot.context, (const uint8_t* const*)self->rot.frame->data, self->rot.frame->linesize,
+    0, h, self->dstFrame->data, self->dstFrame->linesize);
+
+   // crop
+   self->dstFrame->data[0] = self->dstFrame->data[0] + (top_band * self->dstFrame->linesize[0]) + left_band;
+   self->dstFrame->data[1] = self->dstFrame->data[1] + ((top_band >> y_shift) * self->dstFrame->linesize[1]) + (left_band >> x_shift);
+   self->dstFrame->data[2] = self->dstFrame->data[2] + ((top_band >> y_shift) * self->dstFrame->linesize[2]) + (left_band >> x_shift);
+
+   avpicture_layout((const AVPicture*)self->dstFrame, self->dstFormat, (int)_self->dstWidth, (int)_self->dstHeight, (unsigned char *)*output, (int)*output_max_size);
+  }
+
+ }//end of rotation
+
+ return size;
+}
+
+
+
+//=================================================================================================
+// Video Converter object definition
+//
+// Object constructor: nothing to initialize (tsk_object_new() zero-fills the struct).
+static tsk_object_t* tdav_converter_video_ffmpeg_ctor(tsk_object_t * self, va_list * app)
+{
+ return self;
+}
+
+// Object destructor: releases the scaling contexts, wrapper frames and the
+// rotation scratch buffer.
+static tsk_object_t* tdav_converter_video_ffmpeg_dtor(tsk_object_t * self)
+{
+ tdav_converter_video_ffmpeg_t *converter = (tdav_converter_video_ffmpeg_t *)self;
+ if(converter){
+  // main conversion resources
+  if(converter->srcFrame){
+   av_free(converter->srcFrame);
+  }
+  if(converter->dstFrame){
+   av_free(converter->dstFrame);
+  }
+  if(converter->context){
+   sws_freeContext(converter->context);
+  }
+
+  // rotation resources
+  if(converter->rot.frame){
+   av_free(converter->rot.frame);
+  }
+  if(converter->rot.buffer){
+   av_free(converter->rot.buffer);
+  }
+  if(converter->rot.context){
+   sws_freeContext(converter->rot.context);
+  }
+ }
+
+ return self;
+}
+
+/** tsk_object definition (meta-class) for the FFmpeg converter */
+static const tsk_object_def_t tdav_converter_video_ffmpeg_def_s =
+{
+ sizeof(tdav_converter_video_ffmpeg_t),
+ tdav_converter_video_ffmpeg_ctor,
+ tdav_converter_video_ffmpeg_dtor,
+ tsk_null, // no comparator
+};
+const tsk_object_def_t *tdav_converter_video_ffmpeg_def_t = &tdav_converter_video_ffmpeg_def_s;
+
+/* plugin definition*/
+static const tmedia_converter_video_plugin_def_t tdav_converter_video_ffmpeg_plugin_def_s =
+{
+ &tdav_converter_video_ffmpeg_def_s,
+
+ tdav_converter_video_ffmpeg_init,
+ tdav_converter_video_ffmpeg_process
+};
+const tmedia_converter_video_plugin_def_t *tdav_converter_video_ffmpeg_plugin_def_t = &tdav_converter_video_ffmpeg_plugin_def_s;
+
+#endif /* HAVE_FFMPEG || HAVE_SWSSCALE */
+
diff --git a/tinyDAV/src/video/tdav_runnable_video.c b/tinyDAV/src/video/tdav_runnable_video.c
new file mode 100644
index 0000000..c8102ea
--- /dev/null
+++ b/tinyDAV/src/video/tdav_runnable_video.c
@@ -0,0 +1,95 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+/**@file tdav_runnable_video.c
+ * @brief Video runnable used by codecs.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/video/tdav_runnable_video.h"
+
+#include "tsk_buffer.h"
+#include "tsk_debug.h"
+
+/** Creates a new video runnable.
+* @param run_f the function executed by the runnable's thread.
+* @param userdata opaque user data made available to the callback.
+* @retval new runnable object, or tsk_null on allocation failure. */
+tdav_runnable_video_t* tdav_runnable_video_create(tsk_runnable_func_run run_f, const void* userdata)
+{
+ tdav_runnable_video_t* runnable = tsk_object_new(tdav_runnable_video_def_t);
+ if(runnable){
+  TSK_RUNNABLE(runnable)->run = run_f;
+  runnable->userdata = userdata;
+ }
+ return runnable;
+}
+
+/** Starts the runnable; enqueued items are tsk_buffer_t objects.
+* @retval zero on success, negative error code otherwise. */
+int tdav_runnable_video_start(tdav_runnable_video_t* self)
+{
+ if(self){
+  return tsk_runnable_start(TSK_RUNNABLE(self), tsk_buffer_def_t);
+ }
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+}
+
+/** Stops the runnable's worker thread.
+* @retval zero on success, negative error code otherwise. */
+int tdav_runnable_video_stop(tdav_runnable_video_t* self)
+{
+ if(self){
+  return tsk_runnable_stop(TSK_RUNNABLE(self));
+ }
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+}
+
+
+//=================================================================================================
+// Video Runable object definition
+//
+// Object constructor: nothing to initialize (tsk_object_new() zero-fills the struct).
+static tsk_object_t* tdav_runnable_video_ctor(tsk_object_t * self, va_list * app)
+{
+ return self;
+}
+
+/** Object destructor. */
+static tsk_object_t* tdav_runnable_video_dtor(tsk_object_t * self)
+{
+ tdav_runnable_video_t *runnable = self;
+ if(runnable){
+  // make sure the worker thread is stopped before the object goes away
+  tsk_runnable_stop(TSK_RUNNABLE(runnable));
+ }
+
+ return self;
+}
+
+/** tsk_object definition (meta-class) for the video runnable */
+static const tsk_object_def_t tdav_runnable_video_def_s =
+{
+ sizeof(tdav_runnable_video_t),
+ tdav_runnable_video_ctor,
+ tdav_runnable_video_dtor,
+ tsk_null, // no comparator
+};
+const tsk_object_def_t *tdav_runnable_video_def_t = &tdav_runnable_video_def_s;
diff --git a/tinyDAV/src/video/tdav_session_video.c b/tinyDAV/src/video/tdav_session_video.c
new file mode 100644
index 0000000..4ee6812
--- /dev/null
+++ b/tinyDAV/src/video/tdav_session_video.c
@@ -0,0 +1,1649 @@
+/*
+ * Copyright (C) 2010-2014 Mamadou DIOP.
+ * Copyright (C) 2011-2014 Doubango Telecom.
+ *
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_session_video.c
+ * @brief Video Session plugin.
+ *
+ */
+#include "tinydav/video/tdav_session_video.h"
+#include "tinydav/video/tdav_converter_video.h"
+#include "tinydav/video/jb/tdav_video_jb.h"
+#include "tinydav/codecs/fec/tdav_codec_red.h"
+#include "tinydav/codecs/fec/tdav_codec_ulpfec.h"
+
+#include "tinymedia/tmedia_converter_video.h"
+#include "tinymedia/tmedia_consumer.h"
+#include "tinymedia/tmedia_producer.h"
+#include "tinymedia/tmedia_defaults.h"
+#include "tinymedia/tmedia_params.h"
+
+#include "tinyrtp/trtp_manager.h"
+#include "tinyrtp/rtcp/trtp_rtcp_header.h"
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+#include "tinyrtp/rtcp/trtp_rtcp_packet.h"
+#include "tinyrtp/rtcp/trtp_rtcp_report_rr.h"
+#include "tinyrtp/rtcp/trtp_rtcp_report_sr.h"
+#include "tinyrtp/rtcp/trtp_rtcp_report_fb.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+// Minimum time between two incoming FIR. If smaller, the request from the remote party will be ignored
+// Tell the encoder to send IDR frame if condition is met
+#if METROPOLIS
+# define TDAV_SESSION_VIDEO_AVPF_FIR_HONOR_INTERVAL_MIN 0 // millis
+#else
+# define TDAV_SESSION_VIDEO_AVPF_FIR_HONOR_INTERVAL_MIN 750 // millis
+#endif
+// Minimum time between two outgoing FIR. If smaller, the request from the remote party will be ignored
+// Tell the RTCP session to request IDR if condition is met
+#if METROPOLIS
+# define TDAV_SESSION_VIDEO_AVPF_FIR_REQUEST_INTERVAL_MIN 0 // millis
+#else
+# define TDAV_SESSION_VIDEO_AVPF_FIR_REQUEST_INTERVAL_MIN 1500 // millis
+#endif
+
+// Packet-loss adaptation: PROB_* are report counters before acting,
+// FACT_* clamp the bandwidth up/down factor, LOW/MEDIUM/HIGH are RTCP
+// "fraction lost" thresholds (fraction is expressed in 1/256 units).
+#define TDAV_SESSION_VIDEO_PKT_LOSS_PROB_BAD 2
+#define TDAV_SESSION_VIDEO_PKT_LOSS_PROB_GOOD 6
+#define TDAV_SESSION_VIDEO_PKT_LOSS_FACT_MIN 0
+#define TDAV_SESSION_VIDEO_PKT_LOSS_FACT_MAX 8
+#define TDAV_SESSION_VIDEO_PKT_LOSS_LOW 9
+#define TDAV_SESSION_VIDEO_PKT_LOSS_MEDIUM 22
+#define TDAV_SESSION_VIDEO_PKT_LOSS_HIGH 63
+
+#if !defined(TDAV_SESSION_VIDEO_PKT_LOSS_NO_REPORT_BEFORE_INCREASING_BW)
+# define TDAV_SESSION_VIDEO_PKT_LOSS_NO_REPORT_BEFORE_INCREASING_BW 5000 // millis
+#endif
+
+// The maximum number of packet losses allowed
+#define TDAV_SESSION_VIDEO_PKT_LOSS_MAX_COUNT_TO_REQUEST_FIR 50
+
+#if !defined (TDAV_GOOG_REMB_FULL_SUPPORT)
+# define TDAV_GOOG_REMB_FULL_SUPPORT 0
+#endif
+
+// One-shot codec action values, passed by address through _tdav_session_video_codec_set()
+static const tmedia_codec_action_t __action_encode_idr = tmedia_codec_action_encode_idr;
+static const tmedia_codec_action_t __action_encode_bw_up = tmedia_codec_action_bw_up;
+static const tmedia_codec_action_t __action_encode_bw_down = tmedia_codec_action_bw_down;
+
+// FIXME: lock ?
+// Applies a one-shot codec parameter (__key/__value) to the active encoder.
+// If the producer outputs pre-encoded frames (its codec_id matches the session
+// encoder), the parameter is routed to the producer; otherwise to the codec.
+// NOTE(review): each expansion site caches its own "static" tmedia_param_t,
+// created once from the first __key/__value seen there — since __value is
+// passed by address this presumably stays valid for the static __action_*
+// constants, but verify for any non-static argument.
+#define _tdav_session_video_codec_set(__self, __key, __value) \
+{ \
+static tmedia_param_t* __param = tsk_null; \
+if(!__param){ \
+__param = tmedia_param_create(tmedia_pat_set, \
+tmedia_video, \
+tmedia_ppt_codec, \
+tmedia_pvt_int32, \
+__key, \
+(void*)&__value); \
+} \
+if((__self)->encoder.codec && __param){ \
+/*tsk_mutex_lock((__self)->encoder.h_mutex);*/ \
+if(TDAV_SESSION_AV(__self)->producer && TDAV_SESSION_AV(__self)->producer->encoder.codec_id == (__self)->encoder.codec->id) { /* Whether the producer outputs encoded frames */ \
+tmedia_producer_set(TDAV_SESSION_AV(__self)->producer, __param); \
+} \
+else { \
+tmedia_codec_set((tmedia_codec_t*)(__self)->encoder.codec, __param); \
+} \
+/*tsk_mutex_unlock((__self)->encoder.h_mutex);*/ \
+} \
+/* TSK_OBJECT_SAFE_FREE(param); */ \
+}
+
+// Honors a remote IDR request (RTCP FIR/PLI): tells the encoder to emit an IDR
+// frame unless the previous honored request is closer than
+// TDAV_SESSION_VIDEO_AVPF_FIR_HONOR_INTERVAL_MIN, then notifies the app callback.
+#define _tdav_session_video_remote_requested_idr(__self, __ssrc_media) { \
+uint64_t __now = tsk_time_now(); \
+tsk_bool_t too_close = tsk_false; \
+if((__now - (__self)->avpf.last_fir_time) > TDAV_SESSION_VIDEO_AVPF_FIR_HONOR_INTERVAL_MIN){ /* guard to avoid sending too many FIR */ \
+_tdav_session_video_codec_set((__self), "action", __action_encode_idr); \
+}else { too_close = tsk_true; TSK_DEBUG_INFO("***IDR request tooo close(%llu ms)...ignoring****", (__now - (__self)->avpf.last_fir_time)); } \
+if((__self)->cb_rtcpevent.func){ \
+(__self)->cb_rtcpevent.func((__self)->cb_rtcpevent.context, tmedia_rtcp_event_type_fir, (__ssrc_media)); \
+} \
+if (!too_close) { /* if too close don't update "last_fir_time" to "now" to be sure interval will increase */ \
+(__self)->avpf.last_fir_time = __now; \
+} \
+}
+// Requests an IDR from the remote sender: through RTCP (FIR) when AVPF or the
+// "fir" feedback is negotiated, otherwise through the SIP-INFO rfc5168 callback.
+#define _tdav_session_video_local_request_idr(_session, _reason, _ssrc) \
+{ \
+tdav_session_av_t* _base = (tdav_session_av_t*)_session; \
+if ((_base)->avpf_mode_neg || (_base)->is_fb_fir_neg) { \
+/*return*/ trtp_manager_signal_frame_corrupted((_base)->rtp_manager, _ssrc); \
+} \
+else if ((_session)->rfc5168_cb.fun) { \
+/*return*/ (_session)->rfc5168_cb.fun((_session)->rfc5168_cb.usrdata, (_session), (_reason), tmedia_session_rfc5168_cmd_picture_fast_update); \
+} \
+}
+// Bandwidth adaptation shortcuts forwarded to the encoder via codec_set().
+#define _tdav_session_video_bw_up(__self) _tdav_session_video_codec_set(__self, "action", __action_encode_bw_up)
+#define _tdav_session_video_bw_down(__self) _tdav_session_video_codec_set(__self, "action", __action_encode_bw_down)
+#define _tdav_session_video_bw_kbps(__self, __bw_kbps) _tdav_session_video_codec_set(__self, "bw_kbps", __bw_kbps)
+
+
+// Re-arms the packet-loss probation counters after a bandwidth up/down decision.
+#define _tdav_session_video_reset_loss_prob(__self) \
+{ \
+(__self)->encoder.pkt_loss_level = tdav_session_video_pkt_loss_level_low; \
+(__self)->encoder.pkt_loss_prob_bad = TDAV_SESSION_VIDEO_PKT_LOSS_PROB_BAD; \
+(__self)->encoder.pkt_loss_prob_good = TDAV_SESSION_VIDEO_PKT_LOSS_PROB_GOOD; \
+}
+
+// Forward declarations (implemented further down in this file)
+static int _tdav_session_video_set_defaults(tdav_session_video_t* self);
+static int _tdav_session_video_jb_cb(const tdav_video_jb_cb_data_xt* data);
+static int _tdav_session_video_open_decoder(tdav_session_video_t* self, uint8_t payload_type);
+static int _tdav_session_video_decode(tdav_session_video_t* self, const trtp_rtp_packet_t* packet);
+static int _tdav_session_video_set_callbacks(tmedia_session_t* self);
+
+// Codec callback (From codec to the network)
+// or Producer callback to sendRaw() data "as is"
+// Wraps one encoded chunk in an RTP packet and sends it; on the last chunk of a
+// frame it advances the 90kHz timestamp. When AVPF is negotiated the (encrypted)
+// payload is also queued for RTCP-NACK retransmission, and a ULPFEC packet is
+// emitted when a FEC codec is active.
+static int tdav_session_video_raw_cb(const tmedia_video_encode_result_xt* result)
+{
+ tdav_session_av_t* base = (tdav_session_av_t*)result->usr_data;
+ tdav_session_video_t* video = (tdav_session_video_t*)result->usr_data;
+ trtp_rtp_header_t* rtp_header = (trtp_rtp_header_t*)result->proto_hdr;
+ trtp_rtp_packet_t* packet = tsk_null;
+ int ret = 0;
+ tsk_size_t s;
+
+ if(base->rtp_manager && base->rtp_manager->is_started){
+ if(rtp_header){
+ // uses negotiated SSRC (SDP)
+ rtp_header->ssrc = base->rtp_manager->rtp.ssrc.local;
+ // uses negotiated payload type: (re)build the local/remote/negotiated PT map
+ // whenever any of the three entries is stale or never initialized
+ if(base->pt_map.local != base->rtp_manager->rtp.payload_type || base->pt_map.remote != rtp_header->payload_type || base->pt_map.neg == -1){
+ if(rtp_header->codec_id == tmedia_codec_id_none){
+ TSK_DEBUG_WARN("Internal codec id is equal to none");
+ }
+ else{
+ const tsk_list_item_t* item;
+ tsk_bool_t found = tsk_false;
+ tsk_list_lock(TMEDIA_SESSION(base)->neg_codecs);
+ tsk_list_foreach(item, TMEDIA_SESSION(base)->neg_codecs){
+ if((item->data) && ((const tmedia_codec_t*)item->data)->id == rtp_header->codec_id){
+ base->pt_map.local = base->rtp_manager->rtp.payload_type;
+ base->pt_map.remote = rtp_header->payload_type;
+ base->pt_map.neg = atoi(((const tmedia_codec_t*)item->data)->neg_format);
+ found = tsk_true;
+ break;
+ }
+ }
+ tsk_list_unlock(TMEDIA_SESSION(base)->neg_codecs);
+ if(found){
+ TSK_DEBUG_INFO("Codec PT mapping: local=%d, remote=%d, neg=%d", base->pt_map.local, base->pt_map.remote, base->pt_map.neg);
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to map codec PT: local=%d, remote=%d", base->rtp_manager->rtp.payload_type, rtp_header->payload_type);
+ }
+ }
+ }
+ rtp_header->payload_type = base->pt_map.neg;
+ }
+ packet = rtp_header
+ ? trtp_rtp_packet_create_2(rtp_header)
+ : trtp_rtp_packet_create(base->rtp_manager->rtp.ssrc.local, base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, base->rtp_manager->rtp.payload_type, result->last_chunck);
+
+ if(packet ){
+ tsk_size_t rtp_hdr_size;
+ if(result->last_chunck){
+#if 1
+#if 1
+ /* http://www.cs.columbia.edu/~hgs/rtp/faq.html#timestamp-computed
+ For video, time clock rate is fixed at 90 kHz. The timestamps generated depend on whether the application can determine the frame number or not.
+ If it can or it can be sure that it is transmitting every frame with a fixed frame rate, the timestamp is governed by the nominal frame rate. Thus, for a 30 f/s video, timestamps would increase by 3,000 for each frame, for a 25 f/s video by 3,600 for each frame.
+ If a frame is transmitted as several RTP packets, these packets would all bear the same timestamp.
+ If the frame number cannot be determined or if frames are sampled aperiodically, as is typically the case for software codecs, the timestamp has to be computed from the system clock (e.g., gettimeofday())
+ */
+
+ if(!video->encoder.last_frame_time){
+ // For the first frame it's not possible to compute the duration as there is no previous one.
+ // In this case, we trust the duration from the result (computed based on the codec fps and rate).
+ video->encoder.last_frame_time = tsk_time_now();
+ base->rtp_manager->rtp.timestamp += result->duration;
+ }
+ else{
+ uint64_t now = tsk_time_now();
+ uint32_t duration = (uint32_t)(now - video->encoder.last_frame_time);
+ base->rtp_manager->rtp.timestamp += (duration * 90/* 90KHz */);
+ video->encoder.last_frame_time = now;
+ }
+#else
+ base->rtp_manager->rtp.timestamp = (uint32_t)(tsk_gettimeofday_ms() * 90/* 90KHz */);
+#endif
+#else
+ base->rtp_manager->rtp.timestamp += result->duration;
+#endif
+
+ }
+
+ packet->payload.data_const = result->buffer.ptr;
+ packet->payload.size = result->buffer.size;
+ s = trtp_manager_send_rtp_packet(base->rtp_manager, packet, tsk_false); // encrypt and send data
+ ++base->rtp_manager->rtp.seq_num; // seq_num must be incremented here (before the bail) because already used by SRTP context
+ // NOTE(review): 's' is tsk_size_t (unsigned); if the send function can
+ // return a negative error code it would wrap to a huge value and pass
+ // this check — confirm trtp_manager_send_rtp_packet's error convention.
+ if(s < TRTP_RTP_HEADER_MIN_SIZE) {
+ // without audio session iOS "audio" background mode is useless and UDP sockets will be closed: e.g. GE's video-only sessions
+#if TDAV_UNDER_IPHONE
+ if (tnet_geterrno() == TNET_ERROR_BROKENPIPE) {
+ TSK_DEBUG_INFO("iOS UDP pipe is broken (restoration is progress): failed to send packet with seqnum=%u. %u expected but only %u sent", (unsigned)packet->header->seq_num, (unsigned)packet->payload.size, (unsigned)s);
+ }
+#endif /* TDAV_UNDER_IPHONE */
+ TSK_DEBUG_ERROR("Failed to send packet with seqnum=%u. %u expected but only %u sent", (unsigned)packet->header->seq_num, (unsigned)packet->payload.size, (unsigned)s);
+ // save data expected to be sent in order to honor RTCP-NACK requests
+ s = base->rtp_manager->rtp.serial_buffer.index;
+ }
+
+ rtp_hdr_size = TRTP_RTP_HEADER_MIN_SIZE + (packet->header->csrc_count << 2);
+ // Save packet for NACK-based retransmission
+ if (base->avpf_mode_neg && (s > TRTP_RTP_HEADER_MIN_SIZE)) {
+ trtp_rtp_packet_t* packet_avpf = tsk_object_ref(packet);
+ // when SRTP is used, "serial_buffer" contains the encoded buffer with both RTP header and payload
+ // Hack the RTP packet payload to point to the SRTP data instead of unencrypted ptr
+ packet_avpf->payload.size = (s - rtp_hdr_size);
+ packet_avpf->payload.data_const = tsk_null;
+ if(!(packet_avpf->payload.data = tsk_malloc(packet_avpf->payload.size))){// FIXME: to be optimized (reuse memory address)
+ TSK_DEBUG_ERROR("failed to allocate buffer");
+ goto bail;
+ }
+ memcpy(packet_avpf->payload.data, (((const uint8_t*)base->rtp_manager->rtp.serial_buffer.ptr) + rtp_hdr_size), packet_avpf->payload.size);
+ tsk_list_lock(video->avpf.packets);
+ if(video->avpf.count > video->avpf.max){
+ tsk_list_remove_first_item(video->avpf.packets);
+ }
+ else{
+ ++video->avpf.count;
+ }
+
+ // The packet must not be added 'ascending' but 'back' because the sequence number could wrap
+ // For example:
+ // - send(65533, 65534, 65535, 0, 1)
+ // - will be stored as (if added 'ascending'): 0, 1, 65533, 65534, 65535
+ // - this means there is no benefit (if added 'ascending') as we cannot make 'smart search' using seqnums
+ // tsk_list_push_ascending_data(video->avpf.packets, (void**)&packet_avpf); // filtered per seqnum
+ tsk_list_push_back_data(video->avpf.packets, (void**)&packet_avpf);
+ tsk_list_unlock(video->avpf.packets);
+ }
+
+ // Send FEC packet
+ // FIXME: protect only Intra and Params packets
+ if(base->ulpfec.codec && (s > TRTP_RTP_HEADER_MIN_SIZE)){
+ packet->payload.data_const = (((const uint8_t*)base->rtp_manager->rtp.serial_buffer.ptr) + rtp_hdr_size);
+ packet->payload.size = (s - rtp_hdr_size);
+ ret = tdav_codec_ulpfec_enc_protect((struct tdav_codec_ulpfec_s*)base->ulpfec.codec, packet);
+ if(result->last_chunck){
+ trtp_rtp_packet_t* packet_fec;
+ if((packet_fec = trtp_rtp_packet_create(base->rtp_manager->rtp.ssrc.local, base->ulpfec.seq_num++, base->ulpfec.timestamp, base->ulpfec.payload_type, tsk_true))){
+ // serialize the FEC payload packet
+ s = tdav_codec_ulpfec_enc_serialize((const struct tdav_codec_ulpfec_s*)base->ulpfec.codec, &video->encoder.buffer, &video->encoder.buffer_size);
+ if(s > 0){
+ packet_fec->payload.data_const = video->encoder.buffer;
+ packet_fec->payload.size = s;
+ s = trtp_manager_send_rtp_packet(base->rtp_manager, packet_fec, tsk_true/* already encrypted */);
+ }
+ TSK_OBJECT_SAFE_FREE(packet_fec);
+ }
+ base->ulpfec.timestamp += result->duration;
+ ret = tdav_codec_ulpfec_enc_reset((struct tdav_codec_ulpfec_s*)base->ulpfec.codec);
+ }
+ }
+#if 0
+ // Send RED Packet
+ if (ret == 0 && video->red.codec) {
+ // don't need to lock as the buffer is never used by other codecs
+ tsk_size_t red_pay_size = video->red.codec->plugin->encode(
+ video->red.codec,
+ buffer, size,
+ &video->encoder.buffer, &video->encoder.buffer_size
+ );
+ if(red_pay_size > 1){
+ packet->header->payload_type = video->red.payload_type;
+ ((uint8_t*)video->encoder.buffer)[0] = packet->header->payload_type;
+ packet->payload.data_const = video->encoder.buffer;
+ packet->payload.size = red_pay_size;
+ trtp_manager_send_rtp_2(base->rtp_manager, packet);
+ }
+ }
+#endif
+ }
+ else {
+ TSK_DEBUG_ERROR("Failed to create packet");
+ }
+ }
+ else{
+ //--TSK_DEBUG_WARN("Session not ready yet");
+ }
+
+bail:
+ TSK_OBJECT_SAFE_FREE(packet);
+ return ret;
+}
+
+// Codec Callback after decoding
+// Tracks stream health: a decoded IDR clears the "corrupted" flag (unless that
+// very frame was flagged corrupted), a decode error triggers a local FIR request.
+static int tdav_session_video_decode_cb(const tmedia_video_decode_result_xt* result)
+{
+ tdav_session_av_t* base = (tdav_session_av_t*)result->usr_data;
+ tdav_session_video_t* video = (tdav_session_video_t*)base;
+
+ switch(result->type){
+ case tmedia_video_decode_result_type_idr:
+ {
+ if(video->decoder.last_corrupted_timestamp != ((const trtp_rtp_header_t*)result->proto_hdr)->timestamp){
+ TSK_DEBUG_INFO("IDR frame decoded");
+ video->decoder.stream_corrupted = tsk_false;
+ }
+ else{
+ TSK_DEBUG_INFO("IDR frame decoded but corrupted :(");
+ }
+ break;
+ }
+ case tmedia_video_decode_result_type_error:
+ {
+ TSK_DEBUG_INFO("Decoding failed -> request Full Intra Refresh (FIR)");
+ _tdav_session_video_local_request_idr(TMEDIA_SESSION(video), "DECODED_FAILED", ((const trtp_rtp_header_t*)result->proto_hdr)->ssrc);
+ break;
+ }
+ default: break;
+ }
+ return 0;
+}
+
+// Producer callback (From the producer to the network) => encode data before send()
+// Converts the captured frame to the codec's expected chroma/size/orientation if
+// needed, encodes it under the encoder mutex, then hands the payload to the RTP
+// manager (in practice the codec pushes chunks through tdav_session_video_raw_cb).
+static int tdav_session_video_producer_enc_cb(const void* callback_data, const void* buffer, tsk_size_t size)
+{
+ tdav_session_video_t* video = (tdav_session_video_t*)callback_data;
+ tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+ tsk_size_t yuv420p_size = 0;
+ int ret = 0;
+
+ if(!base){
+ TSK_DEBUG_ERROR("Null session");
+ return 0;
+ }
+
+ // do nothing if session is held
+ // when the session is held the end user will get feedback he also has possibilities to put the consumer and producer on pause
+ if (TMEDIA_SESSION(base)->lo_held) {
+ return 0;
+ }
+
+ // do nothing if not started yet
+ if (!video->started) {
+ TSK_DEBUG_INFO("Video session not started yet");
+ return 0;
+ }
+
+ // get best negotiated codec if not already done
+ // the encoder codec could be null when session is renegotiated without re-starting (e.g. hold/resume)
+ if (!video->encoder.codec) {
+ const tmedia_codec_t* codec;
+ tsk_safeobj_lock(base);
+ if (!(codec = tdav_session_av_get_best_neg_codec(base))) {
+ TSK_DEBUG_ERROR("No codec matched");
+ tsk_safeobj_unlock(base);
+ return -2;
+ }
+ video->encoder.codec = tsk_object_ref(TSK_OBJECT(codec));
+ tsk_safeobj_unlock(base);
+ }
+
+ if (base->rtp_manager) {
+ //static int __rotation_counter = 0;
+ /* encode */
+ tsk_size_t out_size = 0;
+ tmedia_codec_t* codec_encoder = tsk_null;
+
+ if (!base->rtp_manager->is_started) {
+ TSK_DEBUG_ERROR("Not started");
+ goto bail;
+ }
+
+ // take a reference to the encoder to make sure it'll not be destroyed while we're using it
+ codec_encoder = tsk_object_ref(video->encoder.codec);
+ if (!codec_encoder) {
+ TSK_DEBUG_ERROR("The encoder is null");
+ goto bail;
+ }
+
+#define PRODUCER_OUTPUT_FIXSIZE (base->producer->video.chroma != tmedia_chroma_mjpeg) // whether the output data has a fixed size/length
+#define PRODUCER_OUTPUT_RAW (base->producer->encoder.codec_id == tmedia_codec_id_none) // Otherwise, frames from the producer are already encoded
+#define PRODUCER_SIZE_CHANGED ((video->conv.producerWidth && video->conv.producerWidth != base->producer->video.width) || (video->conv.producerHeight && video->conv.producerHeight != base->producer->video.height) \
+|| (video->conv.xProducerSize && (video->conv.xProducerSize != size && PRODUCER_OUTPUT_FIXSIZE)))
+#define ENCODED_NEED_FLIP (TMEDIA_CODEC_VIDEO(codec_encoder)->out.flip)
+#define ENCODED_NEED_RESIZE (base->producer->video.width != TMEDIA_CODEC_VIDEO(codec_encoder)->out.width || base->producer->video.height != TMEDIA_CODEC_VIDEO(codec_encoder)->out.height)
+#define PRODUCED_FRAME_NEED_ROTATION (base->producer->video.rotation != 0)
+#define PRODUCED_FRAME_NEED_MIRROR (base->producer->video.mirror != tsk_false)
+#define PRODUCED_FRAME_NEED_CHROMA_CONVERSION (base->producer->video.chroma != TMEDIA_CODEC_VIDEO(codec_encoder)->out.chroma)
+ // Video codecs only accept YUV420P buffers ==> do conversion if needed or producer doesn't have the right size
+ if (PRODUCER_OUTPUT_RAW && (PRODUCED_FRAME_NEED_CHROMA_CONVERSION || PRODUCER_SIZE_CHANGED || ENCODED_NEED_FLIP || ENCODED_NEED_RESIZE ||PRODUCED_FRAME_NEED_ROTATION || PRODUCED_FRAME_NEED_MIRROR)) {
+ // Create video converter if not already done or producer size have changed
+ if(!video->conv.toYUV420 || PRODUCER_SIZE_CHANGED){
+ TSK_OBJECT_SAFE_FREE(video->conv.toYUV420);
+ video->conv.producerWidth = base->producer->video.width;
+ video->conv.producerHeight = base->producer->video.height;
+ video->conv.xProducerSize = size;
+
+ TSK_DEBUG_INFO("producer size = (%d, %d)", (int)base->producer->video.width, (int)base->producer->video.height);
+ if (!(video->conv.toYUV420 = tmedia_converter_video_create(base->producer->video.width, base->producer->video.height, base->producer->video.chroma, TMEDIA_CODEC_VIDEO(codec_encoder)->out.width, TMEDIA_CODEC_VIDEO(codec_encoder)->out.height,
+ TMEDIA_CODEC_VIDEO(codec_encoder)->out.chroma))){
+ TSK_DEBUG_ERROR("Failed to create video converter");
+ ret = -5;
+ goto bail;
+ }
+ // restore/set rotation scaling info because producer size could change
+ tmedia_converter_video_set_scale_rotated_frames(video->conv.toYUV420, video->encoder.scale_rotated_frames);
+ }
+ }
+
+ if(video->conv.toYUV420){
+ video->encoder.scale_rotated_frames = video->conv.toYUV420->scale_rotated_frames;
+ // check if rotation have changed and alert the codec
+ // we avoid scaling the frame after rotation because it's CPU intensive and keeping the image ratio is difficult
+ // it's up to the encoder to swap (w,h) and to track the rotation value
+ if(video->encoder.rotation != base->producer->video.rotation){
+ tmedia_param_t* param = tmedia_param_create(tmedia_pat_set,
+ tmedia_video,
+ tmedia_ppt_codec,
+ tmedia_pvt_int32,
+ "rotation",
+ (void*)&base->producer->video.rotation);
+ if (!param) {
+ TSK_DEBUG_ERROR("Failed to create a media parameter");
+ // NOTE(review): returning here skips the TSK_OBJECT_SAFE_FREE(codec_encoder)
+ // below, leaking the reference taken by tsk_object_ref — consider
+ // "ret = -1; goto bail;" instead. Verify against tsk_object refcounting.
+ return -1;
+ }
+ video->encoder.rotation = base->producer->video.rotation; // update rotation to avoid calling the function several times
+ ret = tmedia_codec_set(codec_encoder, param);
+ TSK_OBJECT_SAFE_FREE(param);
+ // (ret != 0) -> not supported by the codec -> to be done by the converter
+ video->encoder.scale_rotated_frames = (ret != 0);
+ }
+
+ // update one-shot parameters
+ tmedia_converter_video_set(video->conv.toYUV420, base->producer->video.rotation, TMEDIA_CODEC_VIDEO(codec_encoder)->out.flip, base->producer->video.mirror, video->encoder.scale_rotated_frames);
+
+ yuv420p_size = tmedia_converter_video_process(video->conv.toYUV420, buffer, size, &video->encoder.conv_buffer, &video->encoder.conv_buffer_size);
+ if (!yuv420p_size || !video->encoder.conv_buffer) {
+ TSK_DEBUG_ERROR("Failed to convert XXX buffer to YUV42P");
+ ret = -6;
+ goto bail;
+ }
+ }
+
+ // Encode data
+ tsk_mutex_lock(video->encoder.h_mutex);
+ if (video->started && codec_encoder->opened) { // stop() function locks the encoder mutex before changing "started"
+ if (video->encoder.conv_buffer && yuv420p_size) {
+ /* producer doesn't support yuv42p */
+ out_size = codec_encoder->plugin->encode(codec_encoder, video->encoder.conv_buffer, yuv420p_size, &video->encoder.buffer, &video->encoder.buffer_size);
+ }
+ else {
+ /* producer supports yuv42p */
+ out_size = codec_encoder->plugin->encode(codec_encoder, buffer, size, &video->encoder.buffer, &video->encoder.buffer_size);
+ }
+ }
+ tsk_mutex_unlock(video->encoder.h_mutex);
+
+ if (out_size) {
+ /* Never called, see tdav_session_video_raw_cb() */
+ trtp_manager_send_rtp(base->rtp_manager, video->encoder.buffer, out_size, 6006, tsk_true, tsk_true);
+ }
+ bail:
+ TSK_OBJECT_SAFE_FREE(codec_encoder);
+ }
+ else {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+// RTP callback (Network -> Decoder -> Consumer)
+// Dispatches an incoming RTP packet by payload type: RED is unwrapped by the
+// RED codec, ULPFEC is currently dropped, everything else goes to the jitter
+// buffer (when enabled) or straight to the decoder.
+static int tdav_session_video_rtp_cb(const void* callback_data, const trtp_rtp_packet_t* packet)
+{
+ tdav_session_video_t* video = (tdav_session_video_t*)callback_data;
+ tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+
+ if(!video || !packet || !packet->header){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(packet->header->payload_type == base->red.payload_type){
+ static void* __red_buffer_ptr = tsk_null; // Never used
+ static tsk_size_t __red_buffer_size = 0; // Never used
+ if(!base->red.codec){
+ TSK_DEBUG_ERROR("No RED codec could be found");
+ return -2;
+ }
+ // Decode RED data (the RED codec forwards embedded packets internally)
+ base->red.codec->plugin->decode(
+ base->red.codec,
+ (packet->payload.data ? packet->payload.data : packet->payload.data_const), packet->payload.size,
+ &__red_buffer_ptr, &__red_buffer_size,
+ packet->header
+ );
+ return 0;
+ }
+ else if(packet->header->payload_type == base->ulpfec.payload_type){
+ if(!base->ulpfec.codec){
+ TSK_DEBUG_ERROR("No ULPFEC codec could be found");
+ return -2;
+ }
+ // FIXME: do something
+ return 0;
+ }
+ else{
+ return video->jb
+ ? tdav_video_jb_put(video->jb, (trtp_rtp_packet_t*)packet)
+ : _tdav_session_video_decode(video, packet);
+ }
+}
+
+// RTCP callback (Network -> This)
+// Processes incoming RTCP packets: RR/SR report blocks drive the packet-loss
+// based bandwidth adaptation; PSFB FIR/PLI trigger an IDR; PSFB AFB-REMB feeds
+// the (optional) congestion control; RTPFB NACK triggers retransmission from
+// the AVPF packet history.
+static int tdav_session_video_rtcp_cb(const void* callback_data, const trtp_rtcp_packet_t* packet)
+{
+ int ret = 0;
+ const trtp_rtcp_report_psfb_t* psfb;
+ const trtp_rtcp_report_rtpfb_t* rtpfb;
+ const trtp_rtcp_rblocks_L_t* blocks = tsk_null;
+
+ tdav_session_video_t* video = (tdav_session_video_t*)callback_data;
+ tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+ tsk_size_t i;
+
+ // 1) Report blocks (from RR or SR): adapt encoder bandwidth to the loss fraction
+ if((blocks = (packet->header->type == trtp_rtcp_packet_type_rr) ? ((const trtp_rtcp_report_rr_t*)packet)->blocks :
+ (packet->header->type == trtp_rtcp_packet_type_sr ? ((const trtp_rtcp_report_sr_t*)packet)->blocks : tsk_null))){
+ const tsk_list_item_t* item;
+ const trtp_rtcp_rblock_t* block;
+ tsk_list_foreach(item, blocks){
+ if(!(block = item->data)) continue;
+ if(base->rtp_manager->rtp.ssrc.local == block->ssrc){
+ tdav_session_video_pkt_loss_level_t pkt_loss_level = tdav_session_video_pkt_loss_level_low;
+ TSK_DEBUG_INFO("RTCP pkt loss fraction=%d", block->fraction);
+ if(block->fraction > TDAV_SESSION_VIDEO_PKT_LOSS_HIGH) pkt_loss_level = tdav_session_video_pkt_loss_level_high;
+ else if(block->fraction > TDAV_SESSION_VIDEO_PKT_LOSS_MEDIUM) pkt_loss_level = tdav_session_video_pkt_loss_level_medium;
+ if (pkt_loss_level == tdav_session_video_pkt_loss_level_high || (pkt_loss_level > video->encoder.pkt_loss_level)){ // high or low -> medium
+ video->encoder.pkt_loss_level = pkt_loss_level;
+ if(video->encoder.pkt_loss_prob_bad-- <= 0){
+ int32_t new_pkt_loss_fact = TSK_CLAMP(TDAV_SESSION_VIDEO_PKT_LOSS_FACT_MIN, (video->encoder.pkt_loss_fact + 1), TDAV_SESSION_VIDEO_PKT_LOSS_FACT_MAX);
+ if (video->encoder.pkt_loss_fact != new_pkt_loss_fact) {
+ TSK_DEBUG_INFO("Downgrade bandwidth %d->%d", video->encoder.pkt_loss_fact, new_pkt_loss_fact);
+ video->encoder.pkt_loss_fact = new_pkt_loss_fact;
+ _tdav_session_video_bw_down(video);
+ }
+ _tdav_session_video_reset_loss_prob(video);
+ }
+ }
+ else{
+ if (video->encoder.pkt_loss_prob_good-- <= 0) {
+ int32_t new_pkt_loss_fact = TSK_CLAMP(TDAV_SESSION_VIDEO_PKT_LOSS_FACT_MIN, (video->encoder.pkt_loss_fact - 1), TDAV_SESSION_VIDEO_PKT_LOSS_FACT_MAX);
+ if (video->encoder.pkt_loss_fact != new_pkt_loss_fact) {
+ TSK_DEBUG_INFO("Upgrade bandwidth %d->%d", video->encoder.pkt_loss_fact, new_pkt_loss_fact);
+ video->encoder.pkt_loss_fact = new_pkt_loss_fact;
+ _tdav_session_video_bw_up(video);
+ }
+ _tdav_session_video_reset_loss_prob(video);
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ // 2) Payload-specific feedback (PSFB): FIR, PLI, AFB-REMB
+ i = 0;
+ while((psfb = (const trtp_rtcp_report_psfb_t*)trtp_rtcp_packet_get_at(packet, trtp_rtcp_packet_type_psfb, i++))){
+ switch(psfb->fci_type){
+ case trtp_rtcp_psfb_fci_type_fir:
+ {
+ TSK_DEBUG_INFO("Receiving RTCP-FIR (%u)", ((const trtp_rtcp_report_fb_t*)psfb)->ssrc_media);
+ _tdav_session_video_remote_requested_idr(video, ((const trtp_rtcp_report_fb_t*)psfb)->ssrc_media);
+ break;
+ }
+ case trtp_rtcp_psfb_fci_type_pli:
+ {
+ uint64_t now;
+ TSK_DEBUG_INFO("Receiving RTCP-PLI (%u)", ((const trtp_rtcp_report_fb_t*)psfb)->ssrc_media);
+ now = tsk_time_now();
+ // more than one PLI in 500ms ?
+ // "if" removed because PLI really means codec prediction chain is broken
+ /*if((now - video->avpf.last_pli_time) < 500)*/{
+ _tdav_session_video_remote_requested_idr(video, ((const trtp_rtcp_report_fb_t*)psfb)->ssrc_media);
+ }
+ video->avpf.last_pli_time = now;
+ break;
+ }
+ case trtp_rtcp_psfb_fci_type_afb:
+ {
+ if (psfb->afb.type == trtp_rtcp_psfb_afb_type_remb) {
+ uint32_t bandwidth_up_reported_kpbs = ((psfb->afb.remb.mantissa << psfb->afb.remb.exp) / 1024);
+ TSK_DEBUG_INFO("Receiving RTCP-AFB-REMB (%u), exp=%u, mantissa=%u, bandwidth = %ukbps, congestion_ctrl_enabled=%s", ((const trtp_rtcp_report_fb_t*)psfb)->ssrc_media, psfb->afb.remb.exp, psfb->afb.remb.mantissa, bandwidth_up_reported_kpbs, base->congestion_ctrl_enabled ? "yes" : "no");
+#if TDAV_GOOG_REMB_FULL_SUPPORT
+ if (base->congestion_ctrl_enabled) {
+ uint32_t remb_upload_kbps = 0;
+ tsk_bool_t remb_ok = tsk_false;
+ uint64_t bytes_count_now;
+ uint64_t bytes_count_out;
+ static uint64_t* bytes_count_in_ptr_null = tsk_null;
+
+ if ((ret = trtp_manager_get_bytes_count(base->rtp_manager, bytes_count_in_ptr_null, &bytes_count_out)) == 0) {
+ uint64_t duration;
+ bytes_count_now = tsk_time_now();
+ duration = (bytes_count_now - base->bytes_out.count_last_time);
+ remb_ok = (base->bytes_out.count_last_time != 0 && duration > 0);
+ if (remb_ok) {
+ remb_upload_kbps = (int32_t)((((bytes_count_out - base->bytes_out.count) * 8 * 1000) / 1024) / duration);
+ TSK_DEBUG_INFO("remb_upload_kbps=%u, bandwidth_up_reported_kpbs=%u", remb_upload_kbps, bandwidth_up_reported_kpbs);
+ }
+ base->bytes_out.count_last_time = bytes_count_now;
+ base->bytes_out.count = bytes_count_out;
+ }
+ if (remb_ok) {
+ // NOTE(review): "(diff / remb_upload_kbps) * 100" is integer division,
+ // so pkt_loss_percent is 0 unless diff >= remb_upload_kbps; likely
+ // intended "(diff * 100) / remb_upload_kbps" — verify before relying
+ // on the ">5%" branch (code is under TDAV_GOOG_REMB_FULL_SUPPORT=0).
+ int32_t pkt_loss_percent = bandwidth_up_reported_kpbs >= remb_upload_kbps ? 0 : ((remb_upload_kbps - bandwidth_up_reported_kpbs) / remb_upload_kbps) * 100;
+ TSK_DEBUG_INFO("GOO-REMB: pkt_loss_percent=%d", pkt_loss_percent);
+ if (pkt_loss_percent > 5) {
+ // more than 5% pkt loss
+ TSK_DEBUG_WARN("pkt_loss_percent(%u) > 5%%, using lower bw(%d)", pkt_loss_percent, bandwidth_up_reported_kpbs);
+ _tdav_session_video_bw_kbps(video, bandwidth_up_reported_kpbs);
+ }
+ else if (pkt_loss_percent == 0) {
+#if 0
+ // no pkt loss --> increase bw
+ int32_t target_bw_max_upload_kbps = base->bandwidth_max_upload_kbps; // user-defined (guard), INT_MAX if not defined
+ if (video->encoder.codec) {
+ target_bw_max_upload_kbps = TSK_MIN(
+ tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(video->encoder.codec)->out.width, TMEDIA_CODEC_VIDEO(video->encoder.codec)->out.height, TMEDIA_CODEC_VIDEO(video->encoder.codec)->out.fps),
+ target_bw_max_upload_kbps);
+ }
+ if (target_bw_max_upload_kbps > remb_upload_kbps + ((remb_upload_kbps / 100) * 20)) {
+ // target (best) bw is 20% less than what we're sending --> increase by 5%
+ uint32_t new_upload_kbps = remb_upload_kbps + ((remb_upload_kbps / 100) * 5);
+ TSK_DEBUG_INFO("current upload bw is too low, increasing from %u to %u", remb_upload_kbps, new_upload_kbps);
+ _tdav_session_video_bw_kbps(video, new_upload_kbps);
+ }
+#endif /* 0 */
+ }
+ }
+
+ }
+#else
+ // for now we just don't respect the requested bandwidth
+#endif /* TDAV_GOOG_REMB_FULL_SUPPORT */
+ }
+ break;
+ }
+ default: break;
+ }
+ }
+ // 3) Transport-layer feedback (RTPFB): NACK-driven retransmission
+ i = 0;
+ while((rtpfb = (const trtp_rtcp_report_rtpfb_t*)trtp_rtcp_packet_get_at(packet, trtp_rtcp_packet_type_rtpfb, i++))){
+ switch(rtpfb->fci_type){
+ default: break;
+ case trtp_rtcp_rtpfb_fci_type_nack:
+ {
+ if(rtpfb->nack.blp && rtpfb->nack.pid){
+ tsk_size_t i;
+ int32_t j;
+ uint16_t pid, blp;
+ const tsk_list_item_t* item;
+ const trtp_rtp_packet_t* pkt_rtp;
+ for(i = 0; i < rtpfb->nack.count; ++i){
+ static const int32_t __Pow2[16] = { 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000 };
+ int32_t blp_count;
+ blp = rtpfb->nack.blp[i];
+ blp_count = blp ? 16 : 0;
+
+ // j == -1 handles the PID itself; j >= 0 walks the 16 BLP bits (PID+1..PID+16)
+ for(j = -1/*Packet ID (PID)*/; j < blp_count; ++j){
+ if(j == -1 || (blp & __Pow2[j])){
+ pid = (rtpfb->nack.pid[i] + (j + 1));
+ tsk_list_lock(video->avpf.packets);
+ tsk_list_foreach(item, video->avpf.packets){
+ if(!(pkt_rtp = item->data)){
+ continue;
+ }
+
+ // Very Important: the seq_nums are not consecutive because of wrapping.
+ // For example, '65533, 65534, 65535, 0, 1' is a valid sequence which means we have to check all packets (probably need something smarter)
+ if(pkt_rtp->header->seq_num == pid){
+ TSK_DEBUG_INFO("NACK Found, pid=%d, blp=%u", pid, blp);
+ trtp_manager_send_rtp_packet(base->rtp_manager, pkt_rtp, tsk_true);
+ break;
+ }
+ if(item == video->avpf.packets->tail){
+ // should never be called unless the tail is too small
+ int32_t old_max = (int32_t)video->avpf.max;
+ int32_t len_drop = (pkt_rtp->header->seq_num - pid);
+ video->avpf.max = TSK_CLAMP((int32_t)tmedia_defaults_get_avpf_tail_min(), (old_max + len_drop), (int32_t)tmedia_defaults_get_avpf_tail_max());
+ TSK_DEBUG_INFO("**NACK requesting dropped frames. List=[%d-%d], requested=%d, List.Max=%d, List.Count=%d. RTT is probably too high.",
+ ((const trtp_rtp_packet_t*)TSK_LIST_FIRST_DATA(video->avpf.packets))->header->seq_num,
+ ((const trtp_rtp_packet_t*)TSK_LIST_LAST_DATA(video->avpf.packets))->header->seq_num,
+ pid,
+ (int)video->avpf.max,
+ (int)video->avpf.count);
+ // FIR not really requested but needed
+ /*_tdav_session_video_remote_requested_idr(video, ((const trtp_rtcp_report_fb_t*)rtpfb)->ssrc_media);
+ tsk_list_clear_items(video->avpf.packets);
+ video->avpf.count = 0;*/
+ } // if(last_item)
+ }// foreach(pkt)
+ tsk_list_unlock(video->avpf.packets);
+ }// if(BLP is set)
+ }// foreach(BIT in BLP)
+ }// foreach(nack)
+ }// if(nack-blp and nack-pid are set)
+ break;
+ }// case
+ }// switch
+ }// while(rtcp-pkt)
+
+ return ret;
+}
+
+static int _tdav_session_video_set_defaults(tdav_session_video_t* self)
+{
+ if (!self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ self->jb_enabled = tmedia_defaults_get_videojb_enabled();
+ self->zero_artifacts = tmedia_defaults_get_video_zeroartifacts_enabled();
+ self->avpf.max = tmedia_defaults_get_avpf_tail_min();
+ self->encoder.pkt_loss_level = tdav_session_video_pkt_loss_level_low;
+ self->encoder.pkt_loss_prob_bad = 0; // honor first report
+ self->encoder.pkt_loss_prob_good = TDAV_SESSION_VIDEO_PKT_LOSS_PROB_GOOD;
+ self->encoder.last_frame_time = 0;
+
+ // reset rotation info (MUST for reINVITE when mobile device in portrait[90 degrees])
+ self->encoder.rotation = 0;
+
+ TSK_DEBUG_INFO("Video 'zero-artifacts' option = %s", self->zero_artifacts ? "yes" : "no");
+
+ return 0;
+}
+
// From jitter buffer to codec: callback invoked by the video jitter buffer to
// deliver reordered RTP packets and to report frame-loss / fps-change events.
static int _tdav_session_video_jb_cb(const tdav_video_jb_cb_data_xt* data)
{
    // The three pointers below alias the same user-data object at the three
    // levels of the (session -> av -> video) inheritance chain.
    tdav_session_video_t* video = (tdav_session_video_t*)data->usr_data;
    tdav_session_av_t* base = (tdav_session_av_t*)data->usr_data;
    tmedia_session_t* session = (tmedia_session_t*)data->usr_data;

    switch(data->type){
        default: break;
        case tdav_video_jb_cb_data_type_rtp:
        {
            // Packet ready for decoding: hand it straight to the decoder.
            return _tdav_session_video_decode(video, data->rtp.pkt);
        }
        case tdav_video_jb_cb_data_type_tmfr:
        {
            // Too Many Frames Removed: request a full intra refresh (IDR).
            base->time_last_frame_loss_report = tsk_time_now();
            _tdav_session_video_local_request_idr(session, "TMFR", data->ssrc);
        }
        // NOTE(review): no 'break' above — the 'tmfr' case falls through into
        // 'fl', which re-stamps the report time and may trigger another
        // IDR request or NACKs. Confirm this fallthrough is intentional.
        case tdav_video_jb_cb_data_type_fl:
        {
            // Frame loss detected by the jitter buffer.
            base->time_last_frame_loss_report = tsk_time_now();
            if(data->fl.count > TDAV_SESSION_VIDEO_PKT_LOSS_MAX_COUNT_TO_REQUEST_FIR){
                // Too many packets lost: retransmission would be wasteful, ask for an IDR instead.
                _tdav_session_video_local_request_idr(session, "TMFR", data->ssrc);
            }
            else {
                if (base->avpf_mode_neg || base->is_fb_nack_neg) { // AVPF?
                    // Send RTCP-NACK, in chunks of at most 16 sequence numbers
                    tsk_size_t i, j, k;
                    uint16_t seq_nums[16];
                    for(i = 0; i < data->fl.count; i+=16){
                        for(j = 0, k = i; j < 16 && k < data->fl.count; ++j, ++k){
                            seq_nums[j] = (uint16_t)(data->fl.seq_num + i + j);
                            TSK_DEBUG_INFO("Request re-send(%u)", seq_nums[j]);
                        }
                        // 'j' is the number of entries actually filled in this chunk
                        trtp_manager_signal_pkt_loss(base->rtp_manager, data->ssrc, seq_nums, j);
                    }
                }
            }

            break;
        }
        case tdav_video_jb_cb_data_type_fps_changed:
        {
            // Incoming frame rate changed: remember it so the decode path can
            // recompute the REMB bandwidth estimation (congestion control only).
            if(base->congestion_ctrl_enabled){
                video->fps_changed = tsk_true;
                if(video->decoder.codec){
                    TSK_DEBUG_INFO("Congestion control enabled and fps updated from %u to %u", data->fps.old, data->fps.new);
                    TMEDIA_CODEC_VIDEO(video->decoder.codec)->in.fps = data->fps.new;
                }
            }
            break;
        }
    }

    return 0;
}
+
+int _tdav_session_video_open_decoder(tdav_session_video_t* self, uint8_t payload_type)
+{
+ int ret = 0;
+
+ if ((self->decoder.codec_payload_type != payload_type) || !self->decoder.codec) {
+ tsk_istr_t format;
+ TSK_OBJECT_SAFE_FREE(self->decoder.codec);
+ tsk_itoa(payload_type, &format);
+ if (!(self->decoder.codec = tmedia_codec_find_by_format(TMEDIA_SESSION(self)->neg_codecs, format)) || !self->decoder.codec->plugin || !self->decoder.codec->plugin->decode) {
+ TSK_DEBUG_ERROR("%s is not a valid payload for this session", format);
+ ret = -2;
+ goto bail;
+ }
+ self->decoder.codec_payload_type = payload_type;
+ self->decoder.codec_decoded_frames_count = 0; // because we switched the codecs
+ }
+ // Open codec if not already done
+ if (!TMEDIA_CODEC(self->decoder.codec)->opened){
+ if ((ret = tmedia_codec_open(self->decoder.codec))) {
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", self->decoder.codec->plugin->desc);
+ goto bail;
+ }
+ self->decoder.codec_decoded_frames_count = 0; // because first time to use
+ }
+
+bail:
+ return ret;
+}
+
// Decodes one incoming RTP packet (delivered by the jitter buffer or directly
// from the RTP callback) and pushes the resulting frame to the consumer.
// Also handles: decode bypassing, stream-corruption tracking ('zero-artifacts'),
// chroma/size conversion and RTCP-REMB based congestion control.
// Returns 0 on success, a negative value otherwise.
static int _tdav_session_video_decode(tdav_session_video_t* self, const trtp_rtp_packet_t* packet)
{
    tdav_session_av_t* base = (tdav_session_av_t*)self;
    // null RTP header handed to the consumer along with decoded (raw) frames
    static const trtp_rtp_header_t* __rtp_header = tsk_null;
    // codecs for which pausing the rendering on packet loss ('zero-artifacts') is supported
    static const tmedia_codec_id_t __codecs_supporting_zero_artifacts = (tmedia_codec_id_vp8 | tmedia_codec_id_h264_bp | tmedia_codec_id_h264_mp | tmedia_codec_id_h263);
    int ret = 0;

    if(!self || !packet || !packet->header){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    tsk_safeobj_lock(base);

    if (self->started && base->consumer && base->consumer->is_started) {
        tsk_size_t out_size, _size;
        const void* _buffer;
        tdav_session_video_t* video = (tdav_session_video_t*)base;

        // Find the codec to use to decode the RTP payload
        if(!self->decoder.codec || self->decoder.codec_payload_type != packet->header->payload_type){
            if((ret = _tdav_session_video_open_decoder(self, packet->header->payload_type))){
                goto bail;
            }
        }

        // check whether bypassing is enabled (e.g. rtcweb breaker ON and media coder OFF)
        if(TMEDIA_SESSION(self)->bypass_decoding){
            // set codec id for internal use (useful to find codec with dynamic payload type)
            TRTP_RTP_HEADER(packet->header)->codec_id = self->decoder.codec->id;
            // consume the frame
            ret = tmedia_consumer_consume(base->consumer, (packet->payload.data ? packet->payload.data : packet->payload.data_const), packet->payload.size, packet->header);
            goto bail;
        }

        // Check if stream is corrupted or not (non-consecutive seq numbers => packet loss)
        if(video->decoder.last_seqnum && (video->decoder.last_seqnum + 1) != packet->header->seq_num){
            TSK_DEBUG_INFO("/!\\Video stream corrupted because of packet loss [%u - %u]. Pause rendering if 'zero_artifacts' (supported = %s, enabled = %s).",
                video->decoder.last_seqnum,
                packet->header->seq_num,
                (__codecs_supporting_zero_artifacts & self->decoder.codec->id) ? "yes" : "no",
                self->zero_artifacts ? "yes" : "no"
            );
            if(!video->decoder.stream_corrupted){ // do not do the job twice
                if(self->zero_artifacts && (__codecs_supporting_zero_artifacts & self->decoder.codec->id)){
                    // request IDR now and every time after 'TDAV_SESSION_VIDEO_AVPF_FIR_REQUEST_INTERVAL_MIN' ellapsed
                    // 'zero-artifacts' not enabled then, we'll request IDR when decoding fails
                    TSK_DEBUG_INFO("Sending FIR to request IDR...");
                    _tdav_session_video_local_request_idr(TMEDIA_SESSION(video), "ZERO_ART_CORRUPTED", packet->header->ssrc);
                }
                // value will be updated when we decode an IDR frame
                video->decoder.stream_corrupted = tsk_true;
                video->decoder.stream_corrupted_since = tsk_time_now();
            }
            // will be used as guard to avoid rendering corrupted IDR
            video->decoder.last_corrupted_timestamp = packet->header->timestamp;
        }
        video->decoder.last_seqnum = packet->header->seq_num; // update last seqnum

        // Decode data
        out_size = self->decoder.codec->plugin->decode(
            self->decoder.codec,
            (packet->payload.data ? packet->payload.data : packet->payload.data_const), packet->payload.size,
            &self->decoder.buffer, &self->decoder.buffer_size,
            packet->header
            );
        // check: nothing decoded yet (e.g. waiting for more fragments) is not an error
        if(!out_size || !self->decoder.buffer){
            goto bail;
        }
        // check if stream is corrupted
        // the above decoding process is required in order to reset stream corruption status when IDR frame is decoded
        if(self->zero_artifacts && self->decoder.stream_corrupted && (__codecs_supporting_zero_artifacts & self->decoder.codec->id)){
            TSK_DEBUG_INFO("Do not render video frame because stream is corrupted and 'zero-artifacts' is enabled. Last seqnum=%u", video->decoder.last_seqnum);
            if(video->decoder.stream_corrupted && (tsk_time_now() - video->decoder.stream_corrupted_since) > TDAV_SESSION_VIDEO_AVPF_FIR_REQUEST_INTERVAL_MIN){
                TSK_DEBUG_INFO("Sending FIR to request IDR because frame corrupted since %llu...", video->decoder.stream_corrupted_since);
                _tdav_session_video_local_request_idr(TMEDIA_SESSION(video), "ZERO_ART_CORRUPTED", packet->header->ssrc);
            }
            goto bail;
        }

        // important: do not override the display size (used by the end-user) unless requested
        if(base->consumer->video.display.auto_resize){
            base->consumer->video.display.width = TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.width;//decoded width
            base->consumer->video.display.height = TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.height;//decoded height
        }

        // Convert decoded data to the consumer chroma and size
#define CONSUMER_NEED_DECODER (base->consumer->decoder.codec_id == tmedia_codec_id_none) // Otherwise, the consumer requires encoded frames
#define CONSUMER_IN_N_DISPLAY_MISMATCH (!base->consumer->video.display.auto_resize && (base->consumer->video.in.width != base->consumer->video.display.width || base->consumer->video.in.height != base->consumer->video.display.height))
#define CONSUMER_DISPLAY_N_CODEC_MISMATCH (base->consumer->video.display.width != TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.width || base->consumer->video.display.height != TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.height)
#define CONSUMER_DISPLAY_N_CONVERTER_MISMATCH ( (self->conv.fromYUV420 && self->conv.fromYUV420->dstWidth != base->consumer->video.display.width) || (self->conv.fromYUV420 && self->conv.fromYUV420->dstHeight != base->consumer->video.display.height) )
#define CONSUMER_CHROMA_MISMATCH (base->consumer->video.display.chroma != TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.chroma)
#define DECODED_NEED_FLIP (TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.flip)

        if(CONSUMER_NEED_DECODER && (CONSUMER_CHROMA_MISMATCH || CONSUMER_DISPLAY_N_CODEC_MISMATCH || CONSUMER_IN_N_DISPLAY_MISMATCH || CONSUMER_DISPLAY_N_CONVERTER_MISMATCH || DECODED_NEED_FLIP)){

            // Create video converter if not already done or if the output size changed
            if(!self->conv.fromYUV420 || CONSUMER_DISPLAY_N_CONVERTER_MISMATCH){
                TSK_OBJECT_SAFE_FREE(self->conv.fromYUV420);

                // create converter
                if(!(self->conv.fromYUV420 = tmedia_converter_video_create(TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.width, TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.height, TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.chroma, base->consumer->video.display.width, base->consumer->video.display.height,
                    base->consumer->video.display.chroma))){
                    TSK_DEBUG_ERROR("Failed to create video converter");
                    ret = -3;
                    goto bail;
                }
            }
        }

        // update consumer size using the codec decoded values
        // must be done here to avoid fooling "CONSUMER_IN_N_DISPLAY_MISMATCH" unless "auto_resize" option is enabled
        base->consumer->video.in.width = TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.width;//decoded width
        base->consumer->video.in.height = TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.height;//decoded height

        if(self->conv.fromYUV420){
            // update one-shot parameters
            tmedia_converter_video_set_flip(self->conv.fromYUV420, TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.flip);
            // convert data to the consumer's chroma
            out_size = tmedia_converter_video_process(self->conv.fromYUV420, self->decoder.buffer, self->decoder.buffer_size, &self->decoder.conv_buffer, &self->decoder.conv_buffer_size);
            if(!out_size || !self->decoder.conv_buffer){
                TSK_DEBUG_ERROR("Failed to convert YUV420 buffer to consumer's chroma");
                ret = -4;
                goto bail;
            }

            _buffer = self->decoder.conv_buffer;
            _size = out_size;
        }
        else{
            // no conversion needed: feed the consumer with the decoder's output
            _buffer = self->decoder.buffer;
            _size = out_size;
        }

        // congestion control
        // send RTCP-REMB if:
        //  - congestion control is enabled and
        //  - fps changed or
        //  - first frame or
        //  - approximately every 1 seconds (1 = 1 * 1)
        if (base->congestion_ctrl_enabled && base->rtp_manager && (self->fps_changed || self->decoder.codec_decoded_frames_count == 0 || ((self->decoder.codec_decoded_frames_count % (TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.fps * 1)) == 0))){
            int32_t bandwidth_max_upload_kbps = base->bandwidth_max_upload_kbps;
            int32_t bandwidth_max_download_kbps = base->bandwidth_max_download_kbps; // user-defined (guard), INT_MAX if not defined
            // bandwidth already computed in start() but the decoded video size was not correct and based on the SDP negotiation
            bandwidth_max_download_kbps = TSK_MIN(
                tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.width, TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.height, TMEDIA_CODEC_VIDEO(self->decoder.codec)->in.fps),
                bandwidth_max_download_kbps);
            if (self->encoder.codec) {
                bandwidth_max_upload_kbps = TSK_MIN(
                    tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self->encoder.codec)->out.width, TMEDIA_CODEC_VIDEO(self->encoder.codec)->out.height, TMEDIA_CODEC_VIDEO(self->encoder.codec)->out.fps),
                    bandwidth_max_upload_kbps);
            }

#if TDAV_GOOG_REMB_FULL_SUPPORT
            {
                // Estimate the real incoming bitrate from the byte counters and
                // use it (clamped by the user-defined guard) as the REMB value.
                tsk_bool_t remb_ok = tsk_false;
                int32_t remb_download_kbps = 0;
                uint64_t now = 0;
                uint64_t bytes_count_in;
                static uint64_t* bytes_count_out_ptr_null = tsk_null;
                if ((ret = trtp_manager_get_bytes_count(base->rtp_manager, &bytes_count_in, bytes_count_out_ptr_null)) == 0) {
                    uint64_t duration;
                    now = tsk_time_now();
                    duration = (now - base->bytes_in.count_last_time);
                    remb_ok = (base->bytes_in.count_last_time != 0 && duration > 0);
                    if (remb_ok) {
                        remb_download_kbps = (int32_t)((((bytes_count_in - base->bytes_in.count) * 8 * 1000) / 1024) / duration);
                        TSK_DEBUG_INFO("remb_download_kbps=%d", remb_download_kbps);
                    }
                    base->bytes_in.count_last_time = now;
                    base->bytes_in.count = bytes_count_in;
                }
                if (remb_ok) {
                    // if "remb_ok" is true then "now" has a valid value
                    if ((now - base->time_last_frame_loss_report) > TDAV_SESSION_VIDEO_PKT_LOSS_NO_REPORT_BEFORE_INCREASING_BW) {
                        TSK_DEBUG_INFO("No pakt loss since %d millis ... adding 5%% to the estimated max bandwidth", TDAV_SESSION_VIDEO_PKT_LOSS_NO_REPORT_BEFORE_INCREASING_BW);
                        remb_download_kbps += (remb_download_kbps / 100) * 5; // add 5% to the estimated bandwidth
                    }
                    // CLAMP is used to make sure we will not report more than what the user defined as max values even if the estimated values are higher
                    bandwidth_max_download_kbps = TSK_CLAMP(0, remb_download_kbps, bandwidth_max_download_kbps);
                }
            }
#endif /* TDAV_GOOG_REMB_FULL_SUPPORT */

            self->fps_changed = tsk_false; // reset
            TSK_DEBUG_INFO("video with congestion control enabled: max_bw_up(unused)=%d kpbs, max_bw_down=%d kpbs", bandwidth_max_upload_kbps, bandwidth_max_download_kbps);
            ret = trtp_manager_set_app_bandwidth_max(base->rtp_manager, bandwidth_max_upload_kbps/* unused */, bandwidth_max_download_kbps);
        }
        // inc() frame count and consume decoded video
        ++self->decoder.codec_decoded_frames_count;
        ret = tmedia_consumer_consume(base->consumer, _buffer, _size, __rtp_header);
    }
    else if (!base->consumer || !base->consumer->is_started) {
        TSK_DEBUG_INFO("Consumer not started (is_null=%d)", !base->consumer);
    }

bail:
    tsk_safeobj_unlock(base);

    return ret;
}
+
+/* ============ Plugin interface ================= */
+
+static int tdav_session_video_set(tmedia_session_t* self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_video_t* video;
+ tdav_session_av_t* base;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // try with the base class to see if this option is supported or not
+ if (tdav_session_av_set(TDAV_SESSION_AV(self), param) == tsk_true) {
+ return 0;
+ }
+
+ video = (tdav_session_video_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ if(param->plugin_type == tmedia_ppt_codec){
+ tsk_mutex_lock(video->encoder.h_mutex);
+ ret = tmedia_codec_set((tmedia_codec_t*)video->encoder.codec, param);
+ tsk_mutex_unlock(video->encoder.h_mutex);
+ }
+ else if(param->plugin_type == tmedia_ppt_consumer){
+ if(!base->consumer){
+ TSK_DEBUG_ERROR("No consumer associated to this session");
+ return -1;
+ }
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "flip")){
+ tsk_list_item_t* item;
+ tsk_bool_t flip = (tsk_bool_t)TSK_TO_INT32((uint8_t*)param->value);
+ tmedia_codecs_L_t *codecs = tsk_object_ref(self->codecs);
+ tsk_list_foreach(item, codecs){
+ TMEDIA_CODEC_VIDEO(item->data)->in.flip = flip;
+ }
+ tsk_object_unref(codecs);
+ }
+ }
+ ret = tmedia_consumer_set(base->consumer, param);
+ }
+ else if(param->plugin_type == tmedia_ppt_producer){
+ if(!base->producer){
+ TSK_DEBUG_ERROR("No producer associated to this session");
+ return -1;
+ }
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "flip")){
+ tsk_list_item_t* item;
+ tsk_bool_t flip = (tsk_bool_t)TSK_TO_INT32((uint8_t*)param->value);
+ tmedia_codecs_L_t *codecs = tsk_object_ref(self->codecs);
+ tsk_list_foreach(item, codecs){
+ TMEDIA_CODEC_VIDEO(item->data)->out.flip = flip;
+ }
+ tsk_object_unref(codecs);
+ }
+ }
+ ret = tmedia_producer_set(base->producer, param);
+ }
+ else{
+ if (param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "bandwidth-level")){
+ tsk_list_item_t* item;
+ self->bl = (tmedia_bandwidth_level_t)TSK_TO_INT32((uint8_t*)param->value);
+ self->codecs = tsk_object_ref(self->codecs);
+ tsk_list_foreach(item, self->codecs){
+ ((tmedia_codec_t*)item->data)->bl = self->bl;
+ }
+ tsk_object_unref(self->codecs);
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int tdav_session_video_get(tmedia_session_t* self, tmedia_param_t* param)
+{
+ if (!self || !param) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // try with the base class to see if this option is supported or not
+ if (tdav_session_av_get(TDAV_SESSION_AV(self), param) == tsk_true) {
+ return 0;
+ }
+ else {
+ if (param->plugin_type == tmedia_ppt_session) {
+ if (param->value_type == tmedia_pvt_pobject) {
+ if (tsk_striequals(param->key, "codec-encoder")) {
+ *((tsk_object_t**)param->value) = tsk_object_ref(TDAV_SESSION_VIDEO(self)->encoder.codec); // up to the caller to release the object
+ return 0;
+ }
+ }
+ }
+ }
+
+ TSK_DEBUG_WARN("This session doesn't support get(%s)", param->key);
+ return -2;
+}
+
+static int tdav_session_video_prepare(tmedia_session_t* self)
+{
+ tdav_session_av_t* base = (tdav_session_av_t*)(self);
+ tdav_session_video_t* video = (tdav_session_video_t*)self;
+ int ret;
+
+ if((ret = tdav_session_av_prepare(base))){
+ TSK_DEBUG_ERROR("tdav_session_av_prepare(video) failed");
+ return ret;
+ }
+
+ if(base->rtp_manager){
+ ret = trtp_manager_set_rtp_callback(base->rtp_manager, tdav_session_video_rtp_cb, base);
+ ret = trtp_manager_set_rtcp_callback(base->rtp_manager, tdav_session_video_rtcp_cb, base);
+ }
+
+ return ret;
+}
+
+static int tdav_session_video_start(tmedia_session_t* self)
+{
+ int ret;
+ tdav_session_video_t* video;
+ const tmedia_codec_t* codec;
+ tdav_session_av_t* base;
+
+ if (!self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ video = (tdav_session_video_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ if (video->started) {
+ TSK_DEBUG_INFO("Video session already started");
+ return 0;
+ }
+
+ // ENCODER codec
+ if (!(codec = tdav_session_av_get_best_neg_codec(base))) {
+ TSK_DEBUG_ERROR("No codec matched");
+ return -2;
+ }
+ tsk_mutex_lock(video->encoder.h_mutex);
+ TSK_OBJECT_SAFE_FREE(video->encoder.codec);
+ video->encoder.codec = tsk_object_ref((tsk_object_t*)codec);
+ // initialize the encoder using user-defined values
+ if ((ret = tdav_session_av_init_encoder(base, video->encoder.codec))) {
+ TSK_DEBUG_ERROR("Failed to initialize the encoder [%s] codec", video->encoder.codec->plugin->desc);
+ return ret;
+ }
+ if (!TMEDIA_CODEC(video->encoder.codec)->opened) {
+ if((ret = tmedia_codec_open(video->encoder.codec))){
+ tsk_mutex_unlock(video->encoder.h_mutex);
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", video->encoder.codec->plugin->desc);
+ return ret;
+ }
+ }
+ tsk_mutex_unlock(video->encoder.h_mutex);
+
+ if (video->jb) {
+ if ((ret = tdav_video_jb_start(video->jb))) {
+ TSK_DEBUG_ERROR("Failed to start jitter buffer");
+ return ret;
+ }
+ }
+
+ if ((ret = tdav_session_av_start(base, video->encoder.codec))) {
+ TSK_DEBUG_ERROR("tdav_session_av_start(video) failed");
+ return ret;
+ }
+ video->started = tsk_true;
+ return ret;
+}
+
+static int tdav_session_video_stop(tmedia_session_t* self)
+{
+ int ret;
+ tdav_session_video_t* video;
+ tdav_session_av_t* base;
+
+ TSK_DEBUG_INFO("tdav_session_video_stop");
+
+ video = (tdav_session_video_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ // must be here to make sure no other thread will lock the encoder once we have done it
+ tsk_mutex_lock(video->encoder.h_mutex); // encoder thread will check "started" var right after the lock is passed
+ video->started = tsk_false;
+ tsk_mutex_unlock(video->encoder.h_mutex);
+ // at this step we're sure that encode() will no longer be called which means we can safely close the codec
+
+ if (video->jb) {
+ ret = tdav_video_jb_stop(video->jb);
+ }
+ // clear AVPF packets and wait for the dtor() before destroying the list
+ tsk_list_lock(video->avpf.packets);
+ tsk_list_clear_items(video->avpf.packets);
+ tsk_list_unlock(video->avpf.packets);
+
+ // tdav_session_av_stop() : stop producer and consumer, close encoder and all other codecs, stop rtpManager...
+ // no need to lock the encoder to avoid using a closed codec (see above)
+ // no need to lock the decoder as the rtpManager will be stop before closing the codec
+ // lock-free stop() may avoid deadlock issue (cannot reproduce it myself) on Hovis
+ ret = tdav_session_av_stop(base);
+ tsk_mutex_lock(video->encoder.h_mutex);
+ TSK_OBJECT_SAFE_FREE(video->encoder.codec);
+ tsk_mutex_unlock(video->encoder.h_mutex);
+ TSK_OBJECT_SAFE_FREE(video->decoder.codec);
+
+ // reset default values to make sure next start will be called with right defaults
+ // do not call this function in start to avoid overriding values defined between prepare() and start()
+ _tdav_session_video_set_defaults(video);
+
+ return ret;
+}
+
+static int tdav_session_video_pause(tmedia_session_t* self)
+{
+ return tdav_session_av_pause(TDAV_SESSION_AV(self));
+}
+
+static const tsdp_header_M_t* tdav_session_video_get_lo(tmedia_session_t* self)
+{
+ tsk_bool_t updated = tsk_false;
+ const tsdp_header_M_t* ret;
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+ if(!(ret = tdav_session_av_get_lo(base, &updated))){
+ TSK_DEBUG_ERROR("tdav_session_av_get_lo(video) failed");
+ return tsk_null;
+ }
+
+ if(updated){
+ // set callbacks
+ _tdav_session_video_set_callbacks(self);
+ }
+
+ return ret;
+}
+
+static int tdav_session_video_set_ro(tmedia_session_t* self, const tsdp_header_M_t* m)
+{
+ int ret;
+ tsk_bool_t updated = tsk_false;
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+ if((ret = tdav_session_av_set_ro(base, m, &updated))){
+ TSK_DEBUG_ERROR("tdav_session_av_set_ro(video) failed");
+ return ret;
+ }
+
+ // Check if "RTCP-NACK" and "RTC-FIR" are supported
+ {
+ const tmedia_codec_t* codec;
+ base->is_fb_fir_neg = base->is_fb_nack_neg = base->is_fb_googremb_neg = tsk_false;
+ if ((codec = tdav_session_av_get_best_neg_codec(base))) {
+ // a=rtcp-fb:* ccm fir
+ // a=rtcp-fb:* nack
+ // a=rtcp-fb:* goog-remb
+ char attr_fir[256], attr_nack[256], attr_goog_remb[256];
+ int index = 0;
+ const tsdp_header_A_t* A;
+
+ sprintf(attr_fir, "%s ccm fir", codec->neg_format);
+ sprintf(attr_nack, "%s nack", codec->neg_format);
+ sprintf(attr_goog_remb, "%s goog-remb", codec->neg_format);
+
+ while ((A = tsdp_header_M_findA_at(m, "rtcp-fb", index++))) {
+ if (!base->is_fb_fir_neg) {
+ base->is_fb_fir_neg = (tsk_striequals(A->value, "* ccm fir") || tsk_striequals(A->value, attr_fir));
+ }
+ if (!base->is_fb_nack_neg) {
+ base->is_fb_nack_neg = (tsk_striequals(A->value, "* nack") || tsk_striequals(A->value, attr_nack));
+ }
+ if (!base->is_fb_googremb_neg) {
+ base->is_fb_googremb_neg = (tsk_striequals(A->value, "* goog-remb") || tsk_striequals(A->value, attr_goog_remb));
+ }
+ }
+ }
+ }
+
+ if (updated) {
+ // set callbacks
+ ret = _tdav_session_video_set_callbacks(self);
+ }
+
+ return ret;
+}
+
+// Plugin interface: callback from the end-user to set rtcp event callback (should be called only when encoding is bypassed)
+static int tdav_session_video_rtcp_set_onevent_cbfn(tmedia_session_t* self, const void* context, tmedia_session_rtcp_onevent_cb_f func)
+{
+ tdav_session_video_t* video;
+ tdav_session_av_t* base;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ video = (tdav_session_video_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ tsk_safeobj_lock(base);
+ video->cb_rtcpevent.context = context;
+ video->cb_rtcpevent.func = func;
+ tsk_safeobj_unlock(base);
+
+ return 0;
+}
+
+// Plugin interface: called by the end-user to send rtcp event (should be called only when encoding is bypassed)
+static int tdav_session_video_rtcp_send_event(tmedia_session_t* self, tmedia_rtcp_event_type_t event_type, uint32_t ssrc_media)
+{
+ tdav_session_video_t* video;
+ tdav_session_av_t* base;
+ int ret = -1;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ video = (tdav_session_video_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ tsk_safeobj_lock(base);
+ switch(event_type){
+ case tmedia_rtcp_event_type_fir:
+ {
+ if(base->rtp_manager && base->rtp_manager->is_started){
+ if(!ssrc_media){ // when called from C++/Java/C# bindings "ssrc_media" is a default parameter with value=0
+ ssrc_media = base->rtp_manager->rtp.ssrc.remote;
+ }
+ TSK_DEBUG_INFO("Send FIR(%u)", ssrc_media);
+ _tdav_session_video_local_request_idr(self, "CALLBACK", ssrc_media);
+ }
+ break;
+ }
+ }
+ tsk_safeobj_unlock(base);
+
+ return ret;
+}
+
+// Plugin interface: called by the end-user to recv rtcp event
+static int tdav_session_video_rtcp_recv_event(tmedia_session_t* self, tmedia_rtcp_event_type_t event_type, uint32_t ssrc_media)
+{
+ tdav_session_video_t* video;
+ tdav_session_av_t* base;
+ int ret = -1;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ video = (tdav_session_video_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ tsk_safeobj_lock(base);
+ switch(event_type){
+ case tmedia_rtcp_event_type_fir:
+ {
+ _tdav_session_video_remote_requested_idr(video, ssrc_media);
+ ret = 0;
+ break;
+ }
+ }
+ tsk_safeobj_unlock(base);
+
+ return ret;
+}
+
+static int _tdav_session_video_set_callbacks(tmedia_session_t* self)
+{
+ if(self){
+ tsk_list_item_t* item;
+ tsk_list_foreach(item, TMEDIA_SESSION(self)->neg_codecs){
+ // set codec callbacks
+ tmedia_codec_video_set_enc_callback(TMEDIA_CODEC_VIDEO(item->data), tdav_session_video_raw_cb, self);
+ tmedia_codec_video_set_dec_callback(TMEDIA_CODEC_VIDEO(item->data), tdav_session_video_decode_cb, self);
+ // set RED callback: redundant data to decode and send to the consumer
+ if(TMEDIA_CODEC(item->data)->plugin == tdav_codec_red_plugin_def_t){
+ tdav_codec_red_set_callback((struct tdav_codec_red_s *)(item->data), tdav_session_video_rtp_cb, self);
+ }
+ }
+ }
+ return 0;
+}
+
+static int _tdav_session_video_init(tdav_session_video_t *p_self, tmedia_type_t e_media_type)
+{
+ int ret;
+ tdav_session_av_t *p_base = TDAV_SESSION_AV(p_self);
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* init() base */
+ if ((ret = tdav_session_av_init(p_base, e_media_type)) != 0) {
+ TSK_DEBUG_ERROR("tdav_session_av_init(video) failed");
+ return ret;
+ }
+
+ /* init() self */
+ _tdav_session_video_set_defaults(p_self);
+ if (!p_self->encoder.h_mutex && !(p_self->encoder.h_mutex = tsk_mutex_create())) {
+ TSK_DEBUG_ERROR("Failed to create encode mutex");
+ return -4;
+ }
+ if (!p_self->avpf.packets && !(p_self->avpf.packets = tsk_list_create())) {
+ TSK_DEBUG_ERROR("Failed to create list");
+ return -2;
+ }
+ if (p_self->jb_enabled) {
+ if (!p_self->jb && !(p_self->jb = tdav_video_jb_create())) {
+ TSK_DEBUG_ERROR("Failed to create jitter buffer");
+ return -3;
+ }
+ tdav_video_jb_set_callback(p_self->jb, _tdav_session_video_jb_cb, p_self);
+ }
+
+ if (p_base->producer) {
+ tmedia_producer_set_enc_callback(p_base->producer, tdav_session_video_producer_enc_cb, p_self);
+ tmedia_producer_set_raw_callback(p_base->producer, tdav_session_video_raw_cb, p_self);
+ }
+
+ return 0;
+}
+
+
+//=================================================================================================
+// Session Video Plugin object definition
+//
+/* constructor */
+static tsk_object_t* tdav_session_video_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_video_t *video = self;
+ if(video){
+ if (_tdav_session_video_init(video, tmedia_video)) {
+ return tsk_null;
+ }
+ }
+ return self;
+}
+/* destructor */
static tsk_object_t* tdav_session_video_dtor(tsk_object_t * self)
{
    tdav_session_video_t *video = self;
    TSK_DEBUG_INFO("*** tdav_session_video_t destroyed ***");
    if(video){
        // stop() first: halts the encoder/jitter-buffer threads so the
        // resources below can be released safely
        tdav_session_video_stop((tmedia_session_t*)video);
        // deinit self (rtp manager should be destroyed after the producer)
        TSK_OBJECT_SAFE_FREE(video->conv.toYUV420);
        TSK_OBJECT_SAFE_FREE(video->conv.fromYUV420);

        // raw buffers used by the encode/decode paths
        TSK_FREE(video->encoder.buffer);
        TSK_FREE(video->encoder.conv_buffer);
        TSK_FREE(video->decoder.buffer);
        TSK_FREE(video->decoder.conv_buffer);

        TSK_OBJECT_SAFE_FREE(video->encoder.codec);
        TSK_OBJECT_SAFE_FREE(video->decoder.codec);

        // AVPF retransmission queue (already cleared by stop())
        TSK_OBJECT_SAFE_FREE(video->avpf.packets);

        TSK_OBJECT_SAFE_FREE(video->jb);

        // destroy the mutex last: stop() above still locks it
        if(video->encoder.h_mutex){
            tsk_mutex_destroy(&video->encoder.h_mutex);
        }

        /* deinit() base */
        tdav_session_av_deinit(TDAV_SESSION_AV(video));

        TSK_DEBUG_INFO("*** Video session destroyed ***");
    }

    return self;
}
/* object definition (tsk_object virtual table) */
static const tsk_object_def_t tdav_session_video_def_s =
{
    sizeof(tdav_session_video_t), // object size
    tdav_session_video_ctor,      // constructor
    tdav_session_video_dtor,      // destructor
    tmedia_session_cmp,           // comparator shared by all media sessions
};
/* plugin definition: "video" media-session entry points */
static const tmedia_session_plugin_def_t tdav_session_video_plugin_def_s =
{
    &tdav_session_video_def_s,

    tmedia_video, // media type
    "video",      // media name as used in SDP "m=" lines

    tdav_session_video_set,
    tdav_session_video_get,
    tdav_session_video_prepare,
    tdav_session_video_start,
    tdav_session_video_pause,
    tdav_session_video_stop,

    /* Audio part (not applicable) */
    { tsk_null },

    tdav_session_video_get_lo,
    tdav_session_video_set_ro,

    /* T.140 (not applicable) */
    { tsk_null },

    /* Rtcp */
    {
        tdav_session_video_rtcp_set_onevent_cbfn,
        tdav_session_video_rtcp_send_event,
        tdav_session_video_rtcp_recv_event
    }
};
// exported plugin handle
const tmedia_session_plugin_def_t *tdav_session_video_plugin_def_t = &tdav_session_video_plugin_def_s;
+
+//=================================================================================================
+// Session BfcpVideo Plugin object definition
+//
+/* constructor */
+static tsk_object_t* tdav_session_bfcpvideo_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_video_t *video = self;
+ if(video){
+ if (_tdav_session_video_init(video, tmedia_bfcp_video)) {
+ return tsk_null;
+ }
+ }
+ return self;
+}
/* object definition (tsk_object virtual table) — shares the dtor with the plain video session */
static const tsk_object_def_t tdav_session_bfcpvideo_def_s =
{
    sizeof(tdav_session_video_t), // object size (same struct as plain video)
    tdav_session_bfcpvideo_ctor,  // constructor (bfcp-video media type)
    tdav_session_video_dtor,      // destructor (shared)
    tmedia_session_cmp,           // comparator shared by all media sessions
};
/* plugin definition: BFCP video session — reuses all "video" entry points */
static const tmedia_session_plugin_def_t tdav_session_bfcpvideo_plugin_def_s =
{
    &tdav_session_bfcpvideo_def_s,

    tmedia_bfcp_video, // media type
    "video",           // media name as used in SDP "m=" lines

    tdav_session_video_set,
    tdav_session_video_get,
    tdav_session_video_prepare,
    tdav_session_video_start,
    tdav_session_video_pause,
    tdav_session_video_stop,

    /* Audio part (not applicable) */
    { tsk_null },

    tdav_session_video_get_lo,
    tdav_session_video_set_ro,

    /* T.140 (not applicable) */
    { tsk_null },

    /* Rtcp */
    {
        tdav_session_video_rtcp_set_onevent_cbfn,
        tdav_session_video_rtcp_send_event,
        tdav_session_video_rtcp_recv_event
    }
};
// exported plugin handle
const tmedia_session_plugin_def_t *tdav_session_bfcpvideo_plugin_def_t = &tdav_session_bfcpvideo_plugin_def_s;
diff --git a/tinyDAV/src/video/v4linux/tdav_producer_video_v4l2.c b/tinyDAV/src/video/v4linux/tdav_producer_video_v4l2.c
new file mode 100644
index 0000000..ef3152a
--- /dev/null
+++ b/tinyDAV/src/video/v4linux/tdav_producer_video_v4l2.c
@@ -0,0 +1,1164 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/video/v4linux/tdav_producer_video_v4l2.h"
+
+#if HAVE_LINUX_VIDEODEV2_H
+
+#include<stdio.h>
+#include<stdlib.h>
+#include<string.h>
+#include<assert.h>
+
+#include<fcntl.h>
+#include<unistd.h>
+#include<errno.h>
+#include<sys/stat.h>
+#include<sys/types.h>
+#include<sys/mman.h>
+#include<sys/ioctl.h>
+
+#include<linux/videodev2.h>
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_timer.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#if !defined(V4L2_FAKE_UYVY)
+# define V4L2_FAKE_UYVY 0
+#endif /* V4L2_FAKE_UYVY */
+
+#define V4L2_CLEAR(x) memset(&(x), 0, sizeof(x))
+#define V4L2_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[V4L2 Producer] " FMT, ##__VA_ARGS__)
+#define V4L2_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[V4L2 Producer] " FMT, ##__VA_ARGS__)
+#define V4L2_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[V4L2 Producer] " FMT, ##__VA_ARGS__)
+#define V4L2_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[V4L2 Producer] " FMT, ##__VA_ARGS__)
+
+/* Capture i/o strategies; tried in the order given by io_method_prefs. */
+typedef enum v4l2_io_method_e {
+ V4L2_IO_METHOD_NONE = 0,
+ V4L2_IO_METHOD_READ, // plain read() on the device node
+ V4L2_IO_METHOD_MMAP, // driver-allocated buffers mapped into user space
+ V4L2_IO_METHOD_USERPTR, // user-allocated buffers handed to the driver
+}
+v4l2_io_method_t;
+
+/* One capture buffer: an mmap'ed region or a heap block, depending on the
+ * active i/o method. */
+typedef struct v4l2_buffer_s {
+ void *p_start;
+ size_t n_length;
+}
+v4l2_buffer_t;
+
+// By preference order
+static const v4l2_io_method_t io_method_prefs[] = {
+ V4L2_IO_METHOD_MMAP,
+ V4L2_IO_METHOD_USERPTR,
+ V4L2_IO_METHOD_READ,
+};
+// Pixel formats, by preference order
+static const unsigned int pix_format_prefs[] =
+{
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_YUYV,
+ V4L2_PIX_FMT_UYVY, // SINCITY
+ V4L2_PIX_FMT_RGB24,
+ V4L2_PIX_FMT_RGB32,
+ V4L2_PIX_FMT_MJPEG
+};
+
+/* V4L2 video producer context. */
+typedef struct tdav_producer_video_v4l2_s
+{
+ TMEDIA_DECLARE_PRODUCER;
+
+ tsk_bool_t b_muted; // set via the "mute" parameter (see _set)
+ tsk_bool_t b_started;
+ tsk_bool_t b_prepared;
+ tsk_bool_t b_paused;
+
+ int fd; // device descriptor; -1 when closed
+ v4l2_io_method_t io; // negotiated i/o method
+ struct v4l2_format fmt; // negotiated capture format
+ struct v4l2_capability cap;
+ struct v4l2_cropcap cropcap;
+ struct v4l2_crop crop;
+ unsigned int n_buffers; // number of entries in p_buffers
+ v4l2_buffer_t* p_buffers;
+
+ tsk_timer_manager_handle_t *p_timer_mgr;
+ tsk_timer_id_t id_timer_grab; // re-armed after every grab
+ uint64_t u_timout_grab; // grab period in milliseconds (1000/fps)
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_producer_video_v4l2_t;
+
+static int _v4l2_prepare(tdav_producer_video_v4l2_t* p_self);
+static int _v4l2_start(tdav_producer_video_v4l2_t* p_self);
+static int _v4l2_pause(tdav_producer_video_v4l2_t* p_self);
+static int _v4l2_stop(tdav_producer_video_v4l2_t* p_self);
+static int _v4l2_unprepare(tdav_producer_video_v4l2_t* p_self);
+static int _v4l2_xioctl(int fh, int request, void *arg);
+static int _v4l2_get_best_format(tdav_producer_video_v4l2_t* p_self, const char* device_name, struct v4l2_format* fmt_ret);
+static int _v4l2_init_read(tdav_producer_video_v4l2_t* p_self, unsigned int buffer_size);
+static int _v4l2_init_mmap(tdav_producer_video_v4l2_t* p_self, const char* device_name);
+static int _v4l2_init_userp(tdav_producer_video_v4l2_t* p_self, unsigned int buffer_size, const char* device_name);
+static int _v4l2_send_frame(tdav_producer_video_v4l2_t* p_self);
+
+static int _tdav_producer_video_v4l2_timer_cb(const void* arg, tsk_timer_id_t timer_id);
+static int _tdav_producer_video_v4l2_grab(tdav_producer_video_v4l2_t* p_self);
+
+/* ============ Media Producer Interface ================= */
+/* Generic parameter setter. Only the "mute" int32 parameter is honored;
+ * the window-handle parameters are recognized but not implemented. */
+static int _tdav_producer_video_v4l2_set(tmedia_producer_t *p_self, const tmedia_param_t* pc_param)
+{
+ int ret = 0;
+ tdav_producer_video_v4l2_t* p_v4l2 = (tdav_producer_video_v4l2_t*)p_self;
+
+ if (!p_v4l2 || !pc_param) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (pc_param->value_type == tmedia_pvt_pchar) {
+ if (tsk_striequals(pc_param->key, "local-hwnd") || tsk_striequals(pc_param->key, "preview-hwnd")) {
+ V4L2_DEBUG_ERROR("Not implemented yet");
+ }
+ else if (tsk_striequals(pc_param->key, "src-hwnd")) {
+ V4L2_DEBUG_ERROR("Not implemented yet");
+ }
+ }
+ else if (pc_param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(pc_param->key, "mute")) {
+ // NOTE(review): b_muted is stored but not read anywhere in this file -- confirm intended use
+ p_v4l2->b_muted = (TSK_TO_INT32((uint8_t*)pc_param->value) != 0);
+ }
+ }
+
+ return ret;
+}
+
+/* Negotiates the capture configuration against the codec's output settings:
+ * creates the grab timer manager, opens/configures the device via
+ * _v4l2_prepare(), then maps the negotiated V4L2 pixel format onto the
+ * tmedia chroma the "converter" will consume. Returns 0 on success. */
+static int _tdav_producer_video_v4l2_prepare(tmedia_producer_t* p_self, const tmedia_codec_t* pc_codec)
+{
+ tdav_producer_video_v4l2_t* p_v4l2 = (tdav_producer_video_v4l2_t*)p_self;
+ int ret = 0;
+
+ if (!p_v4l2 || !pc_codec) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_v4l2);
+
+ if (!p_v4l2->p_timer_mgr && !(p_v4l2->p_timer_mgr = tsk_timer_manager_create())) {
+ V4L2_DEBUG_ERROR("Failed to create timer manager");
+ ret = -2;
+ goto bail;
+ }
+
+ // Request the codec's output geometry from the device
+ TMEDIA_PRODUCER(p_v4l2)->video.fps = TMEDIA_CODEC_VIDEO(pc_codec)->out.fps;
+ TMEDIA_PRODUCER(p_v4l2)->video.width = TMEDIA_CODEC_VIDEO(pc_codec)->out.width;
+ TMEDIA_PRODUCER(p_v4l2)->video.height = TMEDIA_CODEC_VIDEO(pc_codec)->out.height;
+
+ // NOTE(review): assumes fps > 0 (division below) -- confirm upstream guarantees it
+ p_v4l2->u_timout_grab = (1000/TMEDIA_PRODUCER(p_v4l2)->video.fps);
+
+ // prepare()
+ if ((ret = _v4l2_prepare(p_v4l2))) {
+ goto bail;
+ }
+
+ // update() - up to the "converter" to perform chroma conversion and scaling
+ TMEDIA_PRODUCER(p_v4l2)->video.width = p_v4l2->fmt.fmt.pix.width;
+ TMEDIA_PRODUCER(p_v4l2)->video.height = p_v4l2->fmt.fmt.pix.height;
+#if V4L2_FAKE_UYVY
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_uyvy422;
+#else
+ // Map the V4L2 pixel format accepted by the driver onto a tmedia chroma
+ switch (p_v4l2->fmt.fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_yuv420p;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_nv12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_nv21;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_yuyv422;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_uyvy422; // SINCITY
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_rgb24;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_rgb32;
+ break;
+ case V4L2_PIX_FMT_MJPEG:
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_mjpeg;
+ break;
+ default:
+ V4L2_DEBUG_ERROR("Failed to match negotiated format: %d", p_v4l2->fmt.fmt.pix.pixelformat);
+ ret = -1;
+ goto bail;
+ }
+#endif /* V4L2_FAKE_UYVY */
+
+ V4L2_DEBUG_INFO("Negotiated caps: fps=%d, width=%d, height=%d, chroma=%d",
+ TMEDIA_PRODUCER(p_v4l2)->video.fps,
+ TMEDIA_PRODUCER(p_v4l2)->video.width,
+ TMEDIA_PRODUCER(p_v4l2)->video.height,
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma);
+ p_v4l2->b_prepared = (ret == 0) ? tsk_true : tsk_false;
+
+bail:
+ tsk_safeobj_unlock(p_v4l2);
+ return ret;
+}
+
+/* Starts the producer: launches the timer manager, starts V4L2 streaming and
+ * schedules the first frame-grab tick. On any failure everything started so
+ * far is rolled back. Must be called after a successful prepare(). */
+static int _tdav_producer_video_v4l2_start(tmedia_producer_t* p_self)
+{
+ tdav_producer_video_v4l2_t* p_v4l2 = (tdav_producer_video_v4l2_t*)p_self;
+ int ret = 0;
+
+ if (!p_v4l2) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_v4l2);
+
+ if (!p_v4l2->b_prepared) {
+ V4L2_DEBUG_INFO("Not prepared");
+ ret = -1;
+ goto bail;
+ }
+
+ // starting always clears any pause state
+ p_v4l2->b_paused = tsk_false;
+
+ if (p_v4l2->b_started) {
+ V4L2_DEBUG_INFO("Already started");
+ goto bail;
+ }
+
+ if ((ret = tsk_timer_manager_start(p_v4l2->p_timer_mgr))) {
+ goto bail;
+ }
+
+ // start()
+ if ((ret = _v4l2_start(p_v4l2))) {
+ goto bail;
+ }
+
+ p_v4l2->b_started = tsk_true;
+
+ // Schedule frame grabbing (the timer callback re-arms itself)
+ p_v4l2->id_timer_grab = tsk_timer_manager_schedule(p_v4l2->p_timer_mgr, p_v4l2->u_timout_grab, _tdav_producer_video_v4l2_timer_cb, p_v4l2);
+ if (!TSK_TIMER_ID_IS_VALID(p_v4l2->id_timer_grab)) {
+ V4L2_DEBUG_ERROR("Failed to schedule timer with timeout=%llu", p_v4l2->u_timout_grab);
+ ret = -2;
+ goto bail;
+ }
+
+bail:
+ if (ret) {
+ // roll back: stop streaming and the timer manager
+ _v4l2_stop(p_v4l2);
+ p_v4l2->b_started = tsk_false;
+ if (p_v4l2->p_timer_mgr) {
+ tsk_timer_manager_stop(p_v4l2->p_timer_mgr);
+ }
+ }
+ else {
+ V4L2_DEBUG_INFO("Started :)");
+ }
+ tsk_safeobj_unlock(p_v4l2);
+
+ return ret;
+}
+
+/* Pauses the producer: delegates to the device-level pause hook and marks
+ * the producer paused when it succeeds. */
+static int _tdav_producer_video_v4l2_pause(tmedia_producer_t* p_self)
+{
+ tdav_producer_video_v4l2_t* p_v4l2 = (tdav_producer_video_v4l2_t*)p_self;
+ int ret;
+
+ if (!p_v4l2) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_v4l2);
+ ret = _v4l2_pause(p_v4l2);
+ if (ret == 0) {
+ p_v4l2->b_paused = tsk_true;
+ }
+ tsk_safeobj_unlock(p_v4l2);
+
+ return ret;
+}
+
+/* Stops the producer: halts the grab timer, stops streaming and releases the
+ * device (unprepare). After this, start() requires a fresh prepare().
+ * Always returns 0 (stopping an already-stopped producer is a no-op). */
+static int _tdav_producer_video_v4l2_stop(tmedia_producer_t* p_self)
+{
+ tdav_producer_video_v4l2_t* p_v4l2 = (tdav_producer_video_v4l2_t*)p_self;
+
+ if (!p_v4l2) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_v4l2);
+
+ if (!p_v4l2->b_started) {
+ V4L2_DEBUG_INFO("Already stopped");
+ goto bail;
+ }
+
+ if (p_v4l2->p_timer_mgr) {
+ tsk_timer_manager_stop(p_v4l2->p_timer_mgr);
+ }
+
+ // next start will be called after prepare()
+ _v4l2_unprepare(p_v4l2); // stop() then unprepare()
+
+ p_v4l2->b_started = tsk_false;
+ p_v4l2->b_paused = tsk_false;
+ p_v4l2->b_prepared = tsk_false;
+
+bail:
+ tsk_safeobj_unlock(p_v4l2);
+ V4L2_DEBUG_INFO("Stopped");
+
+ return 0;
+}
+
+/* Opens the first usable capture device and fully configures it: queries
+ * capabilities, picks an i/o method, resets cropping, negotiates the best
+ * pixel format and allocates capture buffers. On any failure the device is
+ * unprepared again (fd closed, buffers released). Returns 0 on success. */
+static int _v4l2_prepare(tdav_producer_video_v4l2_t* p_self)
+{
+ const char* device_names[] =
+ {
+ tmedia_producer_get_friendly_name(TMEDIA_PRODUCER(p_self)->plugin->type),
+ "/dev/video0",
+ }; // FIXME: VIDIOC_C_ENUM_INPUT and choose best one
+ const char* device_name = tsk_null;
+ int i, err = -1;
+ struct stat st;
+ unsigned int min;
+
+ V4L2_DEBUG_INFO("--- PREPARE ---");
+
+ if (p_self->fd > 0) {
+ V4L2_DEBUG_WARN("Producer already prepared");
+ return 0;
+ }
+ /* Probe the candidates in order and keep the FIRST one that opens.
+ * BUGFIX: the loop previously kept going after a successful open, leaking
+ * the descriptor and overwriting it with the next candidate's. */
+ for (i = 0; i < sizeof(device_names)/sizeof(device_names[0]); ++i) {
+ if ((device_name = device_names[i])) {
+ V4L2_DEBUG_INFO("Preparing '%s'...", device_name);
+ if (stat(device_name, &st) == -1) {
+ V4L2_DEBUG_WARN("stat('%s'): %d, %s", device_name, errno, strerror(errno));
+ continue;
+ }
+ if (!S_ISCHR(st.st_mode)) {
+ V4L2_DEBUG_WARN("'%s' not a valid device", device_name);
+ continue;
+ }
+ if ((p_self->fd = open(device_name, O_RDWR /* required */ | O_NONBLOCK, 0)) == -1) {
+ V4L2_DEBUG_WARN("Failed to open '%s': %d, %s\n", device_name, errno, strerror(errno));
+ continue;
+ }
+ V4L2_DEBUG_INFO("'%s' successfully opened", device_name);
+ break; // use this device; do not probe (and leak) the remaining ones
+ }
+ }
+ if (p_self->fd == -1) {
+ V4L2_DEBUG_ERROR("No valid device found");
+ goto bail;
+ }
+
+ // Query capabilities and make sure this really is a capture device
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_QUERYCAP, &p_self->cap)) {
+ if (EINVAL == errno) {
+ V4L2_DEBUG_ERROR("%s is no V4L2 device", device_name);
+ goto bail;
+ } else {
+ V4L2_DEBUG_ERROR("xioctl(%s, VIDIOC_QUERYCAP) failed: %s error %d", device_name, strerror(errno), errno);
+ goto bail;
+ }
+ }
+
+ if (!(p_self->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
+ V4L2_DEBUG_ERROR("%s is no video capture device", device_name);
+ goto bail;
+ }
+
+ // Get best io method (first preference the device supports)
+ p_self->io = V4L2_IO_METHOD_NONE;
+ for (i = 0; i < sizeof(io_method_prefs)/sizeof(io_method_prefs[0]) && p_self->io == V4L2_IO_METHOD_NONE; ++i) {
+ V4L2_DEBUG_INFO("Trying with io method=%d", io_method_prefs[i]);
+ switch (io_method_prefs[i]) {
+ case V4L2_IO_METHOD_READ:
+ if (!(p_self->cap.capabilities & V4L2_CAP_READWRITE)) {
+ V4L2_DEBUG_WARN("%s does not support read i/o", device_name);
+ continue;
+ }
+ p_self->io = io_method_prefs[i];
+ break;
+
+ case V4L2_IO_METHOD_MMAP:
+ case V4L2_IO_METHOD_USERPTR:
+ if (!(p_self->cap.capabilities & V4L2_CAP_STREAMING)) {
+ V4L2_DEBUG_WARN("%s does not support streaming i/o", device_name);
+ continue;
+ }
+ p_self->io = io_method_prefs[i];
+ break;
+ }
+ }
+ if (p_self->io == V4L2_IO_METHOD_NONE) {
+ V4L2_DEBUG_ERROR("Failed to peek an i/o method for '%s' device", device_name);
+ goto bail;
+ }
+ V4L2_DEBUG_INFO("i/o method for '%s' device is %d", device_name, p_self->io);
+
+ /* Select video input, video standard and tune here. */
+
+ V4L2_CLEAR(p_self->cropcap);
+
+ p_self->cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ // Reset cropping to the driver default; cropping support is optional
+ if (0 == _v4l2_xioctl(p_self->fd, VIDIOC_CROPCAP, &p_self->cropcap)) {
+ p_self->crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ p_self->crop.c = p_self->cropcap.defrect; /* reset to default */
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_S_CROP, &p_self->crop)) {
+ switch (errno) {
+ case EINVAL:
+ default:
+ V4L2_DEBUG_INFO("'%s' device doesn't support cropping", device_name);
+ break;
+ }
+ }
+ else {
+ V4L2_DEBUG_INFO("'%s' device supports cropping with type = %d", device_name, p_self->crop.type);
+ }
+ } else {
+ V4L2_DEBUG_INFO("'%s' device doesn't support cropping", device_name);
+ }
+
+ /* Best format */
+ V4L2_CLEAR(p_self->fmt);
+ // get()
+ if (_v4l2_get_best_format(p_self, device_name, &p_self->fmt) != 0) {
+ V4L2_DEBUG_ERROR("Failed to peek best format for '%s' device", device_name);
+ goto bail;
+ }
+ // set()
+ if (_v4l2_xioctl(p_self->fd, VIDIOC_S_FMT, &p_self->fmt) == -1) {
+ goto bail;
+ }
+ V4L2_DEBUG_INFO("device '%s' best format: width:%d, height:%d, field:%d, pixelformat:%d",
+ device_name, p_self->fmt.fmt.pix.width, p_self->fmt.fmt.pix.height, p_self->fmt.fmt.pix.field, p_self->fmt.fmt.pix.pixelformat);
+
+ /* Buggy driver paranoia: never trust bytesperline/sizeimage reported
+ * smaller than the geometry implies. */
+#if 1
+ min = p_self->fmt.fmt.pix.width * 2;
+ if (p_self->fmt.fmt.pix.bytesperline < min) {
+ p_self->fmt.fmt.pix.bytesperline = min;
+ }
+ min = p_self->fmt.fmt.pix.bytesperline * p_self->fmt.fmt.pix.height;
+ if (p_self->fmt.fmt.pix.sizeimage < min) {
+ p_self->fmt.fmt.pix.sizeimage = min;
+ }
+#endif
+
+ // Allocate capture buffers for the chosen i/o method
+ switch (p_self->io) {
+ case V4L2_IO_METHOD_READ:
+ if (_v4l2_init_read(p_self, p_self->fmt.fmt.pix.sizeimage) != 0) {
+ goto bail;
+ }
+ break;
+
+ case V4L2_IO_METHOD_MMAP:
+ if (_v4l2_init_mmap(p_self, device_name) != 0) {
+ goto bail;
+ }
+ break;
+
+ case V4L2_IO_METHOD_USERPTR:
+ if (_v4l2_init_userp(p_self, p_self->fmt.fmt.pix.sizeimage, device_name) != 0) {
+ goto bail;
+ }
+ break;
+ }
+ V4L2_DEBUG_INFO("'%s' device initialized using i/o method=%d", device_name, p_self->io);
+
+ // all is OK
+ err = 0;
+
+bail:
+ if (err) {
+ _v4l2_unprepare(p_self);
+ }
+ else {
+ V4L2_DEBUG_INFO("Prepared :)");
+ }
+ return err;
+}
+
+
+/* Starts capturing: for the streaming i/o methods, queues every allocated
+ * buffer (VIDIOC_QBUF) and turns streaming on (VIDIOC_STREAMON). The read()
+ * method needs no setup. Returns 0 on success, -1 on ioctl failure. */
+static int _v4l2_start(tdav_producer_video_v4l2_t* p_self)
+{
+ unsigned int i;
+ enum v4l2_buf_type type;
+
+ V4L2_DEBUG_INFO("--- START ---");
+
+ if (p_self->b_started) {
+ V4L2_DEBUG_WARN("Already started");
+ return 0;
+ }
+
+ switch (p_self->io) {
+ case V4L2_IO_METHOD_READ:
+ /* Nothing to do. */
+ break;
+
+ case V4L2_IO_METHOD_MMAP:
+ // queue all mmap'ed buffers, then start streaming
+ for (i = 0; i < p_self->n_buffers; ++i) {
+ struct v4l2_buffer buf;
+
+ V4L2_CLEAR(buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_QBUF, &buf)) {
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_QBUF) failed: %s error %d", strerror(errno), errno);
+ return -1;
+ }
+ }
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_STREAMON, &type)) {
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_STREAMON) failed: %s error %d", strerror(errno), errno);
+ return -1;
+ }
+ break;
+
+ case V4L2_IO_METHOD_USERPTR:
+ // hand our heap buffers to the driver, then start streaming
+ for (i = 0; i < p_self->n_buffers; ++i) {
+ struct v4l2_buffer buf;
+
+ V4L2_CLEAR(buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+ buf.index = i;
+ buf.m.userptr = (unsigned long)p_self->p_buffers[i].p_start;
+ buf.length = p_self->p_buffers[i].n_length;
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_QBUF, &buf)) {
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_QBUF) failed: %s error %d", strerror(errno), errno);
+ return -1;
+ }
+ }
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_STREAMON, &type)) {
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_STREAMON) failed: %s error %d", strerror(errno), errno);
+ return -1;
+ }
+ break;
+ }
+ return 0;
+}
+
+/* Device-level pause hook. V4L2 streaming i/o has no native pause primitive,
+ * so this is a no-op kept for interface symmetry with start/stop. */
+static int _v4l2_pause(tdav_producer_video_v4l2_t* p_self)
+{
+ V4L2_DEBUG_INFO("--- PAUSE ---");
+
+ return 0;
+}
+
+/* Stops capturing: turns streaming off (VIDIOC_STREAMOFF) for the streaming
+ * i/o methods. Safe to call when the device is already closed (fd == -1). */
+static int _v4l2_stop(tdav_producer_video_v4l2_t* p_self)
+{
+ enum v4l2_buf_type type;
+
+ V4L2_DEBUG_INFO("--- STOP ---");
+
+ switch (p_self->io) {
+ case V4L2_IO_METHOD_READ:
+ /* Nothing to do. */
+ break;
+
+ case V4L2_IO_METHOD_MMAP:
+ case V4L2_IO_METHOD_USERPTR:
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (p_self->fd != -1 && -1 == _v4l2_xioctl(p_self->fd, VIDIOC_STREAMOFF, &type)) {
+ if (p_self->b_started) { // display error only if the device is marked as "started"
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_STREAMOFF) failed: %s error %d", strerror(errno), errno);
+ return -1;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/* Releases everything _v4l2_prepare() acquired: stops streaming, frees or
+ * unmaps the capture buffers (per i/o method) and closes the descriptor.
+ * Idempotent: safe on a partially-prepared or already-unprepared producer. */
+static int _v4l2_unprepare(tdav_producer_video_v4l2_t* p_self)
+{
+ unsigned int i;
+ V4L2_DEBUG_INFO("--- UNPREPARE ---");
+
+ _v4l2_stop(p_self);
+
+ switch (p_self->io) {
+ case V4L2_IO_METHOD_READ:
+ // single heap buffer
+ if (p_self->p_buffers && p_self->p_buffers[0].p_start) {
+ free(p_self->p_buffers[0].p_start);
+ p_self->p_buffers[0].p_start = NULL;
+ }
+ break;
+ case V4L2_IO_METHOD_MMAP:
+ // unmap driver buffers; the backing memory belongs to the driver
+ for (i = 0; i < p_self->n_buffers; ++i) {
+ if (p_self->p_buffers && p_self->p_buffers[i].p_start) {
+ if (-1 == munmap(p_self->p_buffers[i].p_start, p_self->p_buffers[i].n_length)) {
+ V4L2_DEBUG_ERROR("munmap(%d) failed", i);
+ }
+ }
+ }
+ break;
+
+ case V4L2_IO_METHOD_USERPTR:
+ // heap buffers owned by us
+ for (i = 0; i < p_self->n_buffers; ++i) {
+ if (p_self->p_buffers && p_self->p_buffers[i].p_start) {
+ free(p_self->p_buffers[i].p_start);
+ p_self->p_buffers[i].p_start = NULL;
+ }
+ }
+ break;
+ }
+
+ if (p_self->p_buffers) {
+ free(p_self->p_buffers);
+ p_self->p_buffers = NULL;
+ }
+ p_self->n_buffers = 0;
+
+ if (p_self->fd > 0) {
+ close(p_self->fd);
+ }
+ p_self->fd = -1;
+
+ return 0;
+}
+
+/* ioctl() wrapper that retries for as long as the call is interrupted by a
+ * signal (EINTR). Returns the final ioctl() result. */
+static int _v4l2_xioctl(int fh, int request, void *arg)
+{
+ int r = ioctl(fh, request, arg);
+ while (r == -1 && errno == EINTR) {
+ r = ioctl(fh, request, arg);
+ }
+ return r;
+}
+
+/* Picks the "best" capture format for the device: first tries the preferred
+ * pixel formats directly (requested size, then the driver's default size;
+ * progressive first, then interlaced), then falls back to walking the
+ * driver's enumerated formats (VIDIOC_ENUM_FMT). On success the accepted
+ * format is copied into *fmt_ret and 0 is returned; -1 when nothing fits. */
+static int _v4l2_get_best_format(tdav_producer_video_v4l2_t* p_self, const char* device_name, struct v4l2_format* fmt_ret)
+{
+ struct v4l2_format fmt, fmt_default;
+ struct v4l2_fmtdesc fmtdesc;
+ int i, j, field, size;
+ int ok = 0;
+
+ if (!fmt_ret) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // get default format (fallback geometry when the requested one is refused)
+ V4L2_CLEAR(fmt_default);
+ fmt_default.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (_v4l2_xioctl(p_self->fd, VIDIOC_G_FMT, &fmt_default) == -1) {
+ V4L2_DEBUG_ERROR("xioctl(%s, VIDIOC_G_FMT) failed: %s error %d", device_name, strerror(errno), errno);
+ return -1;
+ }
+ V4L2_DEBUG_INFO("device '%s' default format: width:%d, height:%d, field:%d, pixelformat:%d",
+ device_name, fmt_default.fmt.pix.width, fmt_default.fmt.pix.height, fmt_default.fmt.pix.field, fmt_default.fmt.pix.pixelformat);
+
+ /* Best format (using preference) */
+ V4L2_CLEAR(fmt);
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ for (i = 0; i < sizeof(pix_format_prefs)/sizeof(pix_format_prefs[0]); ++i) {
+ for (size = 0; size < 2; ++size) {
+ for (field = 0; field < 2; ++field) {
+ fmt.fmt.pix.width = (size == 0) ? TMEDIA_PRODUCER(p_self)->video.width : fmt_default.fmt.pix.width;
+ fmt.fmt.pix.height = (size == 0) ? TMEDIA_PRODUCER(p_self)->video.height : fmt_default.fmt.pix.height;
+ fmt.fmt.pix.pixelformat = pix_format_prefs[i];
+ fmt.fmt.pix.field = (field == 0) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
+ if ((ok = (_v4l2_xioctl(p_self->fd, VIDIOC_TRY_FMT, &fmt) != -1))) {
+ goto bail;
+ }
+ }
+ }
+ }
+
+ /* Best format (using caps): walk what the driver advertises */
+ for (i = 0; ; ++i) {
+ V4L2_CLEAR(fmtdesc);
+ fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fmtdesc.index = i;
+
+ if (_v4l2_xioctl(p_self->fd, VIDIOC_ENUM_FMT, &fmtdesc) == -1) {
+ break; // end of enumeration
+ }
+ V4L2_DEBUG_INFO("CAPS: device name=%s, fmtdesc index=%d, type=%d, description=%s, pixelformat=%d",
+ device_name, fmtdesc.index, fmtdesc.type, fmtdesc.description, fmtdesc.pixelformat);
+ for (j = 0; j < sizeof(pix_format_prefs)/sizeof(pix_format_prefs[0]); ++j) {
+ if (fmtdesc.pixelformat == pix_format_prefs[j]) {
+ for (size = 0; size < 2; ++size) {
+ for (field = 0; field < 2; ++field) {
+ fmt.fmt.pix.width = (size == 0) ? TMEDIA_PRODUCER(p_self)->video.width : fmt_default.fmt.pix.width;
+ fmt.fmt.pix.height = (size == 0) ? TMEDIA_PRODUCER(p_self)->video.height : fmt_default.fmt.pix.height;
+ /* BUGFIX: was pix_format_prefs[i] -- the enumeration index,
+ * which tried formats the driver never advertised */
+ fmt.fmt.pix.pixelformat = pix_format_prefs[j];
+ fmt.fmt.pix.field = (field == 0) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
+ if ((ok = (_v4l2_xioctl(p_self->fd, VIDIOC_TRY_FMT, &fmt) != -1))) {
+ goto bail;
+ }
+ }
+ }
+ }
+ }
+ }
+
+bail:
+ if (ok) {
+ memcpy(fmt_ret, &fmt, sizeof(fmt));
+ }
+ return ok ? 0 : -1;
+}
+
+/* Allocates the single buffer used by the read() i/o method.
+ * Consistency fix: uses the tsk_* allocators like the other init helpers
+ * instead of a bare calloc(). Cleanup on failure is the caller's job
+ * (_v4l2_prepare() bails into _v4l2_unprepare()). */
+static int _v4l2_init_read(tdav_producer_video_v4l2_t* p_self, unsigned int buffer_size)
+{
+ if (p_self->p_buffers) {
+ V4L2_DEBUG_ERROR("Buffers already initialized");
+ return -1;
+ }
+ if (!buffer_size) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!(p_self->p_buffers = tsk_calloc(1, sizeof(*p_self->p_buffers)))) {
+ V4L2_DEBUG_ERROR("Out of memory");
+ return -1;
+ }
+
+ p_self->p_buffers[0].n_length = buffer_size;
+ p_self->p_buffers[0].p_start = tsk_malloc(buffer_size);
+
+ if (!p_self->p_buffers[0].p_start) {
+ V4L2_DEBUG_ERROR("Out of memory");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Allocates memory-mapped capture buffers: requests 4 buffers from the
+ * driver (VIDIOC_REQBUFS), queries each one (VIDIOC_QUERYBUF) and maps it
+ * into our address space. The driver may grant fewer than requested; fewer
+ * than 2 is treated as an error. Partial failures are cleaned up by the
+ * caller via _v4l2_unprepare(). */
+static int _v4l2_init_mmap(tdav_producer_video_v4l2_t* p_self, const char* device_name)
+{
+ struct v4l2_requestbuffers req;
+
+ V4L2_CLEAR(req);
+
+ req.count = 4;
+ req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ req.memory = V4L2_MEMORY_MMAP;
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_REQBUFS, &req)) {
+ if (EINVAL == errno) {
+ V4L2_DEBUG_ERROR("%s does not support memory mapping", device_name);
+ return -1;
+ } else {
+ V4L2_DEBUG_ERROR("xioctl(%s, VIDIOC_REQBUFS) failed: %s error %d", device_name, strerror(errno), errno);
+ return -1;
+ }
+ }
+
+ if (req.count < 2) {
+ V4L2_DEBUG_ERROR("Insufficient buffer memory on %s", device_name);
+ return -1;
+ }
+
+ // req.count now holds the number actually granted by the driver
+ if (!(p_self->p_buffers = tsk_calloc(req.count, sizeof(*p_self->p_buffers)))) {
+ V4L2_DEBUG_ERROR("Out of memory");
+ return -1;
+ }
+
+ for (p_self->n_buffers = 0; p_self->n_buffers < req.count; ++p_self->n_buffers) {
+ struct v4l2_buffer buf;
+
+ V4L2_CLEAR(buf);
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = p_self->n_buffers;
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_QUERYBUF, &buf)) {
+ V4L2_DEBUG_ERROR("xioctl(%s, VIDIOC_REQBUFS) failed: %s error %d", device_name, strerror(errno), errno);
+ return -1;
+ }
+
+ p_self->p_buffers[p_self->n_buffers].n_length = buf.length;
+ p_self->p_buffers[p_self->n_buffers].p_start = mmap(NULL /* start anywhere */,
+ buf.length,
+ PROT_READ | PROT_WRITE /* required */,
+ MAP_SHARED /* recommended */,
+ p_self->fd, buf.m.offset);
+
+ if (MAP_FAILED == p_self->p_buffers[p_self->n_buffers].p_start) {
+ V4L2_DEBUG_ERROR("mmap(%s) failed: %s error %d", device_name, strerror(errno), errno);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Allocates user-pointer capture buffers. Requests 4 buffers but honors the
+ * count the driver actually grants (VIDIOC_REQBUFS may adjust req.count), so
+ * the allocation and the later QBUF/DQBUF loops stay in sync.
+ * (BUGFIX: the count was previously hard-coded to 4.) */
+static int _v4l2_init_userp(tdav_producer_video_v4l2_t* p_self, unsigned int buffer_size, const char* device_name)
+{
+ struct v4l2_requestbuffers req;
+
+ V4L2_CLEAR(req);
+
+ req.count = 4;
+ req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ req.memory = V4L2_MEMORY_USERPTR;
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_REQBUFS, &req)) {
+ if (EINVAL == errno) {
+ V4L2_DEBUG_ERROR("%s does not support user pointer i/o", device_name);
+ return -1;
+ } else {
+ V4L2_DEBUG_ERROR("xioctl(%s, VIDIOC_REQBUFS) failed: %s error %d", device_name, strerror(errno), errno);
+ return -1;
+ }
+ }
+
+ if (!(p_self->p_buffers = tsk_calloc(req.count, sizeof(*p_self->p_buffers)))) {
+ V4L2_DEBUG_ERROR("Out of memory");
+ return -1;
+ }
+
+ for (p_self->n_buffers = 0; p_self->n_buffers < req.count; ++p_self->n_buffers) {
+ p_self->p_buffers[p_self->n_buffers].n_length = buffer_size;
+ p_self->p_buffers[p_self->n_buffers].p_start = tsk_malloc(buffer_size);
+
+ if (!p_self->p_buffers[p_self->n_buffers].p_start) {
+ V4L2_DEBUG_ERROR("Out of memory");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Dequeues one captured frame according to the active i/o method, hands it
+ * to the registered encoder callback, then (for streaming methods) queues
+ * the buffer back to the driver.
+ * Returns 0 on success or transient EAGAIN, -1 on fatal error.
+ * (BUGFIX: fatal read()/VIDIOC_DQBUF errors used to fall through and deliver
+ * a stale -- or, for MMAP, an uninitialized -- buffer to the callback.) */
+static int _v4l2_send_frame(tdav_producer_video_v4l2_t* p_self)
+{
+ struct v4l2_buffer buf;
+ unsigned int i;
+
+#define V4L2_SEND_BUFF(_buff, _size) \
+ TMEDIA_PRODUCER(p_self)->enc_cb.callback(TMEDIA_PRODUCER(p_self)->enc_cb.callback_data, (_buff), (_size));
+
+#if V4L2_FAKE_UYVY
+ {
+ /* Debug path: feed random UYVY noise instead of real frames. */
+ tsk_size_t size = (TMEDIA_PRODUCER(p_self)->video.width * TMEDIA_PRODUCER(p_self)->video.height) << 1;
+ uint8_t* buff = (uint8_t*)tsk_malloc(size);
+ if (buff) {
+ tsk_size_t i;
+ for (i = 0; i < size; ++i) {
+ buff[i] = rand() & 254;
+ }
+ V4L2_SEND_BUFF(buff, size);
+ tsk_free((void**)&buff);
+ }
+ return 0;
+ }
+#endif
+
+ switch (p_self->io) {
+ case V4L2_IO_METHOD_READ:
+ if (-1 == read(p_self->fd, p_self->p_buffers[0].p_start, p_self->p_buffers[0].n_length)) {
+ switch (errno) {
+ case EAGAIN:
+ return 0; // no frame ready yet
+
+ case EIO:
+ /* Could ignore EIO, see spec. */
+
+ /* fall through */
+
+ default:
+ V4L2_DEBUG_ERROR("read() failed: %s error %d", strerror(errno), errno);
+ return -1; // do not deliver a stale buffer
+ }
+ }
+
+ V4L2_SEND_BUFF(p_self->p_buffers[0].p_start, p_self->p_buffers[0].n_length);
+ return 0;
+
+ case V4L2_IO_METHOD_MMAP:
+ V4L2_CLEAR(buf);
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_DQBUF, &buf)) {
+ switch (errno) {
+ case EAGAIN:
+ V4L2_DEBUG_INFO("EAGAIN");
+ return 0;
+
+ case EIO:
+ /* Could ignore EIO, see spec. */
+
+ /* fall through */
+
+ default:
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_DQBUF) failed: %s error %d", strerror(errno), errno);
+ return -1; // buf is not valid here
+ }
+ }
+
+ assert(buf.index < p_self->n_buffers);
+
+ V4L2_SEND_BUFF(p_self->p_buffers[buf.index].p_start, buf.bytesused);
+
+ // give the buffer back to the driver
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_QBUF, &buf)) {
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_QBUF) failed: %s error %d", strerror(errno), errno);
+ break;
+ }
+ return 0;
+
+ case V4L2_IO_METHOD_USERPTR:
+ V4L2_CLEAR(buf);
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_DQBUF, &buf)) {
+ switch (errno) {
+ case EAGAIN:
+ V4L2_DEBUG_INFO("EAGAIN");
+ return 0;
+
+ case EIO:
+ /* Could ignore EIO, see spec. */
+
+ /* fall through */
+
+ default:
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_DQBUF) failed: %s error %d", strerror(errno), errno);
+ return -1; // buf is not valid here
+ }
+ }
+
+ // sanity: the dequeued pointer should match one of our buffers
+ for (i = 0; i < p_self->n_buffers; ++i) {
+ if (buf.m.userptr == (unsigned long)p_self->p_buffers[i].p_start && buf.length == p_self->p_buffers[i].n_length) {
+ break;
+ }
+ }
+
+ V4L2_SEND_BUFF((void *)buf.m.userptr, buf.bytesused);
+
+ // give the buffer back to the driver
+ if (-1 == _v4l2_xioctl(p_self->fd, VIDIOC_QBUF, &buf)) {
+ V4L2_DEBUG_ERROR("xioctl(VIDIOC_QBUF) failed: %s error %d", strerror(errno), errno);
+ break;
+ }
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Grab-timer callback: grabs one frame, then re-arms the timer while the
+ * producer is still running. Returns the grab result, or -2 when the timer
+ * could not be re-scheduled. */
+static int _tdav_producer_video_v4l2_timer_cb(const void* arg, tsk_timer_id_t timer_id)
+{
+ tdav_producer_video_v4l2_t* p_v4l2 = (tdav_producer_video_v4l2_t*)arg;
+ int ret = 0;
+
+ tsk_safeobj_lock(p_v4l2);
+
+ if (p_v4l2->id_timer_grab == timer_id) {
+ /* A failed grab is deliberately non-fatal: keep the schedule alive.
+ * (Was "if (ret = grab())" with an empty body -- assignment kept,
+ * misleading condition removed.) */
+ ret = _tdav_producer_video_v4l2_grab(p_v4l2);
+ if (p_v4l2->b_started) {
+ p_v4l2->id_timer_grab = tsk_timer_manager_schedule(p_v4l2->p_timer_mgr, p_v4l2->u_timout_grab, _tdav_producer_video_v4l2_timer_cb, p_v4l2);
+ if (!TSK_TIMER_ID_IS_VALID(p_v4l2->id_timer_grab)) {
+ V4L2_DEBUG_ERROR("Failed to schedule timer with timeout=%llu", p_v4l2->u_timout_grab);
+ ret = -2;
+ goto bail;
+ }
+ }
+ }
+
+bail:
+ tsk_safeobj_unlock(p_v4l2);
+ return ret;
+}
+
+/* Waits (select) for the device to have a frame ready -- for at most one
+ * grab period -- then dequeues and delivers it via _v4l2_send_frame().
+ * Returns 0 on success or timeout, negative on error. */
+static int _tdav_producer_video_v4l2_grab(tdav_producer_video_v4l2_t* p_self)
+{
+ int ret = 0, r;
+ fd_set fds;
+ struct timeval tv;
+
+ if (!p_self) {
+ V4L2_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_self);
+
+ if (!p_self->b_started) {
+ V4L2_DEBUG_ERROR("producer not started yet");
+ ret = -2;
+ goto bail;
+ }
+
+ if (!TMEDIA_PRODUCER(p_self)->enc_cb.callback) {
+ goto bail; // nobody to deliver frames to yet
+ }
+
+ FD_ZERO(&fds);
+ FD_SET(p_self->fd, &fds);
+
+ /* Timeout: one grab period, in microseconds.
+ * BUGFIX: the timeout used to be computed from the timer *id*
+ * (id_timer_grab) instead of the grab period (u_timout_grab). */
+ tv.tv_sec = 0;
+ tv.tv_usec = (p_self->u_timout_grab * 1000);
+ while (tv.tv_usec >= 1000000) {
+ tv.tv_usec -= 1000000;
+ tv.tv_sec++;
+ }
+
+ r = select(p_self->fd + 1, &fds, NULL, NULL, &tv);
+
+ if (-1 == r) {
+ if (EINTR == errno) {
+ V4L2_DEBUG_INFO("select() returned EINTR");
+ }
+ else {
+ V4L2_DEBUG_ERROR("select() failed: %s error %d", strerror(errno), errno);
+ }
+ goto bail;
+ }
+
+ if (0 == r) {
+ // no frame within one grab period: skip this tick
+ V4L2_DEBUG_INFO("select() timeout: %s error %d", strerror(errno), errno);
+ goto bail;
+ }
+ // Grab a frame
+ if ((ret = _v4l2_send_frame(p_self))) {
+ goto bail;
+ }
+bail:
+ tsk_safeobj_unlock(p_self);
+
+ return ret;
+}
+
+//
+// V4L2 video producer object definition
+//
+/* Constructor: initializes the base producer and seeds default capture
+ * settings (yuv420p, 15 fps, CIF 352x288); the device stays closed (fd=-1)
+ * until prepare(). */
+static tsk_object_t* _tdav_producer_video_v4l2_ctor(tsk_object_t *self, va_list * app)
+{
+ tdav_producer_video_v4l2_t *p_v4l2 = (tdav_producer_video_v4l2_t *)self;
+ if (p_v4l2) {
+ /* init base */
+ tmedia_producer_init(TMEDIA_PRODUCER(p_v4l2));
+ TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_yuv420p;
+ /* init self with default values*/
+ p_v4l2->fd = -1;
+ TMEDIA_PRODUCER(p_v4l2)->video.fps = 15;
+ TMEDIA_PRODUCER(p_v4l2)->video.width = 352;
+ TMEDIA_PRODUCER(p_v4l2)->video.height = 288;
+
+ tsk_safeobj_init(p_v4l2);
+ }
+ return self;
+}
+/* Destructor: stops the producer if still running, then releases the device,
+ * the timer manager and the lock. _v4l2_unprepare() is safe to call even if
+ * stop() already ran (it is idempotent). */
+static tsk_object_t* _tdav_producer_video_v4l2_dtor(tsk_object_t * self)
+{
+ tdav_producer_video_v4l2_t *p_v4l2 = (tdav_producer_video_v4l2_t *)self;
+ if (p_v4l2) {
+ /* stop */
+ if (p_v4l2->b_started) {
+ _tdav_producer_video_v4l2_stop((tmedia_producer_t*)p_v4l2);
+ }
+
+ /* deinit base */
+ tmedia_producer_deinit(TMEDIA_PRODUCER(p_v4l2));
+ /* deinit self */
+ _v4l2_unprepare(p_v4l2);
+ TSK_OBJECT_SAFE_FREE(p_v4l2->p_timer_mgr);
+ tsk_safeobj_deinit(p_v4l2);
+
+ V4L2_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* Object definition: vtable consumed by the tsk_object runtime. */
+static const tsk_object_def_t tdav_producer_video_v4l2_def_s =
+{
+ sizeof(tdav_producer_video_v4l2_t),
+ _tdav_producer_video_v4l2_ctor,
+ _tdav_producer_video_v4l2_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+// Video: regular camera capture
+static const tmedia_producer_plugin_def_t tdav_producer_video_v4l2_plugin_def_s =
+{
+ &tdav_producer_video_v4l2_def_s,
+ tmedia_video,
+ "V4L2 video producer",
+
+ _tdav_producer_video_v4l2_set,
+ _tdav_producer_video_v4l2_prepare,
+ _tdav_producer_video_v4l2_start,
+ _tdav_producer_video_v4l2_pause,
+ _tdav_producer_video_v4l2_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_video_v4l2_plugin_def_t = &tdav_producer_video_v4l2_plugin_def_s;
+// Screencast: same object/entry points registered under the BFCP media type
+static const tmedia_producer_plugin_def_t tdav_producer_screencast_v4l2_plugin_def_s =
+{
+ &tdav_producer_video_v4l2_def_s,
+ tmedia_bfcp_video,
+ "V4L2 screencast producer",
+
+ _tdav_producer_video_v4l2_set,
+ _tdav_producer_video_v4l2_prepare,
+ _tdav_producer_video_v4l2_start,
+ _tdav_producer_video_v4l2_pause,
+ _tdav_producer_video_v4l2_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_screencast_v4l2_plugin_def_t = &tdav_producer_screencast_v4l2_plugin_def_s;
+#endif /* HAVE_LINUX_VIDEODEV2_H */
+
diff --git a/tinyDAV/src/video/winm/tdav_consumer_winm.cxx b/tinyDAV/src/video/winm/tdav_consumer_winm.cxx
new file mode 100644
index 0000000..b608a72
--- /dev/null
+++ b/tinyDAV/src/video/winm/tdav_consumer_winm.cxx
@@ -0,0 +1,219 @@
+/*Copyright (C) 2013 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_consumer_winm.cxx
+ * @brief Microsoft Windows Media (WinM) consumer.
+ *
+ */
+#include "tinydav/video/winm/tdav_consumer_winm.h"
+
+#if HAVE_WINM
+
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#if TDAV_UNDER_WINDOWS_PHONE
+#include "Globals.h"
+
+using namespace doubango_rt::BackEnd;
+#endif
+
+
/* WinM video consumer context: no private state beyond the base consumer
 * (rendering is delegated to the application's VideoRenderer singleton). */
typedef struct tdav_consumer_winm_s
{
    TMEDIA_DECLARE_CONSUMER;
}
tdav_consumer_winm_t;
+
+
+
+/* ============ Media Producer Interface ================= */
+int tdav_consumer_winm_set(tmedia_consumer_t *self, const tmedia_param_t* param)
+{
+ int ret = 0;
+
+ if(!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return ret;
+}
+
+
+int tdav_consumer_winm_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_winm_t* consumer = (tdav_consumer_winm_t*)self;
+
+ if(!consumer || !codec && codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(codec->id == tmedia_codec_id_h264_bp || codec->id == tmedia_codec_id_h264_mp) {
+ TMEDIA_CONSUMER(consumer)->decoder.codec_id = codec->id;
+ }
+ else {
+ TMEDIA_CONSUMER(consumer)->decoder.codec_id = tmedia_codec_id_none;
+ }
+
+ TMEDIA_CONSUMER(consumer)->video.fps = TMEDIA_CODEC_VIDEO(codec)->in.fps;
+ TMEDIA_CONSUMER(consumer)->video.in.width = TMEDIA_CODEC_VIDEO(codec)->in.width;
+ TMEDIA_CONSUMER(consumer)->video.in.height = TMEDIA_CODEC_VIDEO(codec)->in.height;
+
+ if(!TMEDIA_CONSUMER(consumer)->video.display.width){
+ TMEDIA_CONSUMER(consumer)->video.display.width = TMEDIA_CONSUMER(consumer)->video.in.width;
+ }
+ if(!TMEDIA_CONSUMER(consumer)->video.display.height){
+ TMEDIA_CONSUMER(consumer)->video.display.height = TMEDIA_CONSUMER(consumer)->video.in.height;
+ }
+
+ return 0;
+}
+
/* Starts rendering. On Windows Phone this forwards to the application-owned
 * VideoRenderer singleton; on other targets it is a no-op. Returns 0 or -1. */
int tdav_consumer_winm_start(tmedia_consumer_t* self)
{
    tdav_consumer_winm_t* consumer = (tdav_consumer_winm_t*)self;

    if(!consumer){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

#if TDAV_UNDER_WINDOWS_PHONE
    /* the renderer is owned by the application (Globals), not by this consumer */
    if(Globals::Instance->VideoRenderer != nullptr)
    {
        Globals::Instance->VideoRenderer->Start();
    }
#endif

    return 0;
}
+
+int tdav_consumer_winm_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_winm_t* consumer = (tdav_consumer_winm_t*)self;
+
+#if TDAV_UNDER_WINDOWS_PHONE
+ static const UINT64 hnsPresenationTime = 0;
+ static const UINT64 hnsSampleDuration = 0;
+
+ Globals::Instance->ReceiveVideoFrame((BYTE*)buffer, size, hnsPresenationTime, hnsSampleDuration);
+#endif
+
+ return 0;
+}
+
+int tdav_consumer_winm_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_winm_t* consumer = (tdav_consumer_winm_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
/* Stops rendering by forwarding to the application-owned VideoRenderer
 * (Windows Phone only). Returns 0 or -1 on invalid argument. */
int tdav_consumer_winm_stop(tmedia_consumer_t* self)
{
    tdav_consumer_winm_t* consumer = (tdav_consumer_winm_t*)self;

    TSK_DEBUG_INFO("tdav_consumer_winm_stop");

    if(!self){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

#if TDAV_UNDER_WINDOWS_PHONE
    /* renderer is owned by the application; we only ask it to stop */
    if(Globals::Instance->VideoRenderer != nullptr)
    {
        Globals::Instance->VideoRenderer->Stop();
    }
#endif

    return 0;
}
+
+
+//
+// Windows Media video consumer object definition
+//
/* constructor: initializes the base consumer and sets display defaults
 * (CIF 352x288 @ 15 fps); prepare() overrides them from the negotiated codec */
static tsk_object_t* tdav_consumer_winm_ctor(tsk_object_t * self, va_list * app)
{
    tdav_consumer_winm_t *consumer = (tdav_consumer_winm_t *)self;
    if(consumer){
        /* init base */
        tmedia_consumer_init(TMEDIA_CONSUMER(consumer));
        TMEDIA_CONSUMER(consumer)->video.display.chroma = tmedia_chroma_yuv420p; // To avoid chroma conversion

        /* init self */
        TMEDIA_CONSUMER(consumer)->video.fps = 15;
        TMEDIA_CONSUMER(consumer)->video.display.width = 352;
        TMEDIA_CONSUMER(consumer)->video.display.height = 288;
        TMEDIA_CONSUMER(consumer)->video.display.auto_resize = tsk_true;
    }
    return self;
}
/* destructor: unconditionally stops rendering then releases base resources */
static tsk_object_t* tdav_consumer_winm_dtor(tsk_object_t * self)
{
    tdav_consumer_winm_t *consumer = (tdav_consumer_winm_t *)self;
    if(consumer){

        /* stop — called unconditionally because this consumer keeps no
         * 'started' flag of its own; stop() is safe to call when idle */
        tdav_consumer_winm_stop((tmedia_consumer_t*)self);

        /* deinit base */
        tmedia_consumer_deinit(TMEDIA_CONSUMER(consumer));
        /* deinit self: nothing to release */
    }

    return self;
}
/* object definition: ctor/dtor table for the tsk_object runtime */
static const tsk_object_def_t tdav_consumer_winm_def_s =
{
    sizeof(tdav_consumer_winm_t),
    tdav_consumer_winm_ctor,
    tdav_consumer_winm_dtor,
    tsk_null, /* no comparator */
};
/* plugin definition: function table registered with the tinyMEDIA consumer registry */
static const tmedia_consumer_plugin_def_t tdav_consumer_winm_plugin_def_s =
{
    &tdav_consumer_winm_def_s,

    tmedia_video,
    "Microsoft Windows Media consumer (Video)",

    tdav_consumer_winm_set,
    tdav_consumer_winm_prepare,
    tdav_consumer_winm_start,
    tdav_consumer_winm_consume,
    tdav_consumer_winm_pause,
    tdav_consumer_winm_stop
};
// NOTE(review): 'extern' combined with an initializer is unusual (sibling plugins use a
// plain definition, e.g. the producer below) — confirm the keyword is intentional.
extern const tmedia_consumer_plugin_def_t *tdav_consumer_winm_plugin_def_t = &tdav_consumer_winm_plugin_def_s;
+
+
+#endif /* HAVE_WINM */
diff --git a/tinyDAV/src/video/winm/tdav_producer_winm.cxx b/tinyDAV/src/video/winm/tdav_producer_winm.cxx
new file mode 100644
index 0000000..398340a
--- /dev/null
+++ b/tinyDAV/src/video/winm/tdav_producer_winm.cxx
@@ -0,0 +1,737 @@
+/*Copyright (C) 2013 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_producer_winm.cxx
+* @brief Microsoft Windows Media (WinM) video producer.
+*
+*/
+#include "tinydav/video/winm/tdav_producer_winm.h"
+
+#if HAVE_WINM
+
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#if TDAV_UNDER_WINDOWS_PHONE
+
+#include <windows.h>
+#include <implements.h>
+#include <Windows.Phone.Media.Capture.h>
+#include <Windows.Phone.Media.Capture.Native.h>
+
+using namespace Windows::System::Threading;
+using namespace Microsoft::WRL;
+using namespace Windows::Foundation;
+using namespace Platform;
+using namespace Windows::Phone::Media::Capture;
+using namespace Windows::Storage::Streams;
+
+struct tdav_producer_winm_s;
+
namespace Doubango
{
    namespace VoIP
    {
        /* Classic-COM sample sink handed to the native capture device; each
         * encoded video sample is forwarded to the wrapped C producer's
         * encoded-data callback (enc_cb). The producer pointer is NOT owned. */
        class CaptureSampleSink :
            public Microsoft::WRL::RuntimeClass<
            Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::RuntimeClassType::ClassicCom>,
            ICameraCaptureSampleSink>
        {
            DWORD m_dwSampleCount;                          // samples delivered so far
            const struct tdav_producer_winm_s* m_pProducer; // back-pointer, not owned

        public:

            /* Two-phase WRL initialization: stores the producer back-pointer. */
            STDMETHODIMP RuntimeClassInitialize(const struct tdav_producer_winm_s* pProducer)
            {
                m_dwSampleCount = 0;
                m_pProducer = pProducer;
                return S_OK;
            }

            DWORD GetSampleCount()
            {
                return m_dwSampleCount;
            }

            /* Invoked by the capture pipeline for every sample; pushes the raw
             * bytes to the producer callback when one is registered. */
            IFACEMETHODIMP_(void)
            OnSampleAvailable(
                ULONGLONG hnsPresentationTime,
                ULONGLONG hnsSampleDuration,
                DWORD cbSample,
                BYTE* pSample)
            {
                m_dwSampleCount++;
                if(m_pProducer && TMEDIA_PRODUCER(m_pProducer)->enc_cb.callback)
                {
                    TMEDIA_PRODUCER(m_pProducer)->enc_cb.callback(TMEDIA_PRODUCER(m_pProducer)->enc_cb.callback_data, pSample, cbSample);
                }
            }
        };

        /* C++/CX wrapper around the Windows Phone AudioVideoCaptureDevice:
         * owns the open/start/stop lifecycle and camera toggling for the
         * C producer plugin defined below. */
        ref class VideoCapturePhone sealed
        {
        public:
            virtual ~VideoCapturePhone();
        internal:
            VideoCapturePhone();

            int Prepare(const struct tdav_producer_winm_s* winm);
            int Start();
            int Pause();
            int Stop();
            void SetCameraLocation(Windows::Phone::Media::Capture::CameraSensorLocation cameraLocation);
            void ToggleCamera();

        private:
            int UnPrepare();
            void ToggleCameraThread(Windows::Foundation::IAsyncAction^ operation);

            tsk_mutex_handle_t* m_hMutex; // guards all state below

            const tdav_producer_winm_s* m_pWrappedPlugin; // wrapped C plugin, not owned

            // Has capture started? / has Prepare() succeeded?
            bool m_bStarted, m_bPrepared;

            // Events to signal whether capture has stopped/started
            HANDLE m_hStopCompleted;
            HANDLE m_hStartCompleted;

            IAsyncOperation<AudioVideoCaptureDevice^> ^m_pOpenOperation;

            Windows::Foundation::IAsyncAction^ m_ToggleThread;

            // Native sink and video device (raw COM pointers, Release()d manually)
            CaptureSampleSink *m_pVideoSink;
            IAudioVideoCaptureDeviceNative *m_pVideoDevice;

            Windows::Phone::Media::Capture::CameraSensorLocation m_eCameraLocation;

            Windows::Phone::Media::Capture::AudioVideoCaptureDevice ^m_pVideoOnlyDevice;
            Windows::Foundation::IAsyncAction ^m_pVideoCaptureAction;
        };
    }
}
+
+using namespace Doubango::VoIP;
+
+#endif
+
+
/* WinM video producer context: the base producer plus, on Windows Phone,
 * the C++/CX capture wrapper that drives the camera */
typedef struct tdav_producer_winm_s
{
    TMEDIA_DECLARE_PRODUCER;
#if TDAV_UNDER_WINDOWS_PHONE
    VideoCapturePhone^ videoCapturePhone; // owned; released in the dtor
#endif
}
tdav_producer_winm_t;
+
+
/* ============ Media Producer Interface ================= */
/* Runtime parameter setter. Currently only "camera-location" (int32) is
 * handled: it selects/toggles the front or back camera. */
static int tdav_producer_winm_set(tmedia_producer_t *self, const tmedia_param_t* param)
{
    int ret = 0;
    tdav_producer_winm_t* producer = (tdav_producer_winm_t*)self;

    if(!producer || !param){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    if(param->value_type == tmedia_pvt_int32){
        if(tsk_striequals(param->key, "camera-location")){
            Windows::Phone::Media::Capture::CameraSensorLocation cameraLocation = (Windows::Phone::Media::Capture::CameraSensorLocation)*((int32_t*)param->value);
            if(producer->videoCapturePhone)
            {
                producer->videoCapturePhone->SetCameraLocation(cameraLocation);
                return 0;
            }
        }
    }

    /* unknown parameters are ignored (ret == 0) */
    return ret;
}
+
+static int tdav_producer_winm_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_winm_t* producer = (tdav_producer_winm_t*)self;
+
+ if(!producer || !codec && codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(codec->id == tmedia_codec_id_h264_bp || codec->id == tmedia_codec_id_h264_mp) {
+ TMEDIA_PRODUCER(producer)->encoder.codec_id = codec->id;
+ }
+ else {
+ TMEDIA_PRODUCER(producer)->encoder.codec_id = tmedia_codec_id_none;
+ }
+ TMEDIA_PRODUCER(producer)->video.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps;
+ TMEDIA_PRODUCER(producer)->video.width = TMEDIA_CODEC_VIDEO(codec)->out.width;
+ TMEDIA_PRODUCER(producer)->video.height = TMEDIA_CODEC_VIDEO(codec)->out.height;
+
+
+#if TDAV_UNDER_WINDOWS_PHONE
+ return producer->videoCapturePhone->Prepare(producer);
+#else
+ TSK_DEBUG_ERROR("Unexpected code called");
+ return -1;
+#endif
+}
+
/* Starts capturing by delegating to the C++/CX wrapper (Windows Phone only). */
static int tdav_producer_winm_start(tmedia_producer_t* self)
{
    tdav_producer_winm_t* producer = (tdav_producer_winm_t*)self;

    if(!producer){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

#if TDAV_UNDER_WINDOWS_PHONE
    return producer->videoCapturePhone->Start();
#else
    /* this plugin is only functional on Windows Phone */
    TSK_DEBUG_ERROR("Unexpected code called");
    return -1;
#endif
}
+
/* Pauses capturing by delegating to the C++/CX wrapper (Windows Phone only). */
static int tdav_producer_winm_pause(tmedia_producer_t* self)
{
    tdav_producer_winm_t* producer = (tdav_producer_winm_t*)self;

    if(!producer){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

#if TDAV_UNDER_WINDOWS_PHONE
    return producer->videoCapturePhone->Pause();
#else
    /* this plugin is only functional on Windows Phone */
    TSK_DEBUG_ERROR("Unexpected code called");
    return -1;
#endif
}
+
/* Stops capturing by delegating to the C++/CX wrapper (Windows Phone only). */
static int tdav_producer_winm_stop(tmedia_producer_t* self)
{
    tdav_producer_winm_t* producer = (tdav_producer_winm_t*)self;

    if(!self){
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

#if TDAV_UNDER_WINDOWS_PHONE
    return producer->videoCapturePhone->Stop();
#else
    /* this plugin is only functional on Windows Phone */
    TSK_DEBUG_ERROR("Unexpected code called");
    return -1;
#endif
}
+
+
+#if TDAV_UNDER_WINDOWS_PHONE
+
/* Constructor: creates the mutex guarding all capture state plus the two
 * manual-reset events used to wait for asynchronous start/stop completion.
 * Throws a Platform exception if any of them cannot be created. */
VideoCapturePhone::VideoCapturePhone() :
    m_bStarted(false),
    m_bPrepared(false),
    m_pVideoOnlyDevice(nullptr),
    m_pVideoSink(NULL),
    m_pVideoDevice(NULL),
    m_pWrappedPlugin(NULL),
    m_pOpenOperation(nullptr),
    m_eCameraLocation(CameraSensorLocation::Front) // default to the front camera
{
    if(!(m_hMutex = tsk_mutex_create())){
        throw ref new Platform::FailureException(L"Failed to create mutex");
    }

    /* manual-reset: signaled by Stop()'s async completion, reset before waiting */
    m_hStopCompleted = CreateEventEx(NULL, NULL, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
    if (!m_hStopCompleted)
    {
        throw ref new Platform::Exception(HRESULT_FROM_WIN32(GetLastError()), L"Could not create shutdown event");
    }

    /* manual-reset: signaled once the open/start sequence has completed */
    m_hStartCompleted = CreateEventEx(NULL, NULL, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
    if (!m_hStartCompleted)
    {
        throw ref new Platform::Exception(HRESULT_FROM_WIN32(GetLastError()), L"Could not create start event");
    }
}
+
/* Destructor: stops capture, cancels any in-flight camera-toggle work item
 * and destroys the mutex.
 * NOTE(review): m_hStopCompleted/m_hStartCompleted are never CloseHandle()d
 * here — looks like a handle leak; confirm and fix separately. */
VideoCapturePhone::~VideoCapturePhone()
{
    Stop();

    if(m_ToggleThread)
    {
        m_ToggleThread->Cancel();
        m_ToggleThread->Close();
        m_ToggleThread = nullptr;
    }

    tsk_mutex_destroy(&m_hMutex);
}
+
+int VideoCapturePhone::Prepare(const struct tdav_producer_winm_s* winm)
+{
+ HRESULT hr = E_FAIL;
+ int ret = 0;
+ Windows::Foundation::Size dimensionsRequested, dimensionsClosest;
+ Collections::IVectorView<Size> ^availableSizes;
+ Collections::IIterator<Windows::Foundation::Size> ^availableSizesIterator;
+ bool bClosestFound = false;
+
+ #define WINM_SET_ERROR(code) ret = (code); goto bail;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_bPrepared)
+ {
+ TSK_DEBUG_INFO("#WASAPI: Audio producer already prepared");
+ goto bail;
+ }
+
+ if(!winm)
+ {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ WINM_SET_ERROR(-1);
+ }
+
+ if(m_pVideoCaptureAction || m_pVideoDevice || m_pVideoOnlyDevice || m_pVideoSink || m_pOpenOperation){
+ TSK_DEBUG_ERROR("Producer already prepared");
+ WINM_SET_ERROR(-2);
+ }
+
+ dimensionsClosest.Width = dimensionsRequested.Width = (float)TMEDIA_PRODUCER(winm)->video.width;
+ dimensionsClosest.Height = dimensionsRequested.Height = (float)TMEDIA_PRODUCER(winm)->video.height;
+ availableSizes = AudioVideoCaptureDevice::GetAvailableCaptureResolutions(m_eCameraLocation);
+ availableSizesIterator = availableSizes->First();
+
+ while(!m_pOpenOperation && availableSizesIterator->HasCurrent)
+ {
+ TSK_DEBUG_INFO("Camera Supported size: (%f, %f)", availableSizesIterator->Current.Width, availableSizesIterator->Current.Height);
+ if(availableSizesIterator->Current.Height == dimensionsRequested.Width && availableSizesIterator->Current.Width == dimensionsRequested.Height)
+ {
+ m_pOpenOperation = AudioVideoCaptureDevice::OpenForVideoOnlyAsync(m_eCameraLocation, dimensionsRequested);
+ TSK_DEBUG_INFO("Camera::Open(%d, %d)", dimensionsRequested.Width, dimensionsRequested.Height);
+ break;
+ }
+ else if(!bClosestFound && (availableSizesIterator->Current.Height <= dimensionsRequested.Height && availableSizesIterator->Current.Width <= dimensionsRequested.Width))
+ {
+ dimensionsClosest.Height = availableSizesIterator->Current.Height;
+ dimensionsClosest.Width = availableSizesIterator->Current.Width;
+ bClosestFound = true;
+ }
+ availableSizesIterator->MoveNext();
+ }
+
+ if(!m_pOpenOperation)
+ {
+ m_pOpenOperation = AudioVideoCaptureDevice::OpenForVideoOnlyAsync(m_eCameraLocation, dimensionsClosest);
+ TSK_DEBUG_INFO("Camera::Open(%f, %f)", dimensionsClosest.Width, dimensionsClosest.Height);
+ }
+
+bail:
+ if(ret != 0){
+ UnPrepare();
+ }
+ if((m_bPrepared = (ret == 0)))
+ {
+ m_pWrappedPlugin = winm;
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return ret;
+}
+
/* Starts capture: attaches a completion handler to the open operation created
 * by Prepare(). The handler (running asynchronously) QIs the native device,
 * applies fps/rotation, installs the sample sink and starts recording.
 * Returns 0 when the start sequence was (asynchronously) initiated, -2 otherwise. */
int VideoCapturePhone::Start()
{
    tsk_mutex_lock(m_hMutex);

    if(m_bStarted)
    {
        TSK_DEBUG_INFO("#WINM: Video producer already started");
        goto bail;
    }
    if(!m_bPrepared)
    {
        TSK_DEBUG_ERROR("#WINM: Video producer not prepared");
        goto bail;
    }

    /* set before the handler runs; the handler bails out if Stop() cleared it */
    m_bStarted = true;

    m_pOpenOperation->Completed = ref new AsyncOperationCompletedHandler<AudioVideoCaptureDevice^>([this] (IAsyncOperation<AudioVideoCaptureDevice^> ^operation, Windows::Foundation::AsyncStatus status)
    {
        tsk_mutex_lock(m_hMutex);
        if(m_bStarted) /* ignore completion if Stop() raced us */
        {

            if(status == Windows::Foundation::AsyncStatus::Completed)
            {

                TSK_DEBUG_INFO("+[VideoCapturePhone::Prepare] => OpenAsyncOperation started");

                auto videoDevice = operation->GetResults();

                m_pVideoOnlyDevice = videoDevice;
                IAudioVideoCaptureDeviceNative *pNativeDevice = NULL;
                /* QI the WinRT device for its native (COM) interface so the raw sample sink can be attached */
                HRESULT hr = reinterpret_cast<IUnknown*>(videoDevice)->QueryInterface(__uuidof(IAudioVideoCaptureDeviceNative), (void**) &pNativeDevice);

                if (NULL == pNativeDevice || FAILED(hr))
                {
                    throw ref new FailureException("Unable to QI IAudioVideoCaptureDeviceNative");
                }

                // Save off the native device
                m_pVideoDevice = pNativeDevice;

                // Set Fps: clamp the producer's fps into the camera's supported range
                CameraCapturePropertyRange^ cameraCapturePropertyRange = m_pVideoOnlyDevice->GetSupportedPropertyRange(m_eCameraLocation, KnownCameraAudioVideoProperties::VideoFrameRate);
                if(cameraCapturePropertyRange)
                {
                    try
                    {
                        Windows::Foundation::IPropertyValue^ vMin = dynamic_cast<Windows::Foundation::IPropertyValue^>(cameraCapturePropertyRange->Min);
                        Windows::Foundation::IPropertyValue^ vMax = dynamic_cast<Windows::Foundation::IPropertyValue^>(cameraCapturePropertyRange->Max);
                        UINT32 nFps = TSK_CLAMP(vMin->GetUInt32(), (UINT32)TMEDIA_PRODUCER(m_pWrappedPlugin)->video.fps, vMax->GetUInt32());
                        m_pVideoOnlyDevice->SetProperty(KnownCameraAudioVideoProperties::VideoFrameRate, nFps);
                    }
                    catch(...){ } /* best-effort: keep the camera default on failure */
                }

                // Set Camera Rotation (sign depends on which sensor is in use)
                try
                {
                    m_pVideoOnlyDevice->SetProperty(
                        KnownCameraGeneralProperties::EncodeWithOrientation,
                        m_eCameraLocation == Windows::Phone::Media::Capture::CameraSensorLocation::Back ? 90 : -90
                        );
                }
                catch(...){ } /* best-effort: rotation is not critical */

                // Create the sink that forwards encoded samples to the C plugin callback
                MakeAndInitialize<CaptureSampleSink>(&(m_pVideoSink), m_pWrappedPlugin);
                pNativeDevice->SetVideoSampleSink(m_pVideoSink);

                // Use the same encoding format as in VideoMediaStreamSource.cs
                videoDevice->VideoEncodingFormat = CameraCaptureVideoFormat::H264;

                /* unblock anyone waiting for the start sequence (see ToggleCameraThread) */
                SetEvent(m_hStartCompleted);

                // Start recording to our sink
                m_pVideoCaptureAction = videoDevice->StartRecordingToSinkAsync();
                m_pVideoCaptureAction->Completed = ref new AsyncActionCompletedHandler([this] (IAsyncAction ^asyncInfo, Windows::Foundation::AsyncStatus status)
                {
                    if(status == Windows::Foundation::AsyncStatus::Completed)
                    {
                        TSK_DEBUG_INFO("[VideoCapturePhone::Prepare] => StartRecordingToSinkAsync completed");
                    }
                    else if(status == Windows::Foundation::AsyncStatus::Error || status == Windows::Foundation::AsyncStatus::Canceled)
                    {
                        TSK_DEBUG_INFO("[VideoCapturePhone::Prepare] => StartRecordingToSinkAsync did not complete");
                    }
                });

                TSK_DEBUG_INFO("-[VideoCapturePhone::Prepare] => OpenAsyncOperation Completed");
            }
            else if(status == Windows::Foundation::AsyncStatus::Canceled)
            {
                TSK_DEBUG_INFO("[VideoCapturePhone::Prepare] => OpenAsyncOperation Canceled");
            }
            else if(status == Windows::Foundation::AsyncStatus::Error)
            {
                TSK_DEBUG_INFO("[VideoCapturePhone::Prepare] => OpenAsyncOperation encountered an error");
            }
        }

        tsk_mutex_unlock(m_hMutex);
    });

bail:
    tsk_mutex_unlock(m_hMutex);

    return (m_bStarted ? 0 : -2);
}
+
/* Pause is not implemented: the body intentionally does nothing while
 * holding the state lock. Always returns 0. */
int VideoCapturePhone::Pause()
{
    tsk_mutex_lock(m_hMutex);

    if(m_bStarted)
    {
        /* TODO: pausing the capture device is not supported yet */
    }

    tsk_mutex_unlock(m_hMutex);

    return 0;
}
+
/* Stops recording (asynchronously), releases the native device and sink, then
 * un-prepares so Prepare() must run again before the next Start().
 * m_hStopCompleted is signaled once recording has actually stopped.
 * NOTE(review): Stop() calls UnPrepare(), and UnPrepare() calls Stop() again
 * when m_bStarted is still true (it is only cleared asynchronously) — this
 * mutual call relies on m_pVideoOnlyDevice having been nulled to terminate;
 * verify there is no re-entry path. */
int VideoCapturePhone::Stop()
{
    tsk_mutex_lock(m_hMutex);

    TSK_DEBUG_INFO("+[VideoCapturePhone::Stop] => Trying to stop capture");
    if (m_pVideoOnlyDevice)
    {
        TSK_DEBUG_INFO("Destroying VideoCaptureDevice");

        try
        {
            if(m_bStarted)
            {
                /* completion handler clears the state and signals m_hStopCompleted */
                m_pVideoOnlyDevice->StopRecordingAsync()->Completed = ref new AsyncActionCompletedHandler([this] (IAsyncAction ^action, Windows::Foundation::AsyncStatus status){
                    if(status == Windows::Foundation::AsyncStatus::Completed)
                    {
                        TSK_DEBUG_INFO("[VideoCapturePhone::StopRecordingAsync] Video successfully stopped");
                    }
                    else
                    {
                        TSK_DEBUG_INFO("[VideoCapturePhone::StopRecordingAsync] Error occurred while stopping recording");
                    }
                    m_pVideoCaptureAction = nullptr;
                    m_pVideoOnlyDevice = nullptr;
                    m_bStarted = false;
                    SetEvent(m_hStopCompleted);
                });
            }
        }
        catch(...)
        {
            // A Platform::ObjectDisposedException can be raised if the app has had its access
            // to video revoked (most commonly when the app is going out of the foreground)
            TSK_DEBUG_ERROR("Exception caught while destroying video capture");
            m_pVideoCaptureAction = nullptr;
            m_pVideoOnlyDevice = nullptr;
            m_bStarted = false;
            SetEvent(m_hStopCompleted);
        }

        /* drop our raw COM references regardless of how the stop went */
        if (m_pVideoDevice)
        {
            m_pVideoDevice->Release();
            m_pVideoDevice = NULL;
        }

        if (m_pVideoSink)
        {
            m_pVideoSink->Release();
            m_pVideoSink = NULL;
        }
    }
    else
    {
        m_bStarted = false;
    }

    TSK_DEBUG_INFO("-[VideoCapturePhone::Stop] => finished stopping capture\n");

    // will be prepared again before next start()
    UnPrepare();

    tsk_mutex_unlock(m_hMutex);

    return 0;
}
+
/* Requests a camera switch. While running, the switch happens via the async
 * toggle path (valid because only Front/Back exist, so "different from
 * current" means "the other one"); while stopped, just record the choice. */
void VideoCapturePhone::SetCameraLocation(Windows::Phone::Media::Capture::CameraSensorLocation cameraLocation)
{
    if(m_eCameraLocation != cameraLocation)
    {
        if(m_bStarted)
        {
            ToggleCamera();
        }
        else
        {
            m_eCameraLocation = cameraLocation;
        }
    }
}
+
/* Releases everything Prepare() created and clears m_bPrepared; stops the
 * device first (waiting up to 5s for the async stop) when still running.
 * NOTE(review): this locks m_hMutex and may call Stop(), which locks it
 * again — assumes tsk_mutex_create() yields a recursive mutex; confirm. */
int VideoCapturePhone::UnPrepare()
{
    tsk_mutex_lock(m_hMutex);

    if(m_bStarted)
    {
        ResetEvent(m_hStopCompleted);
        Stop();
        /* bounded wait: do not hang forever if the async stop never completes */
        DWORD waitResult = WaitForSingleObjectEx(m_hStopCompleted, 5000, FALSE);
        if(waitResult != WAIT_OBJECT_0)
        {
            TSK_DEBUG_ERROR("Failed to stop video producer");
        }
    }

    if (m_pVideoDevice)
    {
        m_pVideoDevice->Release();
        m_pVideoDevice = NULL;
    }

    if (m_pVideoSink)
    {
        m_pVideoSink->Release();
        m_pVideoSink = NULL;
    }

    m_pOpenOperation = nullptr;

    m_bPrepared = false;

    tsk_mutex_unlock(m_hMutex);

    return 0;
}
+
/* Schedules a camera toggle on a thread-pool work item (the stop/start cycle
 * blocks, so it must not run on the caller's thread). Any previous toggle
 * work item is canceled first. */
void VideoCapturePhone::ToggleCamera()
{
    tsk_mutex_lock(m_hMutex);

    if(m_ToggleThread)
    {
        m_ToggleThread->Cancel();
        m_ToggleThread->Close();
        m_ToggleThread = nullptr;
    }

    m_ToggleThread = ThreadPool::RunAsync(ref new WorkItemHandler(this, &VideoCapturePhone::ToggleCameraThread), WorkItemPriority::High, WorkItemOptions::TimeSliced);

    tsk_mutex_unlock(m_hMutex);
}
+
+
/* Thread-pool body for ToggleCamera(): stop capture, wait for the stop event,
 * flip Front<->Back, re-prepare and restart, then wait for the start event.
 * Throws on wait failure.
 * NOTE(review): both waits are INFINITE — if the stop/start completion handler
 * never fires, this work item hangs; consider a bounded timeout as in UnPrepare(). */
void VideoCapturePhone::ToggleCameraThread(Windows::Foundation::IAsyncAction^ operation)
{
    TSK_DEBUG_INFO("+[VideoCapturePhone::ToggleCamera] => Toggling camera");

    ResetEvent(m_hStopCompleted);
    Stop();
    DWORD waitResult = WaitForSingleObjectEx(m_hStopCompleted, INFINITE, FALSE);
    if(waitResult == WAIT_OBJECT_0)
    {
        ResetEvent(m_hStartCompleted);
        /* only two sensor locations exist: switch to the other one */
        if(m_eCameraLocation == Windows::Phone::Media::Capture::CameraSensorLocation::Back)
        {
            m_eCameraLocation = Windows::Phone::Media::Capture::CameraSensorLocation::Front;
        }
        else
        {
            m_eCameraLocation = Windows::Phone::Media::Capture::CameraSensorLocation::Back;
        }
        Prepare(m_pWrappedPlugin);
        Start();
    }
    else
    {
        throw ref new Platform::Exception(HRESULT_FROM_WIN32(waitResult), L"Error waiting for capture to stop when toggling cameras");
    }

    waitResult = WaitForSingleObjectEx(m_hStartCompleted, INFINITE, FALSE);
    if(waitResult == WAIT_OBJECT_0)
    {
        // CameraLocationChanged(newCameraLocation);
    }
    else
    {
        throw ref new Platform::Exception(HRESULT_FROM_WIN32(waitResult), L"Error waiting for capture to start when toggling cameras");
    }
    TSK_DEBUG_INFO("-[VideoCapturePhone::ToggleCamera] => Toggling camera");
}
+
+#endif /* TDAV_UNDER_WINDOWS_PHONE */
+
+
+//
+// Windows Media video producer object definition
+//
+/* constructor */
/* constructor: initializes the base producer, sets CIF 352x288 @ 15 fps
 * defaults (overridden by prepare()) and, on Windows Phone, creates the
 * C++/CX capture wrapper */
static tsk_object_t* tdav_producer_winm_ctor(tsk_object_t * self, va_list * app)
{
    tdav_producer_winm_t *producer = (tdav_producer_winm_t *)self;
    if(producer){
        /* init base */
        tmedia_producer_init(TMEDIA_PRODUCER(producer));
        TMEDIA_PRODUCER(producer)->video.chroma = tmedia_chroma_yuv420p; // To avoid chroma conversion
        /* init self with default values*/

        TMEDIA_PRODUCER(producer)->video.fps = 15;
        TMEDIA_PRODUCER(producer)->video.width = 352;
        TMEDIA_PRODUCER(producer)->video.height = 288;

#if TDAV_UNDER_WINDOWS_PHONE
        producer->videoCapturePhone = ref new VideoCapturePhone();
#endif
    }
    return self;
}
+/* destructor */
+static tsk_object_t* tdav_producer_winm_dtor(tsk_object_t * self)
+{
+ tdav_producer_winm_t *producer = (tdav_producer_winm_t *)self;
+ if(producer){
+ /* stop */
+ //if(producer->started){
+ tdav_producer_winm_stop((tmedia_producer_t*)self);
+ //}
+
+ /* deinit base */
+ tmedia_producer_deinit(TMEDIA_PRODUCER(producer));
+ /* deinit self */
+#if TDAV_UNDER_WINDOWS_PHONE
+ if(producer->videoCapturePhone)
+ {
+ delete producer->videoCapturePhone;
+ }
+#endif
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_winm_def_s =
+{
+ sizeof(tdav_producer_winm_t),
+ tdav_producer_winm_ctor,
+ tdav_producer_winm_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_winm_plugin_def_s =
+{
+ &tdav_producer_winm_def_s,
+
+ tmedia_video,
+ "Microsoft Windows Media producer (Video)",
+
+ tdav_producer_winm_set,
+ tdav_producer_winm_prepare,
+ tdav_producer_winm_start,
+ tdav_producer_winm_pause,
+ tdav_producer_winm_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_winm_plugin_def_t = &tdav_producer_winm_plugin_def_s;
+
+#endif /* HAVE_WINM */
OpenPOWER on IntegriCloud