author     Mamadou DIOP <bossiel@yahoo.fr>    2015-08-17 01:56:35 +0200
committer  Mamadou DIOP <bossiel@yahoo.fr>    2015-08-17 01:56:35 +0200
commit     631fffee8a28b1bec5ed1f1d26a20e0135967f99 (patch)
tree       74afe3bf3efe15aa82bcd0272b2b0f4d48c2d837 /tinyDAV/src
parent     7908865936604036e6f200f1b5e069f8752f3a3a (diff)
Diffstat (limited to 'tinyDAV/src')
-rw-r--r--  tinyDAV/src/audio/alsa/tdav_common_alsa.c  275
-rw-r--r--  tinyDAV/src/audio/alsa/tdav_consumer_alsa.c  288
-rw-r--r--  tinyDAV/src/audio/alsa/tdav_producer_alsa.c  261
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_audiounit.c  425
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c  268
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c  447
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c  253
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c  422
-rw-r--r--  tinyDAV/src/audio/directsound/tdav_consumer_dsound.c  458
-rw-r--r--  tinyDAV/src/audio/directsound/tdav_producer_dsound.c  402
-rw-r--r--  tinyDAV/src/audio/oss/tdav_consumer_oss.c  397
-rw-r--r--  tinyDAV/src/audio/oss/tdav_producer_oss.c  369
-rw-r--r--  tinyDAV/src/audio/tdav_consumer_audio.c  272
-rw-r--r--  tinyDAV/src/audio/tdav_jitterbuffer.c  1036
-rw-r--r--  tinyDAV/src/audio/tdav_producer_audio.c  133
-rw-r--r--  tinyDAV/src/audio/tdav_session_audio.c  991
-rw-r--r--  tinyDAV/src/audio/tdav_speakup_jitterbuffer.c  281
-rw-r--r--  tinyDAV/src/audio/tdav_speex_denoise.c  312
-rw-r--r--  tinyDAV/src/audio/tdav_speex_jitterbuffer.c  319
-rw-r--r--  tinyDAV/src/audio/tdav_speex_resampler.c  254
-rw-r--r--  tinyDAV/src/audio/tdav_webrtc_denoise.c  627
-rw-r--r--  tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx  676
-rw-r--r--  tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx  681
-rw-r--r--  tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c  402
-rw-r--r--  tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c  393
-rw-r--r--  tinyDAV/src/bfcp/tdav_session_bfcp.c  741
-rw-r--r--  tinyDAV/src/codecs/amr/tdav_codec_amr.c  816
-rw-r--r--  tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c  104
-rw-r--r--  tinyDAV/src/codecs/bv/tdav_codec_bv16.c  250
-rw-r--r--  tinyDAV/src/codecs/bv/tdav_codec_bv32.c  0
-rw-r--r--  tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c  126
-rw-r--r--  tinyDAV/src/codecs/fec/tdav_codec_red.c  263
-rw-r--r--  tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c  424
-rw-r--r--  tinyDAV/src/codecs/g711/g711.c  295
-rw-r--r--  tinyDAV/src/codecs/g711/tdav_codec_g711.c  326
-rw-r--r--  tinyDAV/src/codecs/g722/g722_decode.c  400
-rw-r--r--  tinyDAV/src/codecs/g722/g722_encode.c  426
-rw-r--r--  tinyDAV/src/codecs/g722/tdav_codec_g722.c  219
-rw-r--r--  tinyDAV/src/codecs/g729/tdav_codec_g729.c  466
-rw-r--r--  tinyDAV/src/codecs/gsm/tdav_codec_gsm.c  209
-rw-r--r--  tinyDAV/src/codecs/h261/tdav_codec_h261.c  536
-rw-r--r--  tinyDAV/src/codecs/h263/tdav_codec_h263.c  1373
-rw-r--r--  tinyDAV/src/codecs/h264/tdav_codec_h264.c  993
-rw-r--r--  tinyDAV/src/codecs/h264/tdav_codec_h264_cisco.cxx  882
-rw-r--r--  tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx  1130
-rw-r--r--  tinyDAV/src/codecs/h264/tdav_codec_h264_intel.cxx  2221
-rw-r--r--  tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c  411
-rw-r--r--  tinyDAV/src/codecs/ilbc/tdav_codec_ilbc.c  265
-rw-r--r--  tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c  818
-rw-r--r--  tinyDAV/src/codecs/msrp/tdav_codec_msrp.c  106
-rw-r--r--  tinyDAV/src/codecs/opus/tdav_codec_opus.c  363
-rw-r--r--  tinyDAV/src/codecs/speex/tdav_codec_speex.c  286
-rw-r--r--  tinyDAV/src/codecs/t140/tdav_codec_t140.c  175
-rw-r--r--  tinyDAV/src/codecs/theora/tdav_codec_theora.c  862
-rw-r--r--  tinyDAV/src/codecs/vpx/tdav_codec_vp8.c  1059
-rw-r--r--  tinyDAV/src/msrp/tdav_consumer_msrp.c  0
-rw-r--r--  tinyDAV/src/msrp/tdav_producer_msrp.c  0
-rw-r--r--  tinyDAV/src/msrp/tdav_session_msrp.c  984
-rw-r--r--  tinyDAV/src/t140/tdav_consumer_t140.c  137
-rw-r--r--  tinyDAV/src/t140/tdav_producer_t140.c  139
-rw-r--r--  tinyDAV/src/t140/tdav_session_t140.c  1165
-rw-r--r--  tinyDAV/src/tdav.c  758
-rw-r--r--  tinyDAV/src/tdav_apple.mm  159
-rw-r--r--  tinyDAV/src/tdav_session_av.c  2474
-rw-r--r--  tinyDAV/src/tdav_win32.c  234
-rw-r--r--  tinyDAV/src/video/directx/tdav_producer_screencast_d3d9.cxx  185
-rw-r--r--  tinyDAV/src/video/directx/tdav_producer_screencast_ddraw.cxx  1542
-rw-r--r--  tinyDAV/src/video/gdi/tdav_consumer_video_gdi.c  544
-rw-r--r--  tinyDAV/src/video/gdi/tdav_producer_screencast_gdi.c  534
-rw-r--r--  tinyDAV/src/video/jb/tdav_video_frame.c  243
-rw-r--r--  tinyDAV/src/video/jb/tdav_video_jb.c  573
-rw-r--r--  tinyDAV/src/video/mf/tdav_consumer_video_mf.cxx  185
-rw-r--r--  tinyDAV/src/video/mf/tdav_producer_video_mf.cxx  855
-rw-r--r--  tinyDAV/src/video/tdav_consumer_video.c  207
-rw-r--r--  tinyDAV/src/video/tdav_converter_video.cxx  832
-rw-r--r--  tinyDAV/src/video/tdav_runnable_video.c  95
-rw-r--r--  tinyDAV/src/video/tdav_session_video.c  1649
-rw-r--r--  tinyDAV/src/video/v4linux/tdav_producer_video_v4l2.c  1164
-rw-r--r--  tinyDAV/src/video/winm/tdav_consumer_winm.cxx  219
-rw-r--r--  tinyDAV/src/video/winm/tdav_producer_winm.cxx  737
80 files changed, 42801 insertions, 0 deletions
diff --git a/tinyDAV/src/audio/alsa/tdav_common_alsa.c b/tinyDAV/src/audio/alsa/tdav_common_alsa.c
new file mode 100644
index 0000000..d1deec8
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_common_alsa.c
@@ -0,0 +1,275 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Common] " FMT, ##__VA_ARGS__)
+
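+// One playback period holds one ptime of audio; the device buffer below is sized as this many periods (e.g. 6 x 20 ms = 120 ms), presumably to absorb scheduling jitter without adding excessive latency.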
+#define ALSA_PLAYBACK_PERIODS 6
+
+int tdav_common_alsa_init(tdav_common_alsa_t* p_self)
+{
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (p_self->b_initialized) {
+ ALSA_DEBUG_WARN("Already initialized");
+ return 0;
+ }
+ tsk_safeobj_init(p_self);
+ p_self->b_initialized = tsk_true;
+ return 0;
+}
+
+int tdav_common_alsa_lock(tdav_common_alsa_t* p_self)
+{
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ return tsk_safeobj_lock(p_self);
+}
+
+int tdav_common_alsa_unlock(tdav_common_alsa_t* p_self)
+{
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ return tsk_safeobj_unlock(p_self);
+}
+
+int tdav_common_alsa_prepare(tdav_common_alsa_t* p_self, tsk_bool_t is_capture, int ptime, int channels, int sample_rate)
+{
+ int err = 0, val;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_prepared) {
+ ALSA_DEBUG_WARN("Already prepared");
+ goto bail;
+ }
+ if (!p_self->p_device_name) {
+ p_self->p_device_name = strdup("default");
+ }
+ p_self->b_capture = is_capture;
+
+ if ((err = snd_pcm_open(&p_self->p_handle, p_self->p_device_name, is_capture ? SND_PCM_STREAM_CAPTURE : SND_PCM_STREAM_PLAYBACK, /*SND_PCM_NONBLOCK | SND_PCM_ASYNC*/0)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to open audio device %s (%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("device('%s') opened", p_self->p_device_name);
+
+ if ((err = snd_pcm_hw_params_malloc(&p_self->p_params)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to allocate hardware parameter structure(%s)", snd_strerror(err));
+ goto bail;
+ }
+
+ if ((err = snd_pcm_hw_params_any(p_self->p_handle, p_self->p_params)) < 0) {
+ ALSA_DEBUG_ERROR("Failed to initialize hardware parameter structure (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ if ((err = snd_pcm_hw_params_set_access(p_self->p_handle, p_self->p_params, SND_PCM_ACCESS_RW_INTERLEAVED)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set access type (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ if ((err = snd_pcm_hw_params_set_format(p_self->p_handle, p_self->p_params, SND_PCM_FORMAT_S16_LE)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set sample format (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ val = sample_rate;
+ if ((err = snd_pcm_hw_params_set_rate_near(p_self->p_handle, p_self->p_params, &val, 0)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set sample rate (rate=%d, device=%s, err=%s)", p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("sample_rate: req=%d, resp=%d", sample_rate, val);
+ p_self->sample_rate = val;
+
+ val = channels;
+ if ((err = snd_pcm_hw_params_set_channels_near(p_self->p_handle, p_self->p_params, &val)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set channels (channels=%d, device=%s, err=%s)", p_self->channels, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("channels: req=%d, resp=%d", channels, val);
+ p_self->channels = val;
+
+ if (!is_capture) {
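+ // Playback only: request ALSA_PLAYBACK_PERIODS periods of one ptime each (rate * channels * ptime / 1000 samples per period) and a device ring buffer covering all of them.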
+ unsigned int periods = ALSA_PLAYBACK_PERIODS;
+ snd_pcm_uframes_t periodSize = (ptime * p_self->sample_rate * p_self->channels) / 1000;
+ if ((err = snd_pcm_hw_params_set_periods_near(p_self->p_handle, p_self->p_params, &periods, 0)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to set periods (val=%u, device=%s, err=%s)", periods, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ snd_pcm_uframes_t bufferSize = (periodSize * periods);
+ if ((err = snd_pcm_hw_params_set_buffer_size(p_self->p_handle, p_self->p_params, bufferSize)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to set buffer size (val=%lu, device=%s, err=%s)", bufferSize, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("periods=%u, buffersize=%lu", periods, bufferSize);
+ }
+
+ if ((err = snd_pcm_hw_params (p_self->p_handle, p_self->p_params)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to set parameters (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ if ((err = snd_pcm_prepare(p_self->p_handle)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to prepare device (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ /*if (is_capture)*/ {
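+ // Scratch buffer holding exactly one ptime of S16_LE audio: (ptime ms * rate / 1000) frames * 2 bytes * channels; the sample count is simply half the byte count.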
+ p_self->n_buff_size_in_bytes = (ptime * p_self->sample_rate * (2/*SND_PCM_FORMAT_S16_LE*/ * p_self->channels)) / 1000;
+ if (!(p_self->p_buff_ptr = tsk_realloc(p_self->p_buff_ptr, p_self->n_buff_size_in_bytes))) {
+ ALSA_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_self->n_buff_size_in_bytes);
+ err = -4;
+ goto bail;
+ }
+ p_self->n_buff_size_in_samples = (p_self->n_buff_size_in_bytes >> 1/*SND_PCM_FORMAT_S16_LE*/);
+ ALSA_DEBUG_INFO("n_buff_size_in_bytes=%u", p_self->n_buff_size_in_bytes);
+ }
+
+ ALSA_DEBUG_INFO("device('%s') prepared", p_self->p_device_name);
+
+ // everything is OK
+ p_self->b_prepared = tsk_true;
+bail:
+ if (err) {
+ tdav_common_alsa_unprepare(p_self);
+ }
+ tdav_common_alsa_unlock(p_self);
+ return err;
+
+}
+
+int tdav_common_alsa_unprepare(tdav_common_alsa_t* p_self)
+{
+ int err = 0;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_started) {
+ ALSA_DEBUG_ERROR("Must stop the capture device before unpreparing");
+ err = -2;
+ goto bail;
+ }
+
+ if (p_self->p_params) {
+ snd_pcm_hw_params_free(p_self->p_params);
+ p_self->p_params = tsk_null;
+ }
+ if (p_self->p_handle) {
+ snd_pcm_close(p_self->p_handle);
+ p_self->p_handle = tsk_null;
+ }
+ p_self->b_prepared = tsk_false;
+
+ ALSA_DEBUG_INFO("device('%s') unprepared", p_self->p_device_name);
+
+bail:
+ tdav_common_alsa_unlock(p_self);
+ return err;
+}
+
+int tdav_common_alsa_start(tdav_common_alsa_t* p_self)
+{
+ int err = 0;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_started) {
+ ALSA_DEBUG_WARN("Already started");
+ err = -3;
+ goto bail;
+ }
+ if (!p_self->b_prepared) {
+ ALSA_DEBUG_ERROR("Not prepared");
+ err = -2;
+ goto bail;
+ }
+
+ if ((err = snd_pcm_start(p_self->p_handle)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to start device (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ p_self->b_started = tsk_true;
+ ALSA_DEBUG_INFO("device('%s') started", p_self->p_device_name);
+bail:
+ tdav_common_alsa_unlock(p_self);
+ return err;
+}
+
+int tdav_common_alsa_stop(tdav_common_alsa_t* p_self)
+{
+ int err = 0;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_started) {
+ p_self->b_started = tsk_false;
+ //err = snd_pcm_drain(p_self->p_handle);
+ ALSA_DEBUG_INFO("device('%s') stopped", p_self->p_device_name);
+ }
+ if (p_self->b_prepared) {
+ tdav_common_alsa_unprepare(p_self);
+ }
+bail:
+ tdav_common_alsa_unlock(p_self);
+ return err;
+}
+
+int tdav_common_alsa_deinit(tdav_common_alsa_t* p_self)
+{
+ if (p_self && p_self->b_initialized) {
+ tdav_common_alsa_stop(p_self);
+ tdav_common_alsa_unprepare(p_self);
+ TSK_FREE(p_self->p_device_name);
+ TSK_FREE(p_self->p_buff_ptr);
+ tsk_safeobj_deinit(p_self);
+ p_self->b_initialized = tsk_false;
+ }
+ return 0;
+}
+
+#endif /* HAVE_ALSA_ASOUNDLIB_H */
+
diff --git a/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c b/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c
new file mode 100644
index 0000000..65bfcd8
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c
@@ -0,0 +1,288 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_consumer_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_consumer_alsa_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ tsk_bool_t b_muted;
+ tsk_bool_t b_started;
+ tsk_bool_t b_paused;
+
+ tsk_thread_handle_t* tid[1];
+
+ struct tdav_common_alsa_s alsa_common;
+}
+tdav_consumer_alsa_t;
+
+static void* TSK_STDCALL _tdav_consumer_alsa_playback_thread(void *param)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)param;
+ int err;
+
+ ALSA_DEBUG_INFO("__playback_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_alsa->b_started) {
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+ //snd_pcm_wait(p_alsa->alsa_common.p_handle, 20);
+ //ALSA_DEBUG_INFO ("get (%d)", p_alsa->alsa_common.n_buff_size_in_bytes);
+ err = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(p_alsa), p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_bytes); // requires 16bits, thread-safe
+ //ALSA_DEBUG_INFO ("get returned %d", err);
+ if (err < p_alsa->alsa_common.n_buff_size_in_bytes) {
+ memset(((uint8_t*)p_alsa->alsa_common.p_buff_ptr) + err, 0, (p_alsa->alsa_common.n_buff_size_in_bytes - err));
+
+ }
+ if ((err = snd_pcm_writei(p_alsa->alsa_common.p_handle, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_samples)) != p_alsa->alsa_common.n_buff_size_in_samples) {
+ if (err == -EPIPE) { // xrun (underrun): try to recover
+ err = snd_pcm_recover(p_alsa->alsa_common.p_handle, err, 0);
+ if (err == 0) {
+ ALSA_DEBUG_INFO ("recovered");
+ goto next;
+ }
+ }
+ ALSA_DEBUG_ERROR ("Failed to read data from audio interface failed (%d->%s)", err, snd_strerror(err));
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ goto bail;
+ }
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(p_alsa));
+next:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ }
+bail:
+ ALSA_DEBUG_INFO("__playback_thread -- STOP");
+ return tsk_null;
+}
+
+
+/* ============ Media Consumer Interface ================= */
+static int tdav_consumer_alsa_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int ret = 0;
+
+ ret = tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+
+ return ret;
+}
+
+static int tdav_consumer_alsa_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int err = 0;
+ ALSA_DEBUG_INFO("******* tdav_consumer_alsa_prepare ******");
+
+ if (! p_alsa || !codec && codec->plugin) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ // Set using requested
+ TMEDIA_CONSUMER(p_alsa)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_alsa)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_alsa)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ // Prepare
+ err = tdav_common_alsa_prepare(&p_alsa->alsa_common, tsk_false/*is_record*/, TMEDIA_CONSUMER( p_alsa)->audio.ptime, TMEDIA_CONSUMER( p_alsa)->audio.in.channels, TMEDIA_CONSUMER( p_alsa)->audio.in.rate);
+ if (err) {
+ goto bail;
+ }
+
+ ALSA_DEBUG_INFO("prepared: req_channels=%d; req_rate=%d, resp_channels=%d; resp_rate=%d",
+ TMEDIA_CONSUMER(p_alsa)->audio.in.channels, TMEDIA_CONSUMER(p_alsa)->audio.in.rate,
+ p_alsa->alsa_common.channels, p_alsa->alsa_common.sample_rate);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_CONSUMER(p_alsa)->audio.out.channels = p_alsa->alsa_common.channels;
+ TMEDIA_CONSUMER(p_alsa)->audio.out.rate = p_alsa->alsa_common.sample_rate;
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_consumer_alsa_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int err = 0;
+
+ ALSA_DEBUG_INFO("******* tdav_consumer_alsa_start ******");
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ if (p_alsa->b_started) {
+ ALSA_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start device */
+ err = tdav_common_alsa_start(&p_alsa->alsa_common);
+ if (err) {
+ goto bail;
+ }
+
+ /* start thread */
+ p_alsa->b_started = tsk_true;
+ tsk_thread_create(&p_alsa->tid[0], _tdav_producer_alsa_playback_thread, p_alsa);
+
+ ALSA_DEBUG_INFO("started");
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_consumer_alsa_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ int err = 0;
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+
+ if (!p_alsa || !buffer || !size) {
+ ALSA_DEBUG_ERROR("Invalid paramter");
+ return -1;
+ }
+
+ //tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ if (!p_alsa->b_started) {
+ ALSA_DEBUG_WARN("Not started");
+ err = -2;
+ goto bail;
+ }
+
+ if ((err = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(p_alsa), buffer, size, proto_hdr))) {//thread-safe
+ ALSA_DEBUG_WARN("Failed to put audio data to the jitter buffer");
+ goto bail;
+ }
+
+bail:
+ //tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_consumer_alsa_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+static int tdav_consumer_alsa_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int err;
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* should be done here */
+ p_alsa->b_started = tsk_false;
+
+ err = tdav_common_alsa_stop(&p_alsa->alsa_common);
+
+ /* stop thread */
+ if (p_alsa->tid[0]) {
+ tsk_thread_join(&(p_alsa->tid[0]));
+ }
+
+ ALSA_DEBUG_INFO("stopped");
+
+ return 0;
+}
+
+
+//
+// ALSA consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_alsa_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_alsa_t *p_alsa = self;
+ if (p_alsa) {
+ ALSA_DEBUG_INFO("create");
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(p_alsa));
+ /* init self */
+ tdav_common_alsa_init(&p_alsa->alsa_common);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_alsa_dtor(tsk_object_t * self)
+{
+ tdav_consumer_alsa_t *p_alsa = self;
+ if (p_alsa) {
+ /* stop */
+ if (p_alsa->b_started) {
+ tdav_consumer_alsa_stop((tmedia_consumer_t*)p_alsa);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(p_alsa));
+ /* deinit self */
+ tdav_common_alsa_deinit(&p_alsa->alsa_common);
+
+ ALSA_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_alsa_def_s =
+{
+ sizeof(tdav_consumer_alsa_t),
+ tdav_consumer_alsa_ctor,
+ tdav_consumer_alsa_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_alsa_plugin_def_s =
+{
+ &tdav_consumer_alsa_def_s,
+
+ tmedia_audio,
+ "Linux ALSA consumer",
+
+ tdav_consumer_alsa_set,
+ tdav_consumer_alsa_prepare,
+ tdav_consumer_alsa_start,
+ tdav_consumer_alsa_consume,
+ tdav_consumer_alsa_pause,
+ tdav_consumer_alsa_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_alsa_plugin_def_t = &tdav_consumer_alsa_plugin_def_s;
+
+#endif /* #if HAVE_ALSA_ASOUNDLIB_H */
diff --git a/tinyDAV/src/audio/alsa/tdav_producer_alsa.c b/tinyDAV/src/audio/alsa/tdav_producer_alsa.c
new file mode 100644
index 0000000..d5c4021
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_producer_alsa.c
@@ -0,0 +1,261 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_producer_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Producer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_producer_alsa_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ tsk_bool_t b_muted;
+ tsk_bool_t b_started;
+ tsk_bool_t b_paused;
+
+ tsk_thread_handle_t* tid[1];
+
+ struct tdav_common_alsa_s alsa_common;
+}
+tdav_producer_alsa_t;
+
+static void* TSK_STDCALL _tdav_producer_alsa_record_thread(void *param)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)param;
+ int err;
+
+ ALSA_DEBUG_INFO("__record_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_alsa->b_started) {
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+ if ((err = snd_pcm_readi(p_alsa->alsa_common.p_handle, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_samples)) != p_alsa->alsa_common.n_buff_size_in_samples) {
+ ALSA_DEBUG_ERROR ("Failed to read data from audio interface failed (%d->%s)", err, snd_strerror(err));
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ goto bail;
+ }
+ if (!p_alsa->b_muted && TMEDIA_PRODUCER(p_alsa)->enc_cb.callback) {
+ TMEDIA_PRODUCER(p_alsa)->enc_cb.callback(TMEDIA_PRODUCER(p_alsa)->enc_cb.callback_data, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_bytes);
+ }
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ }
+bail:
+ ALSA_DEBUG_INFO("__record_thread -- STOP");
+ return tsk_null;
+}
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_alsa_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "mute")){
+ p_alsa->b_muted = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+static int tdav_producer_alsa_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ int err = 0;
+ ALSA_DEBUG_INFO("******* tdav_producer_alsa_prepare ******");
+
+ if (! p_alsa || !codec && codec->plugin) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ // Set using requested
+ TMEDIA_PRODUCER( p_alsa)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER( p_alsa)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER( p_alsa)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+ // Prepare
+ err = tdav_common_alsa_prepare(&p_alsa->alsa_common, tsk_true/*is_capture*/, TMEDIA_PRODUCER( p_alsa)->audio.ptime, TMEDIA_PRODUCER( p_alsa)->audio.channels, TMEDIA_PRODUCER( p_alsa)->audio.rate);
+ if (err) {
+ goto bail;
+ }
+
+ ALSA_DEBUG_INFO("prepared: req_channels=%d; req_rate=%d, resp_channels=%d; resp_rate=%d",
+ TMEDIA_PRODUCER(p_alsa)->audio.channels, TMEDIA_PRODUCER(p_alsa)->audio.rate,
+ p_alsa->alsa_common.channels, p_alsa->alsa_common.sample_rate);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_PRODUCER(p_alsa)->audio.channels = p_alsa->alsa_common.channels;
+ TMEDIA_PRODUCER(p_alsa)->audio.rate = p_alsa->alsa_common.sample_rate;
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_producer_alsa_start(tmedia_producer_t* self)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ int err = 0;
+
+ ALSA_DEBUG_INFO("******* tdav_producer_alsa_start ******");
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ if (p_alsa->b_started) {
+ ALSA_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start device */
+ err = tdav_common_alsa_start(&p_alsa->alsa_common);
+ if (err) {
+ goto bail;
+ }
+
+ /* start thread */
+ p_alsa->b_started = tsk_true;
+ tsk_thread_create(&p_alsa->tid[0], _tdav_producer_alsa_record_thread, p_alsa);
+
+ ALSA_DEBUG_INFO("started");
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_producer_alsa_pause(tmedia_producer_t* self)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ALSA_DEBUG_INFO("paused");
+
+ return 0;
+}
+
+static int tdav_producer_alsa_stop(tmedia_producer_t* self)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ int err;
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* should be done here */
+ p_alsa->b_started = tsk_false;
+
+ err = tdav_common_alsa_stop(&p_alsa->alsa_common);
+
+ /* stop thread */
+ if (p_alsa->tid[0]) {
+ tsk_thread_join(&(p_alsa->tid[0]));
+ }
+
+ ALSA_DEBUG_INFO("stopped");
+
+ return 0;
+}
+
+
+//
+// ALSA producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_alsa_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_alsa_t *p_alsa = (tdav_producer_alsa_t*)self;
+ if (p_alsa) {
+ ALSA_DEBUG_INFO("create");
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(p_alsa));
+ /* init self */
+ tdav_common_alsa_init(&p_alsa->alsa_common);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_alsa_dtor(tsk_object_t * self)
+{
+ tdav_producer_alsa_t *p_alsa = (tdav_producer_alsa_t *)self;
+ if (p_alsa) {
+ /* stop */
+ if (p_alsa->b_started) {
+ tdav_producer_alsa_stop((tmedia_producer_t*)p_alsa);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(p_alsa));
+ /* deinit self */
+ tdav_common_alsa_deinit(&p_alsa->alsa_common);
+
+ ALSA_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_alsa_def_s =
+{
+ sizeof(tdav_producer_alsa_t),
+ tdav_producer_alsa_ctor,
+ tdav_producer_alsa_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_alsa_plugin_def_s =
+{
+ &tdav_producer_alsa_def_s,
+
+ tmedia_audio,
+ "Linux ALSA producer",
+
+ tdav_producer_alsa_set,
+ tdav_producer_alsa_prepare,
+ tdav_producer_alsa_start,
+ tdav_producer_alsa_pause,
+ tdav_producer_alsa_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_alsa_plugin_def_t = &tdav_producer_alsa_plugin_def_s;
+
+#endif /* #if HAVE_ALSA_ASOUNDLIB_H */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_audiounit.c
new file mode 100644
index 0000000..dc11f10
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_audiounit.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_audiounit.h"
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#include "tinydav/tdav_apple.h"
+
+#include "tsk_string.h"
+#include "tsk_list.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#if TARGET_OS_IPHONE
+static UInt32 kOne = 1;
+static UInt32 kZero = 0;
+#endif /* TARGET_OS_IPHONE */
+
+#if TARGET_OS_IPHONE
+ #if TARGET_IPHONE_SIMULATOR // VoiceProcessingIO will give unexpected result on the simulator when using iOS 5
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_RemoteIO
+ #else // Echo cancellation, AGC, ...
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_VoiceProcessingIO
+ #endif
+#elif TARGET_OS_MAC
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_HALOutput
+#else
+ #error "Unknown target"
+#endif
+
+#undef kInputBus
+#define kInputBus 1
+#undef kOutputBus
+#define kOutputBus 0
+
+typedef struct tdav_audiounit_instance_s
+{
+ TSK_DECLARE_OBJECT;
+ uint64_t session_id;
+ uint32_t frame_duration;
+ AudioComponentInstance audioUnit;
+ struct{
+ unsigned consumer:1;
+ unsigned producer:1;
+ } prepared;
+ unsigned started:1;
+ unsigned interrupted:1;
+
+ TSK_DECLARE_SAFEOBJ;
+
+}
+tdav_audiounit_instance_t;
+TINYDAV_GEXTERN const tsk_object_def_t *tdav_audiounit_instance_def_t;
+typedef tsk_list_t tdav_audiounit_instances_L_t;
+
+
+static AudioComponent __audioSystem = tsk_null;
+static tdav_audiounit_instances_L_t* __audioUnitInstances = tsk_null;
+
+static int _tdav_audiounit_handle_signal_xxx_prepared(tdav_audiounit_handle_t* self, tsk_bool_t consumer)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+
+ if(consumer){
+ inst->prepared.consumer = tsk_true;
+ }
+ else {
+ inst->prepared.producer = tsk_true;
+ }
+
+ OSStatus status;
+
+ // For iOS we are using full-duplex AudioUnit and we wait for both consumer and producer to be prepared
+#if TARGET_OS_IPHONE
+ if(inst->prepared.consumer && inst->prepared.producer)
+#endif
+ {
+ status = AudioUnitInitialize(inst->audioUnit);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitInitialize failed with status =%ld", (signed long)status);
+ tsk_safeobj_unlock(inst);
+ return -2;
+ }
+ }
+
+ tsk_safeobj_unlock(inst);
+ return 0;
+}
+
+tdav_audiounit_handle_t* tdav_audiounit_handle_create(uint64_t session_id)
+{
+ tdav_audiounit_instance_t* inst = tsk_null;
+
+ // create audio unit component
+ if(!__audioSystem){
+ AudioComponentDescription audioDescription;
+ audioDescription.componentType = kAudioUnitType_Output;
+ audioDescription.componentSubType = kDoubangoAudioUnitSubType;
+ audioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
+ audioDescription.componentFlags = 0;
+ audioDescription.componentFlagsMask = 0;
+ if((__audioSystem = AudioComponentFindNext(NULL, &audioDescription))){
+ // leave blank
+ }
+ else {
+ TSK_DEBUG_ERROR("Failed to find new audio component");
+ goto done;
+ }
+
+ }
+ // create list used to hold instances
+ if(!__audioUnitInstances && !(__audioUnitInstances = tsk_list_create())){
+ TSK_DEBUG_ERROR("Failed to create new list");
+ goto done;
+ }
+
+ //= lock the list
+ tsk_list_lock(__audioUnitInstances);
+
+ // For iOS we are using full-duplex AudioUnit and to keep it unique for both
+ // the consumer and producer we use the session id.
+#if TARGET_OS_IPHONE
+ // find the instance from the list
+ const tsk_list_item_t* item;
+ tsk_list_foreach(item,__audioUnitInstances){
+ if(((tdav_audiounit_instance_t*)item->data)->session_id == session_id){
+ inst = tsk_object_ref(item->data);
+ goto done;
+ }
+ }
+#endif
+
+ // create instance object and put it into the list
+ if((inst = tsk_object_new(tdav_audiounit_instance_def_t))){
+ OSStatus status = noErr;
+ tdav_audiounit_instance_t* _inst;
+
+ // create new instance
+ if((status= AudioComponentInstanceNew(__audioSystem, &inst->audioUnit)) != noErr){
+ TSK_DEBUG_ERROR("AudioComponentInstanceNew() failed with status=%ld", (signed long)status);
+ TSK_OBJECT_SAFE_FREE(inst);
+ goto done;
+ }
+ _inst = inst, _inst->session_id = session_id;
+ tsk_list_push_back_data(__audioUnitInstances, (void**)&_inst);
+ }
+
+done:
+ //= unlock the list
+ tsk_list_unlock(__audioUnitInstances);
+ return (tdav_audiounit_handle_t*)inst;
+}
+
+AudioComponentInstance tdav_audiounit_handle_get_instance(tdav_audiounit_handle_t* self)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+ return ((tdav_audiounit_instance_t*)self)->audioUnit;
+}
+
+int tdav_audiounit_handle_signal_consumer_prepared(tdav_audiounit_handle_t* self)
+{
+ return _tdav_audiounit_handle_signal_xxx_prepared(self, tsk_true);
+}
+
+int tdav_audiounit_handle_signal_producer_prepared(tdav_audiounit_handle_t* self)
+{
+ return _tdav_audiounit_handle_signal_xxx_prepared(self, tsk_false);
+}
+
+int tdav_audiounit_handle_start(tdav_audiounit_handle_t* self)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ OSStatus status = noErr;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+ status = (OSStatus)tdav_apple_enable_audio();
+ if (status == noErr) {
+ if ((!inst->started || inst->interrupted) && (status = AudioOutputUnitStart(inst->audioUnit))) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStart failed with status=%ld", (signed long)status);
+ }
+ }
+ else {
+ TSK_DEBUG_ERROR("tdav_apple_enable_audio() failed with status=%ld", (signed long)status);
+ }
+ inst->started = (status == noErr) ? tsk_true : tsk_false;
+ if (inst->started) inst->interrupted = 0;
+ tsk_safeobj_unlock(inst);
+ return status ? -2 : 0;
+}
+
+uint32_t tdav_audiounit_handle_get_frame_duration(tdav_audiounit_handle_t* self)
+{
+ if(self){
+ return ((tdav_audiounit_instance_t*)self)->frame_duration;
+ }
+ return 0;
+}
+
+int tdav_audiounit_handle_configure(tdav_audiounit_handle_t* self, tsk_bool_t consumer, uint32_t ptime, AudioStreamBasicDescription* audioFormat)
+{
+ OSStatus status = noErr;
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+
+ if(!inst || !audioFormat){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+#if TARGET_OS_IPHONE
+ // set preferred buffer size
+ Float32 preferredBufferSize = ((Float32)ptime / 1000.f); // in seconds
+ UInt32 size = sizeof(preferredBufferSize);
+ status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration) failed with status=%d", (int)status);
+ TSK_OBJECT_SAFE_FREE(inst);
+ goto done;
+ }
+ status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &preferredBufferSize);
+ if(status == noErr){
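+ // The session reports the granted IO buffer duration in seconds; keep it as milliseconds.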
+ inst->frame_duration = (preferredBufferSize * 1000);
+ TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
+ }
+ else {
+ TSK_DEBUG_ERROR("AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, %f) failed", preferredBufferSize);
+ }
+
+
+ UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
+ status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_AudioCategory) failed with status code=%d", (int)status);
+ goto done;
+ }
+
+#elif TARGET_OS_MAC
+#if 1
+ // set preferred buffer size
+ UInt32 preferredBufferSize = ((ptime * audioFormat->mSampleRate)/1000); // in frames
+ UInt32 size = sizeof(preferredBufferSize);
+ status = AudioUnitSetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, size);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ }
+ status = AudioUnitGetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, &size);
+ if(status == noErr){
+ inst->frame_duration = ((preferredBufferSize * 1000)/audioFormat->mSampleRate);
+ TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
+ }
+ else {
+ TSK_DEBUG_ERROR("AudioUnitGetProperty(kAudioDevicePropertyBufferFrameSize, %lu) failed", (unsigned long)preferredBufferSize);
+ }
+#endif
+
+#endif
+
+done:
+ return (status == noErr) ? 0 : -2;
+}
+
+int tdav_audiounit_handle_mute(tdav_audiounit_handle_t* self, tsk_bool_t mute)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+#if TARGET_OS_IPHONE
+ OSStatus status = noErr;
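+ // Toggle the voice-processing unit's MuteOutput property on the output bus; on non-iOS builds mute is a no-op here.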
+ status = AudioUnitSetProperty(inst->audioUnit, kAUVoiceIOProperty_MuteOutput,
+ kAudioUnitScope_Output, kOutputBus, mute ? &kOne : &kZero, mute ? sizeof(kOne) : sizeof(kZero));
+
+ return (status == noErr) ? 0 : -2;
+#else
+ return 0;
+#endif
+}
+
+int tdav_audiounit_handle_interrupt(tdav_audiounit_handle_t* self, tsk_bool_t interrupt)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if (!inst){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ OSStatus status = noErr;
+ if (inst->interrupted != interrupt && inst->started) {
+ if (interrupt) {
+ status = AudioOutputUnitStop(inst->audioUnit);
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStop failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+ }
+ else {
+#if TARGET_OS_IPHONE
+ status = (OSStatus)tdav_apple_enable_audio();
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioSessionSetActive failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+#endif
+ status = AudioOutputUnitStart(inst->audioUnit);
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStart failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+ }
+ }
+ inst->interrupted = interrupt ? 1: 0;
+bail:
+ return (status != noErr) ? -2 : 0;
+}
+
+int tdav_audiounit_handle_stop(tdav_audiounit_handle_t* self)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ OSStatus status = noErr;
+ if(!inst || (inst->started && !inst->audioUnit)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+ if(inst->started && (status = AudioOutputUnitStop(inst->audioUnit))){
+ TSK_DEBUG_ERROR("AudioOutputUnitStop failed with status=%ld", (signed long)status);
+ }
+ inst->started = (status == noErr ? tsk_false : tsk_true);
+ tsk_safeobj_unlock(inst);
+ return (status != noErr) ? -2 : 0;
+}
+
+int tdav_audiounit_handle_destroy(tdav_audiounit_handle_t** self){
+ if(!self || !*self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ tsk_list_lock(__audioUnitInstances);
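+ // Holding the last reference: removing the item from the shared list destroys the instance; otherwise just drop this caller's reference and keep the unit alive for the other media direction.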
+ if(tsk_object_get_refcount(*self)==1){
+ tsk_list_remove_item_by_data(__audioUnitInstances, *self);
+ }
+ else {
+ tsk_object_unref(*self);
+ }
+ tsk_list_unlock(__audioUnitInstances);
+ *self = tsk_null;
+ return 0;
+}
+
+//
+// Object definition for an AudioUnit instance
+//
+static tsk_object_t* tdav_audiounit_instance_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_audiounit_instance_t* inst = self;
+ if(inst){
+ tsk_safeobj_init(inst);
+ }
+ return self;
+}
+static tsk_object_t* tdav_audiounit_instance_dtor(tsk_object_t * self)
+{
+ tdav_audiounit_instance_t* inst = self;
+ if(inst){
+ tsk_safeobj_lock(inst);
+ if(inst->audioUnit){
+ AudioUnitUninitialize(inst->audioUnit);
+ AudioComponentInstanceDispose(inst->audioUnit);
+ inst->audioUnit = tsk_null;
+ }
+ tsk_safeobj_unlock(inst);
+
+ tsk_safeobj_deinit(inst);
+ TSK_DEBUG_INFO("*** AudioUnit Instance destroyed ***");
+ }
+ return self;
+}
+static int tdav_audiounit_instance_cmp(const tsk_object_t *_ai1, const tsk_object_t *_ai2)
+{
+ return (int)(_ai1 - _ai2);
+}
+static const tsk_object_def_t tdav_audiounit_instance_def_s =
+{
+ sizeof(tdav_audiounit_instance_t),
+ tdav_audiounit_instance_ctor,
+ tdav_audiounit_instance_dtor,
+ tdav_audiounit_instance_cmp,
+};
+const tsk_object_def_t *tdav_audiounit_instance_def_t = &tdav_audiounit_instance_def_s;
+
+
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c b/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c
new file mode 100644
index 0000000..2f5fd90
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_consumer_audioqueue.c
+ * @brief Audio Consumer for MacOSX and iOS platforms.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_consumer_audioqueue.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+#if HAVE_COREAUDIO_AUDIO_QUEUE
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+static void __handle_output_buffer(void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer) {
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)userdata;
+
+ if (!consumer->started) {
+ return;
+ }
+
+ if(!tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer), buffer->mAudioData, consumer->buffer_size)){
+ // Put silence
+ memset(buffer->mAudioData, 0, consumer->buffer_size);
+ }
+
+ // Re-enqueue the buffer
+ AudioQueueEnqueueBuffer(consumer->queue, buffer, 0, NULL);
+ // alert the jitter buffer
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(consumer));
+}
+
+/* ============ Media Consumer Interface ================= */
+#define tdav_consumer_audioqueue_set tsk_null
+
+int tdav_consumer_audioqueue_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer || !codec && codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+ /* codec should have ptime */
+
+ // Set audio category
+#if TARGET_OS_IPHONE
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+#endif
+
+ // Create the audio stream description
+ AudioStreamBasicDescription *description = &(consumer->description);
+ description->mSampleRate = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
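+ // Size each AudioQueue buffer to hold one ptime of audio: bytes per second (rate * bytesPerFrame) divided by the number of ptime packets per second.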
+ int packetperbuffer = 1000 / TMEDIA_CONSUMER(consumer)->audio.ptime;
+ consumer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
+
+ // Create the playback audio queue
+ ret = AudioQueueNewOutput(&(consumer->description),
+ __handle_output_buffer,
+ consumer,
+ NULL,
+ NULL,
+ 0,
+ &(consumer->queue));
+
+ for(i = 0; i < CoreAudioPlayBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(consumer->queue, consumer->buffer_size, &(consumer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(consumer->buffers[i]->mAudioData, 0, consumer->buffer_size);
+ consumer->buffers[i]->mAudioDataByteSize = consumer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(consumer->queue, consumer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int tdav_consumer_audioqueue_start(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->started){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ consumer->started = tsk_true;
+ ret = AudioQueueStart(consumer->queue, NULL);
+
+ return ret;
+}
+
+int tdav_consumer_audioqueue_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ // buffer is already decoded
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+}
+
+int tdav_consumer_audioqueue_pause(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ret = AudioQueuePause(consumer->queue);
+
+ return ret;
+}
+
+int tdav_consumer_audioqueue_stop(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!consumer->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+
+ consumer->started = tsk_false;
+ ret = AudioQueueStop(consumer->queue, false);
+
+ return ret;
+}
+
+//
+// coreaudio consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_audioqueue_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_audioqueue_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_audioqueue_dtor(tsk_object_t * self)
+{
+ tdav_consumer_audioqueue_t *consumer = self;
+ if(consumer){
+ // Stop the consumer if not done
+ if(consumer->started){
+ tdav_consumer_audioqueue_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (consumer->queue) {
+ tsk_size_t i;
+
+ for(i=0; i<CoreAudioPlayBuffers; i++){
+ AudioQueueFreeBuffer(consumer->queue, consumer->buffers[i]);
+ }
+
+ AudioQueueDispose(consumer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ }
+
+ return self;
+}
+
+/* object definition */
+static const tsk_object_def_t tdav_consumer_audioqueue_def_s =
+{
+ sizeof(tdav_consumer_audioqueue_t),
+ tdav_consumer_audioqueue_ctor,
+ tdav_consumer_audioqueue_dtor,
+ tdav_consumer_audio_cmp,
+};
+
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_audioqueue_plugin_def_s =
+{
+ &tdav_consumer_audioqueue_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio consumer(AudioQueue)",
+
+ tdav_consumer_audioqueue_set,
+ tdav_consumer_audioqueue_prepare,
+ tdav_consumer_audioqueue_start,
+ tdav_consumer_audioqueue_consume,
+ tdav_consumer_audioqueue_pause,
+ tdav_consumer_audioqueue_stop
+};
+
+const tmedia_consumer_plugin_def_t *tdav_consumer_audioqueue_plugin_def_t = &tdav_consumer_audioqueue_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_QUEUE */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c
new file mode 100644
index 0000000..947d782
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_consumer_audiounit.h"
+
+// http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/Introduction/Introduction.html%23//apple_ref/doc/uid/TP40009492-CH1-SW1
+// Resampler: http://developer.apple.com/library/mac/#technotes/tn2097/_index.html
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#undef DISABLE_JITTER_BUFFER
+#define DISABLE_JITTER_BUFFER 0
+
+#include "tsk_debug.h"
+#include "tsk_memory.h"
+#include "tsk_string.h"
+
+#define kNoDataError -1
+#define kRingPacketCount +10
+
+static tsk_size_t tdav_consumer_audiounit_get(tdav_consumer_audiounit_t* self, void* data, tsk_size_t size);
+
+static OSStatus __handle_output_buffer(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ OSStatus status = noErr;
+ // tsk_size_t out_size;
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t* )inRefCon;
+
+ if(!consumer->started || consumer->paused){
+ goto done;
+ }
+
+ if(!ioData){
+ TSK_DEBUG_ERROR("Invalid argument");
+ status = kNoDataError;
+ goto done;
+ }
+ // read from jitter buffer and fill ioData buffers
+ tsk_mutex_lock(consumer->ring.mutex);
+ for(int i=0; i<ioData->mNumberBuffers; i++){
+ /* int ret = */ tdav_consumer_audiounit_get(consumer, ioData->mBuffers[i].mData, ioData->mBuffers[i].mDataByteSize);
+ }
+ tsk_mutex_unlock(consumer->ring.mutex);
+
+done:
+ return status;
+}
+
+static tsk_size_t tdav_consumer_audiounit_get(tdav_consumer_audiounit_t* self, void* data, tsk_size_t size)
+{
+ tsk_ssize_t retSize = 0;
+
+#if DISABLE_JITTER_BUFFER
+ retSize = speex_buffer_read(self->ring.buffer, data, size);
+ if(retSize < size){
+ memset(((uint8_t*)data)+retSize, 0, (size - retSize));
+ }
+#else
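+ // The render callback may ask for arbitrary sizes: track how many bytes are owed, refill the speex ring one jitter-buffer chunk (one ptime) at a time, then serve the read from the ring (or silence if not enough data is buffered yet).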
+ self->ring.leftBytes += size;
+ while (self->ring.leftBytes >= self->ring.chunck.size) {
+ self->ring.leftBytes -= self->ring.chunck.size;
+ retSize = (tsk_ssize_t)tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(self), self->ring.chunck.buffer, self->ring.chunck.size);
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(self));
+ speex_buffer_write(self->ring.buffer, self->ring.chunck.buffer, retSize);
+ }
+ // IMPORTANT: looks like there is a bug in speex: continuously trying to read more than is available
+ // can corrupt the buffer (observed at least on OS X 10.5), hence the availability check below.
+ if(speex_buffer_get_available(self->ring.buffer) >= size){
+ retSize = (tsk_ssize_t)speex_buffer_read(self->ring.buffer, data, (int)size);
+ }
+ else{
+ memset(data, 0, size);
+ }
+#endif
+
+ return retSize;
+}
+
+/* ============ Media Consumer Interface ================= */
+int tdav_consumer_audiounit_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if (param->plugin_type == tmedia_ppt_consumer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "interrupt")) {
+ int32_t interrupt = *((uint8_t*)param->value) ? 1 : 0;
+ return tdav_audiounit_handle_interrupt(consumer->audioUnitHandle, interrupt);
+ }
+ }
+ }
+ return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+static int tdav_consumer_audiounit_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ static UInt32 flagOne = 1;
+ AudioStreamBasicDescription audioFormat;
+#define kOutputBus 0
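+// I/O unit bus convention: element 0 (kOutputBus) feeds the speaker/output side, element 1 is the microphone/input side.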
+
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ OSStatus status = noErr;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!consumer->audioUnitHandle){
+ if(!(consumer->audioUnitHandle = tdav_audiounit_handle_create(TMEDIA_CONSUMER(consumer)->session_id))){
+ TSK_DEBUG_ERROR("Failed to get audio unit instance for session with id=%lld", TMEDIA_CONSUMER(consumer)->session_id);
+ return -3;
+ }
+ }
+
+ // enable
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ kOutputBus,
+ &flagOne,
+ sizeof(flagOne));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%d", (int32_t)status);
+ return -4;
+ }
+ else {
+
+#if !TARGET_OS_IPHONE // strange: TARGET_OS_MAC is equal to '1' on the Simulator
+ UInt32 param;
+
+ // disable input
+ param = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle), kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &param, sizeof(UInt32));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set default audio device
+ param = sizeof(AudioDeviceID);
+ AudioDeviceID outputDeviceID;
+ status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &param, &outputDeviceID);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set the current device to the default input unit
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &outputDeviceID,
+ sizeof(AudioDeviceID));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_CurrentDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+#endif
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ TSK_DEBUG_INFO("AudioUnit consumer: in.channels=%d, out.channles=%d, in.rate=%d, out.rate=%d, ptime=%d",
+ TMEDIA_CONSUMER(consumer)->audio.in.channels,
+ TMEDIA_CONSUMER(consumer)->audio.out.channels,
+ TMEDIA_CONSUMER(consumer)->audio.in.rate,
+ TMEDIA_CONSUMER(consumer)->audio.out.rate,
+ TMEDIA_CONSUMER(consumer)->audio.ptime);
+
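+ // Describe the PCM format fed to the output unit's input scope: interleaved signed
+ // integers, one frame per packet, so bytesPerFrame = bitsPerChannel/8 * channels.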
+ audioFormat.mSampleRate = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ audioFormat.mFormatID = kAudioFormatLinearPCM;
+ audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ audioFormat.mChannelsPerFrame = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+ audioFormat.mFramesPerPacket = 1;
+ audioFormat.mBitsPerChannel = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ audioFormat.mBytesPerPacket = audioFormat.mBitsPerChannel / 8 * audioFormat.mChannelsPerFrame;
+ audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket;
+ audioFormat.mReserved = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ kOutputBus,
+ &audioFormat,
+ sizeof(audioFormat));
+
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed with status=%ld", (signed long)status);
+ return -5;
+ }
+ else {
+ // configure
+ if(tdav_audiounit_handle_configure(consumer->audioUnitHandle, tsk_true, TMEDIA_CONSUMER(consumer)->audio.ptime, &audioFormat)){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_set_rate(%d) failed", TMEDIA_CONSUMER(consumer)->audio.out.rate);
+ return -4;
+ }
+
+ // set callback function
+ AURenderCallbackStruct callback;
+ callback.inputProc = __handle_output_buffer;
+ callback.inputProcRefCon = consumer;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ kOutputBus,
+ &callback,
+ sizeof(callback));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ return -6;
+ }
+ }
+ }
+
+ // allocate the chunck buffer and create the ring
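+ // One chunk holds exactly one ptime worth of audio: (ptime[ms] * sampleRate[Hz] * bytesPerFrame) / 1000 bytes.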
+ consumer->ring.chunck.size = (TMEDIA_CONSUMER(consumer)->audio.ptime * audioFormat.mSampleRate * audioFormat.mBytesPerFrame) / 1000;
+ consumer->ring.size = kRingPacketCount * consumer->ring.chunck.size;
+ if(!(consumer->ring.chunck.buffer = tsk_realloc(consumer->ring.chunck.buffer, consumer->ring.chunck.size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return -7;
+ }
+ if(!consumer->ring.buffer){
+ consumer->ring.buffer = speex_buffer_init((int)consumer->ring.size);
+ }
+ else {
+ int ret;
+ if((ret = (int)speex_buffer_resize(consumer->ring.buffer, (int)consumer->ring.size)) < 0){
+ TSK_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", (int)consumer->ring.size, ret);
+ return ret;
+ }
+ }
+ if(!consumer->ring.buffer){
+ TSK_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", (int)consumer->ring.size);
+ return -8;
+ }
+ if(!consumer->ring.mutex && !(consumer->ring.mutex = tsk_mutex_create_2(tsk_false))){
+ TSK_DEBUG_ERROR("Failed to create mutex");
+ return -9;
+ }
+
+ // set maximum frames per slice as buffer size
+ //UInt32 numFrames = (UInt32)consumer->ring.chunck.size;
+ //status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ // kAudioUnitProperty_MaximumFramesPerSlice,
+ // kAudioUnitScope_Global,
+ // 0,
+ // &numFrames,
+ // sizeof(numFrames));
+ //if(status){
+ // TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_MaximumFramesPerSlice, %u) failed with status=%d", (unsigned)numFrames, (int32_t)status);
+ // return -6;
+ //}
+
+ TSK_DEBUG_INFO("AudioUnit consumer prepared");
+ return tdav_audiounit_handle_signal_consumer_prepared(consumer->audioUnitHandle);
+}
+
+static int tdav_consumer_audiounit_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(consumer->paused){
+ consumer->paused = tsk_false;
+ }
+ if(consumer->started){
+ TSK_DEBUG_WARN("Already started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_start(consumer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_start failed with error code=%d", ret);
+ return ret;
+ }
+ }
+ consumer->started = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit consumer started");
+ return 0;
+}
+
+static int tdav_consumer_audiounit_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if(!consumer || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+#if DISABLE_JITTER_BUFFER
+ {
+ if(consumer->ring.buffer){
+ tsk_mutex_lock(consumer->ring.mutex);
+ speex_buffer_write(consumer->ring.buffer, (void*)buffer, size);
+ tsk_mutex_unlock(consumer->ring.mutex);
+ return 0;
+ }
+ return -2;
+ }
+#else
+ {
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+ }
+#endif
+}
+
+static int tdav_consumer_audiounit_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ consumer->paused = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit consumer paused");
+ return 0;
+}
+
+static int tdav_consumer_audiounit_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!consumer->started){
+ TSK_DEBUG_INFO("Not started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_stop(consumer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_stop failed with error code=%d", ret);
+ return ret;
+ }
+ }
+#if TARGET_OS_IPHONE
+ //https://devforums.apple.com/thread/118595
+ if(consumer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&consumer->audioUnitHandle);
+ }
+#endif
+
+ consumer->started = tsk_false;
+ TSK_DEBUG_INFO("AudioUnit consumer stoppped");
+ return 0;
+
+}
+
+//
+// coreaudio consumer (AudioUnit) object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_audiounit_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_audiounit_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_audiounit_dtor(tsk_object_t * self)
+{
+ tdav_consumer_audiounit_t *consumer = self;
+ if(consumer){
+ /* deinit self */
+ // Stop the consumer if not done
+ if(consumer->started){
+ tdav_consumer_audiounit_stop(self);
+ }
+ // destroy handle
+ if(consumer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&consumer->audioUnitHandle);
+ }
+ TSK_FREE(consumer->ring.chunck.buffer);
+ if(consumer->ring.buffer){
+ speex_buffer_destroy(consumer->ring.buffer);
+ }
+ if(consumer->ring.mutex){
+ tsk_mutex_destroy(&consumer->ring.mutex);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ TSK_DEBUG_INFO("*** AudioUnit Consumer destroyed ***");
+ }
+
+ return self;
+}
+
+/* object definition */
+static const tsk_object_def_t tdav_consumer_audiounit_def_s =
+{
+ sizeof(tdav_consumer_audiounit_t),
+ tdav_consumer_audiounit_ctor,
+ tdav_consumer_audiounit_dtor,
+ tdav_consumer_audio_cmp,
+};
+
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_audiounit_plugin_def_s =
+{
+ &tdav_consumer_audiounit_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio consumer(AudioUnit)",
+
+ tdav_consumer_audiounit_set,
+ tdav_consumer_audiounit_prepare,
+ tdav_consumer_audiounit_start,
+ tdav_consumer_audiounit_consume,
+ tdav_consumer_audiounit_pause,
+ tdav_consumer_audiounit_stop
+};
+
+const tmedia_consumer_plugin_def_t *tdav_consumer_audiounit_plugin_def_t = &tdav_consumer_audiounit_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c b/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c
new file mode 100644
index 0000000..d96fd67
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_producer_audioqueue.c
+ * @brief Audio Producer for MacOSX and iOS platforms using AudioQueue.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_producer_audioqueue.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+
+#if HAVE_COREAUDIO_AUDIO_QUEUE
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+static void __handle_input_buffer (void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer, const AudioTimeStamp *start_time, UInt32 number_packet_descriptions, const AudioStreamPacketDescription *packet_descriptions ) {
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)userdata;
+
+ if (!producer->started) {
+ return;
+ }
+
+ // Alert the session that there is new data to send
+ if(TMEDIA_PRODUCER(producer)->enc_cb.callback) {
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data, buffer->mAudioData, buffer->mAudioDataByteSize);
+ }
+
+ // Re-enqueue the buffer
+ AudioQueueEnqueueBuffer(producer->queue, buffer, 0, NULL);
+}
+
+/* ============ Media Producer Interface ================= */
+#define tdav_producer_audioqueue_set tsk_null
+
+static int tdav_producer_audioqueue_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+ /* codec should have ptime */
+
+
+ // Set audio category
+#if TARGET_OS_IPHONE
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+#endif
+ // Create the audio stream description
+ AudioStreamBasicDescription *description = &(producer->description);
+ description->mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
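+ // Each queue buffer holds one ptime worth of audio: sampleRate * bytesPerFrame / (1000 / ptime) bytes.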
+ int packetperbuffer = 1000 / TMEDIA_PRODUCER(producer)->audio.ptime;
+ producer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
+
+ // Create the record audio queue
+ ret = AudioQueueNewInput(&(producer->description),
+ __handle_input_buffer,
+ producer,
+ NULL,
+ kCFRunLoopCommonModes,
+ 0,
+ &(producer->queue));
+
+ for(i = 0; i < CoreAudioRecordBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(producer->queue, producer->buffer_size, &(producer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(producer->buffers[i]->mAudioData, 0, producer->buffer_size);
+ producer->buffers[i]->mAudioDataByteSize = producer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(producer->queue, producer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int tdav_producer_audioqueue_start(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->started){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ producer->started = tsk_true;
+ ret = AudioQueueStart(producer->queue, NULL);
+
+ return ret;
+}
+
+static int tdav_producer_audioqueue_pause(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ret = AudioQueuePause(producer->queue);
+
+ return ret;
+}
+
+static int tdav_producer_audioqueue_stop(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!producer->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ producer->started = tsk_false;
+ ret = AudioQueueStop(producer->queue, false);
+
+ return ret;
+}
+
+
+//
+// CoreAudio producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_audioqueue_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_audioqueue_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ // TODO
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_audioqueue_dtor(tsk_object_t * self)
+{
+ tdav_producer_audioqueue_t *producer = self;
+ if(producer){
+ // Stop the producer if not done
+ if(producer->started){
+ tdav_producer_audioqueue_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (producer->queue) {
+ tsk_size_t i;
+
+ for(i=0; i<CoreAudioRecordBuffers; i++){
+ AudioQueueFreeBuffer(producer->queue, producer->buffers[i]);
+ }
+ AudioQueueDispose(producer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_audioqueue_def_s =
+{
+ sizeof(tdav_producer_audioqueue_t),
+ tdav_producer_audioqueue_ctor,
+ tdav_producer_audioqueue_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_audioqueue_plugin_def_s =
+{
+ &tdav_producer_audioqueue_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio producer (AudioQueue)",
+
+ tdav_producer_audioqueue_set,
+ tdav_producer_audioqueue_prepare,
+ tdav_producer_audioqueue_start,
+ tdav_producer_audioqueue_pause,
+ tdav_producer_audioqueue_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_audioqueue_plugin_def_t = &tdav_producer_audioqueue_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_QUEUE */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c
new file mode 100644
index 0000000..a88261e
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_producer_audiounit.h"
+
+// http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/Introduction/Introduction.html%23//apple_ref/doc/uid/TP40009492-CH1-SW1
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#include <mach/mach.h>
+#import <sys/sysctl.h>
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_thread.h"
+#include "tsk_debug.h"
+
+#define kRingPacketCount 10
+
+static OSStatus __handle_input_buffer(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ OSStatus status = noErr;
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)inRefCon;
+
+ // holder
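+ // (mData is left NULL so AudioUnitRender supplies/allocates the capture buffer itself)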
+ AudioBuffer buffer;
+ buffer.mData = tsk_null;
+ buffer.mDataByteSize = 0;
+ buffer.mNumberChannels = TMEDIA_PRODUCER(producer)->audio.channels;
+
+ // list of holders
+ AudioBufferList buffers;
+ buffers.mNumberBuffers = 1;
+ buffers.mBuffers[0] = buffer;
+
+ // render to get frames from the system
+ status = AudioUnitRender(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ ioActionFlags,
+ inTimeStamp,
+ inBusNumber,
+ inNumberFrames,
+ &buffers);
+ if(status == 0){
+ // must not be deferred to an async thread: doing so gives bad audio quality when an audio+video call uses a CPU-consuming codec (e.g. speex or g729)
+ speex_buffer_write(producer->ring.buffer, buffers.mBuffers[0].mData, buffers.mBuffers[0].mDataByteSize);
+ int avail = speex_buffer_get_available(producer->ring.buffer);
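+ // Drain the ring in fixed ptime-sized chunks so the encode callback always receives frames of the negotiated size.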
+ while (producer->started && avail >= producer->ring.chunck.size) {
+ avail -= speex_buffer_read(producer->ring.buffer, (void*)producer->ring.chunck.buffer, (int)producer->ring.chunck.size);
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data,
+ producer->ring.chunck.buffer, producer->ring.chunck.size);
+ }
+ }
+
+ return status;
+}
+
+/* ============ Media Producer Interface ================= */
+int tdav_producer_audiounit_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ if(param->plugin_type == tmedia_ppt_producer){
+ if(param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "mute")) {
+ producer->muted = TSK_TO_INT32((uint8_t*)param->value);
+ return tdav_audiounit_handle_mute(((tdav_producer_audiounit_t*)self)->audioUnitHandle, producer->muted);
+ }
+ else if (tsk_striequals(param->key, "interrupt")) {
+ int32_t interrupt = *((uint8_t*)param->value) ? 1 : 0;
+ return tdav_audiounit_handle_interrupt(producer->audioUnitHandle, interrupt);
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+static int tdav_producer_audiounit_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ static UInt32 flagOne = 1;
+ UInt32 param;
+ // static UInt32 flagZero = 0;
+#define kInputBus 1
+
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ OSStatus status = noErr;
+ AudioStreamBasicDescription audioFormat;
+ AudioStreamBasicDescription deviceFormat;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!producer->audioUnitHandle){
+ if(!(producer->audioUnitHandle = tdav_audiounit_handle_create(TMEDIA_PRODUCER(producer)->session_id))){
+ TSK_DEBUG_ERROR("Failed to get audio unit instance for session with id=%lld", TMEDIA_PRODUCER(producer)->session_id);
+ return -3;
+ }
+ }
+
+ // enable
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ kInputBus,
+ &flagOne,
+ sizeof(flagOne));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+ else {
+#if !TARGET_OS_IPHONE // strange: TARGET_OS_MAC is equal to '1' on the Simulator
+ // disable output
+ param = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0,
+ &param,
+ sizeof(UInt32));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set default audio device
+ param = sizeof(AudioDeviceID);
+ AudioDeviceID inputDeviceID;
+ status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &param, &inputDeviceID);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set the current device to the default input unit
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Output,
+ 0,
+ &inputDeviceID,
+ sizeof(AudioDeviceID));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_CurrentDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+#endif /* TARGET_OS_MAC */
+
+ /* codec should have ptime */
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+ TSK_DEBUG_INFO("AudioUnit producer: channels=%d, rate=%d, ptime=%d",
+ TMEDIA_PRODUCER(producer)->audio.channels,
+ TMEDIA_PRODUCER(producer)->audio.rate,
+ TMEDIA_PRODUCER(producer)->audio.ptime);
+
+ // get device format
+ param = sizeof(AudioStreamBasicDescription);
+ status = AudioUnitGetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ kInputBus,
+ &deviceFormat, &param);
+ if(status == noErr && deviceFormat.mSampleRate){
+#if TARGET_OS_IPHONE
+ // iOS supports 8kHz, 16kHz and 32kHz => do not override the sampleRate
+#elif TARGET_OS_MAC
+ // For example, iSight supports only 48kHz
+ TMEDIA_PRODUCER(producer)->audio.rate = deviceFormat.mSampleRate;
+#endif
+ }
+
+ // set format
+ audioFormat.mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
+ audioFormat.mFormatID = kAudioFormatLinearPCM;
+ audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
+ audioFormat.mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
+ audioFormat.mFramesPerPacket = 1;
+ audioFormat.mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ audioFormat.mBytesPerPacket = audioFormat.mBitsPerChannel / 8 * audioFormat.mChannelsPerFrame;
+ audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket;
+ audioFormat.mReserved = 0;
+ if(audioFormat.mFormatID == kAudioFormatLinearPCM && audioFormat.mChannelsPerFrame == 1){
+ audioFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
+ }
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ kInputBus,
+ &audioFormat,
+ sizeof(audioFormat));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed with status=%ld", (signed long)status);
+ return -5;
+ }
+ else {
+
+ // configure
+ if(tdav_audiounit_handle_configure(producer->audioUnitHandle, tsk_false, TMEDIA_PRODUCER(producer)->audio.ptime, &audioFormat)){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_set_rate(%d) failed", TMEDIA_PRODUCER(producer)->audio.rate);
+ return -4;
+ }
+
+ // set callback function
+ AURenderCallbackStruct callback;
+ callback.inputProc = __handle_input_buffer;
+ callback.inputProcRefCon = producer;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Output,
+ kInputBus,
+ &callback,
+ sizeof(callback));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ return -6;
+ }
+ else {
+ // disable buffer allocation as we will provide ours
+ //status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ // kAudioUnitProperty_ShouldAllocateBuffer,
+ // kAudioUnitScope_Output,
+ // kInputBus,
+ // &flagZero,
+ // sizeof(flagZero));
+
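+ // chunk size = one ptime of audio at the negotiated rate: (ptime[ms] * sampleRate * bytesPerFrame) / 1000 bytes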
+ producer->ring.chunck.size = (TMEDIA_PRODUCER(producer)->audio.ptime * audioFormat.mSampleRate * audioFormat.mBytesPerFrame) / 1000;
+ // allocate our chunck buffer
+ if(!(producer->ring.chunck.buffer = tsk_realloc(producer->ring.chunck.buffer, producer->ring.chunck.size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return -7;
+ }
+ // create ringbuffer
+ producer->ring.size = kRingPacketCount * producer->ring.chunck.size;
+ if(!producer->ring.buffer){
+ producer->ring.buffer = speex_buffer_init((int)producer->ring.size);
+ }
+ else {
+ int ret;
+ if((ret = speex_buffer_resize(producer->ring.buffer, producer->ring.size)) < 0){
+ TSK_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", (int)producer->ring.size, ret);
+ return ret;
+ }
+ }
+ if(!producer->ring.buffer){
+ TSK_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", (int)producer->ring.size);
+ return -9;
+ }
+ }
+
+ }
+ }
+
+ TSK_DEBUG_INFO("AudioUnit producer prepared");
+ return tdav_audiounit_handle_signal_producer_prepared(producer->audioUnitHandle);
+}
+
+static int tdav_producer_audiounit_start(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(producer->paused){
+ producer->paused = tsk_false;
+ return 0;
+ }
+
+ int ret;
+ if(producer->started){
+ TSK_DEBUG_WARN("Already started");
+ return 0;
+ }
+ else {
+ ret = tdav_audiounit_handle_start(producer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_start failed with error code=%d", ret);
+ return ret;
+ }
+ }
+ producer->started = tsk_true;
+
+ // apply parameters (because could be lost when the producer is restarted -handle recreated-)
+ ret = tdav_audiounit_handle_mute(producer->audioUnitHandle, producer->muted);
+
+ TSK_DEBUG_INFO("AudioUnit producer started");
+ return 0;
+}
+
+static int tdav_producer_audiounit_pause(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ producer->paused = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit producer paused");
+ return 0;
+}
+
+static int tdav_producer_audiounit_stop(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!producer->started){
+ TSK_DEBUG_INFO("Not started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_stop(producer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_stop failed with error code=%d", ret);
+ // do not return even if failed => we MUST stop the thread!
+ }
+#if TARGET_OS_IPHONE
+ //https://devforums.apple.com/thread/118595
+ if(producer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&producer->audioUnitHandle);
+ }
+#endif
+ }
+ producer->started = tsk_false;
+ TSK_DEBUG_INFO("AudioUnit producer stoppped");
+ return 0;
+}
+
+
+//
+// CoreAudio producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_audiounit_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_audiounit_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_audiounit_dtor(tsk_object_t * self)
+{
+ tdav_producer_audiounit_t *producer = self;
+ if(producer){
+ // Stop the producer if not done
+ if(producer->started){
+ tdav_producer_audiounit_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (producer->audioUnitHandle) {
+ tdav_audiounit_handle_destroy(&producer->audioUnitHandle);
+ }
+ TSK_FREE(producer->ring.chunck.buffer);
+ if(producer->ring.buffer){
+ speex_buffer_destroy(producer->ring.buffer);
+ }
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+
+ TSK_DEBUG_INFO("*** AudioUnit Producer destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_audiounit_def_s =
+{
+ sizeof(tdav_producer_audiounit_t),
+ tdav_producer_audiounit_ctor,
+ tdav_producer_audiounit_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_audiounit_plugin_def_s =
+{
+ &tdav_producer_audiounit_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio producer (AudioUnit)",
+
+ tdav_producer_audiounit_set,
+ tdav_producer_audiounit_prepare,
+ tdav_producer_audiounit_start,
+ tdav_producer_audiounit_pause,
+ tdav_producer_audiounit_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_audiounit_plugin_def_t = &tdav_producer_audiounit_plugin_def_s;
+
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
new file mode 100644
index 0000000..82e125b
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
@@ -0,0 +1,458 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_dsound.c
+ * @brief Microsoft DirectSound consumer.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ */
+#include "tinydav/audio/directsound/tdav_consumer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+// # pragma comment(lib, "dxguid.lib")
+#endif
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+#include <dsound.h>
+
+extern void tdav_win32_print_error(const char* func, HRESULT hr);
+
+#if !defined(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT)
+# define TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT 20
+#endif /* TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT */
+
+typedef struct tdav_consumer_dsound_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ tsk_bool_t started;
+ tsk_size_t bytes_per_notif_size;
+ uint8_t* bytes_per_notif_ptr;
+ tsk_thread_handle_t* tid[1];
+
+ LPDIRECTSOUND device;
+ LPDIRECTSOUNDBUFFER primaryBuffer;
+ LPDIRECTSOUNDBUFFER secondaryBuffer;
+ HANDLE notifEvents[TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT];
+}
+tdav_consumer_dsound_t;
+
+static _inline int32_t __convert_volume(int32_t volume)
+{
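+ // Map the generic 0..100 volume onto DirectSound attenuation (DSBVOLUME_MIN..DSBVOLUME_MAX, in hundredths of a decibel).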
+ static const int32_t __step = (DSBVOLUME_MAX - DSBVOLUME_MIN) / 100;
+ return (volume * __step) + DSBVOLUME_MIN;
+}
+
+static void* TSK_STDCALL _tdav_consumer_dsound_playback_thread(void *param)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)param;
+
+ HRESULT hr;
+ LPVOID lpvAudio1, lpvAudio2;
+ DWORD dwBytesAudio1, dwBytesAudio2, dwEvent;
+ static const DWORD dwWriteCursor = 0;
+ tsk_size_t out_size;
+
+ TSK_DEBUG_INFO("_tdav_consumer_dsound_playback_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+ while (dsound->started) {
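+ // Each event is signalled when the play cursor crosses one of the notification positions installed in start().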
+ dwEvent = WaitForMultipleObjects(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT, dsound->notifEvents, FALSE, INFINITE);
+ if (!dsound->started) {
+ break;
+ }
+
+ // lock
+ hr = IDirectSoundBuffer_Lock(
+ dsound->secondaryBuffer,
+ dwWriteCursor/* Ignored because of DSBLOCK_FROMWRITECURSOR */,
+ (DWORD)dsound->bytes_per_notif_size,
+ &lpvAudio1, &dwBytesAudio1,
+ &lpvAudio2, &dwBytesAudio2,
+ DSBLOCK_FROMWRITECURSOR);
+ if (hr != DS_OK) {
+ tdav_win32_print_error("IDirectSoundBuffer_Lock", hr);
+ goto next;
+ }
+
+ out_size = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(dsound), dsound->bytes_per_notif_ptr, dsound->bytes_per_notif_size);
+ if (out_size < dsound->bytes_per_notif_size) {
+ // fill with silence
+ memset(&dsound->bytes_per_notif_ptr[out_size], 0, (dsound->bytes_per_notif_size - out_size));
+ }
+ if ((dwBytesAudio1 + dwBytesAudio2) == dsound->bytes_per_notif_size) {
+ memcpy(lpvAudio1, dsound->bytes_per_notif_ptr, dwBytesAudio1);
+ if (lpvAudio2 && dwBytesAudio2) {
+ memcpy(lpvAudio2, &dsound->bytes_per_notif_ptr[dwBytesAudio1], dwBytesAudio2);
+ }
+ }
+ else {
+ TSK_DEBUG_ERROR("Not expected: %d+%d#%d", dwBytesAudio1, dwBytesAudio2, dsound->bytes_per_notif_size);
+ }
+#if 0
+ memset(lpvAudio1, rand(), dwBytesAudio1);
+#endif
+ // unlock
+ if ((hr = IDirectSoundBuffer_Unlock(dsound->secondaryBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundBuffer_UnLock", hr);
+ goto next;
+ }
+next:
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(dsound));
+ }
+
+ TSK_DEBUG_INFO("_tdav_consumer_dsound_playback_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+static int _tdav_consumer_dsound_unprepare(tdav_consumer_dsound_t *dsound)
+{
+ if(dsound){
+ tsk_size_t i;
+ if(dsound->primaryBuffer){
+ IDirectSoundBuffer_Release(dsound->primaryBuffer);
+ dsound->primaryBuffer = NULL;
+ }
+ if(dsound->secondaryBuffer){
+ IDirectSoundBuffer_Release(dsound->secondaryBuffer);
+ dsound->secondaryBuffer = NULL;
+ }
+ if(dsound->device){
+ IDirectSound_Release(dsound->device);
+ dsound->device = NULL;
+ }
+ for(i = 0; i<sizeof(dsound->notifEvents)/sizeof(dsound->notifEvents[0]); i++){
+ if(dsound->notifEvents[i]){
+ CloseHandle(dsound->notifEvents[i]);
+ dsound->notifEvents[i] = NULL;
+ }
+ }
+ }
+ return 0;
+}
+
+
+
+/* ============ Media Consumer Interface ================= */
+static int tdav_consumer_dsound_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+ int ret = tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+
+ if(ret == 0){
+ if(dsound->secondaryBuffer && tsk_striequals(param->key, "volume")){
+ if(IDirectSoundBuffer_SetVolume(dsound->secondaryBuffer, __convert_volume(TMEDIA_CONSUMER(self)->audio.volume)) != DS_OK){
+ TSK_DEBUG_ERROR("IDirectSoundBuffer_SetVolume() failed");
+ ret = -1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int tdav_consumer_dsound_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ HRESULT hr;
+ HWND hWnd;
+
+ WAVEFORMATEX wfx = {0};
+ DSBUFFERDESC dsbd = {0};
+
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ if(!dsound){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(dsound->device || dsound->primaryBuffer || dsound->secondaryBuffer){
+ TSK_DEBUG_ERROR("Consumer already prepared");
+ return -2;
+ }
+
+ TMEDIA_CONSUMER(dsound)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(dsound)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(dsound)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+#if 0
+ TMEDIA_CONSUMER(dsound)->audio.out.rate = 48000;
+ TMEDIA_CONSUMER(dsound)->audio.out.channels = 2;
+#endif
+
+ /* Create sound device */
+ if((hr = DirectSoundCreate(NULL, &dsound->device, NULL)) != DS_OK){
+ tdav_win32_print_error("DirectSoundCreate", hr);
+ return -3;
+ }
+
+ /* Set CooperativeLevel */
+ if((hWnd = GetForegroundWindow()) || (hWnd = GetDesktopWindow()) || (hWnd = GetConsoleWindow())){
+ if((hr = IDirectSound_SetCooperativeLevel(dsound->device, hWnd, DSSCL_PRIORITY)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_SetCooperativeLevel", hr);
+ return -2;
+ }
+ }
+
+ /* Creates the primary buffer and apply format */
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_CONSUMER(dsound)->audio.out.channels ? TMEDIA_CONSUMER(dsound)->audio.out.channels : TMEDIA_CONSUMER(dsound)->audio.in.channels;
+ wfx.nSamplesPerSec = TMEDIA_CONSUMER(dsound)->audio.out.rate ? TMEDIA_CONSUMER(dsound)->audio.out.rate : TMEDIA_CONSUMER(dsound)->audio.in.rate;
+ wfx.wBitsPerSample = TMEDIA_CONSUMER(dsound)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ dsound->bytes_per_notif_size = ((wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(dsound)->audio.ptime)/1000);
+ if(!(dsound->bytes_per_notif_ptr = tsk_realloc(dsound->bytes_per_notif_ptr, dsound->bytes_per_notif_size))){
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size = %u", dsound->bytes_per_notif_size);
+ return -3;
+ }
+
+ dsbd.dwSize = sizeof(DSBUFFERDESC);
+ dsbd.dwFlags = DSBCAPS_PRIMARYBUFFER;
+ dsbd.dwBufferBytes = 0;
+ dsbd.lpwfxFormat = NULL;
+
+ if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->primaryBuffer, NULL)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+ return -4;
+ }
+ if((hr = IDirectSoundBuffer_SetFormat(dsound->primaryBuffer, &wfx)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_SetFormat", hr);
+ return -5;
+ }
+
+ /* Creates the secondary buffer and apply format */
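+ // DSBCAPS_CTRLPOSITIONNOTIFY is required for the notification events used by the playback thread,
+ // DSBCAPS_GLOBALFOCUS keeps the buffer playing when the app loses focus, DSBCAPS_CTRLVOLUME enables SetVolume()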
+ dsbd.dwFlags = (DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GLOBALFOCUS | DSBCAPS_CTRLVOLUME);
+ dsbd.dwBufferBytes = (DWORD)(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT * dsound->bytes_per_notif_size);
+ dsbd.lpwfxFormat = &wfx;
+
+ if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->secondaryBuffer, NULL)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+ return -6;
+ }
+
+ /* Set Volume */
+ if(IDirectSoundBuffer_SetVolume(dsound->secondaryBuffer, __convert_volume(TMEDIA_CONSUMER(self)->audio.volume)) != DS_OK){
+ TSK_DEBUG_ERROR("IDirectSoundBuffer_SetVolume() failed");
+ }
+
+ return 0;
+}
+
+static int tdav_consumer_dsound_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ tsk_size_t i;
+ HRESULT hr;
+ LPDIRECTSOUNDNOTIFY lpDSBNotify;
+ DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT] = {0};
+
+ static DWORD dwMajorVersion = -1;
+
+ // Get OS version
+ if(dwMajorVersion == -1){
+ OSVERSIONINFO osvi;
+ ZeroMemory(&osvi, sizeof(OSVERSIONINFO));
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&osvi);
+ dwMajorVersion = osvi.dwMajorVersion;
+ }
+
+ if(!dsound){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->device || !dsound->primaryBuffer || !dsound->secondaryBuffer){
+ TSK_DEBUG_ERROR("Consumer not prepared");
+ return -2;
+ }
+
+ if(dsound->started){
+ return 0;
+ }
+
+ if((hr = IDirectSoundBuffer_QueryInterface(dsound->secondaryBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -3;
+ }
+
+ /* Events associated to notification points */
+ for(i = 0; i<TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT; i++){
+ dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ // set each notification point in the middle of its chunk on Windows Vista and later (dwMajorVersion > 5), and one byte past the start of the chunk on XP and earlier
+ pPosNotify[i].dwOffset = (DWORD)((dsound->bytes_per_notif_size * i) + (dwMajorVersion > 5 ? (dsound->bytes_per_notif_size >> 1) : 1));
+ pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+ }
+ if((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK){
+ IDirectSoundNotify_Release(lpDSBNotify);
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -4;
+ }
+
+ if((hr = IDirectSoundNotify_Release(lpDSBNotify))){
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ }
+
+ /* Start the buffer */
+ if((hr = IDirectSoundBuffer_Play(dsound->secondaryBuffer, 0, 0, DSBPLAY_LOOPING)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ return -5;
+ }
+
+ /* start the reader thread */
+ dsound->started = tsk_true;
+ tsk_thread_create(&dsound->tid[0], _tdav_consumer_dsound_playback_thread, dsound);
+
+ return 0;
+}
+
+static int tdav_consumer_dsound_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ if(!dsound || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* buffer is already decoded */
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(dsound), buffer, size, proto_hdr);
+}
+
+static int tdav_consumer_dsound_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+static int tdav_consumer_dsound_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ HRESULT hr;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->started){
+ return 0;
+ }
+
+ /* should be done here */
+ dsound->started = tsk_false;
+
+ /* stop thread */
+ if(dsound->tid[0]){
+ tsk_thread_join(&(dsound->tid[0]));
+ }
+
+ if((hr = IDirectSoundBuffer_Stop(dsound->secondaryBuffer)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_Stop", hr);
+ }
+ if((hr = IDirectSoundBuffer_SetCurrentPosition(dsound->secondaryBuffer, 0)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_SetCurrentPosition", hr);
+ }
+
+ // unprepare
+ // will be prepared again before calling next start()
+ _tdav_consumer_dsound_unprepare(dsound);
+
+ return 0;
+}
+
+
+//
+// DirectSound consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_dsound_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_dsound_dtor(tsk_object_t * self)
+{
+ tdav_consumer_dsound_t *dsound = self;
+ if(dsound){
+ /* stop */
+ if(dsound->started){
+ tdav_consumer_dsound_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(dsound));
+ /* deinit self */
+ _tdav_consumer_dsound_unprepare(dsound);
+ TSK_FREE(dsound->bytes_per_notif_ptr);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_dsound_def_s =
+{
+ sizeof(tdav_consumer_dsound_t),
+ tdav_consumer_dsound_ctor,
+ tdav_consumer_dsound_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_dsound_plugin_def_s =
+{
+ &tdav_consumer_dsound_def_s,
+
+ tmedia_audio,
+ "Microsoft DirectSound consumer",
+
+ tdav_consumer_dsound_set,
+ tdav_consumer_dsound_prepare,
+ tdav_consumer_dsound_start,
+ tdav_consumer_dsound_consume,
+ tdav_consumer_dsound_pause,
+ tdav_consumer_dsound_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_dsound_plugin_def_t = &tdav_consumer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/directsound/tdav_producer_dsound.c b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
new file mode 100644
index 0000000..c5ae167
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
@@ -0,0 +1,402 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_dsound.c
+ * @brief Microsoft DirectSound producer.
+ *
+ */
+#include "tinydav/audio/directsound/tdav_producer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+// # pragma comment(lib, "dxguid.lib")
+#endif
+
+#if !defined(SEND_SILENCE_ON_MUTE)
+# if METROPOLIS
+# define SEND_SILENCE_ON_MUTE 1
+# else
+# define SEND_SILENCE_ON_MUTE 0
+# endif
+#endif /* SEND_SILENCE_ON_MUTE */
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+#include <dsound.h>
+
+extern void tdav_win32_print_error(const char* func, HRESULT hr);
+
+#if !defined(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT)
+# define TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT 10
+#endif /* TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT */
+
+typedef struct tdav_producer_dsound_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ tsk_bool_t started;
+ tsk_bool_t mute;
+ tsk_size_t bytes_per_notif_size;
+ tsk_thread_handle_t* tid[1];
+
+ LPDIRECTSOUNDCAPTURE device;
+ LPDIRECTSOUNDCAPTUREBUFFER captureBuffer;
+ HANDLE notifEvents[TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT];
+}
+tdav_producer_dsound_t;
+
+static void* TSK_STDCALL _tdav_producer_dsound_record_thread(void *param)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)param;
+
+ HRESULT hr;
+ LPVOID lpvAudio1, lpvAudio2;
+ DWORD dwBytesAudio1, dwBytesAudio2, dwEvent, dwIndex;
+
+ TSK_DEBUG_INFO("_tdav_producer_dsound_record_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (dsound->started) {
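+ // Each event maps to one notification position; its index tells which chunk of the capture buffer is ready to read.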
+ dwEvent = WaitForMultipleObjects(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT, dsound->notifEvents, FALSE, INFINITE);
+ if (!dsound->started) {
+ break;
+ }
+ if (dwEvent < WAIT_OBJECT_0 || dwEvent > (WAIT_OBJECT_0 + TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT)) {
+ TSK_DEBUG_ERROR("Invalid dwEvent(%d)", dwEvent);
+ break;
+ }
+ dwIndex = (dwEvent - WAIT_OBJECT_0);
+
+ // lock
+ if ((hr = IDirectSoundCaptureBuffer_Lock(dsound->captureBuffer, (DWORD)(dwIndex * dsound->bytes_per_notif_size), (DWORD)dsound->bytes_per_notif_size, &lpvAudio1, &dwBytesAudio1, &lpvAudio2, &dwBytesAudio2, 0)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Lock", hr);
+ continue;
+ }
+
+ if (TMEDIA_PRODUCER(dsound)->enc_cb.callback) {
+#if SEND_SILENCE_ON_MUTE
+ if (dsound->mute) {
+ memset(lpvAudio1, 0, dwBytesAudio1);
+ if(lpvAudio2){
+ memset(lpvAudio2, 0, dwBytesAudio2);
+ }
+ }
+#endif
+ TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio1, dwBytesAudio1);
+ if (lpvAudio2) {
+ TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio2, dwBytesAudio2);
+ }
+ }
+
+ // unlock
+ if ((hr = IDirectSoundCaptureBuffer_Unlock(dsound->captureBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Unlock", hr);
+ continue;
+ }
+ }
+
+ TSK_DEBUG_INFO("_tdav_producer_dsound_record_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+static int _tdav_producer_dsound_unprepare(tdav_producer_dsound_t* dsound)
+{
+ if (dsound) {
+ tsk_size_t i;
+ if (dsound->captureBuffer) {
+ IDirectSoundCaptureBuffer_Release(dsound->captureBuffer);
+ dsound->captureBuffer = NULL;
+ }
+ if (dsound->device) {
+ IDirectSoundCapture_Release(dsound->device);
+ dsound->device = NULL;
+ }
+ for (i = 0; i < (sizeof(dsound->notifEvents) / sizeof(dsound->notifEvents[0])); i++){
+ if (dsound->notifEvents[i]) {
+ CloseHandle(dsound->notifEvents[i]);
+ dsound->notifEvents[i] = NULL;
+ }
+ }
+ }
+ return 0;
+}
+
+
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_dsound_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if (tsk_striequals(param->key, "mute")) {
+ dsound->mute = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+#if !SEND_SILENCE_ON_MUTE
+ if (dsound->started) {
+ if (dsound->mute) {
+ IDirectSoundCaptureBuffer_Stop(dsound->captureBuffer);
+ }
+ else {
+ IDirectSoundCaptureBuffer_Start(dsound->captureBuffer, DSBPLAY_LOOPING);
+ }
+ }
+#endif
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+static int tdav_producer_dsound_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ HRESULT hr;
+
+ WAVEFORMATEX wfx = { 0 };
+ DSCBUFFERDESC dsbd = { 0 };
+
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ if (!dsound || !codec) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (dsound->device || dsound->captureBuffer) {
+ TSK_DEBUG_ERROR("Producer already prepared");
+ return -2;
+ }
+
+ TMEDIA_PRODUCER(dsound)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(dsound)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(dsound)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+#if 0
+ TMEDIA_PRODUCER(dsound)->audio.rate = 48000;
+ TMEDIA_PRODUCER(dsound)->audio.channels = 1;
+#endif
+
+ /* Create capture device */
+ if ((hr = DirectSoundCaptureCreate(NULL, &dsound->device, NULL)) != DS_OK) {
+ tdav_win32_print_error("DirectSoundCaptureCreate", hr);
+ return -3;
+ }
+
+ /* Creates the capture buffer */
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_PRODUCER(dsound)->audio.channels;
+ wfx.nSamplesPerSec = TMEDIA_PRODUCER(dsound)->audio.rate;
+ wfx.wBitsPerSample = TMEDIA_PRODUCER(dsound)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ dsound->bytes_per_notif_size = ((wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(dsound)->audio.ptime) / 1000);
+
+ dsbd.dwSize = sizeof(DSCBUFFERDESC);
+ dsbd.dwBufferBytes = (DWORD)(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT * dsound->bytes_per_notif_size);
+ dsbd.lpwfxFormat = &wfx;
+
+ if ((hr = IDirectSoundCapture_CreateCaptureBuffer(dsound->device, &dsbd, &dsound->captureBuffer, NULL)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCapture_CreateCaptureBuffer", hr);
+ return -4;
+ }
+
+ return 0;
+}
+
+static int tdav_producer_dsound_start(tmedia_producer_t* self)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ tsk_size_t i;
+ DWORD dwOffset;
+ HRESULT hr;
+ LPDIRECTSOUNDNOTIFY lpDSBNotify;
+ DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT] = { 0 };
+
+ if (!dsound) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!dsound->device || !dsound->captureBuffer) {
+ TSK_DEBUG_ERROR("Producer not prepared");
+ return -2;
+ }
+
+ if (dsound->started) {
+ return 0;
+ }
+
+ if ((hr = IDirectSoundCaptureBuffer_QueryInterface(dsound->captureBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_QueryInterface", hr);
+ return -3;
+ }
+
+ /* Events associated to notification points */
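+ // Each notification fires at the end of its chunk (offset = chunk end - 1), so a full chunk has been captured before the event is signalled.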
+ dwOffset = (DWORD)(dsound->bytes_per_notif_size - 1);
+ for (i = 0; i < (sizeof(dsound->notifEvents) / sizeof(dsound->notifEvents[0])); i++){
+ dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ pPosNotify[i].dwOffset = dwOffset;
+ pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+ dwOffset += (DWORD)dsound->bytes_per_notif_size;
+ }
+ if ((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK) {
+ IDirectSoundNotify_Release(lpDSBNotify);
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -4;
+ }
+
+ if ((hr = IDirectSoundNotify_Release(lpDSBNotify))) {
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ }
+
+ /* Start the buffer */
+ if ((hr = IDirectSoundCaptureBuffer_Start(dsound->captureBuffer, DSBPLAY_LOOPING)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Start", hr);
+ return -5;
+ }
+
+ /* start the reader thread */
+ dsound->started = tsk_true;
+ tsk_thread_create(&dsound->tid[0], _tdav_producer_dsound_record_thread, dsound);
+
+ return 0;
+}
+
+static int tdav_producer_dsound_pause(tmedia_producer_t* self)
+{
+ return 0;
+}
+
+static int tdav_producer_dsound_stop(tmedia_producer_t* self)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ HRESULT hr;
+
+ if (!self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!dsound->started) {
+ return 0;
+ }
+
+ // should be done here
+ dsound->started = tsk_false;
+
+#if !SEND_SILENCE_ON_MUTE
+ if (dsound->mute && dsound->notifEvents[0]) {
+ // thread is paused -> raise event now that "started" is equal to false
+ SetEvent(dsound->notifEvents[0]);
+ }
+#endif
+
+ // stop thread
+ if (dsound->tid[0]) {
+ tsk_thread_join(&(dsound->tid[0]));
+ }
+
+ if ((hr = IDirectSoundCaptureBuffer_Stop(dsound->captureBuffer)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Stop", hr);
+ }
+
+ // unprepare
+ // will be prepared again before next start()
+ _tdav_producer_dsound_unprepare(dsound);
+
+ return 0;
+}
+
+
+//
+// DirectSound producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_dsound_t *producer = self;
+ if (producer) {
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_dsound_dtor(tsk_object_t * self)
+{
+ tdav_producer_dsound_t *dsound = self;
+ if (dsound) {
+ /* stop */
+ if (dsound->started) {
+ tdav_producer_dsound_stop(self);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(dsound));
+ /* deinit self */
+ _tdav_producer_dsound_unprepare(dsound);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_dsound_def_s =
+{
+ sizeof(tdav_producer_dsound_t),
+ tdav_producer_dsound_ctor,
+ tdav_producer_dsound_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_dsound_plugin_def_s =
+{
+ &tdav_producer_dsound_def_s,
+
+ tmedia_audio,
+ "Microsoft DirectSound producer",
+
+ tdav_producer_dsound_set,
+ tdav_producer_dsound_prepare,
+ tdav_producer_dsound_start,
+ tdav_producer_dsound_pause,
+ tdav_producer_dsound_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_dsound_plugin_def_t = &tdav_producer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */
\ No newline at end of file
diff --git a/tinyDAV/src/audio/oss/tdav_consumer_oss.c b/tinyDAV/src/audio/oss/tdav_consumer_oss.c
new file mode 100644
index 0000000..0370210
--- /dev/null
+++ b/tinyDAV/src/audio/oss/tdav_consumer_oss.c
@@ -0,0 +1,397 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/oss/tdav_consumer_oss.h"
+
+#if HAVE_LINUX_SOUNDCARD_H
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/soundcard.h>
+
+#define OSS_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[OSS Consumer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_consumer_oss_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ tsk_bool_t b_started;
+ tsk_bool_t b_prepared;
+ tsk_bool_t b_muted;
+ int n_bits_per_sample;
+
+ int fd;
+ tsk_thread_handle_t* tid[1];
+
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ uint8_t* p_buff_ptr;
+
+ tsk_size_t n_buff16_size_in_bytes;
+ tsk_size_t n_buff16_size_in_samples;
+ uint16_t* p_buff16_ptr;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_consumer_oss_t;
+
+static int __oss_from_16bits_to_8bits(const void* p_src, void* p_dst, tsk_size_t n_samples)
+{
+ tsk_size_t i;
+ uint16_t *_p_src = (uint16_t*)p_src;
+ uint8_t *_p_dst = (uint8_t*)p_dst;
+
+ if (!p_src || !p_dst || !n_samples) {
+ OSS_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+ for (i = 0; i < n_samples; ++i) {
+ _p_dst[i] = _p_src[i];
+ }
+ return 0;
+}
+
+static void* TSK_STDCALL _tdav_consumer_oss_playback_thread(void *param)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)param;
+ int err;
+ void* p_buffer = ((p_oss->n_bits_per_sample == 8) ? (void*)p_oss->p_buff16_ptr: (void*)p_oss->p_buff_ptr);
+ tsk_size_t n_buffer_in_bytes = (p_oss->n_bits_per_sample == 8) ? p_oss->n_buff16_size_in_bytes : p_oss->n_buff_size_in_bytes;
+ tsk_size_t n_buffer_in_samples = p_oss->n_buff_size_in_samples;
+
+ const void* _p_buffer;
+ tsk_size_t _n_buffer_in_bytes;
+
+ OSS_DEBUG_INFO("__playback_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_oss->b_started) {
+ tsk_safeobj_lock(p_oss);
+ err = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(p_oss), p_buffer, n_buffer_in_bytes); // requires 16bits, thread-safe
+ if (err >= 0) {
+ _p_buffer = p_buffer;
+ _n_buffer_in_bytes = n_buffer_in_bytes;
+ if (err < n_buffer_in_bytes) {
+ memset(((uint8_t*)p_buffer) + err, 0, (n_buffer_in_bytes - err));
+ }
+ if (p_oss->n_bits_per_sample == 8) {
+ __oss_from_16bits_to_8bits(p_buffer, p_oss->p_buff_ptr, n_buffer_in_samples);
+ _p_buffer = p_oss->p_buff_ptr;
+ _n_buffer_in_bytes >>= 1;
+ }
+ if ((err = write(p_oss->fd, _p_buffer, _n_buffer_in_bytes)) != _n_buffer_in_bytes) {
+ OSS_DEBUG_ERROR ("Failed to read data from audio interface failed (%d -> %s)", err , strerror(errno));
+ tsk_safeobj_unlock(p_oss);
+ goto bail;
+ }
+ }
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(p_oss));
+
+ tsk_safeobj_unlock(p_oss);
+ }
+bail:
+ OSS_DEBUG_INFO("__playback_thread -- STOP");
+ return tsk_null;
+}
+
+/* ============ Media Consumer Interface ================= */
+static int tdav_consumer_oss_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int ret = 0;
+
+ ret = tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+
+ return ret;
+}
+
+static int tdav_consumer_oss_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int err = 0, channels, sample_rate, bits_per_sample;
+
+ if (!p_oss || !codec && codec->plugin) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (p_oss->fd == -1) {
+ if ((p_oss->fd = open("/dev/dsp", O_WRONLY)) < 0) {
+ OSS_DEBUG_ERROR("open('/dev/dsp') failed: %s", strerror(errno));
+ err = -2;
+ goto bail;
+ }
+ }
+
+ TMEDIA_CONSUMER(p_oss)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_oss)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_oss)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ // Set using requested
+ channels = TMEDIA_CONSUMER(p_oss)->audio.in.channels;
+ sample_rate = TMEDIA_CONSUMER(p_oss)->audio.in.rate;
+ bits_per_sample = TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample; // 16
+
+ // Prepare
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_BITS, &bits_per_sample)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_BITS, %d) failed: %d->%s", bits_per_sample, err, strerror(errno));
+ goto bail;
+ }
+ if (bits_per_sample != 16 && bits_per_sample != 8) {
+ OSS_DEBUG_ERROR("bits_per_sample=%d not supported", bits_per_sample);
+ err = -3;
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_CHANNELS, &channels)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_CHANNELS, %d) failed: %d->%s", channels, err, strerror(errno));
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_RATE, &sample_rate)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_RATE, %d) failed: %d->%s", sample_rate, err, strerror(errno));
+ goto bail;
+ }
+
+ p_oss->n_buff_size_in_bytes = (TMEDIA_CONSUMER(p_oss)->audio.ptime * sample_rate * ((bits_per_sample >> 3) * channels)) / 1000;
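+ /* Worked example (illustrative values): ptime = 20 ms, rate = 8000 Hz, 16-bit mono
+ gives (20 * 8000 * (2 * 1)) / 1000 = 320 bytes per playback chunk (160 samples). */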
+ if (!(p_oss->p_buff_ptr = tsk_realloc(p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -4;
+ goto bail;
+ }
+ p_oss->n_buff_size_in_samples = (p_oss->n_buff_size_in_bytes / (bits_per_sample >> 3));
+ if (bits_per_sample == 8) {
+ p_oss->n_buff16_size_in_bytes = p_oss->n_buff_size_in_bytes << 1;
+ if (!(p_oss->p_buff16_ptr = tsk_realloc(p_oss->p_buff16_ptr, p_oss->n_buff16_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -5;
+ goto bail;
+ }
+ p_oss->n_buff16_size_in_samples = p_oss->n_buff_size_in_samples;
+ }
+
+ OSS_DEBUG_INFO("prepared: req_bits_per_sample=%d; req_channels=%d; req_rate=%d, resp_bits_per_sample=%d; resp_channels=%d; resp_rate=%d /// n_buff_size_in_samples=%u;n_buff_size_in_bytes=%u",
+ TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample, TMEDIA_CONSUMER(p_oss)->audio.in.channels, TMEDIA_CONSUMER(p_oss)->audio.in.rate,
+ bits_per_sample, channels, sample_rate,
+ p_oss->n_buff_size_in_samples, p_oss->n_buff_size_in_bytes);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_CONSUMER(p_oss)->audio.out.channels = channels;
+ TMEDIA_CONSUMER(p_oss)->audio.out.rate = sample_rate;
+ // TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample = bits_per_sample;
+
+ p_oss->n_bits_per_sample = bits_per_sample;
+ p_oss->b_prepared = tsk_true;
+
+bail:
+ if (err) {
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ }
+ tsk_safeobj_unlock(p_oss);
+
+ return err;
+}
+
+static int tdav_consumer_oss_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int err = 0;
+
+ if (! p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (!p_oss->b_prepared) {
+ OSS_DEBUG_WARN("Not prepared");
+ err = -2;
+ goto bail;
+ }
+
+ if (p_oss->b_started) {
+ OSS_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start thread */
+ p_oss->b_started = tsk_true;
+ tsk_thread_create(&p_oss->tid[0], _tdav_consumer_oss_playback_thread, p_oss);
+
+ OSS_DEBUG_INFO("started");
+
+bail:
+ tsk_safeobj_unlock(p_oss);
+ return err;
+}
+
+static int tdav_consumer_oss_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ int err = 0;
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+
+ if (!p_oss || !buffer || !size) {
+ OSS_DEBUG_ERROR("Invalid paramter");
+ return -1;
+ }
+
+ //tsk_safeobj_lock(p_oss);
+
+ if (!p_oss->b_started) {
+ OSS_DEBUG_WARN("Not started");
+ err = -2;
+ goto bail;
+ }
+ if ((err = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(p_oss), buffer, size, proto_hdr))/*thread-safe*/) {
+ OSS_DEBUG_WARN("Failed to put audio data to the jitter buffer");
+ goto bail;
+ }
+
+bail:
+ //tsk_safeobj_unlock(p_oss);
+ return err;
+}
+
+static int tdav_consumer_oss_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+static int tdav_consumer_oss_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int err;
+
+ if (!p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ /* must be set to false before joining so the playback thread exits its loop */
+ p_oss->b_started = tsk_false;
+
+ /* stop thread */
+ if (p_oss->tid[0]) {
+ tsk_thread_join(&(p_oss->tid[0]));
+ }
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ p_oss->b_prepared = tsk_false;
+
+ OSS_DEBUG_INFO("stopped");
+
+ tsk_safeobj_unlock(p_oss);
+
+ return 0;
+}
+
+
+//
+// Linux OSS consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_oss_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_oss_t *p_oss = self;
+ if (p_oss) {
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(p_oss));
+ /* init self */
+
+ p_oss->fd = -1;
+ tsk_safeobj_init(p_oss);
+
+ OSS_DEBUG_INFO("created");
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_oss_dtor(tsk_object_t * self)
+{
+ tdav_consumer_oss_t *p_oss = self;
+ if (p_oss) {
+
+ /* stop */
+ if (p_oss->b_started) {
+ tdav_consumer_oss_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(p_oss));
+ /* deinit self */
+ if (p_oss->fd > 0) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ TSK_FREE(p_oss->p_buff_ptr);
+ TSK_FREE(p_oss->p_buff16_ptr);
+ tsk_safeobj_deinit(p_oss);
+
+ OSS_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_oss_def_s =
+{
+ sizeof(tdav_consumer_oss_t),
+ tdav_consumer_oss_ctor,
+ tdav_consumer_oss_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_oss_plugin_def_s =
+{
+ &tdav_consumer_oss_def_s,
+
+ tmedia_audio,
+ "Linux OSS consumer",
+
+ tdav_consumer_oss_set,
+ tdav_consumer_oss_prepare,
+ tdav_consumer_oss_start,
+ tdav_consumer_oss_consume,
+ tdav_consumer_oss_pause,
+ tdav_consumer_oss_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_oss_plugin_def_t = &tdav_consumer_oss_plugin_def_s;
+
+#endif /* HAVE_LINUX_SOUNDCARD_H */
diff --git a/tinyDAV/src/audio/oss/tdav_producer_oss.c b/tinyDAV/src/audio/oss/tdav_producer_oss.c
new file mode 100644
index 0000000..d61fb96
--- /dev/null
+++ b/tinyDAV/src/audio/oss/tdav_producer_oss.c
@@ -0,0 +1,369 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/oss/tdav_producer_oss.h"
+
+#if HAVE_LINUX_SOUNDCARD_H
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/soundcard.h>
+
+#define OSS_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[OSS Producer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_producer_oss_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ tsk_bool_t b_started;
+ tsk_bool_t b_prepared;
+ tsk_bool_t b_muted;
+ int n_bits_per_sample;
+
+ int fd;
+ tsk_thread_handle_t* tid[1];
+
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ uint8_t* p_buff_ptr;
+
+ tsk_size_t n_buff16_size_in_bytes;
+ tsk_size_t n_buff16_size_in_samples;
+ uint16_t* p_buff16_ptr;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_producer_oss_t;
+
+static int __oss_from_8bits_to_16bits(const void* p_src, void* p_dst, tsk_size_t n_samples)
+{
+ tsk_size_t i;
+ const uint8_t *_p_src = (const uint8_t*)p_src;
+ uint16_t *_p_dst = (uint16_t*)p_dst;
+
+ if (!p_src || !p_dst || !n_samples) {
+ OSS_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+ for (i = 0; i < n_samples; ++i) {
+ _p_dst[i] = _p_src[i];
+ }
+ return 0;
+}
+
+static void* TSK_STDCALL _tdav_producer_oss_record_thread(void *param)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)param;
+ int err;
+ const void* p_buffer = ((p_oss->n_bits_per_sample == 8) ? (const void*)p_oss->p_buff16_ptr: (const void*)p_oss->p_buff_ptr);
+ tsk_size_t n_buffer_in_bytes = (p_oss->n_bits_per_sample == 8) ? p_oss->n_buff16_size_in_bytes : p_oss->n_buff_size_in_bytes;
+
+ OSS_DEBUG_INFO("__record_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_oss->b_started) {
+ tsk_safeobj_lock(p_oss);
+ if ((err = read(p_oss->fd, p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes)) != p_oss->n_buff_size_in_bytes) {
+ OSS_DEBUG_ERROR ("Failed to read data from audio interface failed (%d -> %s)", err , strerror(errno));
+ tsk_safeobj_unlock(p_oss);
+ goto bail;
+ }
+ if (p_oss->n_bits_per_sample == 8) {
+ if ((err = __oss_from_8bits_to_16bits(p_oss->p_buff_ptr, p_oss->p_buff16_ptr, p_oss->n_buff_size_in_samples))) {
+ tsk_safeobj_unlock(p_oss);
+ goto bail;
+ }
+ }
+ if (!p_oss->b_muted && TMEDIA_PRODUCER(p_oss)->enc_cb.callback) {
+ TMEDIA_PRODUCER(p_oss)->enc_cb.callback(TMEDIA_PRODUCER(p_oss)->enc_cb.callback_data, p_buffer, n_buffer_in_bytes);
+ }
+ tsk_safeobj_unlock(p_oss);
+ }
+bail:
+ OSS_DEBUG_INFO("__record_thread -- STOP");
+ return tsk_null;
+}
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_oss_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "mute")){
+ p_oss->b_muted = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+static int tdav_producer_oss_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ int err = 0, channels, sample_rate, bits_per_sample;
+
+ if (!p_oss || !codec && codec->plugin) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (p_oss->fd == -1) {
+ if ((p_oss->fd = open("/dev/dsp", O_RDONLY)) < 0) {
+ OSS_DEBUG_ERROR("open('/dev/dsp') failed: %s", strerror(errno));
+ err = -2;
+ goto bail;
+ }
+ }
+
+ // Set using requested
+ channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ sample_rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ bits_per_sample = TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample; // 16
+
+ // Prepare
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_BITS, &bits_per_sample)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_BITS, %d) failed: %d->%s", bits_per_sample, err, strerror(errno));
+ goto bail;
+ }
+ if (bits_per_sample != 16 && bits_per_sample != 8) {
+ OSS_DEBUG_ERROR("bits_per_sample=%d not supported", bits_per_sample);
+ err = -3;
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_CHANNELS, &channels)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_CHANNELS, %d) failed: %d->%s", channels, err, strerror(errno));
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_RATE, &sample_rate)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_RATE, %d) failed: %d->%s", sample_rate, err, strerror(errno));
+ goto bail;
+ }
+
+ p_oss->n_buff_size_in_bytes = (TMEDIA_PRODUCER(p_oss)->audio.ptime * sample_rate * ((bits_per_sample >> 3) * channels)) / 1000;
+ if (!(p_oss->p_buff_ptr = tsk_realloc(p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -4;
+ goto bail;
+ }
+ p_oss->n_buff_size_in_samples = (p_oss->n_buff_size_in_bytes / (bits_per_sample >> 3));
+ if (bits_per_sample == 8) {
+ p_oss->n_buff16_size_in_bytes = p_oss->n_buff_size_in_bytes << 1;
+ if (!(p_oss->p_buff16_ptr = tsk_realloc(p_oss->p_buff16_ptr, p_oss->n_buff16_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -5;
+ goto bail;
+ }
+ p_oss->n_buff16_size_in_samples = p_oss->n_buff_size_in_samples;
+ }
+
+ OSS_DEBUG_INFO("prepared: req_bits_per_sample=%d; req_channels=%d; req_rate=%d, resp_bits_per_sample=%d; resp_channels=%d; resp_rate=%d /// n_buff_size_in_samples=%u;n_buff_size_in_bytes=%u",
+ TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample, TMEDIA_PRODUCER(p_oss)->audio.channels, TMEDIA_PRODUCER(p_oss)->audio.rate,
+ bits_per_sample, channels, sample_rate,
+ p_oss->n_buff_size_in_samples, p_oss->n_buff_size_in_bytes);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_PRODUCER(p_oss)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(p_oss)->audio.channels = channels;
+ TMEDIA_PRODUCER(p_oss)->audio.rate = sample_rate;
+ // TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample = bits_per_sample;
+
+ p_oss->n_bits_per_sample = bits_per_sample;
+ p_oss->b_prepared = tsk_true;
+
+bail:
+ if (err) {
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ }
+ tsk_safeobj_unlock(p_oss);
+
+ return err;
+}
+
+static int tdav_producer_oss_start(tmedia_producer_t* self)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ int err = 0;
+
+ if (! p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (!p_oss->b_prepared) {
+ OSS_DEBUG_WARN("Not prepared");
+ err = -2;
+ goto bail;
+ }
+
+ if (p_oss->b_started) {
+ OSS_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start thread */
+ p_oss->b_started = tsk_true;
+ tsk_thread_create(&p_oss->tid[0], _tdav_producer_oss_record_thread, p_oss);
+
+ OSS_DEBUG_INFO("started");
+
+bail:
+ tsk_safeobj_unlock(p_oss);
+ return err;
+}
+
+static int tdav_producer_oss_pause(tmedia_producer_t* self)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+
+ if (!p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ OSS_DEBUG_INFO("paused");
+
+ return 0;
+}
+
+static int tdav_producer_oss_stop(tmedia_producer_t* self)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ int err;
+
+ if (!p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ /* must be set to false before joining so the record thread exits its loop */
+ p_oss->b_started = tsk_false;
+
+ /* stop thread */
+ if (p_oss->tid[0]) {
+ tsk_thread_join(&(p_oss->tid[0]));
+ }
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ p_oss->b_prepared = tsk_false;
+
+ OSS_DEBUG_INFO("stopped");
+
+ tsk_safeobj_unlock(p_oss);
+
+ return 0;
+}
+
+
+//
+// Linux OSS producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_oss_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_oss_t *p_oss = (tdav_producer_oss_t*)self;
+ if (p_oss) {
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(p_oss));
+ /* init self */
+ p_oss->fd = -1;
+ tsk_safeobj_init(p_oss);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_oss_dtor(tsk_object_t * self)
+{
+ tdav_producer_oss_t *p_oss = (tdav_producer_oss_t *)self;
+ if (p_oss) {
+ /* stop */
+ if (p_oss->b_started) {
+ tdav_producer_oss_stop((tmedia_producer_t*)p_oss);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(p_oss));
+ /* deinit self */
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ TSK_FREE(p_oss->p_buff_ptr);
+ TSK_FREE(p_oss->p_buff16_ptr);
+ tsk_safeobj_deinit(p_oss);
+
+ OSS_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_oss_def_s =
+{
+ sizeof(tdav_producer_oss_t),
+ tdav_producer_oss_ctor,
+ tdav_producer_oss_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_oss_plugin_def_s =
+{
+ &tdav_producer_oss_def_s,
+
+ tmedia_audio,
+ "Linux OSS producer",
+
+ tdav_producer_oss_set,
+ tdav_producer_oss_prepare,
+ tdav_producer_oss_start,
+ tdav_producer_oss_pause,
+ tdav_producer_oss_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_oss_plugin_def_t = &tdav_producer_oss_plugin_def_s;
+
+#endif /* HAVE_LINUX_SOUNDCARD_H */
diff --git a/tinyDAV/src/audio/tdav_consumer_audio.c b/tinyDAV/src/audio/tdav_consumer_audio.c
new file mode 100644
index 0000000..73d9688
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_consumer_audio.c
@@ -0,0 +1,272 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+
+/**@file tdav_consumer_audio.c
+* @brief Base class for all Audio consumers.
+*/
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_defaults.h"
+#include "tinymedia/tmedia_denoise.h"
+#include "tinymedia/tmedia_resampler.h"
+#include "tinymedia/tmedia_jitterbuffer.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_time.h"
+#include "tsk_debug.h"
+
+#if TSK_UNDER_WINDOWS
+# include <Winsock2.h> // timeval
+#elif defined(__SYMBIAN32__)
+# include <_timeval.h>
+#else
+# include <sys/time.h>
+#endif
+
+#define TDAV_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_CHANNELS_DEFAULT 2
+#define TDAV_RATE_DEFAULT 8000
+#define TDAV_PTIME_DEFAULT 20
+
+#define TDAV_AUDIO_GAIN_MAX 15
+
+/** Initialize audio consumer */
+int tdav_consumer_audio_init(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ TSK_DEBUG_INFO("tdav_consumer_audio_init()");
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if ((ret = tmedia_consumer_init(TMEDIA_CONSUMER(self)))){
+ return ret;
+ }
+
+ /* self (should be updated by prepare() using the codec's info) */
+ TMEDIA_CONSUMER(self)->audio.bits_per_sample = TDAV_BITS_PER_SAMPLE_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.ptime = TDAV_PTIME_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.in.channels = TDAV_CHANNELS_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.in.rate = TDAV_RATE_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_consumer_gain(), TDAV_AUDIO_GAIN_MAX);
+
+ tsk_safeobj_init(self);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two consumers.
+* @param consumer1 The first consumer to compare.
+* @param consumer2 The second consumer to compare.
+* @retval Returns an integral value indicating the relationship between the two consumers:
+* <0 : @a consumer1 less than @a consumer2.<br>
+* 0 : @a consumer1 identical to @a consumer2.<br>
+* >0 : @a consumer1 greater than @a consumer2.<br>
+*/
+int tdav_consumer_audio_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(consumer1, consumer2, &ret);
+ return ret;
+}
+
+int tdav_consumer_audio_set(tdav_consumer_audio_t* self, const tmedia_param_t* param)
+{
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (param->plugin_type == tmedia_ppt_consumer){
+ if (param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "gain")){
+ int32_t gain = *((int32_t*)param->value);
+ if (gain < TDAV_AUDIO_GAIN_MAX && gain >= 0){
+ TMEDIA_CONSUMER(self)->audio.gain = (uint8_t)gain;
+ TSK_DEBUG_INFO("audio consumer gain=%u", gain);
+ }
+ else{
+ TSK_DEBUG_ERROR("%u is invalid as gain value", gain);
+ return -2;
+ }
+ }
+ else if (tsk_striequals(param->key, "volume")){
+ TMEDIA_CONSUMER(self)->audio.volume = TSK_TO_INT32((uint8_t*)param->value);
+ TMEDIA_CONSUMER(self)->audio.volume = TSK_CLAMP(0, TMEDIA_CONSUMER(self)->audio.volume, 100);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* put data (bytes not shorts) into the jitter buffer (consumers always have ptime of 20ms) */
+int tdav_consumer_audio_put(tdav_consumer_audio_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ int ret;
+
+ if (!self || !data || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+
+ if (!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ uint32_t rate = TMEDIA_CONSUMER(self)->audio.out.rate ? TMEDIA_CONSUMER(self)->audio.out.rate : TMEDIA_CONSUMER(self)->audio.in.rate;
+ uint32_t channels = TMEDIA_CONSUMER(self)->audio.out.channels ? TMEDIA_CONSUMER(self)->audio.out.channels : tmedia_defaults_get_audio_channels_playback();
+ if ((ret = tmedia_jitterbuffer_open(self->jitterbuffer, TMEDIA_CONSUMER(self)->audio.ptime, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return ret;
+ }
+ }
+
+ ret = tmedia_jitterbuffer_put(self->jitterbuffer, (void*)data, data_size, proto_hdr);
+
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* get data from the jitter buffer (consumers should always have ptime of 20ms) */
+tsk_size_t tdav_consumer_audio_get(tdav_consumer_audio_t* self, void* out_data, tsk_size_t out_size)
+{
+ tsk_size_t ret_size = 0;
+ if (!self || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ tsk_safeobj_lock(self);
+
+ if (!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ int ret;
+ uint32_t frame_duration = TMEDIA_CONSUMER(self)->audio.ptime;
+ uint32_t rate = TMEDIA_CONSUMER(self)->audio.out.rate ? TMEDIA_CONSUMER(self)->audio.out.rate : TMEDIA_CONSUMER(self)->audio.in.rate;
+ uint32_t channels = TMEDIA_CONSUMER(self)->audio.out.channels ? TMEDIA_CONSUMER(self)->audio.out.channels : tmedia_defaults_get_audio_channels_playback();
+ if ((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return 0;
+ }
+ }
+ ret_size = tmedia_jitterbuffer_get(TMEDIA_JITTER_BUFFER(self->jitterbuffer), out_data, out_size);
+
+ tsk_safeobj_unlock(self);
+
+ // denoiser
+ if (self->denoise && self->denoise->opened && (self->denoise->echo_supp_enabled || self->denoise->noise_supp_enabled)) {
+ if (self->denoise->echo_supp_enabled) {
+ // Echo process last frame
+ if (self->denoise->playback_frame && self->denoise->playback_frame->size) {
+ tmedia_denoise_echo_playback(self->denoise, self->denoise->playback_frame->data, (uint32_t)self->denoise->playback_frame->size);
+ }
+ if (ret_size){
+ // save
+ tsk_buffer_copy(self->denoise->playback_frame, 0, out_data, ret_size);
+ }
+ }
+
+#if 1 // suppress noise if not supported by remote party's encoder
+ // suppress noise
+ if (self->denoise->noise_supp_enabled && ret_size) {
+ tmedia_denoise_process_playback(self->denoise, out_data, (uint32_t)ret_size);
+ }
+#endif
+ }
+
+ return ret_size;
+}
+
+int tdav_consumer_audio_tick(tdav_consumer_audio_t* self)
+{
+ if (!self || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ return tmedia_jitterbuffer_tick(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+}
+
+/* set denoiser */
+void tdav_consumer_audio_set_denoise(tdav_consumer_audio_t* self, struct tmedia_denoise_s* denoise)
+{
+ tsk_safeobj_lock(self);
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+ self->denoise = (struct tmedia_denoise_s*)tsk_object_ref(denoise);
+ tsk_safeobj_unlock(self);
+}
+
+void tdav_consumer_audio_set_jitterbuffer(tdav_consumer_audio_t* self, struct tmedia_jitterbuffer_s* jitterbuffer)
+{
+ tsk_safeobj_lock(self);
+ TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
+ self->jitterbuffer = (struct tmedia_jitterbuffer_s*)tsk_object_ref(jitterbuffer);
+ tsk_safeobj_unlock(self);
+}
+
+/** Reset jitterbuffer */
+int tdav_consumer_audio_reset(tdav_consumer_audio_t* self){
+ int ret;
+ if (!self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+ ret = tmedia_jitterbuffer_reset(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* tsk_safeobj_lock(self); */
+/* tsk_safeobj_unlock(self); */
+
+/** DeInitialize audio consumer */
+int tdav_consumer_audio_deinit(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if ((ret = tmedia_consumer_deinit(TMEDIA_CONSUMER(self)))){
+ /* return ret; */
+ }
+
+ /* self */
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+ TSK_OBJECT_SAFE_FREE(self->resampler);
+ TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
+
+ tsk_safeobj_deinit(self);
+
+ return 0;
+}
+
diff --git a/tinyDAV/src/audio/tdav_jitterbuffer.c b/tinyDAV/src/audio/tdav_jitterbuffer.c
new file mode 100644
index 0000000..4fd1010
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_jitterbuffer.c
@@ -0,0 +1,1036 @@
+/* File from: http://cms.speakup.nl/tech/opensource/jitterbuffer/verslag-20051209.pdf/ */
+
+/*******************************************************
+* jitterbuffer:
+* an application-independent jitterbuffer, which tries
+* to achieve the maximum user perception during a call.
+* For more information look at:
+* http://www.speakup.nl/opensource/jitterbuffer/
+*
+* Copyright on this file is held by:
+* - Jesse Kaijen <jesse@speakup.nl>
+* - SpeakUp <info@speakup.nl>
+*
+* Contributors:
+* Jesse Kaijen <jesse@speakup.nl>
+*
+* This program is free software, distributed under the terms of:
+* - the GNU Lesser (Library) General Public License
+* - the Mozilla Public License
+*
+* if you are interested in an different licence type, please contact us.
+*
+* How to use the jitterbuffer, please look at the comments
+* in the headerfile.
+*
+* Further details on specific implementations,
+* please look at the comments in the code file.
+*/
+#include "tinydav/audio/tdav_jitterbuffer.h"
+
+#if !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
+
+#include "tsk_memory.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#define jb_warn(...) (warnf ? warnf(__VA_ARGS__) : (void)0)
+#define jb_err(...) (errf ? errf(__VA_ARGS__) : (void)0)
+#define jb_dbg(...) (dbgf ? dbgf(__VA_ARGS__) : (void)0)
+
+//public functions
+jitterbuffer *jb_new();
+void jb_reset(jitterbuffer *jb);
+void jb_reset_all(jitterbuffer *jb);
+void jb_destroy(jitterbuffer *jb);
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings);
+
+void jb_get_info(jitterbuffer *jb, jb_info *stats);
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings);
+float jb_guess_mos(float p, long d, int codec);
+int jb_has_frames(jitterbuffer *jb);
+
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec);
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl);
+
+
+
+//private functions
+static void set_default_settings(jitterbuffer *jb);
+static void reset(jitterbuffer *jb);
+static long find_pointer(long *array, long max_index, long value);
+static void frame_free(jb_frame *frame);
+
+static void put_control(jitterbuffer *jb, void *data, int type, long ts);
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec);
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec);
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec);
+
+static int get_control(jitterbuffer *jb, void **data);
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl);
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff);
+
+static int get_next_frametype(jitterbuffer *jb, long ts);
+static long get_next_framets(jitterbuffer *jb);
+static jb_frame *get_frame(jitterbuffer *jb, long ts);
+static jb_frame *get_all_frames(jitterbuffer *jb);
+
+//debug...
+static jb_output_function_t warnf, errf, dbgf;
+void jb_setoutput(jb_output_function_t warn, jb_output_function_t err, jb_output_function_t dbg) {
+ warnf = warn;
+ errf = err;
+ dbgf = dbg;
+}
+
+
+/***********
+ * create a new jitterbuffer
+ * return NULL if malloc doesn't work
+ * else return jb with default_settings.
+ */
+jitterbuffer *jb_new()
+{
+ jitterbuffer *jb;
+
+ jb_dbg("N");
+ jb = tsk_calloc(1, sizeof(jitterbuffer));
+ if (!jb) {
+ jb_err("cannot allocate jitterbuffer\n");
+ return NULL;
+ }
+ set_default_settings(jb);
+ reset(jb);
+ return jb;
+}
+
+
+/***********
+ * empty voice messages
+ * reset statistics
+ * keep the settings
+ */
+void jb_reset(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("R");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset()\n");
+ return;
+ }
+
+ //free voice
+ while(jb->voiceframes) {
+ frame = get_all_frames(jb);
+ frame_free(frame);
+ }
+ //reset stats
+ memset(&(jb->info),0,sizeof(jb_info) );
+ // set default settings
+ reset(jb);
+}
+
+
+/***********
+ * empty nonvoice messages
+ * empty voice messages
+ * reset statistics
+ * reset settings to default
+ */
+void jb_reset_all(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("r");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset_all()\n");
+ return;
+ }
+
+ // free nonvoice
+ while(jb->controlframes) {
+ frame = jb->controlframes;
+ jb->controlframes = frame->next;
+ frame_free(frame);
+ }
+ // free voice and reset statistics is done by jb_reset
+ jb_reset(jb);
+ set_default_settings(jb);
+}
+
+
+/***********
+ * destroy the jitterbuffer
+ * free all the [non]voice frames with reset_all
+ * free the jitterbuffer
+ */
+void jb_destroy(jitterbuffer *jb)
+{
+ jb_dbg("D");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_destroy()\n");
+ return;
+ }
+
+ jb_reset_all(jb);
+ free(jb);
+}
+
+
+/***********
+ * Set settings for the jitterbuffer.
+ * Only if a setting is defined it will be written
+ * in the jb->settings.
+ * This means that no setting can be set to zero
+ */
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings)
+{
+ jb_dbg("S");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_set_settings()\n");
+ return;
+ }
+
+ if (settings->min_jb) {
+ jb->settings.min_jb = settings->min_jb;
+ }
+ if (settings->max_jb) {
+ jb->settings.max_jb = settings->max_jb;
+ }
+ if (settings->max_successive_interp) {
+ jb->settings.max_successive_interp = settings->max_successive_interp;
+ }
+ if (settings->extra_delay) {
+ jb->settings.extra_delay = settings->extra_delay;
+ }
+ if (settings->wait_grow) {
+ jb->settings.wait_grow = settings->wait_grow;
+ }
+ if (settings->wait_shrink) {
+ jb->settings.wait_shrink = settings->wait_shrink;
+ }
+ if (settings->max_diff) {
+ jb->settings.max_diff = settings->max_diff;
+ }
+}
+
+
+/***********
+ * validates the statistics
+ * the losspct due the jitterbuffer will be calculated.
+ * delay and delay_target will be calculated
+ * *stats = info
+ */
+void jb_get_info(jitterbuffer *jb, jb_info *stats)
+{
+ long max_index, pointer;
+
+ jb_dbg("I");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get_info()\n");
+ return;
+ }
+
+ jb->info.delay = jb->current - jb->min;
+ jb->info.delay_target = jb->target - jb->min;
+
+ //calculate the losspct...
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ?
+jb->hist_pointer : JB_HISTORY_SIZE-1;
+ if (max_index>1) {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index,
+jb->current);
+ jb->info.losspct = ((max_index - pointer)*100/max_index);
+ if (jb->info.losspct < 0) {
+ jb->info.losspct = 0;
+ }
+ } else {
+ jb->info.losspct = 0;
+ }
+
+ *stats = jb->info;
+}
+
+
+/***********
+ * gives the settings for this jitterbuffer
+ * *settings = settings
+ */
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings)
+{
+ jb_dbg("S");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get_settings()\n");
+ return;
+ }
+
+ *settings = jb->settings;
+}
+
+
+/***********
+ * returns an estimate on the MOS with given loss, delay and codec
+ * if the formula is not present the default will be used
+ * please use the JB_CODEC_OTHER if you want to define your own formula
+ *
+ */
+float jb_guess_mos(float p, long d, int codec)
+{
+ float result;
+
+ switch (codec) {
+ case JB_CODEC_GSM_EFR:
+ result = (4.31f - 0.23f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G723_1:
+ result = (3.99f - 0.16f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G729:
+ case JB_CODEC_G729A:
+ result = (4.13f - 0.14f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G711x_PLC:
+ result = (4.42f - 0.087f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G711x:
+ result = (4.42f - 0.63f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_OTHER:
+ default:
+ result = (4.42f - 0.63f*p - 0.0071f*d);
+
+ }
+ return result;
+}
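+
+/* Worked example (illustrative): for plain G.711 without PLC (JB_CODEC_G711x),
+ * 1% loss and 60 ms of delay give 4.42 - 0.63*1 - 0.0071*60 = 3.36 MOS. */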
+
+
+/***********
+ * if there are any frames left in JB returns JB_OK, otherwise returns JB_EMPTY
+ */
+int jb_has_frames(jitterbuffer *jb)
+{
+ jb_dbg("H");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_has_frames()\n");
+ return JB_NOJB;
+ }
+
+ if(jb->controlframes || jb->voiceframes) {
+ return JB_OK;
+ } else {
+ return JB_EMPTY;
+ }
+}
+
+
+/***********
+ * Put a packet into the jitterbuffers
+ * Only the timestamps of voice packets are put in the history,
+ * because the jitterbuffer only works on voice packets.
+ * Don't put packets twice in history and queue (e.g. when every frame is transmitted twice)
+ * keep track of statistics
+ */
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec)
+{
+ long pointer, max_index;
+
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_put()\n");
+ return;
+ }
+
+ jb->info.frames_received++;
+
+ if (type == JB_TYPE_CONTROL) {
+ //put the packet into the control-queue of the jitterbuffer
+ jb_dbg("pC");
+ put_control(jb,data,type,ts);
+
+ } else if (type == JB_TYPE_VOICE) {
+ // only add voice that aren't already in the buffer
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, ts);
+ if (jb->hist_sorted_timestamp[pointer]==ts) { //timestamp already in queue
+ jb_dbg("pT");
+ free(data);
+ jb->info.frames_dropped_twice++;
+ } else { //add
+ jb_dbg("pV");
+ /* add voicepacket to history */
+ put_history(jb,ts,now,ms,codec);
+ /*calculate jitterbuffer size*/
+ calculate_info(jb, ts, now, codec);
+ /*put the packet into the queue of the jitterbuffer*/
+ put_voice(jb,data,type,ms,ts,codec);
+ }
+
+ } else if (type == JB_TYPE_SILENCE){ //silence
+ jb_dbg("pS");
+ put_voice(jb,data,type,ms,ts,codec);
+
+ } else {//should NEVER happen
+ jb_err("jb_put(): type not known\n");
+ free(data);
+ }
+}
+
+
+/***********
+ * control frames have a higher priority than voice frames
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it is not yet time to play voice and no control frame is available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ */
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl)
+{
+ int result;
+
+ jb_dbg("A");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get()\n");
+ return JB_NOJB;
+ }
+
+ result = get_control(jb, data);
+ if (result != JB_OK ) { //no control message available maybe there is voice...
+ result = get_voice(jb, data, now, interpl);
+ }
+ return result;
+}
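+
+/***********
+ * Typical usage sketch (play() and play_concealment() are placeholder callbacks,
+ * not part of this API): the owner feeds every received voice frame with jb_put()
+ * and asks for a frame once per ptime. Frame data must be heap-allocated because
+ * the jitterbuffer takes ownership and frees it (see frame_free()).
+ *
+ * jb_put(jb, pcm, JB_TYPE_VOICE, ptime_ms, rtp_ts, now_ms, JB_CODEC_OTHER);
+ * switch (jb_get(jb, &out, now_ms, ptime_ms)) {
+ * case JB_OK: play(out); free(out); break;
+ * case JB_INTERP: play_concealment(ptime_ms); break;
+ * default: break; // JB_NOFRAME, JB_EMPTY or JB_NOJB
+ * }
+ */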
+
+
+/***********
+ * set all the settings to default
+ */
+static void set_default_settings(jitterbuffer *jb)
+{
+ jb->settings.min_jb = JB_MIN_SIZE;
+ jb->settings.max_jb = JB_MAX_SIZE;
+ jb->settings.max_successive_interp = JB_MAX_SUCCESSIVE_INTERP;
+ jb->settings.extra_delay = JB_ALLOW_EXTRA_DELAY;
+ jb->settings.wait_grow = JB_WAIT_GROW;
+ jb->settings.wait_shrink = JB_WAIT_SHRINK;
+ jb->settings.max_diff = JB_MAX_DIFF;
+}
+
+
+/***********
+ * reset the jitterbuffer so we can start in silence and
+ * we start with a new history
+ */
+static void reset(jitterbuffer *jb)
+{
+ jb->hist_pointer = 0; //start over
+ jb->silence_begin_ts = 0; //no begin_ts defined
+ jb->info.silence =1; //we always start in silence
+}
+
+
+/***********
+ * Search algorithm
+ * @REQUIRE max_index is within array
+ *
+ * Find the position of value in hist_sorted_delay
+ * if value doesn't exist return first pointer where array[low]>value
+ * int low; //the lowest index being examined
+ * int max_index; //the highest index being examined
+ * int mid; //the middle index between low and max_index.
+ * mid ==(low+max_index)/2
+ * at the end low is the position of value or where array[low]>value
+ */
+static long find_pointer(long *array, long max_index, long value)
+{
+ register long low, mid, high;
+ low = 0;
+ high = max_index;
+ while (low<=high) {
+ mid= (low+high)/2;
+ if (array[mid] < value) {
+ low = mid+1;
+ } else {
+ high = mid-1;
+ }
+ }
+ while(low < max_index && (array[low]==array[(low+1)]) ) {
+ low++;
+ }
+ return low;
+}
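+
+/* Example (illustrative): for array = {5, 10, 10, 20} and max_index = 3,
+ * find_pointer(array, 3, 10) returns 2 (the last of the equal values) and
+ * find_pointer(array, 3, 12) returns 3 (the first index where array[low] > value). */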
+
+
+/***********
+ * free the given frame, afterwards the framepointer is undefined
+ */
+static void frame_free(jb_frame *frame)
+{
+ if (frame->data) {
+ free(frame->data);
+ }
+ free(frame);
+}
+
+
+/***********
+ * put a nonvoice frame into the nonvoice queue
+ */
+static void put_control(jitterbuffer *jb, void *data, int type, long ts)
+{
+ jb_frame *frame, *p;
+
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ return;
+ }
+ frame->data = data;
+ frame->ts = ts;
+ frame->type = type;
+ frame->next = NULL;
+ data = NULL;//to avoid stealing memory
+
+ p = jb->controlframes;
+ if (p) { //there are already control messages
+ if (ts < p->ts) {
+ jb->controlframes = frame;
+ frame->next = p;
+ } else {
+ while (p->next && (ts >=p->next->ts)) {//sort on timestamps! so find place to put...
+ p = p->next;
+ }
+ if (p->next) {
+ frame->next = p->next;
+ }
+ p->next = frame;
+ }
+ } else {
+ jb->controlframes = frame;
+ }
+}
+
+
+/***********
+ * put a voice or silence frame into the jitterbuffer
+ */
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec)
+{
+ jb_frame *frame, *p;
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ return;
+ }
+
+ frame->data = data;
+ frame->ts = ts;
+ frame->ms = ms;
+ frame->type = type;
+ frame->codec = codec;
+
+ data = NULL; //to avoid stealing the memory location
+ /*
+ * frames are a circular list, jb->voiceframes points to the lowest ts,
+ * jb->voiceframes->prev points to the highest ts
+ */
+ if(!jb->voiceframes) { /* queue is empty */
+ jb->voiceframes = frame;
+ frame->next = frame;
+ frame->prev = frame;
+ } else {
+ p = jb->voiceframes;
+ if(ts < p->prev->ts) { //frame is out of order
+ jb->info.frames_ooo++;
+ }
+ if (ts < p->ts) { //frame is lowest, let voiceframes point to it!
+ jb->voiceframes = frame;
+ } else {
+ while(ts < p->prev->ts ) {
+ p = p->prev;
+ }
+ }
+ frame->next = p;
+ frame->prev = p->prev;
+ frame->next->prev = frame;
+ frame->prev->next = frame;
+ }
+}
+
+
+/***********
+ * puts the timestamps of a received packet in the history of *jb
+ * for later calculations of the size of jitterbuffer *jb.
+ *
+ * summary of function:
+ * - calculate delay difference
+ * - delete old value from hist & sorted_history_delay & sorted_history_timestamp if needed
+ * - add new value to history & sorted_history_delay & sorted_history_timestamp
+ * - we keep sorted_history_delay for calculations
+ * - we keep sorted_history_timestamp for ensuring each timestamp isn't put twice in the buffer.
+ */
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec)
+{
+ jb_hist_element out, in;
+ long max_index, pointer, location;
+
+ // max_index is the highest possible index
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ location = (jb->hist_pointer % JB_HISTORY_SIZE);
+
+ // we only want to delete an old value from the history
+ // once the history buffer is full.
+ if (jb->hist_pointer > JB_HISTORY_SIZE-1) {
+ /* the value we need to delete from sorted histories */
+ out = jb->hist[location];
+ //delete delay from hist_sorted_delay
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index, out.delay);
+ /* move over; pointer is the position of the kicked-out value */
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_delay[pointer]),
+ &(jb->hist_sorted_delay[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+
+ //delete timestamp from hist_sorted_timestamp
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, out.ts);
+ /* move over; pointer is the position of the kicked-out value */
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_timestamp[pointer]),
+ &(jb->hist_sorted_timestamp[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+ }
+
+ in.delay = now - ts; //delay of current packet
+ in.ts = ts; //timestamp of current packet
+ in.ms = ms; //length of current packet
+ in.codec = codec; //codec of current packet
+
+ /* adding the new delay to the sorted history
+ * first special cases:
+ * - delay is the first history stamp
+ * - delay > highest history stamp
+ */
+ if (max_index==0 || in.delay >= jb->hist_sorted_delay[max_index-1]) {
+ jb->hist_sorted_delay[max_index] = in.delay;
+ } else {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], (max_index-1), in.delay);
+ /* move over and add delay */
+ memmove( &(jb->hist_sorted_delay[pointer+1]),
+ &(jb->hist_sorted_delay[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_delay[pointer] = in.delay;
+ }
+
+ /* adding the new timestamp to the sorted history
+ * first special cases:
+ * - timestamp is the first history stamp
+ * - timestamp > highest history stamp
+ */
+ if (max_index==0 || in.ts >= jb->hist_sorted_timestamp[max_index-1]) {
+ jb->hist_sorted_timestamp[max_index] = in.ts;
+ } else {
+
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], (max_index-1), in.ts);
+ /* move over and add timestamp */
+ memmove( &(jb->hist_sorted_timestamp[pointer+1]),
+ &(jb->hist_sorted_timestamp[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_timestamp[pointer] = in.ts;
+ }
+
+ /* put the jb_hist_element in the history
+ * then increase hist_pointer for next time
+ */
+ jb->hist[location] = in;
+ jb->hist_pointer++;
+}
+
+
+/***********
+ * this tries to make a jitterbuffer that behaves like
+ * the jitterbuffer proposed in this article:
+ * Adaptive Playout Buffer Algorithm for Enhancing Perceived Quality of Streaming Applications
+ * by: Kouhei Fujimoto & Shingo Ata & Masayuki Murata
+ * http://www.nal.ics.es.osaka-u.ac.jp/achievements/web2002/pdf/journal/k-fujimo02TSJ-AdaptivePlayoutBuffer.pdf
+ *
+ * it calculates jitter and minimum delay
+ * get the best delay for the specified codec
+
+ */
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec)
+{
+ long diff, size, max_index, d, d1, d2, n;
+ float p, p1, p2, A, B;
+ //size = how many items there are in the history
+ size = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE;
+ max_index = size-1;
+
+ /*
+ * the Inter-Quartile Range can be used for estimating jitter
+ * http://www.slac.stanford.edu/comp/net/wan-mon/tutorial.html#variable
+ * just take the square root of the iqr for jitter
+ */
+ jb->info.iqr = jb->hist_sorted_delay[max_index*3/4] - jb->hist_sorted_delay[max_index/4];
+
+
+ /*
+ * The RTP way of calculating jitter.
+ * This one is used at the moment, although it is not correct.
+ * But in this way the other side understands us.
+ */
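+ /* This is the interarrival-jitter estimator of RFC 3550, section 6.4.1:
+ * J(i) = J(i-1) + (|D(i-1,i)| - J(i-1)) / 16, where D is the difference
+ * between the transit times of two consecutive packets. */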
+ diff = now - ts - jb->last_delay;
+ if (!jb->last_delay) {
+ diff = 0; //this makes sure we won't get odd jitter due to the first ts.
+ }
+ jb->last_delay = now - ts;
+ if (diff <0){
+ diff = -diff;
+ }
+ jb->info.jitter = jb->info.jitter + (diff - jb->info.jitter)/16;
+
+ /* jb->min is minimum delay in hist_sorted_delay, we don't look at the lowest 2% */
+ /* because sometimes there are odd delays in there */
+ jb->min = jb->hist_sorted_delay[(max_index*2/100)];
+
+ /*
+ * calculating the preferred size of the jitterbuffer:
+ * instead of calculating the optimum delay using the Pareto equation
+ * I look at the array of sorted delays and choose my optimum from there.
+ * We always walk through a percentage of the history because, given a tail such as:
+ * [...., 12, 300, 301 ,302]
+ * here we want to discard the last three, which only happens if we walk the array.
+ * the number of frames we walk depends on how scattered the sorted delays are.
+ * For that we look at the iqr. The dependencies of the iqr are based on
+ * tests we've done here in the lab. But are not optimized.
+ */
+ //init:
+ //the highest delay..
+ d = d1= d2 = jb->hist_sorted_delay[max_index]- jb->min;
+ A=B=LONG_MIN;
+ p = p2 =0;
+ n=0;
+ p1 = 5; //always look at the top 5%
+ if (jb->info.iqr >200) { //with more jitter look at more delays
+ p1=25;
+ } else if (jb->info.iqr >100) {
+ p1=20;
+ } else if (jb->info.iqr >50){
+ p1=11;
+ }
+
+ //find the optimum delay..
+ while(max_index>10 && (B > A ||p2<p1)) { // By MDI: from ">=" to ">"
+ //the packetloss with this delay
+ p2 =(n*100.0f/size);
+ // estimate MOS-value
+ B = jb_guess_mos(p2,d2,codec);
+ if (B > A) {
+ p = p2;
+ d = d2;
+ A = B;
+ }
+ d1 = d2;
+ //find next delay != delay so the same delay isn't calculated twice
+ //don't look further if we have seen half of the history
+ while((d2>=d1) && ((n*2)<max_index) ) {
+ n++;
+ d2 = jb->hist_sorted_delay[(max_index-n)] - jb->min;
+ }
+ }
+ //the targeted size of the jitterbuffer
+ if (jb->settings.min_jb && (jb->settings.min_jb > d) ) {
+ jb->target = jb->min + jb->settings.min_jb;
+ } else if (jb->settings.max_jb && (jb->settings.max_jb > d) ){
+ jb->target = jb->min + jb->settings.max_jb;
+ } else {
+ jb->target = jb->min + d;
+ }
+}
+
+
+/***********
+ * if there is a nonvoice frame it will be returned [*data] and the frame
+ * will be made free
+ */
+static int get_control(jitterbuffer *jb, void **data)
+{
+ jb_frame *frame;
+ int result;
+
+ frame = jb->controlframes;
+ if (frame) {
+ jb_dbg("gC");
+ *data = frame->data;
+ frame->data = NULL;
+ jb->controlframes = frame->next;
+ frame_free(frame);
+ result = JB_OK;
+ } else {
+ result = JB_NOFRAME;
+ }
+ return result;
+}
+
+
+/***********
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it is not yet time to play voice and/or no frame is available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ *
+ * if the next frame is a silence frame we will go in silence-mode
+ * each new instance of the jitterbuffer will start in silence mode
+ * in silence mode we will set the jitterbuffer to the size we want
+ * when we are not in silence mode get_voicecase will handle the rest.
+ */
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl)
+{
+ jb_frame *frame;
+ long diff;
+ int result;
+
+ diff = jb->target - jb->current;
+
+ //if the next frame is a silence frame, go in silence mode...
+ if((get_next_frametype(jb, now - jb->current) == JB_TYPE_SILENCE) ) {
+ jb_dbg("gs");
+ frame = get_frame(jb, now - jb->current);
+ *data = frame->data;
+ frame->data = NULL;
+ jb->info.silence =1;
+ jb->silence_begin_ts = frame->ts;
+ frame_free(frame);
+ result = JB_OK;
+ } else {
+ if(jb->info.silence) { // we are in silence
+ /*
+ * During silence we can set the jitterbuffer size to the size
+ * we want...
+ */
+ if (diff) {
+ jb->current = jb->target;
+ }
+ frame = get_frame(jb, now - jb->current);
+ if (frame) {
+ if (jb->silence_begin_ts && frame->ts < jb->silence_begin_ts) {
+ jb_dbg("gL");
+ /* voice frame is late, next!*/
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("gP");
+ /* voice frame */
+ jb->info.silence = 0;
+ jb->silence_begin_ts = 0;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->info.last_voice_ms = frame->ms;
+ *data = frame->data;
+ frame->data = NULL;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { //no frame
+ jb_dbg("gS");
+ result = JB_EMPTY;
+ }
+ } else { //voice case
+ result = get_voicecase(jb,data,now,interpl,diff);
+ }
+ }
+ return result;
+}
+
+
+/***********
+ * The voicecase has four 'options'
+ * - difference is way off, reset
+ * - diff > 0, we may need to grow
+ * - diff < 0, we may need to shrink
+ * - everything else
+ */
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff)
+{
+ jb_frame *frame;
+ int result;
+
+ // * - difference is way off, reset
+ if (diff > jb->settings.max_diff || -diff > jb->settings.max_diff) {
+ jb_err("wakko diff in get_voicecase\n");
+ reset(jb); //reset hist because the timestamps are wakko.
+ result = JB_NOFRAME;
+ //- diff > 0, we may need to grow
+ } else if ((diff > 0) &&
+ (now > (jb->last_adjustment + jb->settings.wait_grow)
+ || (now + jb->current + interpl) < get_next_framets(jb) ) ) { //grow
+ /* first try to grow */
+ if (diff<interpl/2) {
+ jb_dbg("ag");
+ jb->current +=diff;
+ } else {
+ jb_dbg("aG");
+ /* grow by interp frame len */
+ jb->current += interpl;
+ }
+ jb->last_adjustment = now;
+ result = get_voice(jb, data, now, interpl);
+ //- diff < 0, we may need to shrink
+ } else if ( (diff < 0)
+ && (now > (jb->last_adjustment + jb->settings.wait_shrink))
+ && ((-diff) > jb->settings.extra_delay) ) {
+ /* now try to shrink
+ * if there is a frame shrink by frame length
+ * otherwise shrink by interpl
+ */
+ jb->last_adjustment = now;
+
+ frame = get_frame(jb, now - jb->current);
+ if(frame) {
+ jb_dbg("as");
+ /* shrink by frame size we're throwing out */
+ jb->info.frames_dropped++;
+ jb->current -= frame->ms;
+ frame_free(frame);
+ } else {
+ jb_dbg("aS");
+ /* shrink by interpl */
+ jb->current -= interpl;
+ }
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ /* if it is not yet time to play, result = JB_NOFRAME.
+ * otherwise we try to play a frame: if a frame is available
+ * and not late, it is played; if it is late, it is dropped
+ * and the next one is tried; the last option is interpolating.
+ */
+ if (now - jb->current < jb->next_voice_time) {
+ jb_dbg("aN");
+ result = JB_NOFRAME;
+ } else {
+ frame = get_frame(jb, now - jb->current);
+ if (frame) { //there is a frame
+ /* voice frame is late */
+ if(frame->ts < jb->next_voice_time) { //late
+ jb_dbg("aL");
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("aP");
+ /* normal case; return the frame, increment stuff */
+ *data = frame->data;
+ frame->data = NULL;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->cnt_successive_interp = 0;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { // no frame, thus interpolate
+ jb->cnt_successive_interp++;
+ /* assume silence instead of continuing to interpolate */
+ if (jb->settings.max_successive_interp && jb->cnt_successive_interp >= jb->settings.max_successive_interp) {
+ jb->info.silence = 1;
+ jb->silence_begin_ts = jb->next_voice_time;
+ }
+ jb_dbg("aI");
+ jb->next_voice_time += interpl;
+ result = JB_INTERP;
+ }
+ }
+ }
+ return result;
+
+}
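+
+/* Worked example (illustrative, values assumed): with target=60, current=40
+ * (diff=+20) and interpl=20, the grow branch applies; diff < interpl/2 is
+ * false (20 < 10), so current grows by the full interpl (20) and get_voice()
+ * is retried. Conversely, with diff=-40, wait_shrink elapsed and
+ * extra_delay=20, the shrink branch drops one queued frame (current -=
+ * frame->ms) or, if none is queued, shrinks by interpl. All values share the
+ * millisecond unit of 'now'.
+ */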
+
+
+/***********
+ * if there are frames and the next frame->ts is smaller than or equal to ts,
+ * return the type of the next frame.
+ * else return 0
+ */
+static int get_next_frametype(jitterbuffer *jb, long ts)
+{
+ jb_frame *frame;
+ int result;
+
+ result = 0;
+ frame = jb->voiceframes;
+ if (frame && frame->ts <= ts) {
+ result = frame->type;
+ }
+ return result;
+}
+
+
+/***********
+ * returns ts from next frame in jb->voiceframes
+ * or returns LONG_MAX if there is no frame
+ */
+static long get_next_framets(jitterbuffer *jb)
+{
+ if (jb->voiceframes) {
+ return jb->voiceframes->ts;
+ }
+ return LONG_MAX;
+}
+
+
+/***********
+ * if there is a frame in jb->voiceframes and
+ * has a timestamp smaller/equal to ts
+ * this frame will be returned and
+ * removed from the queue
+ */
+static jb_frame *get_frame(jitterbuffer *jb, long ts)
+{
+ jb_frame *frame;
+
+ frame = jb->voiceframes;
+ if (frame && frame->ts <= ts) {
+ if(frame->next == frame) {
+ jb->voiceframes = NULL;
+ } else {
+ /* remove this frame */
+ frame->prev->next = frame->next;
+ frame->next->prev = frame->prev;
+ jb->voiceframes = frame->next;
+ }
+ return frame;
+ }
+ return NULL;
+}
+
+/***********
+ * if there is a frame in jb->voiceframes
+ * this frame will be unconditionally returned and
+ * removed from the queue
+ */
+static jb_frame *get_all_frames(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ frame = jb->voiceframes;
+ if (frame) {
+ if(frame->next == frame) {
+ jb->voiceframes = NULL;
+ } else {
+ /* remove this frame */
+ frame->prev->next = frame->next;
+ frame->next->prev = frame->prev;
+ jb->voiceframes = frame->next;
+ }
+ return frame;
+ }
+ return NULL;
+}
+
+
+#endif // !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
diff --git a/tinyDAV/src/audio/tdav_producer_audio.c b/tinyDAV/src/audio/tdav_producer_audio.c
new file mode 100644
index 0000000..8c73c9f
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_producer_audio.c
@@ -0,0 +1,133 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_audio.c
+ * @brief Base class for all Audio producers.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/audio/tdav_producer_audio.h"
+
+#include "tinymedia/tmedia_defaults.h"
+
+#define TDAV_PRODUCER_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_PRODUCER_CHANNELS_DEFAULT 1
+#define TDAV_PRODUCER_RATE_DEFAULT 8000
+#define TDAV_PRODUCER_PTIME_DEFAULT 20
+#define TDAV_PRODUCER_AUDIO_GAIN_MAX 15
+
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+/** Initialize Audio producer
+* @param self The producer to initialize
+*/
+int tdav_producer_audio_init(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if((ret = tmedia_producer_init(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ /* self (should be updated by prepare() using the codec's info) */
+ TMEDIA_PRODUCER(self)->audio.bits_per_sample = TDAV_PRODUCER_BITS_PER_SAMPLE_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.channels = TDAV_PRODUCER_CHANNELS_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.rate = TDAV_PRODUCER_RATE_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.ptime = TDAV_PRODUCER_PTIME_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_producer_gain(), TDAV_PRODUCER_AUDIO_GAIN_MAX);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two producers.
+* @param producer1 The first producer to compare.
+* @param producer2 The second producer to compare.
+* @retval Returns an integral value indicating the relationship between the two producers:
+* <0 : @a producer1 less than @a producer2.<br>
+* 0 : @a producer1 identical to @a producer2.<br>
+* >0 : @a producer1 greater than @a producer2.<br>
+*/
+int tdav_producer_audio_cmp(const tsk_object_t* producer1, const tsk_object_t* producer2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(producer1, producer2, &ret);
+ return ret;
+}
+
+int tdav_producer_audio_set(tdav_producer_audio_t* self, const tmedia_param_t* param)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(param->plugin_type == tmedia_ppt_producer){
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "gain")){
+ int32_t gain = *((int32_t*)param->value);
+ if(gain<TDAV_PRODUCER_AUDIO_GAIN_MAX && gain>=0){
+ TMEDIA_PRODUCER(self)->audio.gain = (uint8_t)gain;
+ TSK_DEBUG_INFO("audio producer gain=%u", gain);
+ }
+ else{
+ TSK_DEBUG_ERROR("%u is invalid as gain value", gain);
+ return -2;
+ }
+ }
+ else if(tsk_striequals(param->key, "volume")){
+ TMEDIA_PRODUCER(self)->audio.volume = TSK_TO_INT32((uint8_t*)param->value);
+ TMEDIA_PRODUCER(self)->audio.volume = TSK_CLAMP(0, TMEDIA_PRODUCER(self)->audio.volume, 100);
+ TSK_DEBUG_INFO("audio producer volume=%u", TMEDIA_PRODUCER(self)->audio.volume);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/** Deinitialize a producer
+*/
+int tdav_producer_audio_deinit(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if((ret = tmedia_producer_deinit(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ return ret;
+} \ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_session_audio.c b/tinyDAV/src/audio/tdav_session_audio.c
new file mode 100644
index 0000000..f12e801
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_session_audio.c
@@ -0,0 +1,991 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_audio.c
+* @brief Audio Session plugin.
+*
+* @author Mamadou Diop <diopmamadou(at)doubango.org>
+* @contributors: See $(DOUBANGO_HOME)\contributors.txt
+*/
+#include "tinydav/audio/tdav_session_audio.h"
+
+//#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_resampler.h"
+#include "tinymedia/tmedia_denoise.h"
+#include "tinymedia/tmedia_jitterbuffer.h"
+#include "tinymedia/tmedia_consumer.h"
+#include "tinymedia/tmedia_producer.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tinyrtp/trtp_manager.h"
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_timer.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY 5
+
+static int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id);
+static struct tdav_session_audio_dtmfe_s* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E);
+static void _tdav_session_audio_apply_gain(void* buffer, int len, int bps, int gain);
+static tmedia_resampler_t* _tdav_session_audio_resampler_create(int32_t bytes_per_sample, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, void** resampler_buffer, tsk_size_t *resampler_buffer_size);
+
+/* DTMF event object */
+typedef struct tdav_session_audio_dtmfe_s
+{
+ TSK_DECLARE_OBJECT;
+
+ tsk_timer_id_t timer_id;
+ trtp_rtp_packet_t* packet;
+
+ const tdav_session_audio_t* session;
+}
+tdav_session_audio_dtmfe_t;
+extern const tsk_object_def_t *tdav_session_audio_dtmfe_def_t;
+
+// RTP/RTCP callback (From the network to the consumer)
+static int tdav_session_audio_rtp_cb(const void* callback_data, const struct trtp_rtp_packet_s* packet)
+{
+ tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
+ tmedia_codec_t* codec = tsk_null;
+ tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+ int ret = -1;
+
+ if (!audio || !packet || !packet->header) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ goto bail;
+ }
+
+ if (audio->is_started && base->consumer && base->consumer->is_started) {
+ tsk_size_t out_size = 0;
+
+ // Find the codec to use to decode the RTP payload
+ if (!audio->decoder.codec || audio->decoder.payload_type != packet->header->payload_type) {
+ tsk_istr_t format;
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+ tsk_itoa(packet->header->payload_type, &format);
+ if (!(audio->decoder.codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->neg_codecs, format)) || !audio->decoder.codec->plugin || !audio->decoder.codec->plugin->decode){
+ TSK_DEBUG_ERROR("%s is not a valid payload for this session", format);
+ ret = -2;
+ goto bail;
+ }
+ audio->decoder.payload_type = packet->header->payload_type;
+ }
+ // ref() the codec to be able to use it for a short time after stop() (SAFE_FREE(codec))
+ if (!(codec = tsk_object_ref(TSK_OBJECT(audio->decoder.codec)))) {
+ TSK_DEBUG_ERROR("Failed to get decoder codec");
+ goto bail;
+ }
+
+ // Open codec if not already done
+ if (!TMEDIA_CODEC(codec)->opened) {
+ tsk_safeobj_lock(base);
+ if ((ret = tmedia_codec_open(codec))) {
+ tsk_safeobj_unlock(base);
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", codec->plugin->desc);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+ goto bail;
+ }
+ tsk_safeobj_unlock(base);
+ }
+ // Decode data
+ out_size = codec->plugin->decode(codec, packet->payload.data, packet->payload.size, &audio->decoder.buffer, &audio->decoder.buffer_size, packet->header);
+ if (out_size && audio->is_started) { // check "is_started" again to be sure stop() was not called by another thread
+ void* buffer = audio->decoder.buffer;
+ tsk_size_t size = out_size;
+
+ // resample if needed
+ if ((base->consumer->audio.out.rate && base->consumer->audio.out.rate != codec->in.rate) || (base->consumer->audio.out.channels && base->consumer->audio.out.channels != TMEDIA_CODEC_AUDIO(codec)->in.channels)) {
+ tsk_size_t resampler_result_size = 0;
+ int bytesPerSample = (base->consumer->audio.bits_per_sample >> 3);
+
+ if (!audio->decoder.resampler.instance) {
+ TSK_DEBUG_INFO("Create audio resampler(%s) for consumer: rate=%d->%d, channels=%d->%d, bytesPerSample=%d",
+ codec->plugin->desc,
+ codec->in.rate, base->consumer->audio.out.rate,
+ TMEDIA_CODEC_AUDIO(codec)->in.channels, base->consumer->audio.out.channels,
+ bytesPerSample);
+ audio->decoder.resampler.instance = _tdav_session_audio_resampler_create(
+ bytesPerSample,
+ codec->in.rate, base->consumer->audio.out.rate,
+ base->consumer->audio.ptime,
+ TMEDIA_CODEC_AUDIO(codec)->in.channels, base->consumer->audio.out.channels,
+ TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY,
+ &audio->decoder.resampler.buffer, &audio->decoder.resampler.buffer_size
+ );
+ }
+ if (!audio->decoder.resampler.instance) {
+ TSK_DEBUG_ERROR("No resampler to handle data");
+ ret = -5;
+ goto bail;
+ }
+ if (!(resampler_result_size = tmedia_resampler_process(audio->decoder.resampler.instance, buffer, size / bytesPerSample, audio->decoder.resampler.buffer, audio->decoder.resampler.buffer_size / bytesPerSample))){
+ TSK_DEBUG_ERROR("Failed to process audio resampler input buffer");
+ ret = -6;
+ goto bail;
+ }
+
+ buffer = audio->decoder.resampler.buffer;
+ size = audio->decoder.resampler.buffer_size;
+ }
+
+ // adjust the gain
+ if (base->consumer->audio.gain) {
+ _tdav_session_audio_apply_gain(buffer, (int)size, base->consumer->audio.bits_per_sample, base->consumer->audio.gain);
+ }
+ // consume the frame
+ tmedia_consumer_consume(base->consumer, buffer, size, packet->header);
+ }
+ }
+ else {
+ TSK_DEBUG_INFO("Session audio not ready");
+ }
+
+ // everything is ok
+ ret = 0;
+
+bail:
+ tsk_object_unref(TSK_OBJECT(codec));
+ return ret;
+}
+
+// Producer callback (From the producer to the network). Will encode() data before sending
+static int tdav_session_audio_producer_enc_cb(const void* callback_data, const void* buffer, tsk_size_t size)
+{
+ int ret = 0;
+
+ tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
+ tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+
+ if (!audio) {
+ TSK_DEBUG_ERROR("Null session");
+ return 0;
+ }
+
+ // do nothing if the session is held
+ // when the session is held the end user gets feedback and can also put the consumer and producer on pause
+ if (TMEDIA_SESSION(audio)->lo_held) {
+ return 0;
+ }
+
+ // get best negotiated codec if not already done
+ // the encoder codec could be null when the session is renegotiated without re-starting (e.g. hold/resume)
+ if (!audio->encoder.codec) {
+ const tmedia_codec_t* codec;
+ tsk_safeobj_lock(base);
+ if (!(codec = tdav_session_av_get_best_neg_codec(base))) {
+ TSK_DEBUG_ERROR("No codec matched");
+ tsk_safeobj_unlock(base);
+ return -2;
+ }
+ audio->encoder.codec = tsk_object_ref(TSK_OBJECT(codec));
+ tsk_safeobj_unlock(base);
+ }
+
+ if (audio->is_started && base->rtp_manager && base->rtp_manager->is_started) {
+ /* encode */
+ tsk_size_t out_size = 0;
+
+ // Open codec if not already done
+ if (!audio->encoder.codec->opened) {
+ tsk_safeobj_lock(base);
+ if ((ret = tmedia_codec_open(audio->encoder.codec))) {
+ tsk_safeobj_unlock(base);
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", audio->encoder.codec->plugin->desc);
+ return -4;
+ }
+ tsk_safeobj_unlock(base);
+ }
+ // check if we're sending DTMF or not
+ if (audio->is_sending_dtmf_events) {
+ if (base->rtp_manager) {
+ // increment the timestamp
+ base->rtp_manager->rtp.timestamp += TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec)/*duration*/;
+ }
+ TSK_DEBUG_INFO("Skiping audio frame as we're sending DTMF...");
+ return 0;
+ }
+
+ // resample if needed
+ if (base->producer->audio.rate != audio->encoder.codec->out.rate || base->producer->audio.channels != TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels){
+ tsk_size_t resampler_result_size = 0;
+ int bytesPerSample = (base->producer->audio.bits_per_sample >> 3);
+
+ if (!audio->encoder.resampler.instance){
+ TSK_DEBUG_INFO("Create audio resampler(%s) for producer: rate=%d->%d, channels=%d->%d, bytesPerSample=%d",
+ audio->encoder.codec->plugin->desc,
+ base->producer->audio.rate, audio->encoder.codec->out.rate,
+ base->producer->audio.channels, TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels,
+ bytesPerSample);
+ audio->encoder.resampler.instance = _tdav_session_audio_resampler_create(
+ bytesPerSample,
+ base->producer->audio.rate, audio->encoder.codec->out.rate,
+ base->producer->audio.ptime,
+ base->producer->audio.channels, TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels,
+ TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY,
+ &audio->encoder.resampler.buffer, &audio->encoder.resampler.buffer_size
+ );
+ }
+ if (!audio->encoder.resampler.instance){
+ TSK_DEBUG_ERROR("No resampler to handle data");
+ ret = -1;
+ goto done;
+ }
+ if (!(resampler_result_size = tmedia_resampler_process(audio->encoder.resampler.instance, buffer, size / bytesPerSample, audio->encoder.resampler.buffer, audio->encoder.resampler.buffer_size / bytesPerSample))){
+ TSK_DEBUG_ERROR("Failed to process audio resampler input buffer");
+ ret = -1;
+ goto done;
+ }
+
+ buffer = audio->encoder.resampler.buffer;
+ size = audio->encoder.resampler.buffer_size;
+ }
+
+ // Denoise (VAD, AGC, Noise suppression, ...)
+ // Must be done after resampling
+ if (audio->denoise){
+ tsk_bool_t silence_or_noise = tsk_false;
+ if (audio->denoise->echo_supp_enabled){
+ ret = tmedia_denoise_process_record(TMEDIA_DENOISE(audio->denoise), (void*)buffer, (uint32_t)size, &silence_or_noise);
+ }
+ }
+ // adjust the gain
+ // Must be done after resampling
+ if (base->producer->audio.gain){
+ _tdav_session_audio_apply_gain((void*)buffer, (int)size, base->producer->audio.bits_per_sample, base->producer->audio.gain);
+ }
+
+ // Encode data
+ if ((audio->encoder.codec = tsk_object_ref(audio->encoder.codec))){ /* Thread safeness (SIP reINVITE or UPDATE could update the encoder) */
+ out_size = audio->encoder.codec->plugin->encode(audio->encoder.codec, buffer, size, &audio->encoder.buffer, &audio->encoder.buffer_size);
+ if (out_size){
+ trtp_manager_send_rtp(base->rtp_manager, audio->encoder.buffer, out_size, TMEDIA_CODEC_FRAME_DURATION_AUDIO_ENCODING(audio->encoder.codec), tsk_false/*Marker*/, tsk_true/*lastPacket*/);
+ }
+ tsk_object_unref(audio->encoder.codec);
+ }
+ else{
+ TSK_DEBUG_WARN("No encoder");
+ }
+ }
+
+done:
+ return ret;
+}
+
+
+/* ============ Plugin interface ================= */
+
+static int tdav_session_audio_set(tmedia_session_t* self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_audio_t* audio;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (tdav_session_av_set(TDAV_SESSION_AV(self), param) == tsk_true){
+ return 0;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ if (param->plugin_type == tmedia_ppt_consumer){
+ TSK_DEBUG_ERROR("Not expected consumer_set(%s)", param->key);
+ }
+ else if (param->plugin_type == tmedia_ppt_producer){
+ TSK_DEBUG_ERROR("Not expected producer_set(%s)", param->key);
+ }
+ else{
+ if (param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "echo-supp")){
+ if (audio->denoise){
+ audio->denoise->echo_supp_enabled = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ }
+ }
+ else if (tsk_striequals(param->key, "echo-tail")){
+ if (audio->denoise){
+ return tmedia_denoise_set(audio->denoise, param);
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int tdav_session_audio_get(tmedia_session_t* self, tmedia_param_t* param)
+{
+ if (!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // try with the base class to see if this option is supported or not
+ if (tdav_session_av_get(TDAV_SESSION_AV(self), param) == tsk_true){
+ return 0;
+ }
+ else {
+ // the codec information is held by the session, even though the user is allowed to request it for the consumer/producer
+ if (param->value_type == tmedia_pvt_pobject){
+ if (param->plugin_type == tmedia_ppt_consumer){
+ TSK_DEBUG_ERROR("Not implemented");
+ return -4;
+ }
+ else if (param->plugin_type == tmedia_ppt_producer){
+ if (tsk_striequals("codec", param->key)) {
+ const tmedia_codec_t* codec;
+ if (!(codec = TDAV_SESSION_AUDIO(self)->encoder.codec)){
+ codec = tdav_session_av_get_best_neg_codec((const tdav_session_av_t*)self); // up to the caller to release the object
+ }
+ *((tsk_object_t**)param->value) = tsk_object_ref(TSK_OBJECT(codec));
+ return 0;
+ }
+ }
+ else if (param->plugin_type == tmedia_ppt_session) {
+ if (tsk_striequals(param->key, "codec-encoder")) {
+ *((tsk_object_t**)param->value) = tsk_object_ref(TDAV_SESSION_AUDIO(self)->encoder.codec); // up to the caller to release the object
+ return 0;
+ }
+ }
+ }
+ }
+
+ TSK_DEBUG_WARN("This session doesn't support get(%s)", param->key);
+ return -2;
+}
+
+static int tdav_session_audio_prepare(tmedia_session_t* self)
+{
+ tdav_session_av_t* base = (tdav_session_av_t*)(self);
+ int ret;
+
+ if ((ret = tdav_session_av_prepare(base))){
+ TSK_DEBUG_ERROR("tdav_session_av_prepare(audio) failed");
+ return ret;
+ }
+
+ if (base->rtp_manager){
+ ret = trtp_manager_set_rtp_callback(base->rtp_manager, tdav_session_audio_rtp_cb, base);
+ }
+
+ return ret;
+}
+
+static int tdav_session_audio_start(tmedia_session_t* self)
+{
+ int ret;
+ tdav_session_audio_t* audio;
+ const tmedia_codec_t* codec;
+ tdav_session_av_t* base;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ if (audio->is_started) {
+ TSK_DEBUG_INFO("Audio session already started");
+ return 0;
+ }
+
+ if (!(codec = tdav_session_av_get_best_neg_codec(base))){
+ TSK_DEBUG_ERROR("No codec matched");
+ return -2;
+ }
+
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ audio->encoder.codec = tsk_object_ref((tsk_object_t*)codec);
+
+ if ((ret = tdav_session_av_start(base, codec))){
+ TSK_DEBUG_ERROR("tdav_session_av_start(audio) failed");
+ return ret;
+ }
+
+ if (base->rtp_manager){
+ /* Denoise (AEC, Noise Suppression, AGC)
+ * tmedia_denoise_process_record() is called after resampling and before encoding which means sampling rate is equal to codec's rate
+ * tmedia_denoise_echo_playback() is called before playback which means sampling rate is equal to consumer's rate
+ */
+ if (audio->denoise){
+ uint32_t record_frame_size_samples = TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec);
+ uint32_t record_sampling_rate = TMEDIA_CODEC_RATE_ENCODING(audio->encoder.codec);
+ uint32_t record_channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(audio->encoder.codec);
+
+ uint32_t playback_frame_size_samples = (base->consumer && base->consumer->audio.ptime && base->consumer->audio.out.rate && base->consumer->audio.out.channels)
+ ? ((base->consumer->audio.ptime * base->consumer->audio.out.rate) / 1000) * base->consumer->audio.out.channels
+ : TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_DECODING(audio->encoder.codec);
+ uint32_t playback_sampling_rate = (base->consumer && base->consumer->audio.out.rate)
+ ? base->consumer->audio.out.rate
+ : TMEDIA_CODEC_RATE_DECODING(audio->encoder.codec);
+ uint32_t playback_channels = (base->consumer && base->consumer->audio.out.channels)
+ ? base->consumer->audio.out.channels
+ : TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(audio->encoder.codec);
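+
+ // Illustrative numbers (assumed, not from this code): for a 20 ms ptime this
+ // typically yields 160 record samples at 8 kHz mono, and
+ // ((20 * 48000) / 1000) * 2 = 1920 playback samples for a 48 kHz stereo consumer.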
+
+ TSK_DEBUG_INFO("Audio denoiser to be opened(record_frame_size_samples=%u, record_sampling_rate=%u, record_channels=%u, playback_frame_size_samples=%u, playback_sampling_rate=%u, playback_channels=%u)",
+ record_frame_size_samples, record_sampling_rate, record_channels, playback_frame_size_samples, playback_sampling_rate, playback_channels);
+
+ // close()
+ tmedia_denoise_close(audio->denoise);
+ // open() with new values
+ tmedia_denoise_open(audio->denoise,
+ record_frame_size_samples, record_sampling_rate, TSK_CLAMP(1, record_channels, 2),
+ playback_frame_size_samples, playback_sampling_rate, TSK_CLAMP(1, playback_channels, 2));
+ }
+ }
+
+ audio->is_started = (ret == 0);
+
+ return ret;
+}
+
+static int tdav_session_audio_stop(tmedia_session_t* self)
+{
+ tdav_session_audio_t* audio = TDAV_SESSION_AUDIO(self);
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+ int ret = tdav_session_av_stop(base);
+ audio->is_started = tsk_false;
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+
+ // close the jitter buffer and denoiser to be sure they will be reopened and reinitialized on reINVITE or UPDATE
+ // this is a "must" when the initial and updated sessions use codecs with different rates
+ if (audio->jitterbuffer && audio->jitterbuffer->opened) {
+ ret = tmedia_jitterbuffer_close(audio->jitterbuffer);
+ }
+ if (audio->denoise && audio->denoise->opened) {
+ ret = tmedia_denoise_close(audio->denoise);
+ }
+ return ret;
+}
+
+static int tdav_session_audio_send_dtmf(tmedia_session_t* self, uint8_t event)
+{
+ tdav_session_audio_t* audio;
+ tdav_session_av_t* base;
+ tmedia_codec_t* codec;
+ int ret, rate = 8000, ptime = 20;
+ uint16_t duration;
+ tdav_session_audio_dtmfe_t *dtmfe, *copy;
+ int format = 101;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ // Find the DTMF codec to use for the RTP payload
+ if ((codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->codecs, TMEDIA_CODEC_FORMAT_DTMF))){
+ rate = (int)codec->out.rate;
+ format = atoi(codec->neg_format ? codec->neg_format : codec->format);
+ TSK_OBJECT_SAFE_FREE(codec);
+ }
+
+ /* do we have an RTP manager? */
+ if (!base->rtp_manager){
+ TSK_DEBUG_ERROR("No RTP manager associated to this session");
+ return -2;
+ }
+
+ /* Create Events list */
+ if (!audio->dtmf_events){
+ audio->dtmf_events = tsk_list_create();
+ }
+
+ /* Create global reference to the timer manager */
+ if (!audio->timer.handle_mgr_global){
+ if (!(audio->timer.handle_mgr_global = tsk_timer_mgr_global_ref())){
+ TSK_DEBUG_ERROR("Failed to create Global Timer Manager");
+ return -3;
+ }
+ }
+
+ /* Start the timer manager */
+ if (!audio->timer.started){
+ if ((ret = tsk_timer_manager_start(audio->timer.handle_mgr_global))){
+ TSK_DEBUG_ERROR("Failed to start Global Timer Manager");
+ return ret;
+ }
+ audio->timer.started = tsk_true;
+ }
+
+
+ /* RFC 4733 - 5. Examples
+
+ +-------+-----------+------+--------+------+--------+--------+------+
+ | Time | Event | M | Time- | Seq | Event | Dura- | E |
+ | (ms) | | bit | stamp | No | Code | tion | bit |
+ +-------+-----------+------+--------+------+--------+--------+------+
+ | 0 | "9" | | | | | | |
+ | | starts | | | | | | |
+ | 50 | RTP | "1" | 0 | 1 | 9 | 400 | "0" |
+ | | packet 1 | | | | | | |
+ | | sent | | | | | | |
+ | 100 | RTP | "0" | 0 | 2 | 9 | 800 | "0" |
+ | | packet 2 | | | | | | |
+ | | sent | | | | | | |
+ | 150 | RTP | "0" | 0 | 3 | 9 | 1200 | "0" |
+ | | packet 3 | | | | | | |
+ | | sent | | | | | | |
+ | 200 | RTP | "0" | 0 | 4 | 9 | 1600 | "0" |
+ | | packet 4 | | | | | | |
+ | | sent | | | | | | |
+ | 200 | "9" ends | | | | | | |
+ | 250 | RTP | "0" | 0 | 5 | 9 | 1600 | "1" |
+ | | packet 4 | | | | | | |
+ | | first | | | | | | |
+ | | retrans- | | | | | | |
+ | | mission | | | | | | |
+ | 300 | RTP | "0" | 0 | 6 | 9 | 1600 | "1" |
+ | | packet 4 | | | | | | |
+ | | second | | | | | | |
+ | | retrans- | | | | | | |
+ | | mission | | | | | | |
+ =====================================================================
+ | 880 | First "1" | | | | | | |
+ | | starts | | | | | | |
+ | 930 | RTP | "1" | 7040 | 7 | 1 | 400 | "0" |
+ | | packet 5 | | | | | | |
+ | | sent | | | | | | |
+ */
+
+ // ref() (thread safety)
+ audio = tsk_object_ref(audio);
+
+ // flag that we're sending DTMF digits to avoid mixing them with audio (SRTP won't let this happen because of sequence numbers)
+ // flag will be turned OFF when the list is empty
+ audio->is_sending_dtmf_events = tsk_true;
+
+ duration = TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec);
+
+ // lock() list
+ tsk_list_lock(audio->dtmf_events);
+
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 1, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_true, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 0, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 2, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 1, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 3, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 2, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 3, _tdav_session_audio_dtmfe_timercb, copy);
+
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_true);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 4, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_true);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 5, _tdav_session_audio_dtmfe_timercb, copy);
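+
+ // Illustrative summary: the six packets scheduled above follow the RFC 4733
+ // example quoted earlier: one start packet with the Marker bit set, three
+ // updates with a growing duration (duration * 2..4), then two end-of-event
+ // retransmissions carrying the E bit and the final duration, spaced one
+ // ptime (20 ms) apart.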
+
+ // unlock() list
+ tsk_list_unlock(audio->dtmf_events);
+
+ // increment timestamp
+ base->rtp_manager->rtp.timestamp += duration;
+
+ // unref() (thread safety)
+ audio = tsk_object_unref(audio);
+
+ return 0;
+}
+
+static int tdav_session_audio_pause(tmedia_session_t* self)
+{
+ return tdav_session_av_pause(TDAV_SESSION_AV(self));
+}
+
+static const tsdp_header_M_t* tdav_session_audio_get_lo(tmedia_session_t* self)
+{
+ tsk_bool_t updated = tsk_false;
+ const tsdp_header_M_t* ret;
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+
+ if (!(ret = tdav_session_av_get_lo(base, &updated))){
+ TSK_DEBUG_ERROR("tdav_session_av_get_lo(audio) failed");
+ return tsk_null;
+ }
+
+ if (updated){
+ tsk_safeobj_lock(base);
+ TSK_OBJECT_SAFE_FREE(TDAV_SESSION_AUDIO(self)->encoder.codec);
+ tsk_safeobj_unlock(base);
+ }
+
+ return ret;
+}
+
+static int tdav_session_audio_set_ro(tmedia_session_t* self, const tsdp_header_M_t* m)
+{
+ int ret;
+ tsk_bool_t updated = tsk_false;
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+ if ((ret = tdav_session_av_set_ro(base, m, &updated))){
+ TSK_DEBUG_ERROR("tdav_session_av_set_ro(audio) failed");
+ return ret;
+ }
+
+ if (updated) {
+ tsk_safeobj_lock(base);
+ // reset audio jitter buffer (new Offer probably comes with new seq_nums or timestamps)
+ if (base->consumer) {
+ ret = tdav_consumer_audio_reset(TDAV_CONSUMER_AUDIO(base->consumer));
+ }
+ // destroy encoder to force requesting new one
+ TSK_OBJECT_SAFE_FREE(TDAV_SESSION_AUDIO(self)->encoder.codec);
+ tsk_safeobj_unlock(base);
+ }
+
+ return ret;
+}
+
+/* apply gain */
+static void _tdav_session_audio_apply_gain(void* buffer, int len, int bps, int gain)
+{
+ register int i;
+ int max_val;
+
+ max_val = (1 << (bps - 1 - gain)) - 1;
+
+ if (bps == 8) {
+ int8_t *buff = buffer;
+ for (i = 0; i < len; i++) {
+ if (buff[i] > -max_val && buff[i] < max_val)
+ buff[i] = buff[i] << gain;
+ }
+ }
+ else if (bps == 16) {
+ int16_t *buff = buffer;
+ for (i = 0; i < len / 2; i++) {
+ if (buff[i] > -max_val && buff[i] < max_val)
+ buff[i] = buff[i] << gain;
+ }
+ }
+}
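+
+/* Worked example (illustrative): with bps=16 and gain=2,
+ * max_val = (1 << (16 - 1 - 2)) - 1 = 8191, so only samples whose amplitude
+ * stays below 8191 are shifted left by 2 bits (multiplied by 4); louder
+ * samples are left untouched to avoid overflow.
+ */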
+
+
+/* Internal function used to create new DTMF event */
+static tdav_session_audio_dtmfe_t* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E)
+{
+ tdav_session_audio_dtmfe_t* dtmfe;
+ const tdav_session_av_t* base = (const tdav_session_av_t*)session;
+ static uint8_t volume = 10;
+ static uint32_t ssrc = 0x5234A8;
+
+ uint8_t pay[4] = { 0 };
+
+ /* RFC 4733 - 2.3. Payload Format
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | event |E|R| volume | duration |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
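+
+ /* Worked example (illustrative): for the digit "9" from the RFC table above
+ * (event=9, volume=10, E=0, duration=800), the pay[] bytes built below are
+ * {0x09, 0x0A, 0x03, 0x20}; setting the E bit on the final retransmissions
+ * turns the second byte into 0x8A. */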
+
+ if (!(dtmfe = tsk_object_new(tdav_session_audio_dtmfe_def_t))){
+ TSK_DEBUG_ERROR("Failed to create new DTMF event");
+ return tsk_null;
+ }
+ dtmfe->session = session;
+
+ if (!(dtmfe->packet = trtp_rtp_packet_create((session && base->rtp_manager) ? base->rtp_manager->rtp.ssrc.local : ssrc, seq, timestamp, format, M))){
+ TSK_DEBUG_ERROR("Failed to create DTMF RTP packet");
+ TSK_OBJECT_SAFE_FREE(dtmfe);
+ return tsk_null;
+ }
+
+ pay[0] = event;
+ pay[1] |= ((E << 7) | (volume & 0x3F));
+ pay[2] = (duration >> 8);
+ pay[3] = (duration & 0xFF);
+
+ /* set data */
+ if ((dtmfe->packet->payload.data = tsk_calloc(sizeof(pay), sizeof(uint8_t)))){
+ memcpy(dtmfe->packet->payload.data, pay, sizeof(pay));
+ dtmfe->packet->payload.size = sizeof(pay);
+ }
+
+ return dtmfe;
+}
+
+static int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id)
+{
+ tdav_session_audio_dtmfe_t* dtmfe = (tdav_session_audio_dtmfe_t*)arg;
+ tdav_session_audio_t *audio;
+
+ if (!dtmfe || !dtmfe->session || !dtmfe->session->dtmf_events){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* Send the data */
+ TSK_DEBUG_INFO("Sending DTMF event...");
+ trtp_manager_send_rtp_packet(TDAV_SESSION_AV(dtmfe->session)->rtp_manager, dtmfe->packet, tsk_false);
+
+
+ audio = tsk_object_ref(TSK_OBJECT(dtmfe->session));
+ tsk_list_lock(audio->dtmf_events);
+ /* Remove and delete the event from the queue */
+ tsk_list_remove_item_by_data(audio->dtmf_events, dtmfe);
+ /* Check if there are pending events */
+ audio->is_sending_dtmf_events = !TSK_LIST_IS_EMPTY(audio->dtmf_events);
+ tsk_list_unlock(audio->dtmf_events);
+ tsk_object_unref(audio);
+
+ return 0;
+}
+
+static tmedia_resampler_t* _tdav_session_audio_resampler_create(int32_t bytes_per_sample, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, void** resampler_buffer, tsk_size_t *resampler_buffer_size)
+{
+ uint32_t resampler_buff_size;
+ tmedia_resampler_t* resampler;
+ int ret;
+
+ if (out_channels > 2 || in_channels > 2) {
+ TSK_DEBUG_ERROR("Invalid parameter: out_channels=%u, in_channels=%u", out_channels, in_channels);
+ return tsk_null;
+ }
+
+ resampler_buff_size = (((out_freq * frame_duration) / 1000) * bytes_per_sample) << (out_channels == 2 ? 1 : 0);
+
+ if (!(resampler = tmedia_resampler_create())) {
+ TSK_DEBUG_ERROR("Failed to create audio resampler");
+ return tsk_null;
+ }
+ else {
+ if ((ret = tmedia_resampler_open(resampler, in_freq, out_freq, frame_duration, in_channels, out_channels, quality, 16))) {
+ TSK_DEBUG_ERROR("Failed to open audio resampler (%d, %d, %d, %d, %d,%d) with retcode=%d", in_freq, out_freq, frame_duration, in_channels, out_channels, quality, ret);
+ TSK_OBJECT_SAFE_FREE(resampler);
+ goto done;
+ }
+ }
+ // create temp resampler buffer
+ if ((*resampler_buffer = tsk_realloc(*resampler_buffer, resampler_buff_size))) {
+ *resampler_buffer_size = resampler_buff_size;
+ }
+ else {
+ *resampler_buffer_size = 0;
+ TSK_DEBUG_ERROR("Failed to allocate resampler buffer with size = %d", resampler_buff_size);
+ TSK_OBJECT_SAFE_FREE(resampler);
+ goto done;
+ }
+done:
+ return resampler;
+}
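+
+/* Worked example (illustrative, values assumed): for a 20 ms ptime,
+ * out_freq=48000, bytes_per_sample=2 and stereo output,
+ * resampler_buff_size = ((48000 * 20) / 1000) * 2 = 1920 bytes, doubled to
+ * 3840 bytes by the stereo shift, i.e. one resampled frame of interleaved
+ * 16-bit samples.
+ */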
+
+//=================================================================================================
+// Session Audio Plugin object definition
+//
+/* constructor */
+static tsk_object_t* tdav_session_audio_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_audio_t *audio = self;
+ if (audio){
+ int ret;
+ tdav_session_av_t *base = TDAV_SESSION_AV(self);
+
+ /* init() base */
+ if ((ret = tdav_session_av_init(base, tmedia_audio)) != 0){
+ TSK_DEBUG_ERROR("tdav_session_av_init(audio) failed");
+ return tsk_null;
+ }
+
+ /* init() self */
+ if (base->producer){
+ tmedia_producer_set_enc_callback(base->producer, tdav_session_audio_producer_enc_cb, audio);
+ }
+ if (base->consumer){
+ // It's important to create the denoiser and jitter buffer here as dynamic plugins (from shared libs) don't have access to the registry
+ if (!(audio->denoise = tmedia_denoise_create())){
+ TSK_DEBUG_WARN("No Audio denoiser found");
+ }
+ else{
+ // IMPORTANT: This means that the consumer must be a child of the "tdav_consumer_audio_t" object
+ tdav_consumer_audio_set_denoise(TDAV_CONSUMER_AUDIO(base->consumer), audio->denoise);
+ }
+
+ if (!(audio->jitterbuffer = tmedia_jitterbuffer_create(tmedia_audio))){
+ TSK_DEBUG_ERROR("Failed to create jitter buffer");
+ }
+ else{
+ ret = tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(audio->jitterbuffer));
+ tdav_consumer_audio_set_jitterbuffer(TDAV_CONSUMER_AUDIO(base->consumer), audio->jitterbuffer);
+ }
+ }
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_session_audio_dtor(tsk_object_t * self)
+{
+ tdav_session_audio_t *audio = self;
+ TSK_DEBUG_INFO("*** tdav_session_audio_t destroyed ***");
+ if (audio){
+ tdav_session_audio_stop((tmedia_session_t*)audio);
+ // Do it in this order (deinit self first)
+
+ /* Timer manager */
+ if (audio->timer.started){
+ if (audio->dtmf_events){
+ /* Cancel all events */
+ tsk_list_item_t* item;
+ tsk_list_foreach(item, audio->dtmf_events){
+ tsk_timer_mgr_global_cancel(((tdav_session_audio_dtmfe_t*)item->data)->timer_id);
+ }
+ }
+ }
+
+ tsk_timer_mgr_global_unref(&audio->timer.handle_mgr_global);
+
+ /* CleanUp the DTMF events */
+ TSK_OBJECT_SAFE_FREE(audio->dtmf_events);
+
+ TSK_OBJECT_SAFE_FREE(audio->denoise);
+ TSK_OBJECT_SAFE_FREE(audio->jitterbuffer);
+
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ TSK_FREE(audio->encoder.buffer);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+ TSK_FREE(audio->decoder.buffer);
+
+ // free resamplers
+ TSK_FREE(audio->encoder.resampler.buffer);
+ TSK_OBJECT_SAFE_FREE(audio->encoder.resampler.instance);
+ TSK_FREE(audio->decoder.resampler.buffer);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.resampler.instance);
+
+ /* deinit base */
+ tdav_session_av_deinit(TDAV_SESSION_AV(self));
+
+ TSK_DEBUG_INFO("*** Audio session destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_session_audio_def_s =
+{
+ sizeof(tdav_session_audio_t),
+ tdav_session_audio_ctor,
+ tdav_session_audio_dtor,
+ tmedia_session_cmp,
+};
+/* plugin definition*/
+static const tmedia_session_plugin_def_t tdav_session_audio_plugin_def_s =
+{
+ &tdav_session_audio_def_s,
+
+ tmedia_audio,
+ "audio",
+
+ tdav_session_audio_set,
+ tdav_session_audio_get,
+ tdav_session_audio_prepare,
+ tdav_session_audio_start,
+ tdav_session_audio_pause,
+ tdav_session_audio_stop,
+
+ /* Audio part */
+ {
+ tdav_session_audio_send_dtmf
+ },
+
+ tdav_session_audio_get_lo,
+ tdav_session_audio_set_ro
+};
+const tmedia_session_plugin_def_t *tdav_session_audio_plugin_def_t = &tdav_session_audio_plugin_def_s;
+static const tmedia_session_plugin_def_t tdav_session_bfcpaudio_plugin_def_s =
+{
+ &tdav_session_audio_def_s,
+
+ tmedia_bfcp_audio,
+ "audio",
+
+ tdav_session_audio_set,
+ tdav_session_audio_get,
+ tdav_session_audio_prepare,
+ tdav_session_audio_start,
+ tdav_session_audio_pause,
+ tdav_session_audio_stop,
+
+ /* Audio part */
+ {
+ tdav_session_audio_send_dtmf
+ },
+
+ tdav_session_audio_get_lo,
+ tdav_session_audio_set_ro
+};
+const tmedia_session_plugin_def_t *tdav_session_bfcpaudio_plugin_def_t = &tdav_session_bfcpaudio_plugin_def_s;
+
+
+
+//=================================================================================================
+// DTMF event object definition
+//
+static tsk_object_t* tdav_session_audio_dtmfe_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if (event){
+ event->timer_id = TSK_INVALID_TIMER_ID;
+ }
+ return self;
+}
+
+static tsk_object_t* tdav_session_audio_dtmfe_dtor(tsk_object_t * self)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if (event){
+ TSK_OBJECT_SAFE_FREE(event->packet);
+ }
+
+ return self;
+}
+
+static int tdav_session_audio_dtmfe_cmp(const tsk_object_t *_e1, const tsk_object_t *_e2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(_e1, _e2, &ret);
+ return ret;
+}
+
+static const tsk_object_def_t tdav_session_audio_dtmfe_def_s =
+{
+ sizeof(tdav_session_audio_dtmfe_t),
+ tdav_session_audio_dtmfe_ctor,
+ tdav_session_audio_dtmfe_dtor,
+ tdav_session_audio_dtmfe_cmp,
+};
+const tsk_object_def_t *tdav_session_audio_dtmfe_def_t = &tdav_session_audio_dtmfe_def_s;
diff --git a/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c b/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c
new file mode 100644
index 0000000..cccc235
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c
@@ -0,0 +1,281 @@
+/*
+* Copyright (C) 2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speakup_jitterbuffer.c
+ * @brief Speakup Audio jitterbuffer Plugin
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+
+ */
+#include "tinydav/audio/tdav_speakup_jitterbuffer.h"
+
+#if !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
+
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <string.h>
+
+#if TSK_UNDER_WINDOWS
+# include <Winsock2.h> // timeval
+#elif defined(__SYMBIAN32__)
+# include <_timeval.h>
+#else
+# include <sys/time.h>
+#endif
+
+#define TDAV_SPEAKUP_10MS 10
+#define TDAV_SPEAKUP_10MS_FRAME_SIZE(self) (((self)->rate * TDAV_SPEAKUP_10MS)/1000)
+#define TDAV_SPEAKUP_PTIME_FRAME_SIZE(self) (((self)->rate * (self)->framesize)/1000)
+
+static int tdav_speakup_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
+{
+ TSK_DEBUG_ERROR("Not implemented");
+ return -2;
+}
+
+static int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate, uint32_t channels)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(!jitterbuffer->jbuffer){
+ if(!(jitterbuffer->jbuffer = jb_new())){
+ TSK_DEBUG_ERROR("Failed to create new buffer");
+ return -1;
+ }
+ jitterbuffer->jcodec = JB_CODEC_OTHER;
+ }
+ jitterbuffer->ref_timestamp = 0;
+ jitterbuffer->frame_duration = frame_duration;
+ jitterbuffer->rate = rate;
+ jitterbuffer->channels = channels;
+ jitterbuffer->_10ms_size_bytes = 160 * (rate/8000);
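+ // Illustrative note: 160 bytes = 80 samples x 2 bytes, i.e. 10 ms of 16-bit
+ // mono audio at 8 kHz; the value scales linearly with the rate (e.g. 320
+ // bytes at 16 kHz).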
+
+ return 0;
+}
+
+static int tdav_speakup_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
+{
+ return 0;
+}
+
+static int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+ int i;
+ long now, ts;
+ void* _10ms_buf;
+ uint8_t* pdata;
+
+ if(!self || !data || !data_size || !jitterbuffer->jbuffer || !rtp_hdr){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* synchronize the reference timestamp */
+ if(!jitterbuffer->ref_timestamp){
+ uint64_t now = tsk_time_now();
+ struct timeval tv;
+ long ts = (rtp_hdr->timestamp/(jitterbuffer->rate/1000));
+ //=> Do not use (see clock_gettime() on linux): tsk_gettimeofday(&tv, tsk_null);
+ tv.tv_sec = (long)(now)/1000;
+ tv.tv_usec = (long)(now - (tv.tv_sec*1000))*1000;
+
+ tv.tv_sec -= (ts / jitterbuffer->rate);
+ tv.tv_usec -= (ts % jitterbuffer->rate) * 125;
+ if((tv.tv_usec -= (tv.tv_usec % (TDAV_SPEAKUP_10MS * 10000))) <0){
+ tv.tv_usec += 1000000;
+ tv.tv_sec -= 1;
+ }
+ jitterbuffer->ref_timestamp = tsk_time_get_ms(&tv);
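+ // Illustrative note: ref_timestamp approximates the wall-clock time of the
+ // stream's RTP timestamp origin (rounded down to a 100 ms boundary), so that
+ // the 'now' and 'ts' values later passed to jb_put()/jb_get() share the same
+ // time base.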
+
+ switch(rtp_hdr->payload_type){
+ case 8: /*TMEDIA_CODEC_FORMAT_G711a*/
+ case 0: /* TMEDIA_CODEC_FORMAT_G711u */
+ jitterbuffer->jcodec = JB_CODEC_G711x;
+ break;
+ case 18: /* TMEDIA_CODEC_FORMAT_G729 */
+ jitterbuffer->jcodec = JB_CODEC_G729A;
+ break;
+ case 3: /* TMEDIA_CODEC_FORMAT_GSM */
+ jitterbuffer->jcodec = JB_CODEC_GSM_EFR;
+ break;
+
+ default:
+ jitterbuffer->jcodec = JB_CODEC_OTHER;
+ break;
+ }
+ }
+
+ // split into several 10 ms frames
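+ // Illustrative note (assuming 16-bit PCM input): a 20 ms buffer of 8 kHz
+ // audio (320 bytes) is split into two 160-byte 10 ms chunks; the second
+ // chunk is stored with ts advanced by 10 ms.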
+ now = (long) (tsk_time_now()-jitterbuffer->ref_timestamp);
+ ts = (long)(rtp_hdr->timestamp/(jitterbuffer->rate/1000));
+ pdata = (uint8_t*)data;
+ for(i=0; i<(int)(data_size/jitterbuffer->_10ms_size_bytes);i++){
+ if((_10ms_buf = tsk_calloc(jitterbuffer->_10ms_size_bytes, 1))){
+ memcpy(_10ms_buf, &pdata[i*jitterbuffer->_10ms_size_bytes], jitterbuffer->_10ms_size_bytes);
+ jb_put(jitterbuffer->jbuffer, _10ms_buf, JB_TYPE_VOICE, TDAV_SPEAKUP_10MS, ts, now, jitterbuffer->jcodec);
+ _10ms_buf = tsk_null;
+ }
+ ts += TDAV_SPEAKUP_10MS;
+ }
+
+ return 0;
+}
+
+static tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ int jret;
+
+ int i, _10ms_count;
+ long now;
+ short* _10ms_buf = tsk_null;
+ uint8_t* pout_data = (uint8_t*)out_data;
+
+ if(!out_data || (out_size % jitterbuffer->_10ms_size_bytes)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ _10ms_count = (out_size/jitterbuffer->_10ms_size_bytes);
+ now = (long) (tsk_time_now() - jitterbuffer->ref_timestamp);
+ for(i=0; i<_10ms_count; i++){
+
+ jret = jb_get(jitterbuffer->jbuffer, (void**)&_10ms_buf, now, TDAV_SPEAKUP_10MS);
+ switch(jret){
+ case JB_INTERP:
+ TSK_DEBUG_INFO("JB_INTERP");
+ jb_reset_all(jitterbuffer->jbuffer);
+ memset(&pout_data[i*jitterbuffer->_10ms_size_bytes], 0, (_10ms_count*jitterbuffer->_10ms_size_bytes)-(i*jitterbuffer->_10ms_size_bytes));
+ i = _10ms_count; // for exit
+ break;
+ case JB_OK:
+ case JB_EMPTY:
+ case JB_NOFRAME:
+ case JB_NOJB:
+ {
+ if(_10ms_buf && (jret == JB_OK)){
+ /* copy data */
+ memcpy(&pout_data[i*jitterbuffer->_10ms_size_bytes], _10ms_buf, jitterbuffer->_10ms_size_bytes);
+ }
+ else{
+ /* copy silence */
+ memset(&pout_data[i*jitterbuffer->_10ms_size_bytes], 0, jitterbuffer->_10ms_size_bytes);
+ }
+ }
+
+ default:
+ break;
+ }
+ TSK_FREE(_10ms_buf);
+ }
+
+ return (_10ms_count * jitterbuffer->_10ms_size_bytes);
+}
+
+static int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(jitterbuffer->jbuffer){
+ jb_reset_all(jitterbuffer->jbuffer);
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+}
+
+static int tdav_speakup_jitterbuffer_close(tmedia_jitterbuffer_t* self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(jitterbuffer->jbuffer){
+ jb_destroy(jitterbuffer->jbuffer);
+ jitterbuffer->jbuffer = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speakup jitterbuffer Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speakup_jitterbuffer_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = self;
+ TSK_DEBUG_INFO("Create speekup jitter buffer");
+ if(jitterbuffer){
+ /* init base */
+ tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speakup_jitterbuffer_dtor(tsk_object_t * self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = self;
+ if(jitterbuffer){
+ /* deinit base */
+ tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* deinit self */
+ if(jitterbuffer->jbuffer){
+ jb_destroy(jitterbuffer->jbuffer);
+ jitterbuffer->jbuffer = tsk_null;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speakup_jitterbuffer_def_s =
+{
+ sizeof(tdav_speakup_jitterbuffer_t),
+ tdav_speakup_jitterbuffer_ctor,
+ tdav_speakup_jitterbuffer_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_jitterbuffer_plugin_def_t tdav_speakup_jitterbuffer_plugin_def_s =
+{
+ &tdav_speakup_jitterbuffer_def_s,
+ tmedia_audio,
+ "Audio/video JitterBuffer based on Speakup",
+
+ tdav_speakup_jitterbuffer_set,
+ tdav_speakup_jitterbuffer_open,
+ tdav_speakup_jitterbuffer_tick,
+ tdav_speakup_jitterbuffer_put,
+ tdav_speakup_jitterbuffer_get,
+ tdav_speakup_jitterbuffer_reset,
+ tdav_speakup_jitterbuffer_close,
+};
+const tmedia_jitterbuffer_plugin_def_t *tdav_speakup_jitterbuffer_plugin_def_t = &tdav_speakup_jitterbuffer_plugin_def_s;
+
+#endif /* !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB) */
diff --git a/tinyDAV/src/audio/tdav_speex_denoise.c b/tinyDAV/src/audio/tdav_speex_denoise.c
new file mode 100644
index 0000000..4f344dd
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_denoise.c
@@ -0,0 +1,312 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speex_denoise.c
+* @brief Speex Denoiser (Noise suppression, AGC, AEC) Plugin
+*/
+#include "tinydav/audio/tdav_speex_denoise.h"
+
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_DENOISE) || HAVE_SPEEX_DENOISE)
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include <string.h>
+
+#include <speex/speex_preprocess.h>
+#include <speex/speex_echo.h>
+
+/** Speex denoiser*/
+typedef struct tdav_speex_denoise_s
+{
+ TMEDIA_DECLARE_DENOISE;
+
+ SpeexPreprocessState *preprocess_state_record;
+ SpeexPreprocessState *preprocess_state_playback;
+ SpeexEchoState *echo_state;
+
+ spx_int16_t* echo_output_frame;
+ uint32_t record_frame_size_samples, record_frame_size_bytes;
+ uint32_t playback_frame_size_samples, playback_frame_size_bytes;
+}
+tdav_speex_denoise_t;
+
+static int tdav_speex_denoise_set(tmedia_denoise_t* _self, const tmedia_param_t* param)
+{
+ tdav_speex_denoise_t *self = (tdav_speex_denoise_t *)_self;
+ if(!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "echo-tail")){
+ int32_t echo_tail = *((int32_t*)param->value);
+ TSK_DEBUG_INFO("speex_set_echo_tail(%d) ignore", echo_tail); // because Speex AEC just do not work (use WebRTC)
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int tdav_speex_denoise_open(tmedia_denoise_t* self, uint32_t record_frame_size_samples, uint32_t record_sampling_rate, uint32_t record_channels, uint32_t playback_frame_size_samples, uint32_t playback_sampling_rate, uint32_t playback_channels)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ float f;
+ int i;
+
+ if (!denoiser->echo_state && TMEDIA_DENOISE(denoiser)->echo_supp_enabled) {
+ TSK_DEBUG_INFO("Init Aec frame_size[%u] filter_length[%u] SampleRate[%u]",
+ (uint32_t)(record_frame_size_samples),TMEDIA_DENOISE(denoiser)->echo_tail*record_frame_size_samples, record_sampling_rate);
+ if((denoiser->echo_state = speex_echo_state_init(record_frame_size_samples, TMEDIA_DENOISE(denoiser)->echo_tail))){
+ speex_echo_ctl(denoiser->echo_state, SPEEX_ECHO_SET_SAMPLING_RATE, &record_sampling_rate);
+ }
+ }
+
+ if (!denoiser->preprocess_state_record && !denoiser->preprocess_state_playback) {
+ denoiser->record_frame_size_samples = record_frame_size_samples;
+ denoiser->record_frame_size_bytes = (record_frame_size_samples << 1);
+ denoiser->playback_frame_size_samples = playback_frame_size_samples;
+ denoiser->playback_frame_size_bytes = (playback_frame_size_samples << 1);
+
+ if((denoiser->preprocess_state_record = speex_preprocess_state_init(record_frame_size_samples, record_sampling_rate))
+ && (denoiser->preprocess_state_playback = speex_preprocess_state_init(playback_frame_size_samples, playback_sampling_rate))
+ ){
+
+ // Echo suppression
+ if(denoiser->echo_state){
+ int echo_supp , echo_supp_active = 0;
+
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_STATE, denoiser->echo_state);
+
+ TSK_FREE(denoiser->echo_output_frame);
+ denoiser->echo_output_frame = tsk_calloc(denoiser->record_frame_size_samples, sizeof(spx_int16_t));
+
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ TSK_DEBUG_INFO("AEC echo_supp level [%d] echo_supp_active level[%d] ", echo_supp , echo_supp_active);
+ echo_supp = -60 ;
+ echo_supp_active = -60 ;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ // TRACES
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ TSK_DEBUG_INFO("New aec echo_supp level [%d] echo_supp_active level[%d] ", echo_supp , echo_supp_active);
+ }
+
+ // Noise suppression
+ if(TMEDIA_DENOISE(denoiser)->noise_supp_enabled){
+ TSK_DEBUG_INFO("SpeexDSP: Noise supp enabled");
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ i = TMEDIA_DENOISE(denoiser)->noise_supp_level;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ }
+ else{
+ i = 0;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ }
+
+ // Automatic gain control
+ if(TMEDIA_DENOISE(denoiser)->agc_enabled){
+ float agc_level = TMEDIA_DENOISE(denoiser)->agc_level;
+ TSK_DEBUG_INFO("SpeexDSP: AGC enabled");
+
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC_LEVEL, &agc_level);
+ }
+ else{
+ i = 0, f = 8000.0f;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC_LEVEL, &f);
+ }
+
+ // Voice Activity detection
+ i = TMEDIA_DENOISE(denoiser)->vad_enabled ? 1 : 0;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_VAD, &i);
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to create Speex preprocessor state");
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+static int tdav_speex_denoise_echo_playback(tmedia_denoise_t* self, const void* echo_frame, uint32_t echo_frame_size_bytes)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->record_frame_size_bytes != echo_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->record_frame_size_bytes, echo_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->echo_state){
+ speex_echo_playback(denoiser->echo_state, echo_frame);
+ }
+ return 0;
+}
+
+
+
+static int tdav_speex_denoise_process_record(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes, tsk_bool_t* silence_or_noise)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ int vad;
+
+ if(denoiser->record_frame_size_bytes != audio_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->record_frame_size_bytes, audio_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->preprocess_state_record){
+ if(denoiser->echo_state && denoiser->echo_output_frame){
+ speex_echo_capture(denoiser->echo_state, audio_frame, denoiser->echo_output_frame);
+ memcpy(audio_frame, denoiser->echo_output_frame, denoiser->record_frame_size_bytes);
+ }
+ vad = speex_preprocess_run(denoiser->preprocess_state_record, audio_frame);
+ if(!vad && TMEDIA_DENOISE(denoiser)->vad_enabled){
+ *silence_or_noise = tsk_true;
+ }
+ }
+
+ return 0;
+}
+
+static int tdav_speex_denoise_process_playback(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->playback_frame_size_bytes != audio_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->playback_frame_size_bytes, audio_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->preprocess_state_playback){
+ speex_preprocess_run(denoiser->preprocess_state_playback, audio_frame);
+ }
+ return 0;
+}
+
+static int tdav_speex_denoise_close(tmedia_denoise_t* self)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->preprocess_state_record){
+ speex_preprocess_state_destroy(denoiser->preprocess_state_record);
+ denoiser->preprocess_state_record = tsk_null;
+ }
+ if(denoiser->preprocess_state_playback){
+ speex_preprocess_state_destroy(denoiser->preprocess_state_playback);
+ denoiser->preprocess_state_playback = tsk_null;
+ }
+ if(denoiser->echo_state){
+ speex_echo_state_destroy(denoiser->echo_state);
+ denoiser->echo_state = tsk_null;
+ }
+ TSK_FREE(denoiser->echo_output_frame);
+
+ return 0;
+}
+
+
+
+//
+// Speex denoiser Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speex_denoise_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* init base */
+ tmedia_denoise_init(TMEDIA_DENOISE(denoise));
+ /* init self */
+
+ TSK_DEBUG_INFO("Create SpeexDSP denoiser");
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speex_denoise_dtor(tsk_object_t * self)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* deinit base */
+ tmedia_denoise_deinit(TMEDIA_DENOISE(denoise));
+ /* deinit self */
+ if(denoise->preprocess_state_record){
+ speex_preprocess_state_destroy(denoise->preprocess_state_record);
+ denoise->preprocess_state_record = tsk_null;
+ }
+ if(denoise->preprocess_state_playback){
+ speex_preprocess_state_destroy(denoise->preprocess_state_playback);
+ denoise->preprocess_state_playback = tsk_null;
+ }
+ if(denoise->echo_state){
+ speex_echo_state_destroy(denoise->echo_state);
+ denoise->echo_state = tsk_null;
+ }
+ TSK_FREE(denoise->echo_output_frame);
+
+ TSK_DEBUG_INFO("*** SpeexDSP denoiser destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speex_denoise_def_s =
+{
+ sizeof(tdav_speex_denoise_t),
+ tdav_speex_denoise_ctor,
+ tdav_speex_denoise_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_denoise_plugin_def_t tdav_speex_denoise_plugin_def_s =
+{
+ &tdav_speex_denoise_def_s,
+
+ "Audio Denoiser based on SpeexDSP",
+
+ tdav_speex_denoise_set,
+ tdav_speex_denoise_open,
+ tdav_speex_denoise_echo_playback,
+ tdav_speex_denoise_process_record,
+ tdav_speex_denoise_process_playback,
+ tdav_speex_denoise_close,
+};
+const tmedia_denoise_plugin_def_t *tdav_speex_denoise_plugin_def_t = &tdav_speex_denoise_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_speex_jitterbuffer.c b/tinyDAV/src/audio/tdav_speex_jitterbuffer.c
new file mode 100644
index 0000000..d4639b9
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_jitterbuffer.c
@@ -0,0 +1,319 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speex_jitterbuffer.c
+ * @brief Speex Audio jitterbuffer Plugin
+ */
+#include "tinydav/audio/tdav_speex_jitterbuffer.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#if HAVE_SPEEX_DSP && HAVE_SPEEX_JB
+
+// rfc3551 - 4.5 Audio Encodings: all frame lengths are multiples of 10ms
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <speex/speex_jitter.h>
+
+/** Speex JitterBuffer*/
+typedef struct tdav_speex_jitterBuffer_s
+{
+ TMEDIA_DECLARE_JITTER_BUFFER;
+
+ JitterBuffer* state;
+ uint32_t rate;
+ uint32_t frame_duration;
+ uint32_t channels;
+ uint32_t x_data_size; // expected data size
+	uint16_t fake_seqnum; // if ptime mismatches, the reassembled pkts will carry fake (invalid) seqnums
+ struct {
+ uint8_t* ptr;
+ tsk_size_t size;
+ tsk_size_t index;
+ } buff;
+
+ uint64_t num_pkt_in; // Number of incoming pkts since the last reset
+ uint64_t num_pkt_miss; // Number of times we got consecutive "JITTER_BUFFER_MISSING" results
+ uint64_t num_pkt_miss_max; // Max value for "num_pkt_miss" before reset()ing the jitter buffer
+}
+tdav_speex_jitterbuffer_t;
+
+static int tdav_speex_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
+{
+ TSK_DEBUG_ERROR("Not implemented");
+ return -2;
+}
+
+static int tdav_speex_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate, uint32_t channels)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ spx_int32_t tmp;
+
+ TSK_DEBUG_INFO("Open speex jb (ptime=%u, rate=%u)", frame_duration, rate);
+
+ if (!(jitterbuffer->state = jitter_buffer_init((int)frame_duration))) {
+ TSK_DEBUG_ERROR("jitter_buffer_init() failed");
+ return -2;
+ }
+ jitterbuffer->rate = rate;
+ jitterbuffer->frame_duration = frame_duration;
+ jitterbuffer->channels = channels;
+ jitterbuffer->x_data_size = ((frame_duration * jitterbuffer->rate) / 500) << (channels == 2 ? 1 : 0);
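+	// expected size in bytes: e.g. ptime=20ms @8000Hz mono -> (20*8000)/500 = 320 bytes (160 16-bit samples); doubled for stereo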
+
+ jitterbuffer->num_pkt_in = 0;
+ jitterbuffer->num_pkt_miss = 0;
+ jitterbuffer->num_pkt_miss_max = (1000 / frame_duration) * 2; // 2 seconds missing --> "Houston, we have a problem"
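+	// e.g. ptime=20ms -> (1000/20)*2 = 100 consecutive misses (~2 seconds) before forcing a reset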
+
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_GET_MARGIN, &tmp);
+ TSK_DEBUG_INFO("Default Jitter buffer margin=%d", tmp);
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_GET_MAX_LATE_RATE, &tmp);
+ TSK_DEBUG_INFO("Default Jitter max late rate=%d", tmp);
+
+ if ((tmp = tmedia_defaults_get_jb_margin()) >= 0) {
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_SET_MARGIN, &tmp);
+ TSK_DEBUG_INFO("New Jitter buffer margin=%d", tmp);
+ }
+ if ((tmp = tmedia_defaults_get_jb_max_late_rate()) >= 0) {
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_SET_MAX_LATE_RATE, &tmp);
+ TSK_DEBUG_INFO("New Jitter buffer max late rate=%d", tmp);
+ }
+
+ return 0;
+}
+
+static int tdav_speex_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ if (!jitterbuffer->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return -1;
+ }
+ jitter_buffer_tick(jitterbuffer->state);
+ return 0;
+}
+
+static int tdav_speex_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ const trtp_rtp_header_t* rtp_hdr;
+ JitterBufferPacket jb_packet;
+ static uint16_t seq_num = 0;
+
+ if (!data || !data_size || !proto_hdr) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!jb->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return -2;
+ }
+
+ rtp_hdr = TRTP_RTP_HEADER(proto_hdr);
+
+ jb_packet.user_data = 0;
+ jb_packet.span = jb->frame_duration;
+ jb_packet.len = jb->x_data_size;
+
+ if (jb->x_data_size == data_size) { /* ptime match */
+ jb_packet.data = data;
+ jb_packet.sequence = rtp_hdr->seq_num;
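+		// Speex's jitter buffer only requires timestamp/span to share a consistent unit: seq_num * ptime gives a ms timeline advancing one frame per packet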
+ jb_packet.timestamp = (rtp_hdr->seq_num * jb_packet.span);
+ jitter_buffer_put(jb->state, &jb_packet);
+ }
+ else { /* ptime mismatch */
+ tsk_size_t i;
+ jb_packet.sequence = 0; // Ignore
+ if ((jb->buff.index + data_size) > jb->buff.size) {
+			if (!(jb->buff.ptr = tsk_realloc(jb->buff.ptr, (jb->buff.index + data_size)))) {
+				TSK_DEBUG_ERROR("Failed to realloc() the reassembly buffer (%u bytes)", (unsigned)(jb->buff.index + data_size));
+				jb->buff.size = 0;
+				jb->buff.index = 0;
+				return 0; // drop this chunk
+			}
+ jb->buff.size = (jb->buff.index + data_size);
+ }
+
+ memcpy(&jb->buff.ptr[jb->buff.index], data, data_size);
+ jb->buff.index += data_size;
+
+ if (jb->buff.index >= jb->x_data_size) {
+ tsk_size_t copied = 0;
+ for (i = 0; (i + jb->x_data_size) <= jb->buff.index; i += jb->x_data_size) {
+ jb_packet.data = (char*)&jb->buff.ptr[i];
+ jb_packet.timestamp = (++jb->fake_seqnum * jb_packet.span);// reassembled pkt will have fake seqnum
+ jitter_buffer_put(jb->state, &jb_packet);
+ copied += jb->x_data_size;
+ }
+ if (copied == jb->buff.index) {
+ // all copied
+ jb->buff.index = 0;
+ }
+ else {
+ memmove(&jb->buff.ptr[0], &jb->buff.ptr[copied], (jb->buff.index - copied));
+ jb->buff.index -= copied;
+ }
+ }
+ }
+ ++jb->num_pkt_in;
+
+ return 0;
+}
+
+static tsk_size_t tdav_speex_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ JitterBufferPacket jb_packet;
+ int ret, miss = 0;
+ tsk_size_t ret_size = 0;
+
+ if (!out_data || !out_size) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ if (!jb->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return 0;
+ }
+ if (jb->x_data_size != out_size) { // consumer must request PTIME data
+		TSK_DEBUG_WARN("%u not expected as frame size (ptime mismatch: %u<>%u)", (unsigned)out_size, jb->frame_duration, (unsigned)((out_size * 500) / jb->rate));
+ return 0;
+ }
+
+ jb_packet.data = out_data;
+ jb_packet.len = (spx_uint32_t)out_size;
+
+ if ((ret = jitter_buffer_get(jb->state, &jb_packet, jb->frame_duration/*(out_size * 500)/jb->rate*/, tsk_null)) != JITTER_BUFFER_OK) {
+ ++jb->num_pkt_miss;
+ switch (ret) {
+ case JITTER_BUFFER_MISSING:
+ /*TSK_DEBUG_INFO("JITTER_BUFFER_MISSING - %d", ret);*/
+ if (jb->num_pkt_miss > jb->num_pkt_miss_max /*too much missing pkts*/ && jb->num_pkt_in > jb->num_pkt_miss_max/*we're really receiving pkts*/) {
+ jb->num_pkt_miss = 0;
+ self->plugin->reset(self);
+				TSK_DEBUG_WARN("Too many missing audio pkts");
+ }
+ break;
+ case JITTER_BUFFER_INSERTION:
+ /*TSK_DEBUG_INFO("JITTER_BUFFER_INSERTION - %d", ret);*/
+ break;
+ default:
+ TSK_DEBUG_INFO("jitter_buffer_get() failed - %d", ret);
+ break;
+ }
+ // jitter_buffer_update_delay(jb->state, &jb_packet, NULL);
+ //return 0;
+ }
+ else {
+ jb->num_pkt_miss = 0; // reset
+ ret_size = jb_packet.len;
+ }
+ //jitter_buffer_update_delay(jb->state, &jb_packet, NULL);
+
+ return ret_size;
+}
+
+static int tdav_speex_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ if (jb->state) {
+ jitter_buffer_reset(jb->state);
+ }
+ jb->num_pkt_in = 0;
+ jb->num_pkt_miss = 0;
+ return 0;
+}
+
+static int tdav_speex_jitterbuffer_close(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ if (jitterbuffer->state) {
+ jitter_buffer_destroy(jitterbuffer->state);
+ jitterbuffer->state = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speex jitterbuffer Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speex_jitterbuffer_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = self;
+ TSK_DEBUG_INFO("Create SpeexDSP jitter buffer");
+ if (jitterbuffer){
+ /* init base */
+ tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speex_jitterbuffer_dtor(tsk_object_t * self)
+{
+ tdav_speex_jitterbuffer_t *jb = self;
+ if (jb){
+ /* deinit base */
+ tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jb));
+ /* deinit self */
+ if (jb->state){
+ jitter_buffer_destroy(jb->state);
+ jb->state = tsk_null;
+ }
+ TSK_FREE(jb->buff.ptr);
+
+ TSK_DEBUG_INFO("*** SpeexDSP jb destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speex_jitterbuffer_def_s =
+{
+ sizeof(tdav_speex_jitterbuffer_t),
+ tdav_speex_jitterbuffer_ctor,
+ tdav_speex_jitterbuffer_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_jitterbuffer_plugin_def_t tdav_speex_jitterbuffer_plugin_def_s =
+{
+ &tdav_speex_jitterbuffer_def_s,
+ tmedia_audio,
+ "Audio JitterBuffer based on Speex",
+
+ tdav_speex_jitterbuffer_set,
+ tdav_speex_jitterbuffer_open,
+ tdav_speex_jitterbuffer_tick,
+ tdav_speex_jitterbuffer_put,
+ tdav_speex_jitterbuffer_get,
+ tdav_speex_jitterbuffer_reset,
+ tdav_speex_jitterbuffer_close,
+};
+const tmedia_jitterbuffer_plugin_def_t *tdav_speex_jitterbuffer_plugin_def_t = &tdav_speex_jitterbuffer_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */
diff --git a/tinyDAV/src/audio/tdav_speex_resampler.c b/tinyDAV/src/audio/tdav_speex_resampler.c
new file mode 100644
index 0000000..f71ddd2
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_resampler.c
@@ -0,0 +1,254 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+#include "tinydav/audio/tdav_speex_resampler.h"
+
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_RESAMPLER) || HAVE_SPEEX_RESAMPLER)
+
+#include <speex/speex_resampler.h>
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_SPEEX_RESAMPLER_MAX_QUALITY 10
+
+/** Speex resampler*/
+typedef struct tdav_speex_resampler_s
+{
+ TMEDIA_DECLARE_RESAMPLER;
+
+ tsk_size_t in_size;
+ tsk_size_t out_size;
+ uint32_t in_channels;
+ uint32_t out_channels;
+ uint32_t bytes_per_sample;
+
+ struct{
+ void* ptr;
+ tsk_size_t size_in_samples;
+ } tmp_buffer;
+
+ SpeexResamplerState *state;
+}
+tdav_speex_resampler_t;
+
+static int tdav_speex_resampler_open(tmedia_resampler_t* self, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, uint32_t bits_per_sample)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ int ret = 0;
+ uint32_t bytes_per_sample = (bits_per_sample >> 3);
+
+ if (in_channels != 1 && in_channels != 2) {
+ TSK_DEBUG_ERROR("%d not valid as input channel", in_channels);
+ return -1;
+ }
+ if (out_channels != 1 && out_channels != 2) {
+ TSK_DEBUG_ERROR("%d not valid as output channel", out_channels);
+ return -1;
+ }
+ if (bytes_per_sample != sizeof(spx_int16_t) && bytes_per_sample != sizeof(float)) {
+ TSK_DEBUG_ERROR("%d not valid as bits_per_sample", bits_per_sample);
+ return -1;
+ }
+
+ if (!(resampler->state = speex_resampler_init(in_channels, in_freq, out_freq, TSK_CLAMP(0, quality, TDAV_SPEEX_RESAMPLER_MAX_QUALITY), &ret))) {
+ TSK_DEBUG_ERROR("speex_resampler_init() returned %d", ret);
+ return -2;
+ }
+
+ resampler->bytes_per_sample = bytes_per_sample;
+ resampler->in_size = ((in_freq * frame_duration) / 1000) << (in_channels == 2 ? 1 : 0);
+ resampler->out_size = ((out_freq * frame_duration) / 1000) << (out_channels == 2 ? 1 : 0);
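+	// sizes are in samples (all channels included): e.g. 20ms -> 160 samples @8kHz mono in, 1920 samples @48kHz stereo out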
+ resampler->in_channels = in_channels;
+ resampler->out_channels = out_channels;
+
+ if (in_channels != out_channels) {
+ resampler->tmp_buffer.size_in_samples = ((TSK_MAX(in_freq, out_freq) * frame_duration) / 1000) << (TSK_MAX(in_channels, out_channels) == 2 ? 1 : 0);
+ if (!(resampler->tmp_buffer.ptr = tsk_realloc(resampler->tmp_buffer.ptr, resampler->tmp_buffer.size_in_samples * resampler->bytes_per_sample))) {
+ resampler->tmp_buffer.size_in_samples = 0;
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+
+static tsk_size_t tdav_speex_resampler_process(tmedia_resampler_t* self, const void* in_data, tsk_size_t in_size_in_sample, void* out_data, tsk_size_t out_size_in_sample)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ int err = RESAMPLER_ERR_SUCCESS;
+ spx_uint32_t _out_size_in_sample = (spx_uint32_t)out_size_in_sample;
+ if (!resampler->state || !out_data) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if (in_size_in_sample != resampler->in_size) {
+ TSK_DEBUG_ERROR("Input data has wrong size");
+ return 0;
+ }
+
+ if (out_size_in_sample < resampler->out_size) {
+ TSK_DEBUG_ERROR("Output data is too short");
+ return 0;
+ }
+
+ if (resampler->in_channels == resampler->out_channels) {
+ if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+ err = speex_resampler_process_int(resampler->state, 0,
+ (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (spx_int16_t *)out_data, &_out_size_in_sample);
+ }
+ else {
+ err = speex_resampler_process_float(resampler->state, 0,
+ (const float *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (float *)out_data, &_out_size_in_sample);
+ }
+ }
+ else {
+ spx_uint32_t i, j;
+ // in_channels = 1, out_channels = 2
+ if (resampler->in_channels == 1) {
+ if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+ err = speex_resampler_process_int(resampler->state, 0, (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample, resampler->tmp_buffer.ptr, &_out_size_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ spx_int16_t* pout_data = (spx_int16_t*)(out_data);
+ for (i = 0, j = 0; i < _out_size_in_sample; ++i, j += 2) {
+ pout_data[j] = pout_data[j + 1] = *(((const spx_int16_t*)resampler->tmp_buffer.ptr) + i);
+ }
+ }
+ }
+ else {
+ err = speex_resampler_process_float(resampler->state, 0, (const float *)in_data, (spx_uint32_t *)&in_size_in_sample, resampler->tmp_buffer.ptr, &_out_size_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ float* pout_data = (float*)(out_data);
+ for (i = 0, j = 0; i < _out_size_in_sample; ++i, j += 2) {
+ pout_data[j] = pout_data[j + 1] = *(((const float*)resampler->tmp_buffer.ptr) + i);
+ }
+ }
+ }
+
+ }
+ else {
+ // in_channels = 2, out_channels = 1
+ spx_uint32_t _out_size2_in_sample = (_out_size_in_sample << 1);
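+			// resample into a stereo temp buffer (2x the mono output size), then keep every other sample (left channel only, no averaging)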
+ if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+ err = speex_resampler_process_int(resampler->state, 0,
+ (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (spx_int16_t *)resampler->tmp_buffer.ptr, &_out_size2_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ spx_int16_t* pout_data = (spx_int16_t*)(out_data);
+ _out_size_in_sample = (spx_uint32_t)resampler->out_size;
+ for (i = 0, j = 0; j < _out_size2_in_sample; ++i, j += 2) {
+ pout_data[i] = *(((const spx_int16_t*)resampler->tmp_buffer.ptr) + j);
+ }
+ }
+ }
+ else {
+ err = speex_resampler_process_float(resampler->state, 0,
+ (const float *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (float *)resampler->tmp_buffer.ptr, &_out_size2_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ float* pout_data = (float*)(out_data);
+ for (i = 0, j = 0; j < _out_size2_in_sample; ++i, j += 2) {
+ pout_data[i] = *(((const float*)resampler->tmp_buffer.ptr) + j);
+ }
+ }
+ }
+ }
+ }
+
+ if (err != RESAMPLER_ERR_SUCCESS) {
+		TSK_DEBUG_ERROR("speex_resampler_process_int/float() failed with error code %d", err);
+ return 0;
+ }
+ return (tsk_size_t)_out_size_in_sample;
+}
+
+static int tdav_speex_resampler_close(tmedia_resampler_t* self)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+
+ if (resampler->state) {
+ speex_resampler_destroy(resampler->state);
+ resampler->state = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speex resampler Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speex_resampler_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ if (resampler){
+ /* init base */
+ tmedia_resampler_init(TMEDIA_RESAMPLER(resampler));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speex_resampler_dtor(tsk_object_t * self)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ if (resampler){
+ /* deinit base */
+ tmedia_resampler_deinit(TMEDIA_RESAMPLER(resampler));
+ /* deinit self */
+ if (resampler->state) {
+ speex_resampler_destroy(resampler->state);
+ resampler->state = tsk_null;
+ }
+ TSK_FREE(resampler->tmp_buffer.ptr);
+
+ TSK_DEBUG_INFO("*** SpeexDSP resampler (plugin) destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speex_resampler_def_s =
+{
+ sizeof(tdav_speex_resampler_t),
+ tdav_speex_resampler_ctor,
+ tdav_speex_resampler_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_resampler_plugin_def_t tdav_speex_resampler_plugin_def_s =
+{
+ &tdav_speex_resampler_def_s,
+
+ "Audio Resampler based on Speex",
+
+ tdav_speex_resampler_open,
+ tdav_speex_resampler_process,
+ tdav_speex_resampler_close,
+};
+const tmedia_resampler_plugin_def_t *tdav_speex_resampler_plugin_def_t = &tdav_speex_resampler_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */
diff --git a/tinyDAV/src/audio/tdav_webrtc_denoise.c b/tinyDAV/src/audio/tdav_webrtc_denoise.c
new file mode 100644
index 0000000..598470a
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_webrtc_denoise.c
@@ -0,0 +1,627 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP
+* Copyright (C) 2011-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_webrtc_denoise.c
+* @brief Google WebRTC Denoiser (Noise suppression, AGC, AEC) Plugin
+*/
+#include "tinydav/audio/tdav_webrtc_denoise.h"
+
+#if HAVE_WEBRTC && (!defined(HAVE_WEBRTC_DENOISE) || HAVE_WEBRTC_DENOISE)
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "tinymedia/tmedia_defaults.h"
+#include "tinymedia/tmedia_resampler.h"
+
+#include <string.h>
+
+#if !defined(WEBRTC_AEC_AGGRESSIVE)
+# define WEBRTC_AEC_AGGRESSIVE 0
+#endif
+#if !defined(WEBRTC_MAX_ECHO_TAIL)
+# define WEBRTC_MAX_ECHO_TAIL 500
+#endif
+#if !defined(WEBRTC_MIN_ECHO_TAIL)
+# define WEBRTC_MIN_ECHO_TAIL 20 // 0 will cause random crashes
+#endif
+
+#if TDAV_UNDER_MOBILE || 1 // FIXME
+typedef int16_t sample_t;
+#else
+typedef float sample_t;
+#endif
+
+typedef struct tdav_webrtc_pin_xs
+{
+ uint32_t n_duration;
+ uint32_t n_rate;
+ uint32_t n_channels;
+ uint32_t n_sample_size;
+}
+tdav_webrtc_pin_xt;
+
+typedef struct tdav_webrtc_resampler_s
+{
+ TSK_DECLARE_OBJECT;
+
+ tmedia_resampler_t* p_resampler;
+ void* p_bufftmp_ptr; // used to convert float <->int16
+ tsk_size_t n_bufftmp_size_in_bytes;
+
+ struct {
+ tdav_webrtc_pin_xt x_pin;
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ } in;
+ struct {
+ tdav_webrtc_pin_xt x_pin;
+ void* p_buff_ptr;
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ } out;
+}
+tdav_webrtc_resampler_t;
+
+static int _tdav_webrtc_resampler_create(const tdav_webrtc_pin_xt* p_pin_in, const tdav_webrtc_pin_xt* p_pin_out, tdav_webrtc_resampler_t **pp_resampler);
+static int _tdav_webrtc_resampler_process(tdav_webrtc_resampler_t* p_self, const void* p_buff_ptr, tsk_size_t n_buff_size_in_bytes);
+
+/** WebRTC denoiser (AEC, NS, AGC...) */
+typedef struct tdav_webrtc_denoise_s
+{
+ TMEDIA_DECLARE_DENOISE;
+
+ void *AEC_inst;
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ SpeexPreprocessState *SpeexDenoiser_proc;
+#else
+ TDAV_NsHandle *NS_inst;
+#endif
+
+ uint32_t echo_tail;
+ uint32_t echo_skew;
+
+ struct {
+ tdav_webrtc_resampler_t* p_rpl_in2den; // input -> denoiser
+ tdav_webrtc_resampler_t* p_rpl_den2in; // denoiser -> input
+ } record;
+ struct {
+ tdav_webrtc_resampler_t* p_rpl_in2den; // input -> denoiser
+ tdav_webrtc_resampler_t* p_rpl_den2in; // denoiser -> input
+ } playback;
+
+ struct {
+ uint32_t nb_samples_per_process;
+ uint32_t sampling_rate;
+ uint32_t channels; // always "1"
+ } neg;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_webrtc_denoise_t;
+
+static int tdav_webrtc_denoise_set(tmedia_denoise_t* _self, const tmedia_param_t* param)
+{
+ tdav_webrtc_denoise_t *self = (tdav_webrtc_denoise_t *)_self;
+ if (!self || !param) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "echo-tail")) {
+ int32_t echo_tail = *((int32_t*)param->value);
+ self->echo_tail = TSK_CLAMP(WEBRTC_MIN_ECHO_TAIL, echo_tail, WEBRTC_MAX_ECHO_TAIL);
+ TSK_DEBUG_INFO("set_echo_tail (%d->%d)", echo_tail, self->echo_tail);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int tdav_webrtc_denoise_open(tmedia_denoise_t* self, uint32_t record_frame_size_samples, uint32_t record_sampling_rate, uint32_t record_channels, uint32_t playback_frame_size_samples, uint32_t playback_sampling_rate, uint32_t playback_channels)
+{
+ tdav_webrtc_denoise_t *denoiser = (tdav_webrtc_denoise_t *)self;
+ int ret;
+ tdav_webrtc_pin_xt pin_record_in = { 0 }, pin_record_den = { 0 }, pin_playback_in = { 0 }, pin_playback_den = { 0 };
+
+ if (!denoiser) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (denoiser->AEC_inst ||
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ denoiser->SpeexDenoiser_proc
+#else
+ denoiser->NS_inst
+#endif
+ ){
+ TSK_DEBUG_ERROR("Denoiser already initialized");
+ return -2;
+ }
+
+ denoiser->echo_tail = TSK_CLAMP(WEBRTC_MIN_ECHO_TAIL, TMEDIA_DENOISE(denoiser)->echo_tail, WEBRTC_MAX_ECHO_TAIL);
+ denoiser->echo_skew = TMEDIA_DENOISE(denoiser)->echo_skew;
+ TSK_DEBUG_INFO("echo_tail=%d, echo_skew=%d, echo_supp_enabled=%d, noise_supp_enabled=%d", denoiser->echo_tail, denoiser->echo_skew, self->echo_supp_enabled, self->noise_supp_enabled);
+
+ //
+ // DENOISER
+ //
+#if TDAV_UNDER_MOBILE // AECM= [8-16]k, AEC=[8-32]k
+ denoiser->neg.sampling_rate = TSK_MIN(TSK_MAX(record_sampling_rate, playback_sampling_rate), 16000);
+#else
+	denoiser->neg.sampling_rate = TSK_MIN(TSK_MAX(record_sampling_rate, playback_sampling_rate), 16000); // FIXME: 32000 is accepted but echo_process() fails
+#endif
+ denoiser->neg.nb_samples_per_process = /*TSK_CLAMP(80,*/ ((denoiser->neg.sampling_rate * 10) / 1000)/*, 160)*/; // Supported by the module: "80"(10ms) and "160"(20ms)
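+	// e.g. 16000Hz -> 160 samples per 10ms processing block (80 samples at 8000Hz)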
+ denoiser->neg.channels = 1;
+
+ //
+ // RECORD
+ //
+ TSK_OBJECT_SAFE_FREE(denoiser->record.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(denoiser->record.p_rpl_in2den);
+ pin_record_in.n_sample_size = sizeof(int16_t);
+ pin_record_in.n_rate = record_sampling_rate;
+ pin_record_in.n_channels = record_channels;
+ pin_record_in.n_duration = (((record_frame_size_samples * 1000) / record_sampling_rate)) / record_channels;
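+	// e.g. 640 samples @16kHz stereo -> 20ms (assuming record_frame_size_samples counts samples across all channels, as the division by channels implies)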
+ pin_record_den.n_sample_size = sizeof(sample_t);
+ pin_record_den.n_rate = denoiser->neg.sampling_rate;
+
+ pin_record_den.n_channels = 1;
+ pin_record_den.n_duration = pin_record_in.n_duration;
+ if (pin_record_in.n_sample_size != pin_record_den.n_sample_size || pin_record_in.n_rate != pin_record_den.n_rate || pin_record_in.n_channels != pin_record_den.n_channels) {
+ if ((ret = _tdav_webrtc_resampler_create(&pin_record_in, &pin_record_den, &denoiser->record.p_rpl_in2den))) {
+ return ret;
+ }
+ if ((ret = _tdav_webrtc_resampler_create(&pin_record_den, &pin_record_in, &denoiser->record.p_rpl_den2in))) {
+ return ret;
+ }
+ }
+ //
+ // PLAYBACK
+ //
+ TSK_OBJECT_SAFE_FREE(denoiser->playback.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(denoiser->playback.p_rpl_in2den);
+ pin_playback_in.n_sample_size = sizeof(int16_t);
+ pin_playback_in.n_rate = playback_sampling_rate;
+ pin_playback_in.n_channels = playback_channels;
+ pin_playback_in.n_duration = (((playback_frame_size_samples * 1000) / playback_sampling_rate)) / playback_channels;
+ pin_playback_den.n_sample_size = sizeof(sample_t);
+ pin_playback_den.n_rate = denoiser->neg.sampling_rate;
+ pin_playback_den.n_channels = 1;
+ pin_playback_den.n_duration = pin_playback_in.n_duration;
+ if (pin_playback_in.n_sample_size != pin_playback_den.n_sample_size || pin_playback_in.n_rate != pin_playback_den.n_rate || pin_playback_in.n_channels != pin_playback_den.n_channels) {
+ if ((ret = _tdav_webrtc_resampler_create(&pin_playback_in, &pin_playback_den, &denoiser->playback.p_rpl_in2den))) {
+ return ret;
+ }
+ if ((ret = _tdav_webrtc_resampler_create(&pin_playback_den, &pin_playback_in, &denoiser->playback.p_rpl_den2in))) {
+ return ret;
+ }
+ }
+
+ //
+ // AEC instance
+ //
+ if ((ret = TDAV_WebRtcAec_Create(&denoiser->AEC_inst))) {
+ TSK_DEBUG_ERROR("WebRtcAec_Create failed with error code = %d", ret);
+ return ret;
+ }
+ if ((ret = TDAV_WebRtcAec_Init(denoiser->AEC_inst, denoiser->neg.sampling_rate, denoiser->neg.sampling_rate))) {
+ TSK_DEBUG_ERROR("WebRtcAec_Init failed with error code = %d", ret);
+ return ret;
+ }
+
+#if TDAV_UNDER_MOBILE
+#else
+ {
+ AecConfig aecConfig;
+#if WEBRTC_AEC_AGGRESSIVE
+ aecConfig.nlpMode = kAecNlpAggressive;
+#else
+ aecConfig.nlpMode = kAecNlpModerate;
+#endif
+ aecConfig.skewMode = kAecFalse;
+ aecConfig.metricsMode = kAecTrue;
+ aecConfig.delay_logging = kAecFalse;
+ if ((ret = WebRtcAec_set_config(denoiser->AEC_inst, aecConfig))) {
+ TSK_DEBUG_ERROR("WebRtcAec_set_config failed with error code = %d", ret);
+ }
+ }
+#endif
+
+
+ //
+ // Noise Suppression instance
+ //
+ if (TMEDIA_DENOISE(denoiser)->noise_supp_enabled) {
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if ((denoiser->SpeexDenoiser_proc = speex_preprocess_state_init((pin_record_den.n_rate / 1000) * pin_record_den.n_duration, pin_record_den.n_rate))) {
+ int i = 1;
+ speex_preprocess_ctl(denoiser->SpeexDenoiser_proc, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ i = TMEDIA_DENOISE(denoiser)->noise_supp_level;
+ speex_preprocess_ctl(denoiser->SpeexDenoiser_proc, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ }
+#else
+ if ((ret = TDAV_WebRtcNs_Create(&denoiser->NS_inst))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Create failed with error code = %d", ret);
+ return ret;
+ }
+ if ((ret = TDAV_WebRtcNs_Init(denoiser->NS_inst, 80))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Init failed with error code = %d", ret);
+ return ret;
+ }
+#endif
+ }
+
+ TSK_DEBUG_INFO("WebRTC denoiser opened: record:%uHz,%uchannels // playback:%uHz,%uchannels // neg:%uHz,%uchannels",
+ record_sampling_rate, record_channels,
+ playback_sampling_rate, playback_channels,
+ denoiser->neg.sampling_rate, denoiser->neg.channels);
+
+ return ret;
+}
+
+static int tdav_webrtc_denoise_echo_playback(tmedia_denoise_t* self, const void* echo_frame, uint32_t echo_frame_size_bytes)
+{
+ tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)self;
+ int ret = 0;
+
+ tsk_safeobj_lock(p_self);
+ if (p_self->AEC_inst && echo_frame && echo_frame_size_bytes) {
+ const sample_t* _echo_frame = (const sample_t*)echo_frame;
+ tsk_size_t _echo_frame_size_bytes = echo_frame_size_bytes;
+ tsk_size_t _echo_frame_size_samples = (_echo_frame_size_bytes / sizeof(int16_t));
+ // IN -> DEN
+ if (p_self->playback.p_rpl_in2den) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->playback.p_rpl_in2den, _echo_frame, _echo_frame_size_bytes))) {
+ goto bail;
+ }
+ _echo_frame = p_self->playback.p_rpl_in2den->out.p_buff_ptr;
+ _echo_frame_size_bytes = p_self->playback.p_rpl_in2den->out.n_buff_size_in_bytes;
+ _echo_frame_size_samples = p_self->playback.p_rpl_in2den->out.n_buff_size_in_samples;
+ }
+ // PROCESS
+ if (_echo_frame_size_samples && _echo_frame) {
+ uint32_t _samples;
+ for (_samples = 0; _samples < _echo_frame_size_samples; _samples += p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcAec_BufferFarend(p_self->AEC_inst, &_echo_frame[_samples], p_self->neg.nb_samples_per_process))){
+ TSK_DEBUG_ERROR("WebRtcAec_BufferFarend failed with error code = %d, nb_samples_per_process=%u", ret, p_self->neg.nb_samples_per_process);
+ goto bail;
+ }
+ }
+ }
+ }
+bail:
+ tsk_safeobj_unlock(p_self);
+ return ret;
+}
+
+static int tdav_webrtc_denoise_process_record(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes, tsk_bool_t* silence_or_noise)
+{
+ tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)self;
+ int ret = 0;
+
+ *silence_or_noise = tsk_false;
+
+ tsk_safeobj_lock(p_self);
+
+ if (p_self->AEC_inst && audio_frame && audio_frame_size_bytes) {
+ tsk_size_t _samples;
+ const sample_t* _audio_frame = (const sample_t*)audio_frame;
+ tsk_size_t _audio_frame_size_bytes = audio_frame_size_bytes;
+ tsk_size_t _audio_frame_size_samples = (_audio_frame_size_bytes / sizeof(int16_t));
+ // IN -> DEN
+ if (p_self->record.p_rpl_in2den) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->record.p_rpl_in2den, _audio_frame, _audio_frame_size_bytes))) {
+ goto bail;
+ }
+ _audio_frame = p_self->record.p_rpl_in2den->out.p_buff_ptr;
+ _audio_frame_size_bytes = p_self->record.p_rpl_in2den->out.n_buff_size_in_bytes;
+ _audio_frame_size_samples = p_self->record.p_rpl_in2den->out.n_buff_size_in_samples;
+ }
+ // NOISE SUPPRESSION
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if (p_self->SpeexDenoiser_proc) {
+ speex_preprocess_run(p_self->SpeexDenoiser_proc, (spx_int16_t*)_audio_frame);
+ }
+#else
+	// WebRTC NoiseSupp only accepts 10ms frames
+ // Our encoder will always output 20ms frames ==> execute 2x noise_supp
+ if (p_self->NS_inst) {
+ for (_samples = 0; _samples < _audio_frame_size_samples; _samples+= p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcNs_Process(p_self->NS_inst, &_audio_frame[_samples], tsk_null, _audio_frame, tsk_null))) {
+				TSK_DEBUG_ERROR("WebRtcNs_Process failed with error code = %d", ret);
+ goto bail;
+ }
+ }
+ }
+#endif
+ // PROCESS
+ if (_audio_frame_size_samples && _audio_frame) {
+ for (_samples = 0; _samples < _audio_frame_size_samples; _samples += p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcAec_Process(p_self->AEC_inst, &_audio_frame[_samples], tsk_null, (sample_t*)&_audio_frame[_samples], tsk_null, p_self->neg.nb_samples_per_process, p_self->echo_tail, p_self->echo_skew))){
+				TSK_DEBUG_ERROR("WebRtcAec_Process failed with error code = %d, nb_samples_per_process=%u", ret, p_self->neg.nb_samples_per_process);
+ goto bail;
+ }
+ }
+ }
+ // DEN -> IN
+ if (p_self->record.p_rpl_den2in) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->record.p_rpl_den2in, _audio_frame, _audio_frame_size_bytes))) {
+ goto bail;
+ }
+ _audio_frame = p_self->record.p_rpl_den2in->out.p_buff_ptr;
+ _audio_frame_size_bytes = p_self->record.p_rpl_den2in->out.n_buff_size_in_bytes;
+ _audio_frame_size_samples = p_self->record.p_rpl_den2in->out.n_buff_size_in_samples;
+ }
+ // Sanity check
+ if (_audio_frame_size_bytes != audio_frame_size_bytes) {
+ TSK_DEBUG_ERROR("Size mismatch: %u <> %u", _audio_frame_size_bytes, audio_frame_size_bytes);
+ ret = -3;
+ goto bail;
+ }
+ if (audio_frame != (const void*)_audio_frame) {
+ memcpy(audio_frame, _audio_frame, _audio_frame_size_bytes);
+ }
+ }
+
+bail:
+ tsk_safeobj_unlock(p_self);
+ return ret;
+}
+
+static int tdav_webrtc_denoise_process_playback(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes)
+{
+ tdav_webrtc_denoise_t *denoiser = (tdav_webrtc_denoise_t *)self;
+
+ (void)(denoiser);
+
+ // Not mandatory to denoise audio before playback.
+ // All Doubango clients support noise suppression.
+ return 0;
+}
+
+static int tdav_webrtc_denoise_close(tmedia_denoise_t* self)
+{
+ tdav_webrtc_denoise_t *denoiser = (tdav_webrtc_denoise_t *)self;
+
+ tsk_safeobj_lock(denoiser);
+ if (denoiser->AEC_inst) {
+ TDAV_WebRtcAec_Free(denoiser->AEC_inst);
+ denoiser->AEC_inst = tsk_null;
+ }
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if (denoiser->SpeexDenoiser_proc) {
+ speex_preprocess_state_destroy(denoiser->SpeexDenoiser_proc);
+ denoiser->SpeexDenoiser_proc = tsk_null;
+ }
+#else
+ if (denoiser->NS_inst) {
+ TDAV_WebRtcNs_Free(denoiser->NS_inst);
+ denoiser->NS_inst = tsk_null;
+ }
+#endif
+ tsk_safeobj_unlock(denoiser);
+
+ return 0;
+}
+
+static int _tdav_webrtc_resampler_create(const tdav_webrtc_pin_xt* p_pin_in, const tdav_webrtc_pin_xt* p_pin_out, tdav_webrtc_resampler_t **pp_resampler)
+{
+ extern const tsk_object_def_t *tdav_webrtc_resampler_def_t;
+ int ret = 0;
+ if (!p_pin_in || !p_pin_out || !pp_resampler || *pp_resampler) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (!(*pp_resampler = tsk_object_new(tdav_webrtc_resampler_def_t))) {
+ TSK_DEBUG_ERROR("Failed to create resampler object");
+ ret = -3;
+ goto bail;
+ }
+ if (!((*pp_resampler)->p_resampler = tmedia_resampler_create())) {
+ ret = -3;
+ goto bail;
+ }
+ ret = tmedia_resampler_open((*pp_resampler)->p_resampler,
+ p_pin_in->n_rate, p_pin_out->n_rate,
+ p_pin_in->n_duration,
+ p_pin_in->n_channels, p_pin_out->n_channels,
+ TMEDIA_RESAMPLER_QUALITY,
+ (p_pin_out->n_sample_size << 3));
+ if (ret) {
+ TSK_DEBUG_ERROR("Failed to open resampler: in_rate=%u,in_duration=%u,in_channels=%u /// out_rate=%u,out_duration=%u,out_channels=%u",
+ p_pin_in->n_rate, p_pin_in->n_duration, p_pin_in->n_channels,
+ p_pin_out->n_rate, p_pin_out->n_duration, p_pin_out->n_channels);
+ goto bail;
+ }
+
+ (*pp_resampler)->out.n_buff_size_in_bytes = ((((p_pin_out->n_rate * p_pin_out->n_duration) / 1000)) * p_pin_out->n_channels) * p_pin_out->n_sample_size;
+ (*pp_resampler)->out.p_buff_ptr = tsk_malloc((*pp_resampler)->out.n_buff_size_in_bytes);
+ if (!(*pp_resampler)->out.p_buff_ptr) {
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size=%u", (*pp_resampler)->out.n_buff_size_in_bytes);
+ ret = -3;
+ goto bail;
+ }
+ (*pp_resampler)->out.n_buff_size_in_samples = (*pp_resampler)->out.n_buff_size_in_bytes / p_pin_out->n_sample_size;
+ (*pp_resampler)->in.n_buff_size_in_bytes = ((((p_pin_in->n_rate * p_pin_in->n_duration) / 1000)) * p_pin_in->n_channels) * p_pin_in->n_sample_size;
+ (*pp_resampler)->in.n_buff_size_in_samples = (*pp_resampler)->in.n_buff_size_in_bytes / p_pin_in->n_sample_size;
+
+ (*pp_resampler)->n_bufftmp_size_in_bytes = (((48000 * TSK_MAX(p_pin_in->n_duration, p_pin_out->n_duration)) / 1000) * 2/*channels*/) * sizeof(float); // Max
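+	// worst case: 48kHz stereo float, e.g. 20ms -> 960 samples * 2 channels * 4 bytes = 7680 bytes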
+ (*pp_resampler)->p_bufftmp_ptr = tsk_malloc((*pp_resampler)->n_bufftmp_size_in_bytes);
+ if (!(*pp_resampler)->p_bufftmp_ptr) {
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size:%u", (*pp_resampler)->n_bufftmp_size_in_bytes);
+ ret = -3;
+ goto bail;
+ }
+
+ memcpy(&(*pp_resampler)->in.x_pin, p_pin_in, sizeof(tdav_webrtc_pin_xt));
+ memcpy(&(*pp_resampler)->out.x_pin, p_pin_out, sizeof(tdav_webrtc_pin_xt));
+bail:
+ if (ret) {
+ TSK_OBJECT_SAFE_FREE((*pp_resampler));
+ }
+ return ret;
+}
+
+static int _tdav_webrtc_resampler_process(tdav_webrtc_resampler_t *p_self, const void* p_buff_ptr, tsk_size_t n_buff_size_in_bytes)
+{
+ tsk_size_t n_out_size;
+ const void* _p_buff_ptr = p_buff_ptr;
+ tsk_size_t _n_buff_size_in_bytes = n_buff_size_in_bytes;
+ tsk_size_t _n_buff_size_in_samples;
+
+ if (!p_self || !p_buff_ptr || !n_buff_size_in_bytes) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (p_self->in.n_buff_size_in_bytes != n_buff_size_in_bytes) {
+ TSK_DEBUG_ERROR("Invalid input size: %u <> %u", p_self->in.n_buff_size_in_bytes, n_buff_size_in_bytes);
+ return -2;
+ }
+ _n_buff_size_in_samples = p_self->in.n_buff_size_in_samples;
+ if (p_self->in.x_pin.n_sample_size != p_self->out.x_pin.n_sample_size) {
+ tsk_size_t index;
+ if (p_self->in.x_pin.n_sample_size == sizeof(int16_t)) {
+ // int16_t -> float
+ const int16_t* p_src = (const int16_t*)p_buff_ptr;
+ float* p_dst = (float*)p_self->p_bufftmp_ptr;
+ for (index = 0; index < _n_buff_size_in_samples; ++index) {
+ p_dst[index] = (float)p_src[index];
+ }
+ }
+ else {
+ // float -> int16_t
+ const float* p_src = (const float*)p_buff_ptr;
+ int16_t* p_dst = (int16_t*)p_self->p_bufftmp_ptr;
+ for (index = 0; index < _n_buff_size_in_samples; ++index) {
+ p_dst[index] = (int16_t)p_src[index];
+ }
+ }
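+		// Note: straight casts above (no rescaling): float samples keep the raw int16 value range rather than being normalized to [-1, 1]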
+ _p_buff_ptr = p_self->p_bufftmp_ptr;
+ _n_buff_size_in_bytes = p_self->in.n_buff_size_in_bytes;
+ }
+ n_out_size = tmedia_resampler_process(p_self->p_resampler, _p_buff_ptr, _n_buff_size_in_samples, (int16_t*)p_self->out.p_buff_ptr, p_self->out.n_buff_size_in_samples);
+ if (n_out_size != p_self->out.n_buff_size_in_samples) {
+		TSK_DEBUG_ERROR("Invalid output size: %u <> %u", n_out_size, p_self->out.n_buff_size_in_samples);
+ return -4;
+ }
+ return 0;
+}
+
+//
+// WEBRTC resampler object definition
+//
+static tsk_object_t* tdav_webrtc_resampler_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_webrtc_resampler_t *p_resampler = (tdav_webrtc_resampler_t*)self;
+ if (p_resampler) {
+
+ }
+ return self;
+}
+static tsk_object_t* tdav_webrtc_resampler_dtor(tsk_object_t * self)
+{
+ tdav_webrtc_resampler_t *p_resampler = (tdav_webrtc_resampler_t*)self;
+ if (p_resampler) {
+ TSK_OBJECT_SAFE_FREE(p_resampler->p_resampler);
+ TSK_FREE(p_resampler->out.p_buff_ptr);
+ TSK_FREE(p_resampler->p_bufftmp_ptr);
+ }
+ return self;
+}
+static const tsk_object_def_t tdav_webrtc_resampler_def_s =
+{
+ sizeof(tdav_webrtc_resampler_t),
+ tdav_webrtc_resampler_ctor,
+ tdav_webrtc_resampler_dtor,
+ tsk_object_cmp,
+};
+const tsk_object_def_t *tdav_webrtc_resampler_def_t = &tdav_webrtc_resampler_def_s;
+
+
+//
+// WEBRTC denoiser Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_webrtc_denoise_ctor(tsk_object_t * _self, va_list * app)
+{
+ tdav_webrtc_denoise_t *self = _self;
+ if (self){
+ /* init base */
+ tmedia_denoise_init(TMEDIA_DENOISE(self));
+ /* init self */
+ tsk_safeobj_init(self);
+ self->neg.channels = 1;
+
+ TSK_DEBUG_INFO("Create WebRTC denoiser");
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_webrtc_denoise_dtor(tsk_object_t * _self)
+{
+ tdav_webrtc_denoise_t *self = _self;
+ if (self){
+ /* deinit base (will close the denoise if not done yet) */
+ tmedia_denoise_deinit(TMEDIA_DENOISE(self));
+ /* deinit self */
+ tdav_webrtc_denoise_close(TMEDIA_DENOISE(self));
+ TSK_OBJECT_SAFE_FREE(self->record.p_rpl_in2den);
+ TSK_OBJECT_SAFE_FREE(self->record.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(self->playback.p_rpl_in2den);
+ TSK_OBJECT_SAFE_FREE(self->playback.p_rpl_den2in);
+ tsk_safeobj_deinit(self);
+
+ TSK_DEBUG_INFO("*** Destroy WebRTC denoiser ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_webrtc_denoise_def_s =
+{
+ sizeof(tdav_webrtc_denoise_t),
+ tdav_webrtc_denoise_ctor,
+ tdav_webrtc_denoise_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_denoise_plugin_def_t tdav_webrtc_denoise_plugin_def_s =
+{
+ &tdav_webrtc_denoise_def_s,
+
+ "Audio Denoiser based on Google WebRTC",
+
+ tdav_webrtc_denoise_set,
+ tdav_webrtc_denoise_open,
+ tdav_webrtc_denoise_echo_playback,
+ tdav_webrtc_denoise_process_record,
+ tdav_webrtc_denoise_process_playback,
+ tdav_webrtc_denoise_close,
+};
+const tmedia_denoise_plugin_def_t *tdav_webrtc_denoise_plugin_def_t = &tdav_webrtc_denoise_plugin_def_s;
+
+
+#endif /* HAVE_WEBRTC */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx b/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx
new file mode 100644
index 0000000..c3a88e3
--- /dev/null
+++ b/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx
@@ -0,0 +1,676 @@
+/*Copyright (C) 2013 Mamadou DIOP
+* Copyright (C) 2013-2014 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_consumer_wasapi.cxx
+ * @brief Microsoft Windows Audio Session API (WASAPI) consumer.
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
+ */
+#include "tinydav/audio/wasapi/tdav_consumer_wasapi.h"
+
+#if HAVE_WASAPI
+
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_condwait.h"
+#include "tsk_debug.h"
+
+#include <windows.h>
+#include <audioclient.h>
+#include <phoneaudioclient.h>
+
+#include <speex/speex_buffer.h>
+
+#if !defined(TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT)
+# define TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT 4
+#endif
+#define WASAPI_MILLIS_TO_100NS(MILLIS) (((LONGLONG)(MILLIS)) * 10000ui64)
+#define WASAPI_100NS_TO_MILLIS(NANOS) (((LONGLONG)(NANOS)) / 10000ui64)
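+// e.g. WASAPI_MILLIS_TO_100NS(20) == 200000 (REFERENCE_TIME is expressed in 100-nanosecond units)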
+
+#define WASAPI_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+
+struct tdav_consumer_wasapi_s;
+
+namespace Doubango
+{
+ namespace VoIP
+ {
+ ref class AudioRender sealed
+ {
+ public:
+ virtual ~AudioRender();
+ internal:
+ AudioRender();
+
+ int Prepare(struct tdav_consumer_wasapi_s* wasapi, const tmedia_codec_t* codec);
+ int UnPrepare();
+ int Start();
+ int Stop();
+ int Pause();
+ int Consume(const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr);
+ private:
+ tsk_size_t Read(void* data, tsk_size_t size);
+ void AsyncThread(Windows::Foundation::IAsyncAction^ operation);
+
+ private:
+ tsk_mutex_handle_t* m_hMutex;
+			const struct tdav_consumer_wasapi_s* m_pWrappedConsumer; // Must not take ref(), otherwise dtor() will never be called (circular reference)
+ IAudioClient2* m_pDevice;
+ IAudioRenderClient* m_pClient;
+ HANDLE m_hEvent;
+ Windows::Foundation::IAsyncAction^ m_pAsyncThread;
+ INT32 m_nBytesPerNotif;
+ INT32 m_nSourceFrameSizeInBytes;
+ UINT32 m_nMaxFrameCount;
+ UINT32 m_nPtime;
+
+ struct {
+ struct {
+ void* buffer;
+ tsk_size_t size;
+ } chunck;
+ tsk_ssize_t leftBytes;
+ SpeexBuffer* buffer;
+ tsk_size_t size;
+ } m_ring;
+
+ bool m_bStarted;
+ bool m_bPrepared;
+ bool m_bPaused;
+ };
+ }
+}
+
+typedef struct tdav_consumer_wasapi_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ Doubango::VoIP::AudioRender ^AudioRender;
+}
+tdav_consumer_wasapi_t;
+
+extern "C" void tdav_win32_print_error(const char* func, HRESULT hr);
+
+
+/* ============ Media consumer Interface ================= */
+
+static int tdav_consumer_wasapi_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+static int tdav_consumer_wasapi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ if (!wasapi || !codec || !wasapi->AudioRender) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_CONSUMER(wasapi)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(wasapi)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(wasapi)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+	WASAPI_DEBUG_INFO("in.channels=%d, out.channels=%d, in.rate=%d, out.rate=%d, ptime=%d",
+ TMEDIA_CONSUMER(wasapi)->audio.in.channels,
+ TMEDIA_CONSUMER(wasapi)->audio.out.channels,
+ TMEDIA_CONSUMER(wasapi)->audio.in.rate,
+ TMEDIA_CONSUMER(wasapi)->audio.out.rate,
+ TMEDIA_CONSUMER(wasapi)->audio.ptime);
+
+ return wasapi->AudioRender->Prepare(wasapi, codec);
+}
+
+static int tdav_consumer_wasapi_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_consumer_wasapi_start()");
+
+ if (!wasapi || !wasapi->AudioRender) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Start();
+}
+
+
+static int tdav_consumer_wasapi_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+ if (!wasapi || !wasapi->AudioRender || !buffer || !size) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Consume(buffer, size, proto_hdr);
+}
+
+static int tdav_consumer_wasapi_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ if (!wasapi || !wasapi->AudioRender){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Pause();
+}
+
+static int tdav_consumer_wasapi_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_consumer_wasapi_stop()");
+
+ if (!wasapi || !wasapi->AudioRender) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Stop();
+}
+
+
+
+
+
+
+
+Doubango::VoIP::AudioRender::AudioRender()
+ : m_pDevice(nullptr)
+ , m_hMutex(nullptr)
+ , m_pClient(nullptr)
+ , m_hEvent(nullptr)
+ , m_pAsyncThread(nullptr)
+ , m_pWrappedConsumer(nullptr)
+ , m_nBytesPerNotif(0)
+ , m_nSourceFrameSizeInBytes(0)
+ , m_nMaxFrameCount(0)
+ , m_nPtime(0)
+ , m_bStarted(false)
+ , m_bPrepared(false)
+ , m_bPaused(false)
+{
+ memset(&m_ring, 0, sizeof(m_ring));
+
+ if (!(m_hMutex = tsk_mutex_create())) {
+ throw ref new Platform::FailureException(L"Failed to create mutex");
+ }
+}
+
+Doubango::VoIP::AudioRender::~AudioRender()
+{
+ Stop();
+ UnPrepare();
+
+ tsk_mutex_destroy(&m_hMutex);
+}
+
+int Doubango::VoIP::AudioRender::Prepare(tdav_consumer_wasapi_t* wasapi, const tmedia_codec_t* codec)
+{
+ HRESULT hr = E_FAIL;
+ int ret = 0;
+ WAVEFORMATEX wfx = {0};
+ AudioClientProperties properties = {0};
+ LPCWSTR pwstrRenderId = nullptr;
+
+ #define WASAPI_SET_ERROR(code) ret = (code); goto bail;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_bPrepared) {
+ WASAPI_DEBUG_INFO("Already prepared");
+ goto bail;
+ }
+
+ if (!wasapi || !codec) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ WASAPI_SET_ERROR(-1);
+ }
+
+ if (m_pDevice || m_pClient) {
+ WASAPI_DEBUG_ERROR("consumer already prepared");
+ WASAPI_SET_ERROR(-2);
+ }
+
+ pwstrRenderId = GetDefaultAudioRenderId(AudioDeviceRole::Communications);
+
+ if (NULL == pwstrRenderId) {
+ tdav_win32_print_error("GetDefaultAudioRenderId", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-3);
+ }
+
+ hr = ActivateAudioInterface(pwstrRenderId, __uuidof(IAudioClient2), (void**)&m_pDevice);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-4);
+ }
+
+ if (SUCCEEDED(hr)) {
+		properties.cbSize = sizeof(AudioClientProperties);
+ properties.eCategory = AudioCategory_Communications;
+ hr = m_pDevice->SetClientProperties(&properties);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("SetClientProperties", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-5);
+ }
+ }
+ else {
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-6);
+ }
+
+ /* Set best format */
+ {
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_CONSUMER(wasapi)->audio.in.channels;
+ wfx.nSamplesPerSec = TMEDIA_CONSUMER(wasapi)->audio.in.rate;
+ wfx.wBitsPerSample = TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ PWAVEFORMATEX pwfxClosestMatch = NULL;
+ hr = m_pDevice->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &wfx, &pwfxClosestMatch);
+ if (hr != S_OK && hr != S_FALSE) {
+ tdav_win32_print_error("IsFormatSupported", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-8);
+ }
+
+ if (hr == S_FALSE) {
+ if (!pwfxClosestMatch) {
+				WASAPI_DEBUG_ERROR("IsFormatSupported() returned S_FALSE without a closest match (alloc of %d bytes failed?)", sizeof(WAVEFORMATEX));
+ WASAPI_SET_ERROR(-7);
+ }
+
+ wfx.nSamplesPerSec = pwfxClosestMatch->nSamplesPerSec;
+ wfx.nChannels = pwfxClosestMatch->nChannels;
+#if 0
+ wfx.wBitsPerSample = pwfxClosestMatch->wBitsPerSample;
+#endif
+ wfx.nBlockAlign = wfx.nChannels * (wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
+ // Request resampler
+ TMEDIA_CONSUMER(wasapi)->audio.out.rate = (uint32_t)wfx.nSamplesPerSec;
+ TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample = (uint8_t)wfx.wBitsPerSample;
+ TMEDIA_CONSUMER(wasapi)->audio.out.channels = (uint8_t)wfx.nChannels;
+
+ WASAPI_DEBUG_INFO("Audio device format fallback: rate=%d, bps=%d, channels=%d", wfx.nSamplesPerSec, wfx.wBitsPerSample, wfx.nChannels);
+ }
+ if (pwfxClosestMatch) {
+ CoTaskMemFree(pwfxClosestMatch);
+ }
+ }
+
+ m_nSourceFrameSizeInBytes = (wfx.wBitsPerSample >> 3) * wfx.nChannels;
+ m_nBytesPerNotif = ((wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(wasapi)->audio.ptime) / 1000);
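+	// e.g. 16-bit stereo @48kHz, ptime=20ms -> 4 bytes per frame, (192000 * 20) / 1000 = 3840 bytes per notification period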
+
+ // Initialize
+ hr = m_pDevice->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+ WASAPI_MILLIS_TO_100NS(TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT * TMEDIA_CONSUMER(wasapi)->audio.ptime),
+ 0,
+ &wfx,
+ NULL);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("#WASAPI: Render::Initialize", hr);
+ WASAPI_SET_ERROR(-9);
+ }
+
+ REFERENCE_TIME DefaultDevicePeriod, MinimumDevicePeriod;
+ hr = m_pDevice->GetDevicePeriod(&DefaultDevicePeriod, &MinimumDevicePeriod);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetDevicePeriod", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+ hr = m_pDevice->GetBufferSize(&m_nMaxFrameCount);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetBufferSize", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+
+ WASAPI_DEBUG_INFO("#WASAPI (Playback): BufferSize=%u, DefaultDevicePeriod=%lld ms, MinimumDevicePeriod=%lldms", m_nMaxFrameCount, WASAPI_100NS_TO_MILLIS(DefaultDevicePeriod), WASAPI_100NS_TO_MILLIS(MinimumDevicePeriod));
+
+ if (!m_hEvent) {
+ if (!(m_hEvent = CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE))) {
+ tdav_win32_print_error("CreateEventEx(EVENT_MODIFY_STATE | SYNCHRONIZE)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-11);
+ }
+ }
+
+ hr = m_pDevice->SetEventHandle(m_hEvent);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("SetEventHandle", hr);
+ WASAPI_SET_ERROR(-12);
+ }
+
+ hr = m_pDevice->GetService(__uuidof(IAudioRenderClient), (void**)&m_pClient);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetService", hr);
+ WASAPI_SET_ERROR(-14);
+ }
+
+ m_ring.chunck.size = (TMEDIA_CONSUMER(wasapi)->audio.ptime * TMEDIA_CONSUMER(wasapi)->audio.out.rate * ((TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample >> 3) * TMEDIA_CONSUMER(wasapi)->audio.out.channels)) / 1000;
+ m_ring.size = TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT * m_ring.chunck.size;
+ if (!(m_ring.chunck.buffer = tsk_realloc(m_ring.chunck.buffer, m_ring.chunck.size))) {
+ m_ring.size = 0;
+ WASAPI_DEBUG_ERROR("Failed to allocate new buffer");
+ WASAPI_SET_ERROR(-15);
+ }
+ if (!m_ring.buffer) {
+ m_ring.buffer = speex_buffer_init(m_ring.size);
+ }
+ else {
+ int sret;
+ if ((sret = speex_buffer_resize(m_ring.buffer, m_ring.size)) < 0) {
+ WASAPI_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", m_ring.size, sret);
+ WASAPI_SET_ERROR(-16);
+ }
+ }
+ if (!m_ring.buffer) {
+ WASAPI_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", m_ring.size);
+ WASAPI_SET_ERROR(-17);
+ }
+
+bail:
+ if (pwstrRenderId) {
+ CoTaskMemFree((LPVOID)pwstrRenderId);
+ }
+ if (ret != 0) {
+ UnPrepare();
+ }
+
+ if ((m_bPrepared = (ret == 0))) {
+ m_pWrappedConsumer = wasapi;
+ m_nPtime = TMEDIA_CONSUMER(wasapi)->audio.ptime;
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return ret;
+}
+
+int Doubango::VoIP::AudioRender::UnPrepare()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_hEvent) {
+ CloseHandle(m_hEvent), m_hEvent = nullptr;
+ }
+ if (m_pDevice) {
+ m_pDevice->Release(), m_pDevice = nullptr;
+ }
+ if (m_pClient) {
+ m_pClient->Release(), m_pClient = nullptr;
+ }
+
+ TSK_FREE(m_ring.chunck.buffer);
+ if (m_ring.buffer) {
+ speex_buffer_destroy(m_ring.buffer);
+ m_ring.buffer = nullptr;
+ }
+
+ m_pWrappedConsumer = nullptr;
+
+ m_bPrepared = false;
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioRender::Start()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_bStarted) {
+ WASAPI_DEBUG_INFO("already started");
+ goto bail;
+ }
+ if (!m_bPrepared) {
+ WASAPI_DEBUG_ERROR("not prepared");
+ goto bail;
+ }
+
+ m_pAsyncThread = Windows::System::Threading::ThreadPool::RunAsync(ref new Windows::System::Threading::WorkItemHandler(this, &Doubango::VoIP::AudioRender::AsyncThread),
+ Windows::System::Threading::WorkItemPriority::High,
+ Windows::System::Threading::WorkItemOptions::TimeSliced);
+
+ if ((m_bStarted = (m_pAsyncThread != nullptr))) {
+ HRESULT hr = m_pDevice->Start();
+ if(!SUCCEEDED(hr)) {
+ tdav_win32_print_error("Device::Start", hr);
+ Stop();
+ }
+ m_bPaused = false;
+ }
+
+bail:
+ tsk_mutex_unlock(m_hMutex);
+
+ return (m_bStarted ? 0 : -2);
+}
+
+int Doubango::VoIP::AudioRender::Stop()
+{
+ m_bStarted = false;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_hEvent) {
+ SetEvent(m_hEvent);
+ }
+
+ if (m_pAsyncThread) {
+ m_pAsyncThread->Cancel();
+ m_pAsyncThread->Close();
+ m_pAsyncThread = nullptr;
+ }
+
+ if (m_pDevice) {
+ m_pDevice->Stop();
+ }
+
+ // will be prepared again before next start()
+ UnPrepare();
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioRender::Pause()
+{
+ m_bPaused = true;
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioRender::Consume(const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ int ret;
+ // tsk_mutex_lock(m_hMutex);
+ ret = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer), buffer, size, proto_hdr); // thread-safe
+ // tsk_mutex_unlock(m_hMutex);
+ return ret;
+}
+
+tsk_size_t Doubango::VoIP::AudioRender::Read(void* data, tsk_size_t size)
+{
+ tsk_ssize_t retSize = 0, availSize;
+
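+ // Pull "ptime"-sized chunks from the jitter buffer into the SpeexBuffer ring, then serve the renderer's request from the ring.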
+ m_ring.leftBytes += size;
+ while (m_ring.leftBytes >= (tsk_ssize_t)m_ring.chunck.size) {
+ m_ring.leftBytes -= m_ring.chunck.size;
+ retSize = (tsk_ssize_t)tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer), m_ring.chunck.buffer, m_ring.chunck.size);
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer));
+ speex_buffer_write(m_ring.buffer, m_ring.chunck.buffer, retSize);
+ }
+ // IMPORTANT: looks like there is a bug in speex: continuously trying to read more than avail
+ // many times can corrupt the buffer. At least on OS X 1.5
+#if 0
+ if (speex_buffer_get_available(m_ring.buffer) >= (tsk_ssize_t)size) {
+ retSize = speex_buffer_read(m_ring.buffer, data, size);
+ }
+ else{
+ memset(data, 0, size);
+ }
+#else
+ availSize = speex_buffer_get_available(m_ring.buffer);
+ if (availSize == 0) {
+ memset(data, 0, size);
+ }
+ else {
+ retSize = speex_buffer_read(m_ring.buffer, data, min(availSize, (tsk_ssize_t)size));
+ if (availSize < (tsk_ssize_t)size) {
+ memset(((uint8_t*)data) + availSize, 0, (size - availSize));
+ }
+ }
+
+#endif
+
+ return retSize;
+}
+
+void Doubango::VoIP::AudioRender::AsyncThread(Windows::Foundation::IAsyncAction^ operation)
+{
+ HRESULT hr = S_OK;
+ INT32 nFramesToWrite;
+ UINT32 nPadding, nRead;
+ DWORD retval = WAIT_OBJECT_0;
+
+ WASAPI_DEBUG_INFO("#WASAPI: __playback_thread -- START");
+
+ #define BREAK_WHILE tsk_mutex_unlock(m_hMutex); break;
+
+ while (m_bStarted && SUCCEEDED(hr)) {
+ retval = WaitForSingleObjectEx(m_hEvent, /*m_nPtime*/INFINITE, FALSE);
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (!m_bStarted) {
+ BREAK_WHILE;
+ }
+
+ if (retval == WAIT_OBJECT_0) {
+ hr = m_pDevice->GetCurrentPadding(&nPadding);
+ if (SUCCEEDED(hr)) {
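+ // nPadding = frames still queued in the endpoint buffer; only the remaining free space may be filled.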
+ BYTE* pRenderBuffer = NULL;
+ nFramesToWrite = m_nMaxFrameCount - nPadding;
+
+ if (nFramesToWrite > 0) {
+ hr = m_pClient->GetBuffer(nFramesToWrite, &pRenderBuffer);
+ if (SUCCEEDED(hr)) {
+ nRead = Read(pRenderBuffer, (nFramesToWrite * m_nSourceFrameSizeInBytes));
+
+ // Release the buffer
+ hr = m_pClient->ReleaseBuffer(nFramesToWrite, (nRead == 0) ? AUDCLNT_BUFFERFLAGS_SILENT : 0);
+ }
+ }
+ }
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+ }// end-of-while
+
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("AsyncThread: ", hr);
+ }
+
+
+ WASAPI_DEBUG_INFO("__playback_thread(%s) -- STOP", (SUCCEEDED(hr) && retval == WAIT_OBJECT_0) ? "OK" : "NOK");
+}
+
+
+
+
+
+
+
+//
+// WASAPI consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_wasapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_wasapi_t *wasapi = (tdav_consumer_wasapi_t*)self;
+ if (wasapi) {
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(wasapi));
+ /* init self */
+
+ wasapi->AudioRender = ref new Doubango::VoIP::AudioRender();
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_wasapi_dtor(tsk_object_t * self)
+{
+ tdav_consumer_wasapi_t *wasapi = (tdav_consumer_wasapi_t*)self;
+ if (wasapi) {
+ /* stop */
+ tdav_consumer_wasapi_stop((tmedia_consumer_t*)self);
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(wasapi));
+ /* deinit self */
+ if (wasapi->AudioRender) {
+ delete wasapi->AudioRender;
+ wasapi->AudioRender = nullptr;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_wasapi_def_s =
+{
+ sizeof(tdav_consumer_wasapi_t),
+ tdav_consumer_wasapi_ctor,
+ tdav_consumer_wasapi_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_wasapi_plugin_def_s =
+{
+ &tdav_consumer_wasapi_def_s,
+
+ tmedia_audio,
+ "Microsoft Windows Audio Session API (WASAPI) consumer",
+
+ tdav_consumer_wasapi_set,
+ tdav_consumer_wasapi_prepare,
+ tdav_consumer_wasapi_start,
+ tdav_consumer_wasapi_consume,
+ tdav_consumer_wasapi_pause,
+ tdav_consumer_wasapi_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_wasapi_plugin_def_t = &tdav_consumer_wasapi_plugin_def_s;
+
+
+
+
+#endif /* HAVE_WASAPI */
diff --git a/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx b/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx
new file mode 100644
index 0000000..7d172a2
--- /dev/null
+++ b/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx
@@ -0,0 +1,681 @@
+/*Copyright (C) 2013 Mamadou DIOP
+* Copyright (C) 2013-2014 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_producer_wasapi.cxx
+ * @brief Microsoft Windows Audio Session API (WASAPI) producer.
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
+ */
+#include "tinydav/audio/wasapi/tdav_producer_wasapi.h"
+
+#if HAVE_WASAPI
+
+#include "tinydav/audio/tdav_producer_audio.h"
+
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#include <windows.h>
+#include <audioclient.h>
+#include <phoneaudioclient.h>
+
+#include <speex/speex_buffer.h>
+
+#if !defined(TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT)
+# define TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT 10
+#endif
+#define WASAPI_MILLIS_TO_100NS(MILLIS) (((LONGLONG)(MILLIS)) * 10000ui64)
+#define WASAPI_100NS_TO_MILLIS(NANOS) (((LONGLONG)(NANOS)) / 10000ui64)
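+// WASAPI durations (REFERENCE_TIME) are expressed in 100-nanosecond units, hence the x10000 factor when converting to/from milliseconds.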
+
+#define WASAPI_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+
+struct tdav_producer_wasapi_s;
+
+namespace Doubango
+{
+ namespace VoIP
+ {
+ ref class AudioCapture sealed
+ {
+ public:
+ virtual ~AudioCapture();
+ internal:
+ AudioCapture();
+
+ int Prepare(struct tdav_producer_wasapi_s* wasapi, const tmedia_codec_t* codec);
+ int UnPrepare();
+ int Start();
+ int Stop();
+ int Pause();
+
+ private:
+ void AsyncThread(Windows::Foundation::IAsyncAction^ operation);
+
+ private:
+ tsk_mutex_handle_t* m_hMutex;
+ IAudioClient2* m_pDevice;
+ IAudioCaptureClient* m_pClient;
+ HANDLE m_hCaptureEvent;
+ HANDLE m_hShutdownEvent;
+ Windows::Foundation::IAsyncAction^ m_pAsyncThread;
+ INT32 m_nBytesPerNotif;
+ INT32 m_nSourceFrameSizeInBytes;
+
+ struct{
+ tmedia_producer_enc_cb_f fn;
+ const void* pcData;
+ } m_callback;
+
+ struct {
+ struct {
+ void* buffer;
+ tsk_size_t size;
+ } chunck;
+ SpeexBuffer* buffer;
+ tsk_size_t size;
+ } m_ring;
+ bool m_bStarted;
+ bool m_bPrepared;
+ bool m_bPaused;
+ };
+ }
+}
+
+typedef struct tdav_producer_wasapi_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ Doubango::VoIP::AudioCapture ^audioCapture;
+}
+tdav_producer_wasapi_t;
+
+extern "C" void tdav_win32_print_error(const char* func, HRESULT hr);
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_wasapi_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if (tsk_striequals(param->key, "mute")) {
+ //wasapi->mute = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+#if !FIXME_SEND_SILENCE_ON_MUTE
+ //if(wasapi->started){
+ // if(wasapi->mute){
+ //IDirectSoundCaptureBuffer_Stop(wasapi->captureBuffer);
+ // }
+ // else{
+ //IDirectSoundCaptureBuffer_Start(wasapi->captureBuffer, DSBPLAY_LOOPING);
+ // }
+ //}
+#endif
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+
+
+static int tdav_producer_wasapi_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ if(!wasapi || !codec || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* codec should have ptime */
+ TMEDIA_PRODUCER(wasapi)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(wasapi)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(wasapi)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+ WASAPI_DEBUG_INFO("channels=%d, rate=%d, ptime=%d",
+ TMEDIA_PRODUCER(wasapi)->audio.channels,
+ TMEDIA_PRODUCER(wasapi)->audio.rate,
+ TMEDIA_PRODUCER(wasapi)->audio.ptime);
+
+ return wasapi->audioCapture->Prepare(wasapi, codec);
+}
+
+static int tdav_producer_wasapi_start(tmedia_producer_t* self)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_producer_wasapi_start()");
+
+ if(!wasapi || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->audioCapture->Start();
+}
+
+static int tdav_producer_wasapi_pause(tmedia_producer_t* self)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ if(!wasapi || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->audioCapture->Pause();
+}
+
+static int tdav_producer_wasapi_stop(tmedia_producer_t* self)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_producer_wasapi_stop()");
+
+ if(!wasapi || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->audioCapture->Stop();
+}
+
+
+
+
+
+
+
+Doubango::VoIP::AudioCapture::AudioCapture()
+ : m_pDevice(nullptr)
+ , m_hMutex(nullptr)
+ , m_pClient(nullptr)
+ , m_hCaptureEvent(nullptr)
+ , m_hShutdownEvent(nullptr)
+ , m_pAsyncThread(nullptr)
+ , m_nBytesPerNotif(0)
+ , m_nSourceFrameSizeInBytes(0)
+ , m_bStarted(false)
+ , m_bPrepared(false)
+ , m_bPaused(false)
+{
+ m_callback.fn = nullptr, m_callback.pcData = nullptr;
+ memset(&m_ring, 0, sizeof(m_ring));
+
+ if(!(m_hMutex = tsk_mutex_create())){
+ throw ref new Platform::FailureException(L"Failed to create mutex");
+ }
+}
+
+Doubango::VoIP::AudioCapture::~AudioCapture()
+{
+ Stop();
+ UnPrepare();
+
+ tsk_mutex_destroy(&m_hMutex);
+}
+
+int Doubango::VoIP::AudioCapture::Prepare(tdav_producer_wasapi_t* wasapi, const tmedia_codec_t* codec)
+{
+ HRESULT hr = E_FAIL;
+ int ret = 0;
+ WAVEFORMATEX wfx = {0};
+ AudioClientProperties properties = {0};
+ LPCWSTR pwstrCaptureId = nullptr;
+
+ #define WASAPI_SET_ERROR(code) ret = (code); goto bail;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_bPrepared)
+ {
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer already prepared");
+ goto bail;
+ }
+
+ if(!wasapi || !codec)
+ {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ WASAPI_SET_ERROR(-1);
+ }
+
+ if(m_pDevice || m_pClient){
+ WASAPI_DEBUG_ERROR("Producer already prepared");
+ WASAPI_SET_ERROR(-2);
+ }
+
+ pwstrCaptureId = GetDefaultAudioCaptureId(AudioDeviceRole::Communications);
+
+ if (NULL == pwstrCaptureId){
+ tdav_win32_print_error("GetDefaultAudioCaptureId", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-3);
+ }
+
+ hr = ActivateAudioInterface(pwstrCaptureId, __uuidof(IAudioClient2), (void**)&m_pDevice);
+ if(!SUCCEEDED(hr)){
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-4);
+ }
+
+ if (SUCCEEDED(hr)){
+ properties.cbSize = sizeof(AudioClientProperties);
+ properties.eCategory = AudioCategory_Communications;
+ hr = m_pDevice->SetClientProperties(&properties);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("SetClientProperties", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-5);
+ }
+ }
+ else{
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-6);
+ }
+
+ /* Set best format */
+ {
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_PRODUCER(wasapi)->audio.channels;
+ wfx.nSamplesPerSec = TMEDIA_PRODUCER(wasapi)->audio.rate;
+ wfx.wBitsPerSample = TMEDIA_PRODUCER(wasapi)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ PWAVEFORMATEX pwfxClosestMatch = NULL;
+ hr = m_pDevice->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &wfx, &pwfxClosestMatch);
+ if(hr != S_OK && hr != S_FALSE)
+ {
+ tdav_win32_print_error("IsFormatSupported", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-8);
+ }
+
+ if(hr == S_FALSE)
+ {
+ if(!pwfxClosestMatch)
+ {
+ WASAPI_DEBUG_ERROR("malloc(%d) failed", sizeof(WAVEFORMATEX));
+ WASAPI_SET_ERROR(-7);
+ }
+ wfx.nChannels = pwfxClosestMatch->nChannels;
+ wfx.nSamplesPerSec = pwfxClosestMatch->nSamplesPerSec;
+#if 0
+ wfx.wBitsPerSample = pwfxClosestMatch->wBitsPerSample;
+#endif
+ wfx.nBlockAlign = wfx.nChannels * (wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
+ // Request resampler
+ TMEDIA_PRODUCER(wasapi)->audio.rate = (uint32_t)wfx.nSamplesPerSec;
+ TMEDIA_PRODUCER(wasapi)->audio.bits_per_sample = (uint8_t)wfx.wBitsPerSample;
+ TMEDIA_PRODUCER(wasapi)->audio.channels = (uint8_t)wfx.nChannels;
+
+ WASAPI_DEBUG_INFO("Audio device format fallback: rate=%d, bps=%d, channels=%d", wfx.nSamplesPerSec, wfx.wBitsPerSample, wfx.nChannels);
+ }
+ if(pwfxClosestMatch)
+ {
+ CoTaskMemFree(pwfxClosestMatch);
+ }
+ }
+
+ m_nSourceFrameSizeInBytes = (wfx.wBitsPerSample >> 3) * wfx.nChannels;
+ m_nBytesPerNotif = ((wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(wasapi)->audio.ptime)/1000);
+
+ // Initialize
+ hr = m_pDevice->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+ (TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT * WASAPI_MILLIS_TO_100NS(TMEDIA_PRODUCER(wasapi)->audio.ptime)),
+ 0,
+ &wfx,
+ NULL);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("#WASAPI: Capture::SetClientProperties", hr);
+ WASAPI_SET_ERROR(-9);
+ }
+
+ REFERENCE_TIME DefaultDevicePeriod, MinimumDevicePeriod;
+ hr = m_pDevice->GetDevicePeriod(&DefaultDevicePeriod, &MinimumDevicePeriod);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("GetDevicePeriod", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+ WASAPI_DEBUG_INFO("#WASAPI(Capture): DefaultDevicePeriod=%lld ms, MinimumDevicePeriod=%lldms", WASAPI_100NS_TO_MILLIS(DefaultDevicePeriod), WASAPI_100NS_TO_MILLIS(MinimumDevicePeriod));
+
+ if(!m_hCaptureEvent){
+ if(!(m_hCaptureEvent = CreateEventEx(NULL, NULL, 0, EVENT_ALL_ACCESS))){
+ tdav_win32_print_error("CreateEventEx(Capture)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-11);
+ }
+ }
+ if(!m_hShutdownEvent){
+ if(!(m_hShutdownEvent = CreateEventEx(NULL, NULL, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS))){
+ tdav_win32_print_error("CreateEventEx(Shutdown)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-12);
+ }
+ }
+
+ hr = m_pDevice->SetEventHandle(m_hCaptureEvent);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("SetEventHandle", hr);
+ WASAPI_SET_ERROR(-13);
+ }
+
+ hr = m_pDevice->GetService(__uuidof(IAudioCaptureClient), (void**)&m_pClient);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("GetService", hr);
+ WASAPI_SET_ERROR(-14);
+ }
+
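+ // Size one ring chunk per "ptime" interval: samples-per-second * bytes-per-sample divided by the number of packets per second.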
+ int packetperbuffer = (1000 / TMEDIA_PRODUCER(wasapi)->audio.ptime);
+ m_ring.chunck.size = wfx.nSamplesPerSec * (wfx.wBitsPerSample >> 3) / packetperbuffer;
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer ring chunk size = %u", m_ring.chunck.size);
+ // allocate our chunck buffer
+ if(!(m_ring.chunck.buffer = tsk_realloc(m_ring.chunck.buffer, m_ring.chunck.size))){
+ WASAPI_DEBUG_ERROR("Failed to allocate new buffer");
+ WASAPI_SET_ERROR(-15);
+ }
+ // create ringbuffer
+ m_ring.size = TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT * m_ring.chunck.size;
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer ring size = %u", m_ring.size);
+ if(!m_ring.buffer){
+ m_ring.buffer = speex_buffer_init(m_ring.size);
+ }
+ else {
+ int sret;
+ if((sret = speex_buffer_resize(m_ring.buffer, m_ring.size)) < 0){
+ WASAPI_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", m_ring.size, sret);
+ WASAPI_SET_ERROR(-16);
+ }
+ }
+ if(!m_ring.buffer){
+ WASAPI_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", m_ring.size);
+ WASAPI_SET_ERROR(-17);
+ }
+
+ m_callback.fn = TMEDIA_PRODUCER(wasapi)->enc_cb.callback;
+ m_callback.pcData = TMEDIA_PRODUCER(wasapi)->enc_cb.callback_data;
+
+bail:
+ if (pwstrCaptureId){
+ CoTaskMemFree((LPVOID)pwstrCaptureId);
+ }
+ if(ret != 0){
+ UnPrepare();
+ }
+ m_bPrepared = (ret == 0);
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return ret;
+}
+
+int Doubango::VoIP::AudioCapture::UnPrepare()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_hCaptureEvent)
+ {
+ CloseHandle(m_hCaptureEvent), m_hCaptureEvent = nullptr;
+ }
+ if(m_hShutdownEvent)
+ {
+ CloseHandle(m_hShutdownEvent), m_hShutdownEvent = nullptr;
+ }
+ if(m_pDevice)
+ {
+ m_pDevice->Release(), m_pDevice = nullptr;
+ }
+ if(m_pClient)
+ {
+ m_pClient->Release(), m_pClient = nullptr;
+ }
+
+ TSK_FREE(m_ring.chunck.buffer);
+ if(m_ring.buffer){
+ speex_buffer_destroy(m_ring.buffer);
+ m_ring.buffer = nullptr;
+ }
+
+ m_callback.fn = nullptr;
+ m_callback.pcData = nullptr;
+
+ m_bPrepared = false;
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioCapture::Start()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_bStarted)
+ {
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer already started");
+ goto bail;
+ }
+ if(!m_bPrepared)
+ {
+ WASAPI_DEBUG_ERROR("Audio producer not prepared");
+ goto bail;
+ }
+
+ m_pAsyncThread = Windows::System::Threading::ThreadPool::RunAsync(ref new Windows::System::Threading::WorkItemHandler(this, &Doubango::VoIP::AudioCapture::AsyncThread),
+ Windows::System::Threading::WorkItemPriority::High,
+ Windows::System::Threading::WorkItemOptions::TimeSliced);
+
+ if((m_bStarted = (m_pAsyncThread != nullptr)))
+ {
+ HRESULT hr = m_pDevice->Start();
+ if(!SUCCEEDED(hr))
+ {
+ tdav_win32_print_error("Device::Start", hr);
+ Stop();
+ }
+ m_bPaused = false;
+ }
+
+bail:
+ tsk_mutex_unlock(m_hMutex);
+
+ return (m_bStarted ? 0 : -2);
+}
+
+int Doubango::VoIP::AudioCapture::Stop()
+{
+ m_bStarted = false;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_hShutdownEvent)
+ {
+ SetEvent(m_hShutdownEvent);
+ }
+
+ if (m_pAsyncThread)
+ {
+ m_pAsyncThread->Cancel();
+ m_pAsyncThread->Close();
+ m_pAsyncThread = nullptr;
+ }
+
+ if(m_pDevice)
+ {
+ m_pDevice->Stop();
+ }
+
+ // will be prepared again before next start()
+ UnPrepare();
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioCapture::Pause()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ m_bPaused = true;
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+void Doubango::VoIP::AudioCapture::AsyncThread(Windows::Foundation::IAsyncAction^ operation)
+{
+ HRESULT hr = S_OK;
+ BYTE* pbData = nullptr;
+ UINT32 nFrames = 0;
+ DWORD dwFlags = 0;
+ UINT32 incomingBufferSize;
+ INT32 avail;
+ UINT32 nNextPacketSize;
+
+ HANDLE eventHandles[] = {
+ m_hCaptureEvent, // WAIT_OBJECT_0
+ m_hShutdownEvent // WAIT_OBJECT_0 + 1
+ };
+
+ WASAPI_DEBUG_INFO("#WASAPI: __record_thread -- START");
+
+ #define BREAK_WHILE tsk_mutex_unlock(m_hMutex); break;
+
+ while(m_bStarted && SUCCEEDED(hr)){
+ DWORD waitResult = WaitForMultipleObjectsEx(SIZEOF_ARRAY(eventHandles), eventHandles, FALSE, INFINITE, FALSE);
+
+ tsk_mutex_lock(m_hMutex);
+
+ if(!m_bStarted){
+ BREAK_WHILE;
+ }
+
+ if(waitResult == WAIT_OBJECT_0 && m_callback.fn) {
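+ // Capture event signaled: drain every pending packet, buffer the samples, and deliver "ptime"-sized chunks to the encoder callback.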
+ hr = m_pClient->GetNextPacketSize(&nNextPacketSize);
+ while(SUCCEEDED(hr) && nNextPacketSize >0){
+ hr = m_pClient->GetBuffer(&pbData, &nFrames, &dwFlags, nullptr, nullptr);
+ if(SUCCEEDED(hr) && pbData && nFrames){
+ if((dwFlags & AUDCLNT_BUFFERFLAGS_SILENT) != AUDCLNT_BUFFERFLAGS_SILENT){
+ incomingBufferSize = nFrames * m_nSourceFrameSizeInBytes;
+ speex_buffer_write(m_ring.buffer, pbData, incomingBufferSize);
+ avail = speex_buffer_get_available(m_ring.buffer);
+ while (m_bStarted && avail >= (INT32)m_ring.chunck.size) {
+ avail -= speex_buffer_read(m_ring.buffer, m_ring.chunck.buffer, m_ring.chunck.size);
+ m_callback.fn(m_callback.pcData, m_ring.chunck.buffer, m_ring.chunck.size);
+ }
+ }
+
+ if (SUCCEEDED(hr)){
+ hr = m_pClient->ReleaseBuffer(nFrames);
+ }
+ if (SUCCEEDED(hr)){
+ hr = m_pClient->GetNextPacketSize(&nNextPacketSize);
+ }
+ }
+ }
+ }
+ else if(waitResult != WAIT_OBJECT_0){
+ BREAK_WHILE;
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+ }// end-of-while
+
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("AsyncThread: ", hr);
+ }
+
+
+ WASAPI_DEBUG_INFO("WASAPI: __record_thread(%s) -- STOP", SUCCEEDED(hr) ? "OK": "NOK");
+}
+
+
+
+
+
+
+
+//
+// WASAPI producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_wasapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_wasapi_t *wasapi = (tdav_producer_wasapi_t*)self;
+ if(wasapi){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(wasapi));
+ /* init self */
+
+ wasapi->audioCapture = ref new Doubango::VoIP::AudioCapture();
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_wasapi_dtor(tsk_object_t * self)
+{
+ tdav_producer_wasapi_t *wasapi = (tdav_producer_wasapi_t*)self;
+ if(wasapi){
+ /* stop */
+ tdav_producer_wasapi_stop((tmedia_producer_t*)self);
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(wasapi));
+ /* deinit self */
+ if(wasapi->audioCapture){
+ delete wasapi->audioCapture;
+ wasapi->audioCapture = nullptr;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_wasapi_def_s =
+{
+ sizeof(tdav_producer_wasapi_t),
+ tdav_producer_wasapi_ctor,
+ tdav_producer_wasapi_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_wasapi_plugin_def_s =
+{
+ &tdav_producer_wasapi_def_s,
+
+ tmedia_audio,
+ "Microsoft Windows Audio Session API (WASAPI) producer",
+
+ tdav_producer_wasapi_set,
+ tdav_producer_wasapi_prepare,
+ tdav_producer_wasapi_start,
+ tdav_producer_wasapi_pause,
+ tdav_producer_wasapi_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_wasapi_plugin_def_t = &tdav_producer_wasapi_plugin_def_s;
+
+
+
+
+#endif /* HAVE_WASAPI */
diff --git a/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
new file mode 100644
index 0000000..1883fa4
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
@@ -0,0 +1,402 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_waveapi.c
+ * @brief Audio Consumer for Win32 and WinCE platforms.
+ *
+ */
+#include "tinydav/audio/waveapi/tdav_consumer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_consumer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT];
+
+ waveOutGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+static int free_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_FREE(consumer->hWaveHeaders[index]->lpData);
+ TSK_FREE(consumer->hWaveHeaders[index]);
+
+ return 0;
+}
+
+static int create_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->hWaveHeaders[index]){
+ free_wavehdr(consumer, index);
+ }
+
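+ // Each WAVEHDR owns one notification-sized buffer (bytes_per_notif); dwUser stores the slot index.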
+ consumer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+ consumer->hWaveHeaders[index]->lpData = tsk_calloc(1, consumer->bytes_per_notif);
+ consumer->hWaveHeaders[index]->dwBufferLength = (DWORD)consumer->bytes_per_notif;
+ consumer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+ consumer->hWaveHeaders[index]->dwLoops = 0x01;
+ consumer->hWaveHeaders[index]->dwUser = index;
+
+ return 0;
+}
+
+static int write_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ MMRESULT result;
+
+ if(!consumer || !consumer->hWaveHeaders[index] || !consumer->hWaveOut){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveOutPrepareHeader(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutPrepareHeader");
+ return -2;
+ }
+
+ result = waveOutWrite(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutWrite");
+ return -3;
+ }
+
+ return 0;
+}
+
+static int play_wavehdr(tdav_consumer_waveapi_t* consumer, LPWAVEHDR lpHdr)
+{
+ MMRESULT result;
+ tsk_size_t out_size;
+
+ if(!consumer || !lpHdr || !consumer->hWaveOut){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveOutUnprepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutUnprepareHeader");
+ return -2;
+ }
+
+ //
+ //
+ // Fill lpHdr->Data with decoded data
+ //
+ //
+ if((out_size = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer), lpHdr->lpData, lpHdr->dwBufferLength))){
+ //memcpy(lpHdr->lpData, data, lpHdr->dwBufferLength);
+ //TSK_FREE(data);
+ }
+ else{
+ /* Put silence */
+ memset(lpHdr->lpData, 0, lpHdr->dwBufferLength);
+ }
+
+ if(!consumer->started){
+ return 0;
+ }
+
+ result = waveOutPrepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutPrepareHeader");
+ return -3;
+ }
+
+ result = waveOutWrite(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutWrite");
+ return -4;
+ }
+
+ return 0;
+}
+
+static void* TSK_STDCALL __playback_thread(void *param)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)param;
+ DWORD dwEvent;
+ tsk_size_t i;
+
+ TSK_DEBUG_INFO("__playback_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+ for(;;){
+ dwEvent = WaitForMultipleObjects(2, consumer->events, FALSE, INFINITE);
+
+ if (dwEvent == 1){
+ break;
+ }
+
+ else if (dwEvent == 0){
+ EnterCriticalSection(&consumer->cs);
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ if(consumer->hWaveHeaders[i] && (consumer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+ play_wavehdr(consumer, consumer->hWaveHeaders[i]);
+ }
+ }
+ LeaveCriticalSection(&consumer->cs);
+ }
+ }
+
+ TSK_DEBUG_INFO("__playback_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Consumer Interface ================= */
+int tdav_consumer_waveapi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ tsk_size_t i;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ /* codec should have ptime */
+
+
+ /* Format */
+ ZeroMemory(&consumer->wfx, sizeof(WAVEFORMATEX));
+ consumer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+ consumer->wfx.nChannels = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+ consumer->wfx.nSamplesPerSec = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ consumer->wfx.wBitsPerSample = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ consumer->wfx.nBlockAlign = (consumer->wfx.nChannels * consumer->wfx.wBitsPerSample/8);
+ consumer->wfx.nAvgBytesPerSec = (consumer->wfx.nSamplesPerSec * consumer->wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ consumer->bytes_per_notif = ((consumer->wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(consumer)->audio.ptime)/1000);
+
+ /* create buffers */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(consumer->hWaveHeaders[0]); i++){
+ create_wavehdr(consumer, i);
+ }
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ MMRESULT result;
+ tsk_size_t i;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->started || consumer->hWaveOut){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ /* create events */
+ if(!consumer->events[0]){
+ consumer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+ if(!consumer->events[1]){
+ consumer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+
+ /* open */
+ result = waveOutOpen((HWAVEOUT *)&consumer->hWaveOut, WAVE_MAPPER, &consumer->wfx, (DWORD_PTR)consumer->events[0], (DWORD_PTR)consumer, CALLBACK_EVENT);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutOpen");
+ return -2;
+ }
+
+ /* write */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(consumer->hWaveHeaders[0]); i++){
+ write_wavehdr(consumer, i);
+ }
+
+ /* start thread */
+ consumer->started = tsk_true;
+ tsk_thread_create(&consumer->tid[0], __playback_thread, consumer);
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+
+ if(!consumer || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* buffer is already decoded */
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+}
+
+int tdav_consumer_waveapi_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ MMRESULT result;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!consumer->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+
+ /* stop thread */
+ if(consumer->tid[0]){
+ SetEvent(consumer->events[1]);
+ tsk_thread_join(&(consumer->tid[0]));
+ }
+
+ /* should be done here */
+ consumer->started = tsk_false;
+
+ if(consumer->hWaveOut && ((result = waveOutReset(consumer->hWaveOut)) != MMSYSERR_NOERROR)){
+ print_last_error(result, "waveOutReset");
+ }
+
+ return 0;
+}
+
+
+//
+// WaveAPI consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_waveapi_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+ InitializeCriticalSection(&consumer->cs);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_waveapi_dtor(tsk_object_t * self)
+{
+ tdav_consumer_waveapi_t *consumer = self;
+ if(consumer){
+ tsk_size_t i;
+
+ /* stop */
+ if(consumer->started){
+ tdav_consumer_waveapi_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ /* deinit self */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ free_wavehdr(consumer, i);
+ }
+ if(consumer->hWaveOut){
+ waveOutClose(consumer->hWaveOut);
+ }
+ if(consumer->events[0]){
+ CloseHandle(consumer->events[0]);
+ }
+ if(consumer->events[1]){
+ CloseHandle(consumer->events[1]);
+ }
+ DeleteCriticalSection(&consumer->cs);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_waveapi_def_s =
+{
+ sizeof(tdav_consumer_waveapi_t),
+ tdav_consumer_waveapi_ctor,
+ tdav_consumer_waveapi_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_waveapi_plugin_def_s =
+{
+ &tdav_consumer_waveapi_def_s,
+
+ tmedia_audio,
+ "Microsoft WaveAPI consumer",
+
+ tdav_consumer_waveapi_set,
+ tdav_consumer_waveapi_prepare,
+ tdav_consumer_waveapi_start,
+ tdav_consumer_waveapi_consume,
+ tdav_consumer_waveapi_pause,
+ tdav_consumer_waveapi_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_waveapi_plugin_def_t = &tdav_consumer_waveapi_plugin_def_s;
+
+#endif /* HAVE_WAVE_API */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
new file mode 100644
index 0000000..d077790
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
@@ -0,0 +1,393 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_waveapi.c
+ * @brief Audio Producer for Win32 and WinCE platforms.
+ */
+#include "tinydav/audio/waveapi/tdav_producer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_producer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT];
+
+ waveInGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+static int free_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_FREE(producer->hWaveHeaders[index]->lpData);
+ TSK_FREE(producer->hWaveHeaders[index]);
+
+ return 0;
+}
+
+static int create_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->hWaveHeaders[index]){
+ free_wavehdr(producer, index);
+ }
+
+ producer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+ producer->hWaveHeaders[index]->lpData = tsk_calloc(1, producer->bytes_per_notif);
+ producer->hWaveHeaders[index]->dwBufferLength = (DWORD)producer->bytes_per_notif;
+ producer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+ producer->hWaveHeaders[index]->dwLoops = 0x01;
+ producer->hWaveHeaders[index]->dwUser = index;
+
+ return 0;
+}
+
+static int add_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ MMRESULT result;
+
+ if(!producer || !producer->hWaveHeaders[index] || !producer->hWaveIn){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveInPrepareHeader(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInPrepareHeader");
+ return -2;
+ }
+
+ result = waveInAddBuffer(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInAddBuffer");
+ return -3;
+ }
+
+ return 0;
+}
+
+static int record_wavehdr(tdav_producer_waveapi_t* producer, LPWAVEHDR lpHdr)
+{
+ MMRESULT result;
+
+ if(!producer || !lpHdr || !producer->hWaveIn){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ //
+ // Alert the session that there is new data to send over the network
+ //
+ if(TMEDIA_PRODUCER(producer)->enc_cb.callback){
+#if 0
+ {
+ static FILE* f = NULL;
+ if(!f) f = fopen("./waveapi_producer.raw", "w+");
+ fwrite(lpHdr->lpData, 1, lpHdr->dwBytesRecorded, f);
+ }
+#endif
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data, lpHdr->lpData, lpHdr->dwBytesRecorded);
+ }
+
+ if(!producer->started){
+ return 0;
+ }
+
+ result = waveInUnprepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInUnprepareHeader");
+ return -2;
+ }
+
+ result = waveInPrepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInPrepareHeader");
+ return -3;
+ }
+
+ result = waveInAddBuffer(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInAddBuffer");
+ return -4;
+ }
+
+ return 0;
+}
+
+static void* TSK_STDCALL __record_thread(void *param)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)param;
+ DWORD dwEvent;
+ tsk_size_t i;
+
+ TSK_DEBUG_INFO("__record_thread -- START");
+
+ // SetPriorityClass(GetCurrentThread(), REALTIME_PRIORITY_CLASS);
+
+ for(;;){
+ dwEvent = WaitForMultipleObjects(2, producer->events, FALSE, INFINITE);
+
+ if (dwEvent == 1){
+ break;
+ }
+
+ else if (dwEvent == 0){
+ EnterCriticalSection(&producer->cs);
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(producer->hWaveHeaders[0]); i++){
+ if(producer->hWaveHeaders[i] && (producer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+ record_wavehdr(producer, producer->hWaveHeaders[i]);
+ }
+ }
+ LeaveCriticalSection(&producer->cs);
+ }
+ }
+
+ TSK_DEBUG_INFO("__record_thread() -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Producer Interface ================= */
+int tdav_producer_waveapi_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ tsk_size_t i;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+ /* codec should have ptime */
+
+
+ /* Format */
+ ZeroMemory(&producer->wfx, sizeof(WAVEFORMATEX));
+ producer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+ producer->wfx.nChannels = TMEDIA_PRODUCER(producer)->audio.channels;
+ producer->wfx.nSamplesPerSec = TMEDIA_PRODUCER(producer)->audio.rate;
+ producer->wfx.wBitsPerSample = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ producer->wfx.nBlockAlign = (producer->wfx.nChannels * producer->wfx.wBitsPerSample/8);
+ producer->wfx.nAvgBytesPerSec = (producer->wfx.nSamplesPerSec * producer->wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ producer->bytes_per_notif = ((producer->wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(producer)->audio.ptime)/1000);
+
+ /* create buffers */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(producer->hWaveHeaders[0]); i++){
+ create_wavehdr(producer, i);
+ }
+
+ return 0;
+}
+
+int tdav_producer_waveapi_start(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ MMRESULT result;
+ tsk_size_t i;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->started || producer->hWaveIn){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ /* create events */
+ if(!producer->events[0]){
+ producer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+ if(!producer->events[1]){
+ producer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+
+ /* open */
+ result = waveInOpen((HWAVEIN *)&producer->hWaveIn, /*WAVE_MAPPER*/0, &producer->wfx, (DWORD_PTR)producer->events[0], (DWORD_PTR)producer, CALLBACK_EVENT);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInOpen");
+ return -2;
+ }
+
+ /* start */
+ result = waveInStart(producer->hWaveIn);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInStart");
+ return -2;
+ }
+
+ /* write */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ add_wavehdr(producer, i);
+ }
+
+ /* start thread */
+ producer->started = tsk_true;
+ tsk_thread_create(&producer->tid[0], __record_thread, producer);
+
+ return 0;
+}
+
+int tdav_producer_waveapi_pause(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+int tdav_producer_waveapi_stop(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ MMRESULT result;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!producer->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ /* stop thread */
+ if(producer->tid[0]){
+ SetEvent(producer->events[1]);
+ tsk_thread_join(&(producer->tid[0]));
+ }
+
+ /* should be done here */
+ producer->started = tsk_false;
+
+ if(producer->hWaveIn && (((result = waveInReset(producer->hWaveIn)) != MMSYSERR_NOERROR) || ((result = waveInClose(producer->hWaveIn)) != MMSYSERR_NOERROR))){
+ print_last_error(result, "waveInReset/waveInClose");
+ }
+
+ return 0;
+}
+
+
+//
+// WaveAPI producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_waveapi_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ InitializeCriticalSection(&producer->cs);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_waveapi_dtor(tsk_object_t * self)
+{
+ tdav_producer_waveapi_t *producer = self;
+ if(producer){
+ tsk_size_t i;
+
+ /* stop */
+ if(producer->started){
+ tdav_producer_waveapi_stop(self);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+ /* deinit self */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ free_wavehdr(producer, i);
+ }
+ if(producer->hWaveIn){
+ waveInClose(producer->hWaveIn);
+ }
+ if(producer->events[0]){
+ CloseHandle(producer->events[0]);
+ }
+ if(producer->events[1]){
+ CloseHandle(producer->events[1]);
+ }
+ DeleteCriticalSection(&producer->cs);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_waveapi_def_s =
+{
+ sizeof(tdav_producer_waveapi_t),
+ tdav_producer_waveapi_ctor,
+ tdav_producer_waveapi_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_waveapi_plugin_def_s =
+{
+ &tdav_producer_waveapi_def_s,
+
+ tmedia_audio,
+ "Microsoft WaveAPI producer",
+
+ tdav_producer_waveapi_set,
+ tdav_producer_waveapi_prepare,
+ tdav_producer_waveapi_start,
+ tdav_producer_waveapi_pause,
+ tdav_producer_waveapi_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_waveapi_plugin_def_t = &tdav_producer_waveapi_plugin_def_s;
+
+#endif /* HAVE_WAVE_API */ \ No newline at end of file
diff --git a/tinyDAV/src/bfcp/tdav_session_bfcp.c b/tinyDAV/src/bfcp/tdav_session_bfcp.c
new file mode 100644
index 0000000..07e770b
--- /dev/null
+++ b/tinyDAV/src/bfcp/tdav_session_bfcp.c
@@ -0,0 +1,741 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_bfcp.c
+ * @brief The Binary Floor Control Protocol (BFCP, RFC 4582) session.
+ */
+#include "tinydav/bfcp/tdav_session_bfcp.h"
+
+#if !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP
+
+#include "tinybfcp/tbfcp_session.h"
+#include "tinybfcp/tbfcp_pkt.h"
+#include "tinybfcp/tbfcp_utils.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h" /* TSK_FREE */
+#include "tsk_debug.h"
+
+/*
+ * https://tools.ietf.org/html/rfc4574
+ * https://tools.ietf.org/html/rfc4582
+ * https://tools.ietf.org/html/rfc4583
+ * http://tools.ietf.org/html/rfc4796
+ * https://tools.ietf.org/html/draft-ietf-bfcpbis-rfc4582bis-1
+*/
+
+typedef struct tdav_session_bfcp_s
+{
+ TMEDIA_DECLARE_SESSION_BFCP;
+
+ struct tbfcp_session_s* p_bfcp_s;
+ struct tbfcp_pkt_s* p_pkt_FloorRequest;
+ struct tbfcp_pkt_s* p_pkt_FloorRelease;
+ struct tbfcp_pkt_s* p_pkt_Hello;
+
+ tsk_bool_t b_started;
+ tsk_bool_t b_use_ipv6;
+ tsk_bool_t b_revoked_handled;
+ tsk_bool_t b_conf_idf_changed;
+ tsk_bool_t b_stop_to_reconf;
+
+ char* p_local_ip;
+ //uint16_t local_port;
+
+ /* NAT Traversal context */
+ struct tnet_nat_ctx_s* p_natt_ctx;
+
+ char* p_remote_ip;
+ uint16_t u_remote_port;
+
+ // https://tools.ietf.org/html/rfc4583 attributes
+ struct {
+ char* confid;
+ char* floorid;
+ char* mstrm;
+ char* userid;
+ } rfc4583;
+}
+tdav_session_bfcp_t;
+
+static int _tdav_session_bfcp_notif(const struct tbfcp_session_event_xs *e);
+static int _tdav_session_bfcp_send_Hello(tdav_session_bfcp_t* p_bfcp);
+
+/* ============ Plugin interface ================= */
+
+static int _tdav_session_bfcp_set(tmedia_session_t* p_self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("tdav_session_bfcp_set");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (param->value_type == tmedia_pvt_pchar) {
+ if (tsk_striequals(param->key, "remote-ip")) {
+ // only if no ip associated to the "m=" line
+ if (param->value && !p_bfcp->p_remote_ip) {
+ p_bfcp->p_remote_ip = tsk_strdup(param->value);
+ }
+ }
+ else if (tsk_striequals(param->key, "local-ip")) {
+ tsk_strupdate(&p_bfcp->p_local_ip, param->value);
+ }
+ else if (tsk_striequals(param->key, "local-ipver")) {
+ p_bfcp->b_use_ipv6 = tsk_striequals(param->value, "ipv6");
+ }
+ }
+ else if (param->value_type == tmedia_pvt_pobject) {
+ if (tsk_striequals(param->key, "natt-ctx")) {
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_natt_ctx);
+ p_bfcp->p_natt_ctx = tsk_object_ref(param->value);
+ }
+ }
+ else if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "stop-to-reconf")) {
+ p_bfcp->b_stop_to_reconf = TSK_TO_INT32((uint8_t*)param->value) ? tsk_true : tsk_false;
+ }
+ }
+
+
+ return ret;
+}
+
+static int _tdav_session_bfcp_get(tmedia_session_t* p_self, tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_get");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ return ret;
+}
+
+static int _tdav_session_bfcp_prepare(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_prepare");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (!p_bfcp->p_bfcp_s) {
+ enum tnet_socket_type_e e_socket_type = kBfcpTransportDefault;
+ if ((ret = tbfcp_session_create(e_socket_type, p_bfcp->p_local_ip, &p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+ }
+ if ((ret = tbfcp_session_set_natt_ctx(p_bfcp->p_bfcp_s, p_bfcp->p_natt_ctx))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_prepare(p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_set_callback(p_bfcp->p_bfcp_s, _tdav_session_bfcp_notif, p_bfcp))) {
+ return ret;
+ }
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_Hello);
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRequest);
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRelease);
+ p_bfcp->b_revoked_handled = tsk_false;
+
+ return ret;
+}
+
+static int _tdav_session_bfcp_start(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_start");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if ((ret = tbfcp_session_set_remote_address(p_bfcp->p_bfcp_s, p_bfcp->p_remote_ip, p_bfcp->u_remote_port))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_start(p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+ if ((ret = _tdav_session_bfcp_send_Hello(p_bfcp))) {
+ return ret;
+ }
+
+ p_bfcp->b_started = tsk_true;
+
+ return ret;
+}
+
+static int _tdav_session_bfcp_pause(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_pause");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (p_bfcp->p_bfcp_s && (ret = tbfcp_session_pause(p_bfcp->p_bfcp_s))) {
+ return ret;
+ }
+
+ return ret;
+}
+
+static int _tdav_session_bfcp_stop(tmedia_session_t* p_self)
+{
+ int ret = 0;
+ tdav_session_bfcp_t* p_bfcp;
+
+ if (!p_self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_stop");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ if (!p_bfcp->b_stop_to_reconf) { // If stop-to-reconf then do not release the FloorRequest but reuse it
+ if (p_bfcp->b_started) {
+ /*if (p_bfcp->p_bfcp_s)*/ {
+ /*if (!p_bfcp->p_pkt_FloorRelease) {
+ ret = tbfcp_session_create_pkt_FloorRelease(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_FloorRelease);
+ }
+ if (ret == 0 && p_bfcp->p_pkt_FloorRelease && (ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_FloorRelease))) {
+ //!\ do not exit
+ }*/
+ }
+ }
+ tsk_strupdate(&p_bfcp->rfc4583.confid, "");
+ }
+
+ if (p_bfcp->p_bfcp_s) {
+ ret = tbfcp_session_stop(p_bfcp->p_bfcp_s);
+ }
+
+ p_bfcp->b_started = tsk_false;
+ p_bfcp->b_stop_to_reconf = tsk_false; // reset
+
+ return ret;
+}
+
+static const tsdp_header_M_t* _tdav_session_bfcp_get_lo(tmedia_session_t* p_self)
+{
+ tdav_session_bfcp_t* p_bfcp;
+ tsk_bool_t b_changed = tsk_false;
+ const char *pc_local_ip, *pc_local_profile, *pc_local_role, *pc_local_setup;
+ tnet_port_t u_local_port;
+ enum tbfcp_role_e e_local_role;
+ enum tbfcp_setup_e e_local_setup;
+ int ret;
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_get_lo");
+
+ if (!p_self || !p_self->plugin) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ b_changed = (p_self->ro_changed || !p_self->M.lo);
+
+ if (!b_changed) {
+ TSK_DEBUG_INFO("No changes to the BFCP session...skip SDP update");
+ return p_self->M.lo;
+ }
+
+ if (b_changed && p_self->M.lo) {
+ static const char* __fields[] = { "floorctrl", "setup", "connection", "curr", "des", "conf" };
+ // remove media-level attributes
+ tsdp_header_A_removeAll_by_fields(p_self->M.lo->Attributes, __fields, sizeof(__fields)/sizeof(__fields[0]));
+ // The codec list never changes and the FMTs are always a single star (*) value. The Radvision TelePresence System rejects a BFCP session without the single FMT (*).
+ // The Codecs and formats are never rebuilt which means we must not clear them
+#if 0
+ tsk_list_clear_items(p_self->M.lo->FMTs);
+#endif
+ }
+
+ // get local address
+ if ((ret = tbfcp_session_get_local_address(p_bfcp->p_bfcp_s, &pc_local_ip, &u_local_port))) {
+ TSK_DEBUG_ERROR("Failed to get local address from BFCP session");
+ return tsk_null;
+ }
+ // get local profile
+ if ((ret = tbfcp_session_get_profile(p_bfcp->p_bfcp_s, &pc_local_profile))) {
+ TSK_DEBUG_ERROR("Failed to get local profile from BFCP session");
+ return tsk_null;
+ }
+ // get local role
+ if ((ret = tbfcp_session_get_local_role(p_bfcp->p_bfcp_s, &e_local_role))) {
+ TSK_DEBUG_ERROR("Failed to get local role from BFCP session");
+ return tsk_null;
+ }
+ if ((ret = tbfcp_utils_get_role(e_local_role, &pc_local_role))) {
+ return tsk_null;
+ }
+ // get local setup
+ if ((ret = tbfcp_session_get_local_setup(p_bfcp->p_bfcp_s, &e_local_setup))) {
+ TSK_DEBUG_ERROR("Failed to get local setup from BFCP session");
+ return tsk_null;
+ }
+ if ((ret = tbfcp_utils_get_setup(e_local_setup, &pc_local_setup))) {
+ return tsk_null;
+ }
+
+ if (!p_self->M.lo){
+ if (!(p_self->M.lo = tsdp_header_M_create(p_self->plugin->media, u_local_port, pc_local_profile))) {
+ TSK_DEBUG_ERROR("Failed to create BFCP SDP media header");
+ return tsk_null;
+ }
+ tsdp_header_M_add_headers(p_self->M.lo,
+ TSDP_FMT_VA_ARGS("*"),
+ TSDP_HEADER_A_VA_ARGS("connection", "new"),
+ TSDP_HEADER_A_VA_ARGS("lib", "tinyBFCP"),
+ tsk_null);
+ // If NATT is active, do not rely on the global IP address Connection line
+ if (p_bfcp->p_natt_ctx) {
+ tsdp_header_M_add_headers(p_self->M.lo,
+ TSDP_HEADER_C_VA_ARGS("IN", p_bfcp->b_use_ipv6 ? "IP6" : "IP4", pc_local_ip),
+ tsk_null);
+ }
+ }
+ else {
+ p_self->M.lo->port = u_local_port;
+ tsk_strupdate(&p_self->M.lo->proto, pc_local_profile);
+ }
+
+ // add "floorctrl" and "setup" attributes
+ tsdp_header_M_add_headers(p_self->M.lo,
+ TSDP_HEADER_A_VA_ARGS("connection", "new"),
+ TSDP_HEADER_A_VA_ARGS("floorctrl", pc_local_role),
+ TSDP_HEADER_A_VA_ARGS("setup", pc_local_setup),
+ tsk_null);
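+ /* For illustration (hypothetical values; the actual profile, port and setup depend on the
+ tinyBFCP session configuration), the resulting local media description typically looks like:
+ m=application 50000 UDP/BFCP *
+ a=connection:new
+ a=floorctrl:c-only
+ a=setup:active
+ */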
+
+ return p_self->M.lo;
+}
+
+static int _tdav_session_bfcp_set_ro(tmedia_session_t* p_self, const tsdp_header_M_t* m)
+{
+ int ret = 0;
+ const tsdp_header_A_t* A;
+ tdav_session_bfcp_t* p_bfcp;
+ enum tbfcp_role_e e_remote_role = tbfcp_role_c_s;
+ uint32_t u_remote_conf_id = 0xFFFF;
+ uint16_t u_remote_user_id = 0xFFFF, u_remote_floor_id = 0xFFFF;
+
+ if (!p_self || !m) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_DEBUG_INFO("_tdav_session_bfcp_set_ro");
+
+ p_bfcp = (tdav_session_bfcp_t*)p_self;
+
+ /* update remote offer */
+ TSK_OBJECT_SAFE_FREE(p_self->M.ro);
+ p_self->M.ro = tsk_object_ref(TSK_OBJECT(m));
+
+
+ // https://tools.ietf.org/html/rfc4583
+ {
+ p_bfcp->b_conf_idf_changed = tsk_false;
+ if ((A = tsdp_header_M_findA(m, "floorctrl"))) {
+ if ((ret = tbfcp_utils_parse_role(A->value, &e_remote_role))) {
+ return ret;
+ }
+ }
+ if ((A = tsdp_header_M_findA(m, "confid"))) {
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.confid, A->value);
+ tsk_strupdate(&p_bfcp->rfc4583.confid, A->value);
+ u_remote_conf_id = (uint32_t)tsk_atoi64(p_bfcp->rfc4583.confid);
+ }
+ if ((A = tsdp_header_M_findA(m, "userid"))) {
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.userid, A->value);
+ tsk_strupdate(&p_bfcp->rfc4583.userid, A->value);
+ u_remote_user_id = (uint16_t)tsk_atoi64(p_bfcp->rfc4583.userid);
+ }
+ if ((A = tsdp_header_M_findA(m, "floorid"))) {
+ char tmp_str[256];
+ if (sscanf(A->value, "%255s %*s", tmp_str) != EOF) {
+ char *pch, *saveptr;
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.floorid, tmp_str);
+ tsk_strupdate(&p_bfcp->rfc4583.floorid, tmp_str);
+ u_remote_floor_id = (uint16_t)tsk_atoi64(p_bfcp->rfc4583.floorid);
+ pch = tsk_strtok_r(&A->value[tsk_strlen(tmp_str) + 1], " ", &saveptr);
+ while (pch) {
+ if (sscanf(pch, "mstrm: %255s", tmp_str) != EOF) {
+ p_bfcp->b_conf_idf_changed |= !tsk_striequals(p_bfcp->rfc4583.mstrm, tmp_str);
+ tsk_strupdate(&p_bfcp->rfc4583.mstrm, tmp_str);
+ break;
+ }
+ pch = tsk_strtok_r(tsk_null, " ", &saveptr);
+ }
+ }
+ }
+ // set remote role
+ if ((ret = tbfcp_session_set_remote_role(p_bfcp->p_bfcp_s, e_remote_role))) {
+ return ret;
+ }
+ if ((e_remote_role & tbfcp_role_s_only)) {
+ // local = client
+ if ((ret = tbfcp_session_set_conf_ids(p_bfcp->p_bfcp_s, u_remote_conf_id, u_remote_user_id, u_remote_floor_id))) {
+ return ret;
+ }
+ }
+ else {
+ // local = remote: Not supported yet and will never happen
+ }
+ }//end-of-rfc4583
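+ /* For illustration (hypothetical values, layout per RFC 4583), a remote offer parsed by the
+ block above typically looks like:
+ m=application 50000 TCP/BFCP *
+ a=setup:passive
+ a=connection:new
+ a=floorctrl:s-only
+ a=confid:4321
+ a=userid:1234
+ a=floorid:1 mstrm:10
+ "floorctrl:s-only" makes the remote side the floor control server, so the local side acts as
+ client and uses confid/userid/floorid when building FloorRequests. */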
+
+ /* get connection associated to this media line
+ * If the connection is global, then the manager will call _tdav_session_bfcp_set() */
+ if (m->C && m->C->addr) {
+ tsk_strupdate(&p_bfcp->p_remote_ip, m->C->addr);
+ p_bfcp->b_use_ipv6 = tsk_striequals(m->C->addrtype, "IP6");
+ }
+ /* set remote port */
+ p_bfcp->u_remote_port = m->port;
+
+ return ret;
+}
+
+static int _tdav_session_bfcp_send_Hello(tdav_session_bfcp_t* p_bfcp)
+{
+ int ret = 0;
+ if (!p_bfcp) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!p_bfcp->p_pkt_Hello && (ret = tbfcp_session_create_pkt_Hello(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_Hello))) {
+ return ret;
+ }
+ if ((ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_Hello))) {
+ return ret;
+ }
+ return ret;
+}
+
+static int _tdav_session_bfcp_notif(const struct tbfcp_session_event_xs *e)
+{
+ tdav_session_bfcp_t* p_bfcp = tsk_object_ref(TSK_OBJECT(e->pc_usr_data));
+ int ret = 0;
+ static const char* kErrTextGlobalError = "Global error";
+ static const int kErrCodeGlobalError = -56;
+ static const char* kErrTextTimeout = "Timeout";
+ static const int kErrCodeTimeout = -57;
+ static const char* kErrTextUnExpectedIncomingMsg = "Unexpected incoming BFCP message";
+ static const int kErrCodeUnExpectedIncomingMsg = -58;
+ static const char* kErrTextBadRequest = "Bad Request";
+ static const int kErrCodeBadRequest = -59;
+ static const char* kInfoTextFloorReqStatus = "FloorRequestStatus";
+
+#define _RAISE_ERR_AND_GOTO_BAIL(_code, _reason) \
+ if (TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun) { \
+ tmedia_session_bfcp_evt_xt e; \
+ e.type = tmedia_session_bfcp_evt_type_err; e.err.code = _code; e.reason = _reason; \
+ TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun(TMEDIA_SESSION(p_bfcp)->bfcp_cb.usrdata, TMEDIA_SESSION(p_bfcp), &e); \
+ } \
+ ret = _code; goto bail;
+#define _RAISE_FLREQ(_status, _reason) \
+ if (TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun) { \
+ tmedia_session_bfcp_evt_xt e; \
+ e.type = tmedia_session_bfcp_evt_type_flreq_status; e.flreq.status = _status; e.reason = _reason; \
+ TMEDIA_SESSION(p_bfcp)->bfcp_cb.fun(TMEDIA_SESSION(p_bfcp)->bfcp_cb.usrdata, TMEDIA_SESSION(p_bfcp), &e); \
+ } \
+
+ switch (e->e_type) {
+ case tbfcp_session_event_type_inf_inc_msg:
+ {
+ if (p_bfcp->p_pkt_Hello && p_bfcp->p_pkt_Hello->hdr.transac_id == e->pc_pkt->hdr.transac_id && p_bfcp->p_pkt_Hello->hdr.user_id == e->pc_pkt->hdr.user_id && p_bfcp->p_pkt_Hello->hdr.conf_id == e->pc_pkt->hdr.conf_id) {
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_Hello);
+ if (e->pc_pkt->hdr.primitive == tbfcp_primitive_HelloAck) {
+ if (!p_bfcp->p_pkt_FloorRequest) {
+ if (p_bfcp->b_conf_idf_changed || 0) {
+ // Create the "FloorRelease" for this "FloorRequest"
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRelease);
+ if ((ret = tbfcp_session_create_pkt_FloorRelease(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_FloorRelease))) {
+ goto raise_err;
+ }
+ if ((ret = tbfcp_session_create_pkt_FloorRequest(p_bfcp->p_bfcp_s, &p_bfcp->p_pkt_FloorRequest))) {
+ goto raise_err;
+ }
+ if ((ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_FloorRequest))) {
+ goto raise_err;
+ }
+ }
+ else {
+ TSK_DEBUG_INFO("No change to BFCP session... do not send FloorRequest");
+ }
+ }
+ }
+ else {
+ TSK_DEBUG_ERROR("%s", kErrTextUnExpectedIncomingMsg);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeUnExpectedIncomingMsg, kErrTextUnExpectedIncomingMsg);
+ }
+ }
+ else if(p_bfcp->p_pkt_FloorRequest /*&& p_bfcp->p_pkt_FloorRequest->hdr.transac_id == e->pc_pkt->hdr.transac_id*/ && p_bfcp->p_pkt_FloorRequest->hdr.user_id == e->pc_pkt->hdr.user_id && p_bfcp->p_pkt_FloorRequest->hdr.conf_id == e->pc_pkt->hdr.conf_id) {
+ tsk_bool_t transac_id_matched = (p_bfcp->p_pkt_FloorRequest->hdr.transac_id == e->pc_pkt->hdr.transac_id);
+ if (e->pc_pkt->hdr.primitive == tbfcp_primitive_FloorRequestStatus || e->pc_pkt->hdr.primitive == tbfcp_primitive_FloorStatus) {
+ tsk_size_t u_index0, u_index1, u_index2, u_index3;
+ const tbfcp_attr_grouped_t *pc_attr_FloorRequestInformation = tsk_null,
+ *pc_attr_FloorRequestStatus = tsk_null,
+ *pc_attr_OverallRequestStatus = tsk_null;
+ const tbfcp_attr_octetstring16_t *pc_attr_RequestStatus = tsk_null;
+
+ u_index0 = 0;
+ // Find "FloorRequestInformation"
+ while ((ret = tbfcp_pkt_attr_find_at(e->pc_pkt, tbfcp_attribute_format_Grouped, u_index0++, (const tbfcp_attr_t **)&pc_attr_FloorRequestInformation)) == 0 && pc_attr_FloorRequestInformation) {
+ if (TBFCP_ATTR(pc_attr_FloorRequestInformation)->hdr.type != tbfcp_attribute_type_FLOOR_REQUEST_INFORMATION) {
+ continue;
+ }
+ // Find "FloorRequestStatus"
+ u_index1 = 0;
+ while ((ret = tbfcp_attr_grouped_find_at(pc_attr_FloorRequestInformation, tbfcp_attribute_format_Grouped, u_index1++, (const tbfcp_attr_t **)&pc_attr_FloorRequestStatus)) == 0 && pc_attr_FloorRequestStatus) {
+ if (TBFCP_ATTR(pc_attr_FloorRequestStatus)->hdr.type != tbfcp_attribute_type_FLOOR_REQUEST_STATUS) {
+ continue;
+ }
+ if (pc_attr_FloorRequestStatus->extra_hdr.FloorID != atoi(p_bfcp->rfc4583.floorid)) {
+ continue;
+ }
+ break;
+ }
+ if (!pc_attr_FloorRequestStatus) {
+ continue;
+ }
+ // Find "OverallRequestStatus"
+ u_index2 = 0;
+ while ((ret = tbfcp_attr_grouped_find_at(pc_attr_FloorRequestInformation, tbfcp_attribute_format_Grouped, u_index2++, (const tbfcp_attr_t **)&pc_attr_OverallRequestStatus)) == 0 && pc_attr_OverallRequestStatus) {
+ if (TBFCP_ATTR(pc_attr_OverallRequestStatus)->hdr.type != tbfcp_attribute_type_OVERALL_REQUEST_STATUS) {
+ continue;
+ }
+
+ // Find "RequestStatus"
+ u_index3 = 0;
+ while ((ret = tbfcp_attr_grouped_find_at(pc_attr_OverallRequestStatus, tbfcp_attribute_format_OctetString16, u_index3++, (const tbfcp_attr_t **)&pc_attr_RequestStatus)) == 0 && pc_attr_RequestStatus) {
+ if (TBFCP_ATTR(pc_attr_RequestStatus)->hdr.type != tbfcp_attribute_type_REQUEST_STATUS) {
+ continue;
+ }
+ break;
+ }
+ }
+ if (pc_attr_RequestStatus) {
+ break;
+ }
+ }
+
+ if (pc_attr_RequestStatus) {
+ // https://tools.ietf.org/html/rfc4582#section-5.2.5
+ uint16_t u_status = pc_attr_RequestStatus->OctetString16[0] + (pc_attr_RequestStatus->OctetString16[1] << 8);
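+ // Per RFC 4582 section 5.2.5 (informative): octet 0 of REQUEST-STATUS is the Request Status
+ // value (1=Pending, 2=Accepted, 3=Granted, 4=Denied, 5=Cancelled, 6=Released, 7=Revoked) and
+ // octet 1 is the Queue Position; both are packed into u_status with the status in the low byte.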
+ if (transac_id_matched) {
+ if (u_status == tbfcp_reqstatus_Revoked && !p_bfcp->b_revoked_handled) { // revoked
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRequest); // free the FloorRequest and ask new one once HelloAck is received
+ // Radvision sends a Revoke after a reINVITE to ask for new negotiation.
+ if (p_bfcp->p_pkt_FloorRelease) {
+ if ((ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_bfcp->p_pkt_FloorRelease))) {
+ goto raise_err;
+ }
+ }
+ if ((ret = _tdav_session_bfcp_send_Hello(p_bfcp))) {
+ goto raise_err;
+ }
+ p_bfcp->b_revoked_handled = tsk_true;
+ }
+ else {
+ _RAISE_FLREQ(u_status, kInfoTextFloorReqStatus);
+ }
+ }
+ else { //!transac_id_matched
+ // Status from old FloorRequest
+ tbfcp_pkt_t* p_pkt = tsk_null;
+ TSK_DEBUG_INFO("Status from old Request");
+ if (u_status == tbfcp_reqstatus_Pending || u_status == tbfcp_reqstatus_Accepted || u_status == tbfcp_reqstatus_Granted) {
+ if ((ret = tbfcp_pkt_create_FloorRelease_2(e->pc_pkt->hdr.conf_id, e->pc_pkt->hdr.transac_id, e->pc_pkt->hdr.user_id, pc_attr_FloorRequestStatus->extra_hdr.FloorID, &p_pkt))) {
+ goto raise_err;
+ }
+ ret = tbfcp_session_send_pkt(p_bfcp->p_bfcp_s, p_pkt);
+ TSK_OBJECT_SAFE_FREE(p_pkt);
+ if (ret) {
+ goto raise_err;
+ }
+ }
+ }
+ }
+ else {
+ /* /!\ No RequestStatus attribute in FloorRequestStatus */
+ TSK_OBJECT_SAFE_FREE(p_bfcp->p_pkt_FloorRequest);
+ TSK_DEBUG_ERROR("%s", kErrTextBadRequest);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeBadRequest, kErrTextBadRequest);
+ }
+ }
+ else {
+ switch (e->pc_pkt->hdr.primitive) {
+ case tbfcp_primitive_Hello: break; // already handled in "_tbfcp_session_process_incoming_pkt()"
+ default:
+ {
+ TSK_DEBUG_ERROR("%s", kErrTextUnExpectedIncomingMsg);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeUnExpectedIncomingMsg, kErrTextUnExpectedIncomingMsg);
+ break;
+ }
+ }
+ }
+ }
+ break;
+ }
+ case tbfcp_session_event_type_err_send_timedout:
+ {
+ /* /!\ Sending BFCP message timedout */
+ TSK_DEBUG_ERROR("%s", kErrTextTimeout);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeTimeout, kErrTextTimeout);
+ break;
+ }
+ }
+raise_err:
+ if (ret) {
+ TSK_DEBUG_ERROR("%s", kErrTextGlobalError);
+ _RAISE_ERR_AND_GOTO_BAIL(kErrCodeGlobalError, kErrTextGlobalError);
+ }
+bail:
+
+ TSK_OBJECT_SAFE_FREE(p_bfcp);
+ return ret;
+}
+
+
+/* ============ Public functions ================= */
+
+
+
+
+
+//=================================================================================================
+// Session BFCP Plugin object definition
+//
+/* constructor */
+static tsk_object_t* _tdav_session_bfcp_ctor(tsk_object_t * p_self, va_list * app)
+{
+ tdav_session_bfcp_t *p_session = (tdav_session_bfcp_t *)p_self;
+ if (p_session) {
+ /* init base: called by tmedia_session_create() */
+ /* init self */
+ // TMEDIA_SESSION_BFCP(session)->send_file = tdav_session_bfcp_send_file;
+ // TMEDIA_SESSION_BFCP(session)->send_message = tdav_session_bfcp_send_message;
+
+ // session->config = tbfcp_config_create();
+ // session->setup = bfcp_setup_actpass;
+ // session->dir = tdav_bfcp_dir_none;
+ }
+ return p_self;
+}
+/* destructor */
+static tsk_object_t* _tdav_session_bfcp_dtor(tsk_object_t * p_self)
+{
+ tdav_session_bfcp_t *p_session = (tdav_session_bfcp_t *)p_self;
+ if (p_session) {
+ /* deinit self */
+
+ TSK_OBJECT_SAFE_FREE(p_session->p_bfcp_s);
+ TSK_OBJECT_SAFE_FREE(p_session->p_pkt_FloorRequest);
+ TSK_OBJECT_SAFE_FREE(p_session->p_pkt_FloorRelease);
+ TSK_OBJECT_SAFE_FREE(p_session->p_pkt_Hello);
+
+ TSK_FREE(p_session->p_local_ip);
+ TSK_FREE(p_session->p_remote_ip);
+
+ /* rfc4583 */
+ TSK_FREE(p_session->rfc4583.confid);
+ TSK_FREE(p_session->rfc4583.floorid);
+ TSK_FREE(p_session->rfc4583.mstrm);
+ TSK_FREE(p_session->rfc4583.userid);
+
+ /* NAT Traversal context */
+ TSK_OBJECT_SAFE_FREE(p_session->p_natt_ctx);
+
+ /* deinit base */
+ tmedia_session_deinit(p_self);
+
+ TSK_DEBUG_INFO("*** tdav_session_bfcp_t destroyed ***");
+ }
+
+ return p_self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_session_bfcp_def_s =
+{
+ sizeof(tdav_session_bfcp_t),
+ _tdav_session_bfcp_ctor,
+ _tdav_session_bfcp_dtor,
+ tmedia_session_cmp,
+};
+/* plugin definition*/
+static const tmedia_session_plugin_def_t tdav_session_bfcp_plugin_def_s =
+{
+ &tdav_session_bfcp_def_s,
+
+ tmedia_bfcp,
+ "application",
+
+ _tdav_session_bfcp_set,
+ _tdav_session_bfcp_get,
+ _tdav_session_bfcp_prepare,
+ _tdav_session_bfcp_start,
+ _tdav_session_bfcp_pause,
+ _tdav_session_bfcp_stop,
+
+ /* Audio part */
+ { tsk_null },
+
+ _tdav_session_bfcp_get_lo,
+ _tdav_session_bfcp_set_ro
+};
+const tmedia_session_plugin_def_t *tdav_session_bfcp_plugin_def_t = &tdav_session_bfcp_plugin_def_s;
+
+#endif /* !defined(HAVE_TINYBFCP) || HAVE_TINYBFCP */
\ No newline at end of file
diff --git a/tinyDAV/src/codecs/amr/tdav_codec_amr.c b/tinyDAV/src/codecs/amr/tdav_codec_amr.c
new file mode 100644
index 0000000..9304f85
--- /dev/null
+++ b/tinyDAV/src/codecs/amr/tdav_codec_amr.c
@@ -0,0 +1,816 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_amr.c
+ * @brief AMR-NB and AMR-WB codecs.
+ * RTP payloader/depayloader are based on RFC 4867
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/amr/tdav_codec_amr.h"
+
+#include "tsk_params.h"
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <stdlib.h> /* atoi() */
+
+#if HAVE_OPENCORE_AMR
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "..\\thirdparties\\win32\\lib\\opencore\\libopencore-amrnb.a")
+#endif
+
+#define NO_DATA 15
+#define DEFAULT_ENC_MODE ((enum Mode)MR122) /* Higher, could be changed by remote party by using CMR */
+
+/* From WmfDecBytesPerFrame in dec_input_format_tab.cpp */
+static const int tdav_codec_amr_nb_sizes[] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 6, 5, 5, 0, 0, 0, 0 };
+/* From pvamrwbdecoder_api.h, by dividing by 8 and rounding up */
+static const int tdav_codec_amr_wb_sizes[] = { 17, 23, 32, 36, 40, 46, 50, 58, 60, 5, -1, -1, -1, -1, -1, -1 };
+
+/* ============ Common ================= */
+static int tdav_codec_amr_init(tdav_codec_amr_t* self, tdav_codec_amr_type_t type, tdav_codec_amr_mode_t mode);
+static int tdav_codec_amr_deinit(tdav_codec_amr_t* self);
+static tdav_codec_amr_mode_t tdav_codec_amr_get_mode(const char* fmtp);
+static int tdav_codec_amr_parse_fmtp(tdav_codec_amr_t* self, const char* fmtp);
+static tsk_size_t tdav_codec_amr_oa_decode(tdav_codec_amr_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr);
+static tsk_size_t tdav_codec_amr_be_decode(tdav_codec_amr_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr);
+static tsk_size_t tdav_codec_amr_be_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size);
+static tsk_size_t tdav_codec_amr_oa_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size);
+static uint8_t tdav_codec_amr_bitbuffer_read(const void* bits, tsk_size_t size, tsk_size_t start, tsk_size_t count);
+
+/* ============ AMR-NB Plugin interface =================
+ The AMR codec was originally developed and standardized by the
+ European Telecommunications Standards Institute (ETSI) for GSM
+ cellular systems. It is now chosen by the Third Generation
+ Partnership Project (3GPP) as the mandatory codec for third
+ generation (3G) cellular systems [1].
+
+ The AMR codec is a multi-mode codec that supports eight narrow band
+ speech encoding modes with bit rates between 4.75 and 12.2 kbps. The
+ sampling frequency used in AMR is 8000 Hz and the speech encoding is
+ performed on 20 ms speech frames. Therefore, each encoded AMR speech
+ frame represents 160 samples of the original speech.
+
+ Among the eight AMR encoding modes, three are already separately
+ adopted as standards of their own. Particularly, the 6.7 kbps mode
+ is adopted as PDC-EFR [18], the 7.4 kbps mode as IS-641 codec in TDMA
+ [17], and the 12.2 kbps mode as GSM-EFR [16].
+*/
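+/* For reference (derived from 3GPP TS 26.101 and the sizes table above): FT values 0..7 map to
+   the 4.75, 5.15, 5.90, 6.70, 7.40, 7.95, 10.2 and 12.2 kbps modes. For example, the 12.2 kbps
+   mode produces 244 bits per 20 ms frame, i.e. ceil(244/8) = 31 octets = tdav_codec_amr_nb_sizes[7];
+   FT 8 is SID (5 octets) and FT 15 is NO_DATA (0 octets). */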
+
+int tdav_codec_amrnb_open(tmedia_codec_t* self)
+{
+ tdav_codec_amr_t* amrnb = (tdav_codec_amr_t*)self;
+
+ if(!TDAV_CODEC_AMR(amrnb)->encoder){
+ if(!(TDAV_CODEC_AMR(amrnb)->encoder = Encoder_Interface_init(0))){
+ TSK_DEBUG_ERROR("Failed to initialize AMR-NB encoder");
+ return -2;
+ }
+ }
+
+ if(!TDAV_CODEC_AMR(amrnb)->decoder){
+ if(!(TDAV_CODEC_AMR(amrnb)->decoder = Decoder_Interface_init())){
+ TSK_DEBUG_ERROR("Failed to initialize AMR-NB encoder");
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+int tdav_codec_amrnb_close(tmedia_codec_t* self)
+{
+ tdav_codec_amr_t* amrnb = (tdav_codec_amr_t*)self;
+
+ if(TDAV_CODEC_AMR(amrnb)->encoder){
+ Encoder_Interface_exit(TDAV_CODEC_AMR(amrnb)->encoder);
+ TDAV_CODEC_AMR(amrnb)->encoder = tsk_null;
+ }
+
+ if(TDAV_CODEC_AMR(amrnb)->decoder){
+ Decoder_Interface_exit(TDAV_CODEC_AMR(amrnb)->decoder);
+ TDAV_CODEC_AMR(amrnb)->decoder = tsk_null;
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_amrnb_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tdav_codec_amr_t* amr = (tdav_codec_amr_t*)self;
+
+ switch(amr->mode){
+ case tdav_codec_amr_mode_be:
+ return tdav_codec_amr_be_encode(amr, in_data, in_size, out_data, out_max_size);
+ default:
+ return tdav_codec_amr_oa_encode(amr, in_data, in_size, out_data, out_max_size);
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_amrnb_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tdav_codec_amr_t* amr = (tdav_codec_amr_t*)self;
+
+ switch(amr->mode){
+ case tdav_codec_amr_mode_be:
+ return tdav_codec_amr_be_decode(amr, in_data, in_size, out_data, out_max_size, proto_hdr);
+ default:
+ return tdav_codec_amr_oa_decode(amr, in_data, in_size, out_data, out_max_size, proto_hdr);
+ }
+}
+
+char* tdav_codec_amrnb_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ const tdav_codec_amr_t* amr = (const tdav_codec_amr_t*)codec;
+
+ /* We support all modes, all ... */
+ if(amr){
+ switch(amr->mode){
+ case tdav_codec_amr_mode_be:
+ return tsk_strdup("octet-align=0");
+ default:
+ return tsk_strdup("octet-align=1");
+ }
+ }
+ return tsk_null;
+}
+
+tsk_bool_t tdav_codec_amrnb_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ tdav_codec_amr_t* amr;
+ if(!(amr = (tdav_codec_amr_t*)codec) || !att_name){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_false;
+ }
+
+ if(amr && tsk_striequals(att_name, "fmtp")){
+ /* Match mode */
+ if(tdav_codec_amr_get_mode(att_value) != amr->mode){
+ TSK_DEBUG_INFO("Failed to match [%s]", att_value);
+ return tsk_false;
+ }
+ /* check parameters validity */
+ if(tdav_codec_amr_parse_fmtp(amr, att_value)){
+ TSK_DEBUG_INFO("Failed to match [%s]", att_value);
+ return tsk_false;
+ }
+
+ return tsk_true;
+ }
+ return tsk_false;
+}
+
+
+//
+// AMR-NB OA Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_amrnb_oa_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_amr_t *amrnb_oa = self;
+ if(amrnb_oa){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ tdav_codec_amr_init(TDAV_CODEC_AMR(amrnb_oa), tdav_codec_amr_type_nb, tdav_codec_amr_mode_oa);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_amrnb_oa_dtor(tsk_object_t * self)
+{
+ tdav_codec_amr_t *amrnb_oa = self;
+ if(amrnb_oa){
+ /* deinit base */
+ tmedia_codec_audio_deinit(amrnb_oa);
+ /* deinit self */
+ tdav_codec_amr_deinit(TDAV_CODEC_AMR(amrnb_oa));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_amrnb_oa_def_s =
+{
+ sizeof(tdav_codec_amr_t),
+ tdav_codec_amrnb_oa_ctor,
+ tdav_codec_amrnb_oa_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_amrnb_oa_plugin_def_s =
+{
+ &tdav_codec_amrnb_oa_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_amr_nb_oa,
+ "AMR",
+ "AMR Narrow Band - Octet Aligned (libopencore-amr)",
+ TMEDIA_CODEC_FORMAT_AMR_NB_OA,
+ tsk_true,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 20 // ptime
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_amrnb_open,
+ tdav_codec_amrnb_close,
+ tdav_codec_amrnb_encode,
+ tdav_codec_amrnb_decode,
+ tdav_codec_amrnb_sdp_att_match,
+ tdav_codec_amrnb_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_amrnb_oa_plugin_def_t = &tdav_codec_amrnb_oa_plugin_def_s;
+
+//
+// AMR-NB BE Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_amrnb_be_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_amr_t *amrnb_be = self;
+ if(amrnb_be){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ tdav_codec_amr_init(TDAV_CODEC_AMR(amrnb_be), tdav_codec_amr_type_nb, tdav_codec_amr_mode_be);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_amrnb_be_dtor(tsk_object_t * self)
+{
+ tdav_codec_amr_t *amrnb_be = self;
+ if(amrnb_be){
+ /* deinit base */
+ tmedia_codec_audio_deinit(amrnb_be);
+ /* deinit self */
+ tdav_codec_amr_deinit(TDAV_CODEC_AMR(amrnb_be));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_amrnb_be_def_s =
+{
+ sizeof(tdav_codec_amr_t),
+ tdav_codec_amrnb_be_ctor,
+ tdav_codec_amrnb_be_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_amrnb_be_plugin_def_s =
+{
+ &tdav_codec_amrnb_be_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_amr_nb_be,
+ "AMR",
+ "AMR Narrow Band - Bandwidth-Efficient (libopencore-amr)",
+ TMEDIA_CODEC_FORMAT_AMR_NB_BE,
+ tsk_true,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_amrnb_open,
+ tdav_codec_amrnb_close,
+ tdav_codec_amrnb_encode,
+ tdav_codec_amrnb_decode,
+ tdav_codec_amrnb_sdp_att_match,
+ tdav_codec_amrnb_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_amrnb_be_plugin_def_t = &tdav_codec_amrnb_be_plugin_def_s;
+
+
+
+
+
+
+
+
+
+//
+// Common functions
+//
+
+static int tdav_codec_amr_init(tdav_codec_amr_t* self, tdav_codec_amr_type_t type, tdav_codec_amr_mode_t mode)
+{
+ if(self){
+ self->type = type;
+ self->mode = mode;
+ self->encoder_mode = DEFAULT_ENC_MODE;
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Invalid Parameter");
+ return -1;
+ }
+}
+
+static int tdav_codec_amr_deinit(tdav_codec_amr_t* self)
+{
+ if(self){
+ switch(self->type){
+ case tdav_codec_amr_type_nb:
+ { /* AMR-NB */
+ if(self->encoder){
+ Encoder_Interface_exit(self->encoder);
+ self->encoder = tsk_null;
+ }
+ if(self->decoder){
+ Decoder_Interface_exit(self->decoder);
+ self->decoder = tsk_null;
+ }
+ break;
+ }
+ case tdav_codec_amr_type_wb:
+ { /* AMR-WB */
+ break;
+ }
+ }
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Invalid Parameter");
+ return -1;
+ }
+}
+
+static tdav_codec_amr_mode_t tdav_codec_amr_get_mode(const char* fmtp)
+{
+ /* RFC 4867 - 8.1. AMR Media Type Registration
+ octet-align: Permissible values are 0 and 1. If 1, octet-aligned
+ operation SHALL be used. If 0 or if not present, bandwidth-efficient operation is employed.
+ */
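+ /* Example (illustrative): "octet-align=1; mode-set=0,2,5,7" selects octet-aligned operation,
+ whereas an fmtp without "octet-align" (or with "octet-align=0") selects bandwidth-efficient
+ operation. */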
+ tdav_codec_amr_mode_t mode = tdav_codec_amr_mode_be;
+ tsk_size_t size = tsk_strlen(fmtp);
+ int start, end;
+
+ if((start = tsk_strindexOf(fmtp, size, "octet-align")) !=-1){
+ tsk_param_t* param;
+ if((end = tsk_strindexOf((fmtp+start), (size-start), ";")) == -1){
+ end = size;
+ }
+ if((param = tsk_params_parse_param((fmtp+start), (end-start)))){
+ if(param->value && tsk_strequals(param->value, "1")){
+ mode = tdav_codec_amr_mode_oa;
+ }
+ TSK_OBJECT_SAFE_FREE(param);
+ }
+ }
+ return mode;
+}
+
+int tdav_codec_amr_parse_fmtp(tdav_codec_amr_t* self, const char* fmtp)
+{
+ int ret = 0;
+ int val_int;
+ const char* val_str;
+ //--tdav_codec_amr_mode_t mode = self->mode;
+ tsk_params_L_t* params = tsk_null;
+
+ if((params = tsk_params_fromstring(fmtp, ";", tsk_true))){
+ /* Do not check "octet-align" => already done by the caller of this function */
+
+ /* === mode-set ===*/
+ if((val_str = tsk_params_get_param_value(params, "mode-set"))){
+ char* modes = tsk_strdup(val_str);
+ char *pch, *saveptr;
+ int mode_int;
+ pch = tsk_strtok_r(modes, ", ", &saveptr);
+ while(pch){
+ mode_int = atoi(pch);
+ self->modes |= 0x0001 << mode_int;
+ pch = tsk_strtok_r(tsk_null, ", ", &saveptr);
+ }
+
+ TSK_FREE(modes);
+ }
+ else{
+ self->modes = 0xFFFF;
+ }
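+ /* Example (illustrative): "mode-set=0,2,4,7" sets bits 0, 2, 4 and 7, i.e. self->modes == 0x0095. */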
+
+ /* === interleaving ===*/
+ if((val_int = tsk_params_get_param_value_as_int(params, "interleaving")) != -1){
+ TSK_DEBUG_WARN("interleaving not supported");
+ ret = -1; goto bail;
+ }
+ /* === mode-change-period ===*/
+ if((val_int = tsk_params_get_param_value_as_int(params, "mode-change-period")) != -1){
+ if(val_int != 1 && val_int != 2){
+ TSK_DEBUG_ERROR("Invalid [mode-change-period]");
+ ret = -1; goto bail;
+ }
+ self->mcp = (unsigned)val_int;
+ }
+ /* === mode-change-capability ===*/
+ if((val_int = tsk_params_get_param_value_as_int(params, "mode-change-capability")) != -1){
+ if(val_int != 1 && val_int != 2){
+ TSK_DEBUG_ERROR("Invalid [mode-change-capability]");
+ ret = -1; goto bail;
+ }
+ self->mcc = (unsigned)val_int;
+ }
+ /* === mode-change-neighbor ===*/
+ if((val_int = tsk_params_get_param_value_as_int(params, "mode-change-neighbor")) != -1){
+ if(val_int != 0 && val_int != 1){
+ TSK_DEBUG_ERROR("Invalid [mode-change-neighbor]");
+ ret = -1; goto bail;
+ }
+ self->mcn = (unsigned)val_int;
+ }
+ }
+
+bail:
+ TSK_OBJECT_SAFE_FREE(params);
+ return ret;
+}
+
+
+/* RFC 4867 - 4.2. Payload Structure
+ +----------------+-------------------+----------------
+ | payload header | table of contents | speech data ...
+ +----------------+-------------------+----------------
+*/
+/* RFC 4867 - 4.4.2. The Payload Table of Contents and Frame CRCs
+ The table of contents (ToC) consists of a list of ToC entries, each representing a speech frame.
+ +---------------------+
+ | list of ToC entries |
+ +---------------------+
+ | list of frame CRCs | (optional)
+ - - - - - - - - - - -
+ Note, for ToC entries with FT=14 or 15, there will be no
+ corresponding speech frame or frame CRC present in the payload.
+*/
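+/* Worked example (illustrative, per RFC 4867): a single 12.2 kbps frame in bandwidth-efficient
+   mode occupies 4 (CMR) + 6 (ToC) + 244 (speech) = 254 bits, padded to 32 octets on the wire;
+   the same frame in octet-aligned mode takes 1 (CMR) + 1 (ToC) + 31 (speech) = 33 octets. */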
+
+
+static tsk_size_t tdav_codec_amr_be_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t out_size = 0, i;
+ int ret_size;
+ uint8_t ToC;
+ static uint8_t CMR = NO_DATA /* No interleaving */;
+
+ uint8_t outbuf[60 + 1]; /* enough for both NB and WB at ptime=20ms */
+ if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_be)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* Encode */
+ if((ret_size = Encoder_Interface_Encode(amr->encoder, amr->encoder_mode, in_data, outbuf, 0)) <= 0){
+ TSK_DEBUG_ERROR("Encoder_Interface_Encode() failed");
+ goto bail;
+ }
+
+
+ /* allocate output buffer */
+ if((int)*out_max_size <ret_size){
+ if(!(*out_data = tsk_realloc(*out_data, ret_size))){
+ *out_max_size = 0;
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ goto bail;
+ }
+ *out_max_size = ret_size;
+ }
+
+ out_size = ret_size;
+
+ /* CMR (4bits) */
+ ((uint8_t*)*out_data)[0] = (CMR<<4);
+ /* ToC (Always ONE Frame, don't need to test for [F]) (6bits)*/
+ ToC = outbuf[0]>>2/*2*[P]*/;
+ ((uint8_t*)*out_data)[0] |= (ToC >> 2/*[Q],[1-FT]*/) & 0xF; /* 4bits */
+ ((uint8_t*)*out_data)[1] = (ToC & 0x3/*[1-FT],[Q]*/)<<6; /* 2bits */
+
+ /* === THERE ARE 2 EXTRA BITS === */
+
+ for(i=1; i<out_size-1; i++){
+ ((uint8_t*)*out_data)[i] |= outbuf[i]>>2;/* 6bits */
+ ((uint8_t*)*out_data)[i+1] = outbuf[i]<<6;/* 2bits */
+ }
+
+bail:
+ return out_size;
+}
+
+tsk_size_t tdav_codec_amr_be_decode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size = 0, pcm_frame_size = 0, index = 0;
+ const uint8_t* pdata = (const uint8_t*)in_data;
+ //--const uint8_t* pend = (pdata + in_size);
+ uint8_t CMR;
+ int toc_entries = 0, i, k; // ToC entries count
+
+ if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_be)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* compute PCM frame size */
+ switch(TDAV_CODEC_AMR(amr)->type){
+ case tdav_codec_amr_type_nb:
+ pcm_frame_size = 160 * sizeof(short);
+ break;
+ case tdav_codec_amr_type_wb:
+ pcm_frame_size = 320 * sizeof(short);
+ break;
+ default:
+ TSK_DEBUG_ERROR("Invalid AMR type");
+ return 0;
+ }
+
+ /* CMR (4bits) */
+ CMR = tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), index, 4);
+ index += 4;
+ if(CMR != NO_DATA){
+ amr->encoder_mode = (enum Mode)CMR;
+ }
+
+ /* F(1bit), FT(4bits), Q(1bit) */
+ /* count ToC entries */
+ do{ /* At least ONE ToC */
+ ++toc_entries;
+ ++pdata;
+ index += 6;
+ }
+ while((index < (in_size*8)) && (tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), (index-6), 1)/* F */));
+
+ for(i = 0; (i<toc_entries && (index < (in_size*8))) ; i++){
+ int size = -1;
+ uint8_t* speech_data = tsk_null;
+ //--int speech_data_size = 0;
+ uint8_t ToC = tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), 4/*CMR*/ + (i*6), 6);
+
+ switch(TDAV_CODEC_AMR(amr)->type){
+ case tdav_codec_amr_type_nb:
+ size = tdav_codec_amr_nb_sizes[(ToC>>1)&0x0F/* FT */];
+ break;
+ case tdav_codec_amr_type_wb:
+ size = tdav_codec_amr_wb_sizes[(ToC>>1)&0x0F/* FT */];
+ break;
+ }
+
+ if((speech_data = tsk_calloc((size + 2/* ToC + '\0' */), sizeof(uint8_t)))){
+ /* copy ToC */
+ speech_data[0] = (ToC & 0x1F)<<2/* 2*[P] */; /* ToC as OA layout */
+ /* copy speech data */
+ for(k=0; k<size; k++){
+ speech_data[1 + k] = tdav_codec_amr_bitbuffer_read(in_data, (in_size*8), index, 8);
+ index+=8;
+ if((k==size-1) && (index%8)){
+ speech_data[1 + k] <<= (8-(index%8)); //clean
+ }
+ }
+
+ /* allocate/reallocate speech data */
+ if(*out_max_size <(out_size + pcm_frame_size)){
+ if(!(*out_data = tsk_realloc(*out_data, (out_size + pcm_frame_size)))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ TSK_FREE(speech_data);
+ goto bail;
+ }
+ *out_max_size = out_size + pcm_frame_size;
+ }
+
+ /* decode speech data */
+ Decoder_Interface_Decode(amr->decoder, speech_data, &((short*)*out_data)[out_size/sizeof(short)], 0);
+ out_size += pcm_frame_size, pdata+= size;
+
+ TSK_FREE(speech_data);
+ }
+ }
+
+bail:
+ return out_size;
+}
+
+static tsk_size_t tdav_codec_amr_oa_encode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t out_size = 0;
+ int ret_size;
+ static uint8_t CMR = NO_DATA /* No interleaving */;
+
+ uint8_t outbuf[60 + 1]; /* enough for both NB and WB at ptime=20ms */
+ if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_oa)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* Encode */
+ if((ret_size = Encoder_Interface_Encode(amr->encoder, amr->encoder_mode, in_data, outbuf, 0)) <= 0){
+ TSK_DEBUG_ERROR("Encoder_Interface_Encode() failed");
+ goto bail;
+ }
+
+ out_size = ret_size + 1 /* CMR without interleaving */;
+ /* allocate output buffer */
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = out_size = 0;
+ goto bail;
+ }
+ *out_max_size = out_size;
+ }
+
+ /* CMR */
+ ((uint8_t*)*out_data)[0] = (CMR << 4);
+ /* Only ONE ToC --> believe me */
+ memcpy(&((uint8_t*)*out_data)[1], outbuf, ret_size);
+
+bail:
+ return out_size;
+}
+
+static tsk_size_t tdav_codec_amr_oa_decode(tdav_codec_amr_t* amr, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size = 0, pcm_frame_size = 0;
+ const uint8_t* pdata = (const uint8_t*)in_data;
+ const uint8_t* pend = (pdata + in_size);
+ uint8_t CMR;
+ int toc_entries = 0, i; // ToC entries count
+
+ if(!amr || !in_data || !in_size || !out_data || (amr->mode != tdav_codec_amr_mode_oa)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* compute PCM frame size */
+ switch(TDAV_CODEC_AMR(amr)->type){
+ case tdav_codec_amr_type_nb:
+ pcm_frame_size = 160 * sizeof(short);
+ break;
+ case tdav_codec_amr_type_wb:
+ pcm_frame_size = 320 * sizeof(short);
+ break;
+ default:
+ TSK_DEBUG_ERROR("Invalid AMR type");
+ return 0;
+ }
+
+ /* RFC 4867 - 4.4. Octet-Aligned Mode
+ In octet-aligned mode, the payload header consists of a 4-bit CMR, 4
+ reserved bits, and optionally, an 8-bit interleaving header, as shown
+ below:
+
+ 0 1
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ +-+-+-+-+-+-+-+-+- - - - - - - -
+ | CMR |R|R|R|R| ILL | ILP |
+ +-+-+-+-+-+-+-+-+- - - - - - - -
+
+ CMR (4 bits): same as defined in Section 4.3.1.
+
+ "interleaving" not supported ==> could ignore ILL and ILP (wich are optional)
+ */
+
+ CMR = (*pdata++ >> 4);
+ if(CMR != NO_DATA){
+ /* The codec mode request received in the CMR field is valid until the
+ next codec mode request is received, i.e., a newly received CMR value
+ corresponding to a speech mode, or NO_DATA overrides the previously
+ received CMR value corresponding to a speech mode or NO_DATA. */
+ amr->encoder_mode = (enum Mode)CMR; // As we support all modes, do not check for validity
+ }
+
+ /*
+ A ToC entry takes the following format in octet-aligned mode:
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+
+ |F| FT |Q|P|P|
+ +-+-+-+-+-+-+-+-+
+
+ F (1 bit): see definition in Section 4.3.2.
+ FT (4 bits, unsigned integer): see definition in Section 4.3.2.
+ Q (1 bit): see definition in Section 4.3.2.
+ P bits: padding bits, MUST be set to zero, and MUST be ignored on reception.
+ */
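+ /* Example (illustrative): a ToC octet of 0x3C is F=0, FT=7 (12.2 kbps), Q=1, P=00;
+ (0x3C >> 3) & 0x0F == 7, so tdav_codec_amr_nb_sizes[7] == 31 speech octets follow. */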
+
+ /* count ToC entries */
+ do{ /* At least ONE ToC */
+ ++toc_entries;
+ ++pdata;
+ }
+ while(pdata && (pdata < pend) && (pdata[-1] >> 7/* F */));
+
+ for(i = 0; (i<toc_entries && (pdata < pend)) ; i++){
+ int size = -1;
+ uint8_t* speech_data = tsk_null;
+ //--int speech_data_size = 0;
+ uint8_t ToC = ((const uint8_t*)in_data)[1/*CMR...*/ + i];
+ switch(TDAV_CODEC_AMR(amr)->type){
+ case tdav_codec_amr_type_nb:
+ size = tdav_codec_amr_nb_sizes[(ToC>>3) & 0x0F/* FT */];
+ break;
+ case tdav_codec_amr_type_wb:
+ size = tdav_codec_amr_wb_sizes[(ToC>>3) & 0x0F/* FT */];
+ break;
+ }
+
+ /* check size */
+ if(size <0 || ((pdata + size) > pend)){
+ TSK_DEBUG_ERROR("Invalid size");
+ break;
+ }
+
+ if((speech_data = tsk_calloc((size + 2/* ToC + '\0' */), sizeof(uint8_t)))){
+ /* copy ToC */
+ *speech_data = ToC & 0x7F/* with 'F'=0 */;
+ /* copy speech data */
+ memcpy((speech_data + 1), pdata, size);
+ /* allocate/reallocate speech data */
+ if(*out_max_size <(out_size + pcm_frame_size)){
+ if(!(*out_data = tsk_realloc(*out_data, (out_size + pcm_frame_size)))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ TSK_FREE(speech_data);
+ goto bail;
+ }
+ *out_max_size = (out_size + pcm_frame_size);
+ }
+ /* decode speech data */
+ Decoder_Interface_Decode(amr->decoder, speech_data, &((short*)*out_data)[out_size/sizeof(short)], 0);
+ out_size += pcm_frame_size, pdata+= size;
+
+ TSK_FREE(speech_data);
+ }
+ }
+
+bail:
+ return out_size;
+}
+
+
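+/* Illustrative behaviour (example values, not from the original code): with
+   bits = { 0xAB, 0xCD } (1010 1011 1100 1101), reading count=8 bits from start=4
+   returns 0xBC (1011 1100), i.e. the bit window spanning both octets. */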
+static uint8_t tdav_codec_amr_bitbuffer_read(const void* bits, tsk_size_t size, tsk_size_t start, tsk_size_t count)
+{
+ uint8_t byte, left, right, pad;
+
+ if(!bits || !size || count>8){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if((start + count) > size){
+ count = (size - start);
+ }
+
+ pad = start ? (8 - (start % 8)) : count;
+ left = ((uint8_t*)bits)[start/8] << (8-pad);
+ right = ((uint8_t*)bits)[((start+count)<size ? (start+count) : start)/8] >> pad;
+
+ if((start && (start % 8) != ((start+count)%8)) || (!start && count>8)){
+ /* overlap */
+ byte = (left | right) >> (8-count);
+ }
+ else{
+ byte = (left | right) & (0xFF >> (8-count));
+ }
+
+ return byte;
+}
+
+#endif /* HAVE_OPENCORE_AMR */
diff --git a/tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c b/tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c
new file mode 100644
index 0000000..3495295
--- /dev/null
+++ b/tinyDAV/src/codecs/bfcp/tdav_codec_bfcp.c
@@ -0,0 +1,104 @@
+/*
+* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_bfcp.c
+ * @brief The Binary Floor Control Protocol (BFCP, RFC 4582) fake codec.
+ */
+#include "tinydav/codecs/bfcp/tdav_codec_bfcp.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+/* ============ BFCP Plugin interface ================= */
+#define tdav_codec_bfcp_open tsk_null
+#define tdav_codec_bfcp_close tsk_null
+#define tdav_codec_bfcp_sdp_att_get tsk_null
+#define tdav_codec_bfcp_encode tsk_null
+#define tdav_codec_bfcp_decode tsk_null
+
+static tsk_bool_t tdav_codec_bfcp_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// BFCP Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_bfcp_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_bfcp_t *bfcp = self;
+ if (bfcp) {
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_bfcp_dtor(tsk_object_t * self)
+{
+ tdav_codec_bfcp_t *bfcp = self;
+ if (bfcp) {
+ /* deinit base */
+ tmedia_codec_bfcp_deinit(bfcp);
+ /* deinit self */
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_bfcp_def_s =
+{
+ sizeof(tdav_codec_bfcp_t),
+ tdav_codec_bfcp_ctor,
+ tdav_codec_bfcp_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_bfcp_plugin_def_s =
+{
+ &tdav_codec_bfcp_def_s,
+
+ tmedia_bfcp,
+ tmedia_codec_id_none, // fake codec without real id
+ "application",
+ "BFCP fake codec",
+ TMEDIA_CODEC_FORMAT_BFCP,
+ tsk_false,
+ 0, // rate
+
+ /* audio */
+ {0},
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_bfcp_open,
+ tdav_codec_bfcp_close,
+ tdav_codec_bfcp_encode,
+ tdav_codec_bfcp_decode,
+ tdav_codec_bfcp_sdp_att_match,
+ tdav_codec_bfcp_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_bfcp_plugin_def_t = &tdav_codec_bfcp_plugin_def_s;
diff --git a/tinyDAV/src/codecs/bv/tdav_codec_bv16.c b/tinyDAV/src/codecs/bv/tdav_codec_bv16.c
new file mode 100644
index 0000000..21850fb
--- /dev/null
+++ b/tinyDAV/src/codecs/bv/tdav_codec_bv16.c
@@ -0,0 +1,250 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_bv16.c
+ * @brief BroadVoice16 codec
+ * The payloader/depayloader follow RFC 4298
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/bv/tdav_codec_bv16.h"
+
+#if HAVE_BV16
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "..\\thirdparties\\win32\\lib\\BroadVoice16\\libbv16.a")
+#endif
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "typedef.h"
+#include "bvcommon.h"
+#include "bv16cnst.h"
+#include "bv16strct.h"
+#include "bv16.h"
+#include "utility.h"
+#if G192BITSTREAM
+#include "g192.h"
+#else
+#include "bitpack.h"
+#endif
+#include "memutil.h"
+
+/* RFC 4298 - 3.1. BroadVoice16 Bit Stream Definition */
+#define TDAV_BV16_FRAME_SIZE 10
+#define FRSZ_IN_U8 (FRSZ*2)
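+/* Note (assuming the BroadVoice16 reference code constants): BV16 operates on 5 ms frames of
+   FRSZ = 40 samples at 8000 Hz; each frame encodes to 80 bits, i.e. TDAV_BV16_FRAME_SIZE = 10
+   octets on the wire (RFC 4298), and decodes back to FRSZ_IN_U8 = 80 bytes of 16-bit PCM. */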
+
+/* ============ BV16 Plugin interface ================= */
+
+#define tdav_codec_bv16_sdp_att_get tsk_null
+#define tdav_codec_bv16_fmtp_set tsk_null
+
+static int sizestate = sizeof(struct BV16_Encoder_State);
+static int sizebitstream = sizeof(struct BV16_Bit_Stream);
+
+int tdav_codec_bv16_open(tmedia_codec_t* self)
+{
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+
+ if(!bv16->encoder.state){
+ bv16->encoder.state = allocWord16(0, sizestate/2-1);
+ Reset_BV16_Encoder((struct BV16_Encoder_State*)bv16->encoder.state);
+ }
+ if(!bv16->encoder.bs){
+ bv16->encoder.bs = allocWord16(0, sizebitstream/2-1);
+ }
+ if(!bv16->encoder.x){
+ bv16->encoder.x = allocWord16(0, FRSZ-1);
+ }
+
+ if(!bv16->decoder.state){
+ bv16->decoder.state = allocWord16(0, sizestate/2-1);
+ Reset_BV16_Decoder((struct BV16_Decoder_State*)bv16->decoder.state);
+ }
+ if(!bv16->decoder.bs){
+ bv16->decoder.bs = allocWord16(0, sizebitstream/2-1);
+ }
+ if(!bv16->decoder.x){
+ bv16->decoder.x = allocWord16(0, FRSZ-1);
+ }
+
+ return 0;
+}
+
+int tdav_codec_bv16_close(tmedia_codec_t* self)
+{
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+
+ if(bv16->encoder.state){
+ deallocWord16(bv16->encoder.state, 0, sizestate/2-1);
+ bv16->encoder.state = tsk_null;
+ }
+ if(bv16->encoder.bs){
+ deallocWord16(bv16->encoder.bs, 0, sizebitstream/2-1);
+ bv16->encoder.bs = tsk_null;
+ }
+ if(bv16->encoder.x){
+ deallocWord16(bv16->encoder.x, 0, FRSZ-1);
+ bv16->encoder.x = tsk_null;
+ }
+
+ if(bv16->decoder.state){
+ deallocWord16(bv16->decoder.state, 0, sizestate/2-1);
+ bv16->decoder.state = tsk_null;
+ }
+ if(bv16->decoder.bs){
+ deallocWord16(bv16->decoder.bs, 0, sizebitstream/2-1);
+ bv16->decoder.bs = tsk_null;
+ }
+ if(bv16->decoder.x){
+ deallocWord16(bv16->decoder.x, 0, FRSZ-1);
+ bv16->decoder.x = tsk_null;
+ }
+
+
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_bv16_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ //tsk_size_t out_size = 0;
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_bv16_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size = 0;
+ int i;
+ tdav_codec_bv16_t* bv16 = (tdav_codec_bv16_t*)self;
+ uint8_t mama[600];
+
+ if(!self || !in_data || !in_size || !out_data || (in_size % TDAV_BV16_FRAME_SIZE)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ for(i=0; i<(int)in_size; i+=TDAV_BV16_FRAME_SIZE){
+ BV16_BitUnPack(mama, (struct BV16_Bit_Stream*)bv16->decoder.bs);
+ //BV16_BitUnPack(&((UWord8 *)in_data)[i], (struct BV16_Bit_Stream*)bv16->decoder.bs);
+ BV16_Decode((struct BV16_Bit_Stream*)bv16->decoder.bs, (struct BV16_Decoder_State*)bv16->decoder.state, bv16->decoder.x);
+
+
+ if(*out_max_size<(out_size + FRSZ_IN_U8)){
+ if(!(*out_data = tsk_realloc(*out_data, (out_size + FRSZ_IN_U8)))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = (out_size + FRSZ_IN_U8);
+ }
+ memcpy(&((uint8_t*)* out_data)[out_size], bv16->decoder.x, FRSZ_IN_U8);
+ out_size += FRSZ_IN_U8;
+ }
+
+
+ return out_size;
+}
+
+tsk_bool_t tdav_codec_bv16_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// BV16 Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_bv16_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_bv16_t *bv16 = self;
+ if(bv16){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_bv16_dtor(tsk_object_t * self)
+{
+ tdav_codec_bv16_t *bv16 = self;
+ if(bv16){
+ /* deinit base */
+ tmedia_codec_audio_deinit(bv16);
+ /* deinit self */
+
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_bv16_def_s =
+{
+ sizeof(tdav_codec_bv16_t),
+ tdav_codec_bv16_ctor,
+ tdav_codec_bv16_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_bv16_plugin_def_s =
+{
+ &tdav_codec_bv16_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_bv16,
+ "BV16",
+ "BroadVoice16 Rate",
+ TMEDIA_CODEC_FORMAT_BV16,
+ tsk_true,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ {0},
+
+ tdav_codec_bv16_open,
+ tdav_codec_bv16_close,
+ tdav_codec_bv16_encode,
+ tdav_codec_bv16_decode,
+ tdav_codec_bv16_sdp_att_match,
+ tdav_codec_bv16_sdp_att_get,
+ tdav_codec_bv16_fmtp_set
+};
+const tmedia_codec_plugin_def_t *tdav_codec_bv16_plugin_def_t = &tdav_codec_bv16_plugin_def_s;
+
+
+#endif /* HAVE_BV16 */
diff --git a/tinyDAV/src/codecs/bv/tdav_codec_bv32.c b/tinyDAV/src/codecs/bv/tdav_codec_bv32.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tinyDAV/src/codecs/bv/tdav_codec_bv32.c
diff --git a/tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c b/tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c
new file mode 100644
index 0000000..103ac8d
--- /dev/null
+++ b/tinyDAV/src/codecs/dtmf/tdav_codec_dtmf.c
@@ -0,0 +1,126 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_dtmf.c
+ * @brief DTMF (RFC 4733) codec plugins.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+
+/* ============ DTMF Plugin interface ================= */
+
+tsk_size_t tdav_codec_dtmf_fmtp_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ return 0;
+}
+
+tsk_size_t tdav_codec_dtmf_fmtp_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ return 0;
+}
+
+char* tdav_codec_dtmf_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+ if(tsk_striequals(att_name, "fmtp")){
+ return tsk_strdup("0-16");
+ }
+ return tsk_null;
+}
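+// Note (illustrative): the advertised "0-16" range covers the RFC 4733 DTMF events 0-15
+// (digits 0-9, '*', '#', A-D) plus event 16 (hook-flash, inherited from RFC 2833).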
+
+tsk_bool_t tdav_codec_dtmf_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// DTMF Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_dtmf_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_dtmf_t *dtmf = self;
+ if(dtmf){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_dtmf_dtor(tsk_object_t * self)
+{
+ tdav_codec_dtmf_t *dtmf = self;
+ if(dtmf){
+ /* deinit base */
+ tmedia_codec_audio_deinit(dtmf);
+ /* deinit self */
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_dtmf_def_s =
+{
+ sizeof(tdav_codec_dtmf_t),
+ tdav_codec_dtmf_ctor,
+ tdav_codec_dtmf_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_dtmf_plugin_def_s =
+{
+ &tdav_codec_dtmf_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_none, // fake codec without real identifier
+ "telephone-event",
+ "DTMF Codec (RFC 4733)",
+ TMEDIA_CODEC_FORMAT_DTMF,
+ tsk_true,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 20 // ptime
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tsk_null, // open
+ tsk_null, // close
+ tdav_codec_dtmf_fmtp_encode,
+ tdav_codec_dtmf_fmtp_decode,
+ tdav_codec_dtmf_sdp_att_match,
+ tdav_codec_dtmf_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_dtmf_plugin_def_t = &tdav_codec_dtmf_plugin_def_s;
diff --git a/tinyDAV/src/codecs/fec/tdav_codec_red.c b/tinyDAV/src/codecs/fec/tdav_codec_red.c
new file mode 100644
index 0000000..2fb6f27
--- /dev/null
+++ b/tinyDAV/src/codecs/fec/tdav_codec_red.c
@@ -0,0 +1,263 @@
+/*
+* Copyright (C) 2012 Doubango Telecom <http://www.doubango.org>
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango[dot]org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_red.c
+ * @brief RTP Payload for Redundant Audio Data as per RFC 2198
+ */
+#include "tinydav/codecs/fec/tdav_codec_red.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_memory.h"
+#include "tsk_time.h"
+#include "tsk_debug.h"
+
+typedef struct tdav_codec_red_s
+{
+ TMEDIA_DECLARE_CODEC_VIDEO;
+
+ tdav_codec_red_rtppacket_cb_f callback;
+ const void* callback_data;
+}
+tdav_codec_red_t;
+
+int tdav_codec_red_set_callback(tdav_codec_red_t *self, tdav_codec_red_rtppacket_cb_f callback, const void* callback_data)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ self->callback = callback;
+ self->callback_data = callback_data;
+
+ return 0;
+}
+
+static int tdav_codec_red_open(tmedia_codec_t* self)
+{
+ return 0;
+}
+
+static int tdav_codec_red_close(tmedia_codec_t* self)
+{
+ return 0;
+}
+
+static tsk_size_t tdav_codec_red_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tdav_codec_red_t *red = (tdav_codec_red_t *)self;
+ tsk_size_t xsize = (in_size + 1);
+ static const uint8_t __first_octet = 0x00; // F=0 (final block), PT=0. Up to the caller to update this first octet with the right PT.
+
+ if(!red || !in_data || !in_size || !out_data || !out_max_size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if(*out_max_size < xsize){
+ if(!(*out_data = tsk_realloc(*out_data, xsize))){
+ TSK_DEBUG_ERROR("Failed to realloc data");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = xsize;
+ }
+
+ ((uint8_t*)*out_data)[0] = __first_octet;
+ memcpy(&((uint8_t*)*out_data)[1], in_data, in_size);
+
+ return xsize;
+}
+
+static tsk_size_t tdav_codec_red_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tdav_codec_red_t* red = (tdav_codec_red_t*)self;
+ const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+ trtp_rtp_packet_t* red_rtp_pkt = tsk_null;
+ const uint8_t* pdata = in_data;
+ const uint8_t* red_hdr = in_data;
+ tsk_size_t red_hdrs_count, i;
+ tsk_bool_t last;
+ uint8_t F;
+ uint16_t timestamp_offset, block_length;
+
+ if(!red || !in_data || (in_size < TDAV_CODEC_RED_MIN_PKT_SIZE)|| !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if(!red->callback){
+ TSK_DEBUG_WARN("Not callback installed for RED data");
+ return 0;
+ }
+
+ if((F = (pdata[0] & 0x80)) == 0){
+ i = 1;
+ red_hdrs_count = 1;
+ }
+ else{
+ for(i = 0, red_hdrs_count = 0; i < in_size; i+= 4, ++red_hdrs_count){
+ if((F = (pdata[i] & 0x80)) == 0){ ++i; ++red_hdrs_count; break; }
+ }
+ }
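+ /* Illustrative example (per RFC 2198): a payload carrying one redundant block plus the primary
+ starts with a 4-octet header (F=1) followed by a 1-octet primary header (F=0), so the loop
+ above yields red_hdrs_count == 2 and the encoded blocks start at offset i == 5. */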
+
+ if(i >= in_size){
+ TSK_DEBUG_ERROR("Invalid data");
+ return 0;
+ }
+
+ pdata += i;
+ in_size -= i;
+
+ for(i = 0; i < red_hdrs_count && in_size > 0; ++i){
+ TSK_OBJECT_SAFE_FREE(red_rtp_pkt);
+ if(!(red_rtp_pkt = trtp_rtp_packet_create_null())){
+ TSK_DEBUG_ERROR("Failed to create RTP packet");
+ continue;
+ }
+ if(!(red_rtp_pkt->header = trtp_rtp_header_create(rtp_hdr->ssrc, rtp_hdr->seq_num, rtp_hdr->timestamp, rtp_hdr->payload_type, rtp_hdr->marker))){
+ TSK_DEBUG_ERROR("Failed to create RTP header");
+ continue;
+ }
+
+ // Must create an RTP packet for each RED chunk as they will be saved in the jitter buffer (JB)
+ last = (i == (red_hdrs_count - 1));
+ F = (red_hdr[0] & 0x80);
+ red_rtp_pkt->header->payload_type = (red_hdr[0] & 0x7F);
+
+ if(last || !F){
+ /*
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+
+ |0| Block PT |
+ +-+-+-+-+-+-+-+-+
+ */
+ block_length = (uint16_t)in_size;
+ }
+ else{
+ /*
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |1| block PT=7 | timestamp offset | block length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ timestamp_offset = ((red_hdr[1] << 8) | red_hdr[2]) >> 2;
+ block_length = ((red_hdr[2] & 0x03) << 8) | red_hdr[3];
+ if(block_length > in_size){
+ TSK_DEBUG_ERROR("Invalid 'block length'");
+ break;
+ }
+ red_rtp_pkt->header->timestamp += timestamp_offset;
+ red_hdr += 4;
+ }
+
+ // decode
+ if(red->callback){
+ // do not use "data_const" as payload will be saved in the jitter buffer and decoded later (async)
+ if((red_rtp_pkt->payload.data = tsk_malloc(block_length))){
+ memcpy(red_rtp_pkt->payload.data, pdata, block_length);
+ red_rtp_pkt->payload.size = block_length;
+ red->callback(red->callback_data, red_rtp_pkt);
+ }
+ }
+
+ pdata += block_length;
+ in_size -= block_length;
+ }
+
+ TSK_OBJECT_SAFE_FREE(red_rtp_pkt);
+
+ return 0; // must be always zero
+}
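For reference, each 4-byte redundant-block header consumed by the loop above packs F (1 bit), block PT (7 bits), timestamp offset (14 bits) and block length (10 bits), as defined by RFC 2198. A hypothetical stand-alone helper performing the same extraction as the code above:

/* Extract the fields of one 32-bit RED block header (F bit set). Sketch only. */
static void red_parse_block_hdr(const uint8_t hdr[4], uint8_t* block_pt, uint16_t* ts_offset, uint16_t* block_len)
{
    *block_pt  = hdr[0] & 0x7F;                               /* block payload type (7 bits) */
    *ts_offset = (uint16_t)(((hdr[1] << 8) | hdr[2]) >> 2);   /* timestamp offset (14 bits)  */
    *block_len = (uint16_t)(((hdr[2] & 0x03) << 8) | hdr[3]); /* block length (10 bits)      */
}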
+
+static tsk_bool_t tdav_codec_red_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ return tsk_true;
+}
+
+static char* tdav_codec_red_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+ return tsk_null;
+}
+
+
+/* ============ red object definition ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_red_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_red_t *red = self;
+ if(red){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_red_dtor(tsk_object_t * self)
+{
+ tdav_codec_red_t *red = self;
+ if(red){
+ /* deinit base */
+ tmedia_codec_video_deinit(red);
+ /* deinit self */
+
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_red_def_s =
+{
+ sizeof(tdav_codec_red_t),
+ tdav_codec_red_ctor,
+ tdav_codec_red_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_red_plugin_def_s =
+{
+ &tdav_codec_red_def_s,
+
+ (/* tmedia_video | tmedia_audio | */tmedia_t140), // FIXME: for now RED is only supported with T.140
+ tmedia_codec_id_red,
+ "red",
+ "red codec",
+ TMEDIA_CODEC_FORMAT_RED,
+ tsk_true,
+ 1000, // rate: FIXME: for now it's only for T.140
+
+ /* audio */
+ { 0 },
+
+ /* video (default width, height, fps) */
+ {176, 144, 15},
+
+ tsk_null, // set()
+ tdav_codec_red_open,
+ tdav_codec_red_close,
+ tdav_codec_red_encode,
+ tdav_codec_red_decode,
+ tdav_codec_red_sdp_att_match,
+ tdav_codec_red_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_red_plugin_def_t = &tdav_codec_red_plugin_def_s;
diff --git a/tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c b/tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c
new file mode 100644
index 0000000..f492a52
--- /dev/null
+++ b/tinyDAV/src/codecs/fec/tdav_codec_ulpfec.c
@@ -0,0 +1,424 @@
+/*
+* Copyright (C) 2012-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_ulpfec.c
+ * @brief Forward Error Correction (FEC) implementation as per RFC 5109
+ */
+#include "tinydav/codecs/fec/tdav_codec_ulpfec.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_FEC_PKT_HDR_SIZE 10
+
+typedef struct tdav_codec_ulpfec_s
+{
+ TMEDIA_DECLARE_CODEC_VIDEO;
+
+ struct{
+ struct tdav_fec_pkt_s* pkt;
+ } encoder;
+}
+tdav_codec_ulpfec_t;
+
+//
+// FEC LEVEL
+//
+typedef struct tdav_fec_level_s
+{
+ TSK_DECLARE_OBJECT;
+
+ struct{ // 7.4. FEC Level Header for FEC Packets
+ uint16_t length;
+ uint64_t mask;
+ tsk_size_t mask_size; // in bits
+ } hdr;
+ struct{
+ uint8_t* ptr;
+ tsk_size_t size;
+ }payload;
+}tdav_fec_level_t;
+typedef tsk_list_t tdav_fec_levels_L_t;
+static tsk_object_t* tdav_fec_level_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_fec_level_t *level = self;
+ if (level){
+ level->hdr.mask_size = 16; // L=0
+ }
+ return self;
+}
+static tsk_object_t* tdav_fec_level_dtor(tsk_object_t * self)
+{
+ tdav_fec_level_t *level = self;
+ if (level){
+ TSK_FREE(level->payload.ptr);
+ }
+
+ return self;
+}
+static const tsk_object_def_t tdav_fec_level_def_s =
+{
+ sizeof(tdav_fec_level_t),
+ tdav_fec_level_ctor,
+ tdav_fec_level_dtor,
+ tsk_null,
+};
+const tsk_object_def_t *tdav_fec_level_def_t = &tdav_fec_level_def_s;
+
+
+//
+// FEC PACKET
+//
+typedef struct tdav_fec_pkt_s
+{
+ TSK_DECLARE_OBJECT;
+
+ struct{ // RFC 5109 - 7.3. FEC Header for FEC Packets
+ unsigned E : 1;
+ unsigned L : 1;
+ unsigned P : 1;
+ unsigned X : 1;
+ unsigned CC : 4;
+ unsigned M : 1;
+ unsigned PT : 7;
+ struct{
+ uint16_t value;
+ unsigned set : 1;
+ }SN_base;
+ uint32_t TS;
+ uint16_t length;
+ }hdr;
+
+ tdav_fec_levels_L_t* levels;
+}
+tdav_fec_pkt_t;
+static tsk_object_t* tdav_fec_pkt_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_fec_pkt_t *pkt = self;
+ if (pkt){
+ if (!(pkt->levels = tsk_list_create())){
+ TSK_DEBUG_ERROR("Failed to create levels");
+ return tsk_null;
+ }
+ }
+ return self;
+}
+static tsk_object_t* tdav_fec_pkt_dtor(tsk_object_t * self)
+{
+ tdav_fec_pkt_t *pkt = self;
+ if (pkt){
+ TSK_OBJECT_SAFE_FREE(pkt->levels);
+ }
+
+ return self;
+}
+static int tdav_fec_pkt_cmp(const tsk_object_t *_p1, const tsk_object_t *_p2)
+{
+ const tdav_fec_pkt_t *p1 = _p1;
+ const tdav_fec_pkt_t *p2 = _p2;
+
+ if (p1 && p2){
+ return (int)(p1->hdr.SN_base.value - p2->hdr.SN_base.value);
+ }
+ else if (!p1 && !p2) return 0;
+ else return -1;
+}
+static const tsk_object_def_t tdav_fec_pkt_def_s =
+{
+ sizeof(tdav_fec_pkt_t),
+ tdav_fec_pkt_ctor,
+ tdav_fec_pkt_dtor,
+ tdav_fec_pkt_cmp,
+};
+const tsk_object_def_t *tdav_fec_pkt_def_t = &tdav_fec_pkt_def_s;
+
+
+tsk_size_t tdav_codec_ulpfec_guess_serialbuff_size(const tdav_codec_ulpfec_t* self)
+{
+ tsk_size_t size = TDAV_FEC_PKT_HDR_SIZE;
+ tsk_list_item_t *item;
+ tdav_fec_level_t* level;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ tsk_list_foreach(item, self->encoder.pkt->levels){
+ if (!(level = item->data)){
+ continue;
+ }
+ size += 2 /* Protection length */ + (level->hdr.mask_size >> 3) + level->hdr.length;
+ }
+
+ return size;
+}
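As a worked example: with the default single level (L=0, hence a 16-bit mask) and a largest protected payload of 200 bytes, the estimate above is 10 (FEC header) + 2 (protection length) + 2 (mask) + 200 = 214 bytes.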
+
+int tdav_codec_ulpfec_enc_reset(tdav_codec_ulpfec_t* self)
+{
+ tsk_list_item_t *item;
+ tdav_fec_level_t* level;
+
+ if (!self || !self->encoder.pkt){
+ TSK_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+
+ // reset packet
+ memset(&self->encoder.pkt->hdr, 0, sizeof(self->encoder.pkt->hdr));
+
+ // reset levels
+ tsk_list_foreach(item, self->encoder.pkt->levels){
+ if ((level = item->data)){
+ memset(&level->hdr, 0, sizeof(level->hdr));
+ if (level->payload.ptr){
+ memset(level->payload.ptr, 0, level->payload.size);
+ }
+ }
+ }
+ return 0;
+}
+
+int tdav_codec_ulpfec_enc_protect(tdav_codec_ulpfec_t* self, const trtp_rtp_packet_t* rtp_packet)
+{
+ if (!self || !self->encoder.pkt || !rtp_packet || !rtp_packet->header){
+ TSK_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+
+ // Packet
+ self->encoder.pkt->hdr.P ^= rtp_packet->header->padding;
+ self->encoder.pkt->hdr.X ^= rtp_packet->header->extension;
+ self->encoder.pkt->hdr.CC ^= rtp_packet->header->csrc_count;
+ self->encoder.pkt->hdr.M ^= rtp_packet->header->marker;
+ self->encoder.pkt->hdr.PT ^= rtp_packet->header->payload_type;
+ if (!self->encoder.pkt->hdr.SN_base.set){
+ self->encoder.pkt->hdr.SN_base.value = rtp_packet->header->seq_num;
+ self->encoder.pkt->hdr.SN_base.set = 1;
+ }
+ else{
+ self->encoder.pkt->hdr.SN_base.value = TSK_MIN(self->encoder.pkt->hdr.SN_base.value, rtp_packet->header->seq_num);
+ }
+ self->encoder.pkt->hdr.TS ^= rtp_packet->header->timestamp;
+ self->encoder.pkt->hdr.length ^= (trtp_rtp_packet_guess_serialbuff_size(rtp_packet) - TRTP_RTP_HEADER_MIN_SIZE);
+
+ // Level
+ // For now, always single-level protection
+ {
+ tdav_fec_level_t* level0 = TSK_LIST_FIRST_DATA(self->encoder.pkt->levels);
+ const uint8_t* rtp_payload = (const uint8_t*)(rtp_packet->payload.data_const ? rtp_packet->payload.data_const : rtp_packet->payload.data);
+ tsk_size_t i;
+ if (!level0){
+ tdav_fec_level_t* _level0;
+ if (!(_level0 = tsk_object_new(tdav_fec_level_def_t))){
+ TSK_DEBUG_ERROR("Failed to create level");
+ return -2;
+ }
+ level0 = _level0;
+ tsk_list_push_back_data(self->encoder.pkt->levels, (void**)&_level0);
+ }
+ if (level0->payload.size < rtp_packet->payload.size){
+ if (!(level0->payload.ptr = tsk_realloc(level0->payload.ptr, rtp_packet->payload.size))){
+ TSK_DEBUG_ERROR("Failed to realloc size %d", rtp_packet->payload.size);
+ level0->payload.size = 0;
+ return -3;
+ }
+ level0->payload.size = rtp_packet->payload.size;
+ }
+ for (i = 0; i < rtp_packet->payload.size; ++i){
+ level0->payload.ptr[i] ^= rtp_payload[i];
+ }
+ level0->hdr.mask_size = self->encoder.pkt->hdr.L ? 48 : 16;
+ level0->hdr.mask |= (uint64_t)((uint64_t)1 << (level0->hdr.mask_size - (rtp_packet->header->seq_num - self->encoder.pkt->hdr.SN_base.value)));
+ level0->hdr.length = (uint16_t)(TSK_MAX(level0->hdr.length, rtp_packet->payload.size));
+ }
+
+ return 0;
+}
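The XOR accumulation above is what makes single-loss recovery possible at the receiver: if the FEC payload equals A ^ B and packet B survives, A can be rebuilt byte by byte. A minimal illustration of that property (plain C, not part of this codec's API; padding and length handling omitted):

/* Recover a lost payload when exactly two media packets were protected:
 * fec = a ^ b  =>  a = fec ^ b. */
static void ulpfec_xor_recover(uint8_t* lost, const uint8_t* fec, const uint8_t* survivor, tsk_size_t n)
{
    tsk_size_t i;
    for (i = 0; i < n; ++i) {
        lost[i] = (uint8_t)(fec[i] ^ survivor[i]);
    }
}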
+
+tsk_size_t tdav_codec_ulpfec_enc_serialize(const tdav_codec_ulpfec_t* self, void** out_data, tsk_size_t* out_max_size)
+{
+ uint8_t* pdata;
+ tsk_size_t xsize;
+ int32_t i;
+ tsk_list_item_t* item;
+ tdav_fec_level_t* level;
+
+ if (!self || !self->encoder.pkt || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ xsize = tdav_codec_ulpfec_guess_serialbuff_size(self);
+
+ if (*out_max_size < xsize){
+ if (!(*out_data = tsk_realloc(*out_data, xsize))){
+ TSK_DEBUG_ERROR("Failed to reallocate buffer with size =%d", xsize);
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = xsize;
+ }
+ pdata = (uint8_t*)*out_data;
+
+ // E(1), L(1), P(1), X(1), CC(4)
+ pdata[0] =
+ (self->encoder.pkt->hdr.E << 7) |
+ (self->encoder.pkt->hdr.L << 6) |
+ (self->encoder.pkt->hdr.P << 5) |
+ (self->encoder.pkt->hdr.X << 4) |
+ (self->encoder.pkt->hdr.CC & 0x0F);
+ // M(1), PT(7)
+ pdata[1] = (self->encoder.pkt->hdr.M << 7) | (self->encoder.pkt->hdr.PT & 0x7F);
+ // SN base (16)
+ pdata[2] = (self->encoder.pkt->hdr.SN_base.value >> 8);
+ pdata[3] = (self->encoder.pkt->hdr.SN_base.value & 0xFF);
+ // TS (32)
+ pdata[4] = self->encoder.pkt->hdr.TS >> 24;
+ pdata[5] = (self->encoder.pkt->hdr.TS >> 16) & 0xFF;
+ pdata[6] = (self->encoder.pkt->hdr.TS >> 8) & 0xFF;
+ pdata[7] = (self->encoder.pkt->hdr.TS & 0xFF);
+ // Length (16)
+ pdata[8] = (self->encoder.pkt->hdr.length >> 8);
+ pdata[9] = (self->encoder.pkt->hdr.length & 0xFF);
+
+ pdata += 10;
+
+ tsk_list_foreach(item, self->encoder.pkt->levels){
+ if (!(level = item->data)){
+ continue;
+ }
+ // Protection length (16)
+ pdata[0] = (level->hdr.length >> 8);
+ pdata[1] = (level->hdr.length & 0xFF);
+ pdata += 2;
+ // mask (16 or 48)
+ for (i = (int32_t)(level->hdr.mask_size - 8); i >= 0; i -= 8){
+ *pdata = ((level->hdr.mask >> i) & 0xFF); ++pdata;
+ }
+ // payload
+ memcpy(pdata, level->payload.ptr, level->hdr.length);
+ pdata += level->hdr.length; // advance past this level's payload before the next level is written
+ }
+
+ return xsize;
+}
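To make the packing above concrete, take hypothetical values E=L=P=X=M=0, CC=0, PT=0x60, SN base=0x1234, TS=0x89ABCDEF and length recovery=0x0102: the ten header bytes written are 0x00 0x60 0x12 0x34 0x89 0xAB 0xCD 0xEF 0x01 0x02, and each level then appends its 2-byte protection length, its 2- or 6-byte mask and its payload.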
+
+
+
+static int tdav_codec_ulpfec_open(tmedia_codec_t* self)
+{
+ return 0;
+}
+
+static int tdav_codec_ulpfec_close(tmedia_codec_t* self)
+{
+ return 0;
+}
+
+static tsk_size_t tdav_codec_ulpfec_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ TSK_DEBUG_ERROR("Not expected to be called");
+ return 0;
+}
+
+static tsk_size_t tdav_codec_ulpfec_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ TSK_DEBUG_ERROR("Not expected to be called");
+ return 0;
+}
+
+static tsk_bool_t tdav_codec_ulpfec_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ return tsk_true;
+}
+
+static char* tdav_codec_ulpfec_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+ return tsk_null;
+}
+
+
+/* ============ ULPFEC object definition ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_ulpfec_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_ulpfec_t *ulpfec = self;
+ if (ulpfec){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ if (!(ulpfec->encoder.pkt = tsk_object_new(tdav_fec_pkt_def_t))){
+ TSK_DEBUG_ERROR("Failed to create FEC packet");
+ return tsk_null;
+ }
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_ulpfec_dtor(tsk_object_t * self)
+{
+ tdav_codec_ulpfec_t *ulpfec = self;
+ if (ulpfec){
+ /* deinit base */
+ tmedia_codec_video_deinit(ulpfec);
+ /* deinit self */
+ TSK_OBJECT_SAFE_FREE(ulpfec->encoder.pkt);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_ulpfec_def_s =
+{
+ sizeof(tdav_codec_ulpfec_t),
+ tdav_codec_ulpfec_ctor,
+ tdav_codec_ulpfec_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_ulpfec_plugin_def_s =
+{
+ &tdav_codec_ulpfec_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_none, // fake codec
+ "ulpfec",
+ "ulpfec codec",
+ TMEDIA_CODEC_FORMAT_ULPFEC,
+ tsk_true,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (default width, height, fps) */
+ { 176, 144, 15 },
+
+ tsk_null, // set()
+ tdav_codec_ulpfec_open,
+ tdav_codec_ulpfec_close,
+ tdav_codec_ulpfec_encode,
+ tdav_codec_ulpfec_decode,
+ tdav_codec_ulpfec_sdp_att_match,
+ tdav_codec_ulpfec_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_ulpfec_plugin_def_t = &tdav_codec_ulpfec_plugin_def_s;
\ No newline at end of file
diff --git a/tinyDAV/src/codecs/g711/g711.c b/tinyDAV/src/codecs/g711/g711.c
new file mode 100644
index 0000000..fa7c8be
--- /dev/null
+++ b/tinyDAV/src/codecs/g711/g711.c
@@ -0,0 +1,295 @@
+/*
+ * This source code is a product of Sun Microsystems, Inc. and is provided
+ * for unrestricted use. Users may copy or modify this source code without
+ * charge.
+ *
+ * SUN SOURCE CODE IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING
+ * THE WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun source code is provided with no support and without any obligation on
+ * the part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY THIS SOFTWARE
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+/*
+ * g711.c
+ *
+ * u-law, A-law and linear PCM conversions.
+ */
+
+/*
+ * December 30, 1994:
+ * Functions linear2alaw, linear2ulaw have been updated to correctly
+ * convert unquantized 16 bit values.
+ * Tables for direct u- to A-law and A- to u-law conversions have been
+ * corrected.
+ * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
+ * bli@cpk.auc.dk
+ *
+ */
+
+#include "tinydav/codecs/g711/g711.h"
+
+#define SIGN_BIT (0x80) /* Sign bit for a A-law byte. */
+#define QUANT_MASK (0xf) /* Quantization field mask. */
+#define NSEGS (8) /* Number of A-law segments. */
+#define SEG_SHIFT (4) /* Left shift for segment number. */
+#define SEG_MASK (0x70) /* Segment field mask. */
+
+static short seg_aend[8] = {0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF};
+static short seg_uend[8] = {0x3F, 0x7F, 0xFF, 0x1FF,
+ 0x3FF, 0x7FF, 0xFFF, 0x1FFF};
+
+/* copy from CCITT G.711 specifications */
+unsigned char _u2a[128] = { /* u- to A-law conversions */
+ 1, 1, 2, 2, 3, 3, 4, 4,
+ 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 27, 29, 31, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44,
+ 46, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+/* corrected:
+ 81, 82, 83, 84, 85, 86, 87, 88,
+ should be: */
+ 80, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128};
+
+unsigned char _a2u[128] = { /* A- to u-law conversions */
+ 1, 3, 5, 7, 9, 11, 13, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 48, 49, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+/* corrected:
+ 73, 74, 75, 76, 77, 78, 79, 79,
+ should be: */
+ 73, 74, 75, 76, 77, 78, 79, 80,
+
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127};
+
+static short search(short val, short *table, short size)
+{
+ short i;
+
+ for (i = 0; i < size; i++) {
+ if (val <= *table++)
+ return (i);
+ }
+ return (size);
+}
+
+/*
+ * linear2alaw() - Convert a 16-bit linear PCM value to 8-bit A-law
+ *
+ * linear2alaw() accepts a 16-bit integer and encodes it as A-law data.
+ *
+ * Linear Input Code Compressed Code
+ * ------------------------ ---------------
+ * 0000000wxyza 000wxyz
+ * 0000001wxyza 001wxyz
+ * 000001wxyzab 010wxyz
+ * 00001wxyzabc 011wxyz
+ * 0001wxyzabcd 100wxyz
+ * 001wxyzabcde 101wxyz
+ * 01wxyzabcdef 110wxyz
+ * 1wxyzabcdefg 111wxyz
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
+unsigned char linear2alaw(short pcm_val) /* 2's complement (16-bit range) */
+{
+ short mask;
+ short seg;
+ unsigned char aval;
+
+ pcm_val = pcm_val >> 3;
+
+ if (pcm_val >= 0) {
+ mask = 0xD5; /* sign (7th) bit = 1 */
+ } else {
+ mask = 0x55; /* sign bit = 0 */
+ pcm_val = -pcm_val - 1;
+ }
+
+ /* Convert the scaled magnitude to segment number. */
+ seg = search(pcm_val, seg_aend, 8);
+
+ /* Combine the sign, segment, and quantization bits. */
+
+ if (seg >= 8) /* out of range, return maximum value. */
+ return (unsigned char) (0x7F ^ mask);
+ else {
+ aval = (unsigned char) seg << SEG_SHIFT;
+ if (seg < 2)
+ aval |= (pcm_val >> 1) & QUANT_MASK;
+ else
+ aval |= (pcm_val >> seg) & QUANT_MASK;
+ return (aval ^ mask);
+ }
+}
+
+/*
+ * alaw2linear() - Convert an A-law value to 16-bit linear PCM
+ *
+ */
+short alaw2linear(unsigned char a_val)
+{
+ short t;
+ short seg;
+
+ a_val ^= 0x55;
+
+ t = (a_val & QUANT_MASK) << 4;
+ seg = ((unsigned)a_val & SEG_MASK) >> SEG_SHIFT;
+ switch (seg) {
+ case 0:
+ t += 8;
+ break;
+ case 1:
+ t += 0x108;
+ break;
+ default:
+ t += 0x108;
+ t <<= seg - 1;
+ }
+ return ((a_val & SIGN_BIT) ? t : -t);
+}
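A quick round-trip of the two A-law routines above (illustrative only; exact equality is not expected because A-law keeps roughly 8 bits of precision):

#include <stdio.h>
#include "tinydav/codecs/g711/g711.h"

int main(void)
{
    short pcm = 1234;
    unsigned char a = linear2alaw(pcm); /* 16-bit linear -> 8-bit A-law   */
    short back = alaw2linear(a);        /* 8-bit A-law   -> 16-bit linear */
    printf("in=%d alaw=0x%02X out=%d\n", pcm, (unsigned)a, back);
    return 0;
}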
+
+#define BIAS (0x84) /* Bias for linear code. */
+#define CLIP 8159
+
+/*
+ * linear2ulaw() - Convert a linear PCM value to u-law
+ *
+ * In order to simplify the encoding process, the original linear magnitude
+ * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
+ * (33 - 8191). The result can be seen in the following encoding table:
+ *
+ * Biased Linear Input Code Compressed Code
+ * ------------------------ ---------------
+ * 00000001wxyza 000wxyz
+ * 0000001wxyzab 001wxyz
+ * 000001wxyzabc 010wxyz
+ * 00001wxyzabcd 011wxyz
+ * 0001wxyzabcde 100wxyz
+ * 001wxyzabcdef 101wxyz
+ * 01wxyzabcdefg 110wxyz
+ * 1wxyzabcdefgh 111wxyz
+ *
+ * Each biased linear code has a leading 1 which identifies the segment
+ * number. The value of the segment number is equal to 7 minus the number
+ * of leading 0's. The quantization interval is directly available as the
+ * four bits wxyz. The trailing bits (a - h) are ignored.
+ *
+ * Ordinarily the complement of the resulting code word is used for
+ * transmission, and so the code word is complemented before it is returned.
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
+unsigned char linear2ulaw(short pcm_val) /* 2's complement (16-bit range) */
+{
+ short mask;
+ short seg;
+ unsigned char uval;
+
+ /* Get the sign and the magnitude of the value. */
+ pcm_val = pcm_val >> 2;
+ if (pcm_val < 0) {
+ pcm_val = -pcm_val;
+ mask = 0x7F;
+ } else {
+ mask = 0xFF;
+ }
+ if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
+ pcm_val += (BIAS >> 2);
+
+ /* Convert the scaled magnitude to segment number. */
+ seg = search(pcm_val, seg_uend, 8);
+
+ /*
+ * Combine the sign, segment, quantization bits;
+ * and complement the code word.
+ */
+ if (seg >= 8) /* out of range, return maximum value. */
+ return (unsigned char) (0x7F ^ mask);
+ else {
+ uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
+ return (uval ^ mask);
+ }
+
+}
+
+/*
+ * ulaw2linear() - Convert a u-law value to 16-bit linear PCM
+ *
+ * First, a biased linear code is derived from the code word. An unbiased
+ * output can then be obtained by subtracting 33 from the biased code.
+ *
+ * Note that this function expects to be passed the complement of the
+ * original code word. This is in keeping with ISDN conventions.
+ */
+short ulaw2linear(unsigned char u_val)
+{
+ short t;
+
+ /* Complement to obtain normal u-law value. */
+ u_val = ~u_val;
+
+ /*
+ * Extract and bias the quantization bits. Then
+ * shift up by the segment number and subtract out the bias.
+ */
+ t = ((u_val & QUANT_MASK) << 3) + BIAS;
+ t <<= ((unsigned)u_val & SEG_MASK) >> SEG_SHIFT;
+
+ return ((u_val & SIGN_BIT) ? (BIAS - t) : (t - BIAS));
+}
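As a sanity check on the bias handling above: ulaw2linear(0xFF) complements the input to 0x00, so t = (0 << 3) + 0x84 = 0x84, the segment shift is 0 and the sign bit of the complemented byte is clear, giving t - BIAS = 0; that is, the all-ones u-law code decodes to silence, as expected.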
+
+/* A-law to u-law conversion */
+unsigned char alaw2ulaw(unsigned char aval)
+{
+ aval &= 0xff;
+ return (unsigned char) ((aval & 0x80) ? (0xFF ^ _a2u[aval ^ 0xD5]) :
+ (0x7F ^ _a2u[aval ^ 0x55]));
+}
+
+/* u-law to A-law conversion */
+unsigned char ulaw2alaw(unsigned char uval)
+{
+ uval &= 0xff;
+ return (unsigned char) ((uval & 0x80) ? (0xD5 ^ (_u2a[0xFF ^ uval] - 1)) :
+ (unsigned char) (0x55 ^ (_u2a[0x7F ^ uval] - 1)));
+}
diff --git a/tinyDAV/src/codecs/g711/tdav_codec_g711.c b/tinyDAV/src/codecs/g711/tdav_codec_g711.c
new file mode 100644
index 0000000..fa970e1
--- /dev/null
+++ b/tinyDAV/src/codecs/g711/tdav_codec_g711.c
@@ -0,0 +1,326 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_g711.c
+ * @brief G.711u and G.711a (a.k.a PCMU and PCMA) codec plugins.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/g711/tdav_codec_g711.h"
+
+#include "tinydav/codecs/g711/g711.h" /* algorithms */
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+/* ============ G.711u Plugin interface ================= */
+
+#define tdav_codec_g711u_open tsk_null
+#define tdav_codec_g711u_close tsk_null
+#define tdav_codec_g711u_sdp_att_get tsk_null
+
+static tsk_size_t tdav_codec_g711u_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ register tsk_size_t i;
+ register uint8_t* pout_data;
+ register int16_t* pin_data;
+ tsk_size_t out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size >> 1);
+
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ pout_data = *out_data;
+ pin_data = (int16_t*)in_data;
+ for(i = 0; i<out_size; i++){
+ pout_data[i] = linear2ulaw(pin_data[i]);
+ }
+
+ return out_size;
+}
+
+static tsk_size_t tdav_codec_g711u_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t i;
+ tsk_size_t out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size << 1);
+
+ /* allocate new buffer */
+ if(*out_max_size<out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ for(i = 0; i<in_size; i++){
+ ((short*)*out_data)[i] = ulaw2linear(((uint8_t*)in_data)[i]);
+ }
+
+ return out_size;
+}
+
+static tsk_bool_t tdav_codec_g711u_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// G.711u Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_g711u_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_g711u_t *g711u = self;
+ if(g711u){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_g711u_dtor(tsk_object_t * self)
+{
+ tdav_codec_g711u_t *g711u = self;
+ if(g711u){
+ /* deinit base */
+ tmedia_codec_audio_deinit(g711u);
+ /* deinit self */
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_g711u_def_s =
+{
+ sizeof(tdav_codec_g711u_t),
+ tdav_codec_g711u_ctor,
+ tdav_codec_g711u_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_g711u_plugin_def_s =
+{
+ &tdav_codec_g711u_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_pcmu,
+ "PCMU",
+ "G.711u codec (native)",
+ TMEDIA_CODEC_FORMAT_G711u,
+ tsk_false,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_g711u_open,
+ tdav_codec_g711u_close,
+ tdav_codec_g711u_encode,
+ tdav_codec_g711u_decode,
+ tdav_codec_g711u_sdp_att_match,
+ tdav_codec_g711u_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_g711u_plugin_def_t = &tdav_codec_g711u_plugin_def_s;
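The encode()/decode() entry points above follow the usual tinyDAV output-buffer contract: the caller hands in a reusable pointer plus its current capacity and the codec grows it with tsk_realloc() when needed. A hedged usage sketch from inside this translation unit ('g711u' and 'pcm_frame' are placeholders, not symbols defined in this file):

void* out = tsk_null;
tsk_size_t out_max = 0, out_len;

/* 20 ms of 8 kHz, 16-bit PCM = 160 samples = 320 bytes in, 160 mu-law bytes out */
out_len = tdav_codec_g711u_encode((tmedia_codec_t*)g711u, pcm_frame, 320, &out, &out_max);
/* ... send 'out_len' bytes, then reuse 'out'/'out_max' for the next frame ... */
TSK_FREE(out);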
+
+
+/* ============ G.711a Plugin interface ================= */
+
+#define tdav_codec_g711a_open tsk_null
+#define tdav_codec_g711a_close tsk_null
+#define tdav_codec_g711a_sdp_att_get tsk_null
+
+static tsk_size_t tdav_codec_g711a_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ register tsk_size_t i;
+ register uint8_t* pout_data;
+ register int16_t* pin_data;
+ tsk_size_t out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size >> 1);
+
+ if(*out_max_size < out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ pout_data = *out_data;
+ pin_data = (int16_t*)in_data;
+ for(i = 0; i<out_size; i++){
+ pout_data[i] = linear2alaw(pin_data[i]);
+ }
+
+ return out_size;
+}
+
+#if 0
+FILE* file = tsk_null;
+int count = 0;
+#endif
+static tsk_size_t tdav_codec_g711a_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t i, out_size;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ out_size = (in_size << 1);
+#if 0
+ if(!file && count<=1000){
+ file = fopen("./g711a.pcm", "wb");
+ }
+#endif
+ /* allocate new buffer */
+ if(*out_max_size < out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ for(i = 0; i<in_size; i++){
+ ((short*)*out_data)[i] = alaw2linear(((uint8_t*)in_data)[i]);
+ }
+#if 0
+ if(++count<=1000){
+ fwrite(*out_data, sizeof(short), in_size, file);
+ }
+ else if(file){
+ fclose(file);
+ file = tsk_null;
+ }
+#endif
+ return out_size;
+}
+
+static tsk_bool_t tdav_codec_g711a_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// G.711a Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_g711a_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_g711a_t *g711a = self;
+ if(g711a){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_g711a_dtor(tsk_object_t * self)
+{
+ tdav_codec_g711a_t *g711a = self;
+ if(g711a){
+ /* deinit base */
+ tmedia_codec_audio_deinit(g711a);
+ /* deinit self */
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_g711a_def_s =
+{
+ sizeof(tdav_codec_g711a_t),
+ tdav_codec_g711a_ctor,
+ tdav_codec_g711a_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_g711a_plugin_def_s =
+{
+ &tdav_codec_g711a_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_pcma,
+ "PCMA",
+ "G.711a codec (native)",
+ TMEDIA_CODEC_FORMAT_G711a,
+ tsk_false,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_g711a_open,
+ tdav_codec_g711a_close,
+ tdav_codec_g711a_encode,
+ tdav_codec_g711a_decode,
+ tdav_codec_g711a_sdp_att_match,
+ tdav_codec_g711a_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_g711a_plugin_def_t = &tdav_codec_g711a_plugin_def_s;
diff --git a/tinyDAV/src/codecs/g722/g722_decode.c b/tinyDAV/src/codecs/g722/g722_decode.c
new file mode 100644
index 0000000..b6b7830
--- /dev/null
+++ b/tinyDAV/src/codecs/g722/g722_decode.c
@@ -0,0 +1,400 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_decode.c - The ITU G.722 codec, decode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based in part on a single channel G.722 codec which is:
+ *
+ * Copyright (c) CMU 1993
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_decode.c,v 1.15 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Changed __inline__ to __inline
+ * -Added saturation check on output
+ */
+
+/*! \file */
+
+#include <stdio.h>
+#include <memory.h>
+#include <stdlib.h>
+
+#include "tinydav/codecs/g722/g722_enc_dec.h"
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+static __inline int16_t saturate(int32_t amp)
+{
+ int16_t amp16;
+
+ /* Hopefully this is optimised for the common case - not clipping */
+ amp16 = (int16_t) amp;
+ if (amp == amp16)
+ return amp16;
+ if (amp > TDAV_INT16_MAX)
+ return TDAV_INT16_MAX;
+ return TDAV_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+static void block4(g722_decode_state_t *s, int band, int d);
+
+static void block4(g722_decode_state_t *s, int band, int d)
+{
+ int wd1;
+ int wd2;
+ int wd3;
+ int i;
+
+ /* Block 4, RECONS */
+ s->band[band].d[0] = d;
+ s->band[band].r[0] = saturate(s->band[band].s + d);
+
+ /* Block 4, PARREC */
+ s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+ /* Block 4, UPPOL2 */
+ for (i = 0; i < 3; i++)
+ s->band[band].sg[i] = s->band[band].p[i] >> 15;
+ wd1 = saturate(s->band[band].a[1] << 2);
+
+ wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
+ if (wd2 > 32767)
+ wd2 = 32767;
+ wd3 = (s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128;
+ wd3 += (wd2 >> 7);
+ wd3 += (s->band[band].a[2]*32512) >> 15;
+ if (wd3 > 12288)
+ wd3 = 12288;
+ else if (wd3 < -12288)
+ wd3 = -12288;
+ s->band[band].ap[2] = wd3;
+
+ /* Block 4, UPPOL1 */
+ s->band[band].sg[0] = s->band[band].p[0] >> 15;
+ s->band[band].sg[1] = s->band[band].p[1] >> 15;
+ wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
+ wd2 = (s->band[band].a[1]*32640) >> 15;
+
+ s->band[band].ap[1] = saturate(wd1 + wd2);
+ wd3 = saturate(15360 - s->band[band].ap[2]);
+ if (s->band[band].ap[1] > wd3)
+ s->band[band].ap[1] = wd3;
+ else if (s->band[band].ap[1] < -wd3)
+ s->band[band].ap[1] = -wd3;
+
+ /* Block 4, UPZERO */
+ wd1 = (d == 0) ? 0 : 128;
+ s->band[band].sg[0] = d >> 15;
+ for (i = 1; i < 7; i++)
+ {
+ s->band[band].sg[i] = s->band[band].d[i] >> 15;
+ wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
+ wd3 = (s->band[band].b[i]*32640) >> 15;
+ s->band[band].bp[i] = saturate(wd2 + wd3);
+ }
+
+ /* Block 4, DELAYA */
+ for (i = 6; i > 0; i--)
+ {
+ s->band[band].d[i] = s->band[band].d[i - 1];
+ s->band[band].b[i] = s->band[band].bp[i];
+ }
+
+ for (i = 2; i > 0; i--)
+ {
+ s->band[band].r[i] = s->band[band].r[i - 1];
+ s->band[band].p[i] = s->band[band].p[i - 1];
+ s->band[band].a[i] = s->band[band].ap[i];
+ }
+
+ /* Block 4, FILTEP */
+ wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+ wd1 = (s->band[band].a[1]*wd1) >> 15;
+ wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+ wd2 = (s->band[band].a[2]*wd2) >> 15;
+ s->band[band].sp = saturate(wd1 + wd2);
+
+ /* Block 4, FILTEZ */
+ s->band[band].sz = 0;
+ for (i = 6; i > 0; i--)
+ {
+ wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+ s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+ }
+ s->band[band].sz = saturate(s->band[band].sz);
+
+ /* Block 4, PREDIC */
+ s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+g722_decode_state_t *g722_decode_init(g722_decode_state_t *s, int rate, int options)
+{
+ if (s == NULL)
+ {
+ if ((s = (g722_decode_state_t *) malloc(sizeof(*s))) == NULL)
+ return NULL;
+ }
+ memset(s, 0, sizeof(*s));
+ if (rate == 48000)
+ s->bits_per_sample = 6;
+ else if (rate == 56000)
+ s->bits_per_sample = 7;
+ else
+ s->bits_per_sample = 8;
+ if ((options & G722_SAMPLE_RATE_8000))
+ s->eight_k = TRUE;
+ if ((options & G722_PACKED) && s->bits_per_sample != 8)
+ s->packed = TRUE;
+ else
+ s->packed = FALSE;
+ s->band[0].det = 32;
+ s->band[1].det = 8;
+ return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+int g722_decode_release(g722_decode_state_t *s)
+{
+ free(s);
+ return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+int g722_decode(g722_decode_state_t *s, int16_t amp[],
+ const uint8_t g722_data[], int len)
+{
+ static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
+ static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0 };
+ static const int ilb[32] =
+ {
+ 2048, 2093, 2139, 2186, 2233, 2282, 2332,
+ 2383, 2435, 2489, 2543, 2599, 2656, 2714,
+ 2774, 2834, 2896, 2960, 3025, 3091, 3158,
+ 3228, 3298, 3371, 3444, 3520, 3597, 3676,
+ 3756, 3838, 3922, 4008
+ };
+ static const int wh[3] = {0, -214, 798};
+ static const int rh2[4] = {2, 1, 2, 1};
+ static const int qm2[4] = {-7408, -1616, 7408, 1616};
+ static const int qm4[16] =
+ {
+ 0, -20456, -12896, -8968,
+ -6288, -4240, -2584, -1200,
+ 20456, 12896, 8968, 6288,
+ 4240, 2584, 1200, 0
+ };
+ static const int qm5[32] =
+ {
+ -280, -280, -23352, -17560,
+ -14120, -11664, -9752, -8184,
+ -6864, -5712, -4696, -3784,
+ -2960, -2208, -1520, -880,
+ 23352, 17560, 14120, 11664,
+ 9752, 8184, 6864, 5712,
+ 4696, 3784, 2960, 2208,
+ 1520, 880, 280, -280
+ };
+ static const int qm6[64] =
+ {
+ -136, -136, -136, -136,
+ -24808, -21904, -19008, -16704,
+ -14984, -13512, -12280, -11192,
+ -10232, -9360, -8576, -7856,
+ -7192, -6576, -6000, -5456,
+ -4944, -4464, -4008, -3576,
+ -3168, -2776, -2400, -2032,
+ -1688, -1360, -1040, -728,
+ 24808, 21904, 19008, 16704,
+ 14984, 13512, 12280, 11192,
+ 10232, 9360, 8576, 7856,
+ 7192, 6576, 6000, 5456,
+ 4944, 4464, 4008, 3576,
+ 3168, 2776, 2400, 2032,
+ 1688, 1360, 1040, 728,
+ 432, 136, -432, -136
+ };
+ static const int qmf_coeffs[12] =
+ {
+ 3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
+ };
+
+ int dlowt;
+ int rlow;
+ int ihigh;
+ int dhigh;
+ int rhigh;
+ int xout1;
+ int xout2;
+ int wd1;
+ int wd2;
+ int wd3;
+ int code;
+ int outlen;
+ int i;
+ int j;
+
+ outlen = 0;
+ rhigh = 0;
+ for (j = 0; j < len; )
+ {
+ if (s->packed)
+ {
+ /* Unpack the code bits */
+ if (s->in_bits < s->bits_per_sample)
+ {
+ s->in_buffer |= (g722_data[j++] << s->in_bits);
+ s->in_bits += 8;
+ }
+ code = s->in_buffer & ((1 << s->bits_per_sample) - 1);
+ s->in_buffer >>= s->bits_per_sample;
+ s->in_bits -= s->bits_per_sample;
+ }
+ else
+ {
+ code = g722_data[j++];
+ }
+
+ switch (s->bits_per_sample)
+ {
+ default:
+ case 8:
+ wd1 = code & 0x3F;
+ ihigh = (code >> 6) & 0x03;
+ wd2 = qm6[wd1];
+ wd1 >>= 2;
+ break;
+ case 7:
+ wd1 = code & 0x1F;
+ ihigh = (code >> 5) & 0x03;
+ wd2 = qm5[wd1];
+ wd1 >>= 1;
+ break;
+ case 6:
+ wd1 = code & 0x0F;
+ ihigh = (code >> 4) & 0x03;
+ wd2 = qm4[wd1];
+ break;
+ }
+ /* Block 5L, LOW BAND INVQBL */
+ wd2 = (s->band[0].det*wd2) >> 15;
+ /* Block 5L, RECONS */
+ rlow = s->band[0].s + wd2;
+ /* Block 6L, LIMIT */
+ if (rlow > 16383)
+ rlow = 16383;
+ else if (rlow < -16384)
+ rlow = -16384;
+
+ /* Block 2L, INVQAL */
+ wd2 = qm4[wd1];
+ dlowt = (s->band[0].det*wd2) >> 15;
+
+ /* Block 3L, LOGSCL */
+ wd2 = rl42[wd1];
+ wd1 = (s->band[0].nb*127) >> 7;
+ wd1 += wl[wd2];
+ if (wd1 < 0)
+ wd1 = 0;
+ else if (wd1 > 18432)
+ wd1 = 18432;
+ s->band[0].nb = wd1;
+
+ /* Block 3L, SCALEL */
+ wd1 = (s->band[0].nb >> 6) & 31;
+ wd2 = 8 - (s->band[0].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[0].det = wd3 << 2;
+
+ block4(s, 0, dlowt);
+
+ if (!s->eight_k)
+ {
+ /* Block 2H, INVQAH */
+ wd2 = qm2[ihigh];
+ dhigh = (s->band[1].det*wd2) >> 15;
+ /* Block 5H, RECONS */
+ rhigh = dhigh + s->band[1].s;
+ /* Block 6H, LIMIT */
+ if (rhigh > 16383)
+ rhigh = 16383;
+ else if (rhigh < -16384)
+ rhigh = -16384;
+
+ /* Block 2H, INVQAH */
+ wd2 = rh2[ihigh];
+ wd1 = (s->band[1].nb*127) >> 7;
+ wd1 += wh[wd2];
+ if (wd1 < 0)
+ wd1 = 0;
+ else if (wd1 > 22528)
+ wd1 = 22528;
+ s->band[1].nb = wd1;
+
+ /* Block 3H, SCALEH */
+ wd1 = (s->band[1].nb >> 6) & 31;
+ wd2 = 10 - (s->band[1].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[1].det = wd3 << 2;
+
+ block4(s, 1, dhigh);
+ }
+
+ if (s->itu_test_mode)
+ {
+ amp[outlen++] = (int16_t) (rlow << 1);
+ amp[outlen++] = (int16_t) (rhigh << 1);
+ }
+ else
+ {
+ if (s->eight_k)
+ {
+ amp[outlen++] = (int16_t) (rlow << 1);
+ }
+ else
+ {
+ /* Apply the receive QMF */
+ for (i = 0; i < 22; i++)
+ s->x[i] = s->x[i + 2];
+ s->x[22] = rlow + rhigh;
+ s->x[23] = rlow - rhigh;
+
+ xout1 = 0;
+ xout2 = 0;
+ for (i = 0; i < 12; i++)
+ {
+ xout2 += s->x[2*i]*qmf_coeffs[i];
+ xout1 += s->x[2*i + 1]*qmf_coeffs[11 - i];
+ }
+ /* We shift by 12 to allow for the QMF filters (DC gain = 4096), less 1
+ to allow for the 15 bit input to the G.722 algorithm. */
+ /* WebRtc, tlegrand: added saturation */
+ amp[outlen++] = saturate(xout1 >> 11);
+ amp[outlen++] = saturate(xout2 >> 11);
+ }
+ }
+ }
+ return outlen;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
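A stand-alone usage sketch of this decoder, assuming a 64 kb/s stream and wideband (16 kHz) output; 'g722_payload' is a placeholder buffer and error handling is omitted:

g722_decode_state_t* st = g722_decode_init(NULL, 64000, 0); /* 8 bits per code word, QMF active */
int16_t pcm[320];                                           /* up to 2 samples per G.722 byte   */
int n = g722_decode(st, pcm, g722_payload, 160);            /* 160 payload bytes -> n samples   */
g722_decode_release(st);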
diff --git a/tinyDAV/src/codecs/g722/g722_encode.c b/tinyDAV/src/codecs/g722/g722_encode.c
new file mode 100644
index 0000000..68758eb
--- /dev/null
+++ b/tinyDAV/src/codecs/g722/g722_encode.c
@@ -0,0 +1,426 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_encode.c - The ITU G.722 codec, encode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * All rights reserved.
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based on a single channel 64kbps only G.722 codec which is:
+ *
+ ***** Copyright (c) CMU 1993 *****
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_encode.c,v 1.14 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Added option to run encoder bitexact with ITU-T reference implementation
+ */
+
+#include <stdio.h>
+#include <memory.h>
+#include <stdlib.h>
+
+#include "tinydav/codecs/g722/g722_enc_dec.h"
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+static __inline int16_t saturate(int32_t amp)
+{
+ int16_t amp16;
+
+ /* Hopefully this is optimised for the common case - not clipping */
+ amp16 = (int16_t) amp;
+ if (amp == amp16)
+ return amp16;
+ if (amp > TDAV_INT16_MAX)
+ return TDAV_INT16_MAX;
+ return TDAV_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+static void block4(g722_encode_state_t *s, int band, int d)
+{
+ int wd1;
+ int wd2;
+ int wd3;
+ int i;
+
+ /* Block 4, RECONS */
+ s->band[band].d[0] = d;
+ s->band[band].r[0] = saturate(s->band[band].s + d);
+
+ /* Block 4, PARREC */
+ s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+ /* Block 4, UPPOL2 */
+ for (i = 0; i < 3; i++)
+ s->band[band].sg[i] = s->band[band].p[i] >> 15;
+ wd1 = saturate(s->band[band].a[1] << 2);
+
+ wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
+ if (wd2 > 32767)
+ wd2 = 32767;
+ wd3 = (wd2 >> 7) + ((s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128);
+ wd3 += (s->band[band].a[2]*32512) >> 15;
+ if (wd3 > 12288)
+ wd3 = 12288;
+ else if (wd3 < -12288)
+ wd3 = -12288;
+ s->band[band].ap[2] = wd3;
+
+ /* Block 4, UPPOL1 */
+ s->band[band].sg[0] = s->band[band].p[0] >> 15;
+ s->band[band].sg[1] = s->band[band].p[1] >> 15;
+ wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
+ wd2 = (s->band[band].a[1]*32640) >> 15;
+
+ s->band[band].ap[1] = saturate(wd1 + wd2);
+ wd3 = saturate(15360 - s->band[band].ap[2]);
+ if (s->band[band].ap[1] > wd3)
+ s->band[band].ap[1] = wd3;
+ else if (s->band[band].ap[1] < -wd3)
+ s->band[band].ap[1] = -wd3;
+
+ /* Block 4, UPZERO */
+ wd1 = (d == 0) ? 0 : 128;
+ s->band[band].sg[0] = d >> 15;
+ for (i = 1; i < 7; i++)
+ {
+ s->band[band].sg[i] = s->band[band].d[i] >> 15;
+ wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
+ wd3 = (s->band[band].b[i]*32640) >> 15;
+ s->band[band].bp[i] = saturate(wd2 + wd3);
+ }
+
+ /* Block 4, DELAYA */
+ for (i = 6; i > 0; i--)
+ {
+ s->band[band].d[i] = s->band[band].d[i - 1];
+ s->band[band].b[i] = s->band[band].bp[i];
+ }
+
+ for (i = 2; i > 0; i--)
+ {
+ s->band[band].r[i] = s->band[band].r[i - 1];
+ s->band[band].p[i] = s->band[band].p[i - 1];
+ s->band[band].a[i] = s->band[band].ap[i];
+ }
+
+ /* Block 4, FILTEP */
+ wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+ wd1 = (s->band[band].a[1]*wd1) >> 15;
+ wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+ wd2 = (s->band[band].a[2]*wd2) >> 15;
+ s->band[band].sp = saturate(wd1 + wd2);
+
+ /* Block 4, FILTEZ */
+ s->band[band].sz = 0;
+ for (i = 6; i > 0; i--)
+ {
+ wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+ s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+ }
+ s->band[band].sz = saturate(s->band[band].sz);
+
+ /* Block 4, PREDIC */
+ s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+g722_encode_state_t *g722_encode_init(g722_encode_state_t *s, int rate, int options)
+{
+ if (s == NULL)
+ {
+ if ((s = (g722_encode_state_t *) malloc(sizeof(*s))) == NULL)
+ return NULL;
+ }
+ memset(s, 0, sizeof(*s));
+ if (rate == 48000)
+ s->bits_per_sample = 6;
+ else if (rate == 56000)
+ s->bits_per_sample = 7;
+ else
+ s->bits_per_sample = 8;
+ if ((options & G722_SAMPLE_RATE_8000))
+ s->eight_k = TRUE;
+ if ((options & G722_PACKED) && s->bits_per_sample != 8)
+ s->packed = TRUE;
+ else
+ s->packed = FALSE;
+ s->band[0].det = 32;
+ s->band[1].det = 8;
+ return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+int g722_encode_release(g722_encode_state_t *s)
+{
+ free(s);
+ return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* WebRtc, tlegrand:
+ * Only define the following if bit-exactness with reference implementation
+ * is needed. Will only have any effect if input signal is saturated.
+ */
+//#define RUN_LIKE_REFERENCE_G722
+#ifdef RUN_LIKE_REFERENCE_G722
+int16_t limitValues (int16_t rl)
+{
+
+ int16_t yl;
+
+ yl = (rl > 16383) ? 16383 : ((rl < -16384) ? -16384 : rl);
+
+ return (yl);
+}
+#endif
+
+int g722_encode(g722_encode_state_t *s, uint8_t g722_data[],
+ const int16_t amp[], int len)
+{
+ static const int q6[32] =
+ {
+ 0, 35, 72, 110, 150, 190, 233, 276,
+ 323, 370, 422, 473, 530, 587, 650, 714,
+ 786, 858, 940, 1023, 1121, 1219, 1339, 1458,
+ 1612, 1765, 1980, 2195, 2557, 2919, 0, 0
+ };
+ static const int iln[32] =
+ {
+ 0, 63, 62, 31, 30, 29, 28, 27,
+ 26, 25, 24, 23, 22, 21, 20, 19,
+ 18, 17, 16, 15, 14, 13, 12, 11,
+ 10, 9, 8, 7, 6, 5, 4, 0
+ };
+ static const int ilp[32] =
+ {
+ 0, 61, 60, 59, 58, 57, 56, 55,
+ 54, 53, 52, 51, 50, 49, 48, 47,
+ 46, 45, 44, 43, 42, 41, 40, 39,
+ 38, 37, 36, 35, 34, 33, 32, 0
+ };
+ static const int wl[8] =
+ {
+ -60, -30, 58, 172, 334, 538, 1198, 3042
+ };
+ static const int rl42[16] =
+ {
+ 0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0
+ };
+ static const int ilb[32] =
+ {
+ 2048, 2093, 2139, 2186, 2233, 2282, 2332,
+ 2383, 2435, 2489, 2543, 2599, 2656, 2714,
+ 2774, 2834, 2896, 2960, 3025, 3091, 3158,
+ 3228, 3298, 3371, 3444, 3520, 3597, 3676,
+ 3756, 3838, 3922, 4008
+ };
+ static const int qm4[16] =
+ {
+ 0, -20456, -12896, -8968,
+ -6288, -4240, -2584, -1200,
+ 20456, 12896, 8968, 6288,
+ 4240, 2584, 1200, 0
+ };
+ static const int qm2[4] =
+ {
+ -7408, -1616, 7408, 1616
+ };
+ static const int qmf_coeffs[12] =
+ {
+ 3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
+ };
+ static const int ihn[3] = {0, 1, 0};
+ static const int ihp[3] = {0, 3, 2};
+ static const int wh[3] = {0, -214, 798};
+ static const int rh2[4] = {2, 1, 2, 1};
+
+ int dlow;
+ int dhigh;
+ int el;
+ int wd;
+ int wd1;
+ int ril;
+ int wd2;
+ int il4;
+ int ih2;
+ int wd3;
+ int eh;
+ int mih;
+ int i;
+ int j;
+ /* Low and high band PCM from the QMF */
+ int xlow;
+ int xhigh;
+ int g722_bytes;
+ /* Even and odd tap accumulators */
+ int sumeven;
+ int sumodd;
+ int ihigh;
+ int ilow;
+ int code;
+
+ g722_bytes = 0;
+ xhigh = 0;
+ for (j = 0; j < len; )
+ {
+ if (s->itu_test_mode)
+ {
+ xlow =
+ xhigh = amp[j++] >> 1;
+ }
+ else
+ {
+ if (s->eight_k)
+ {
+ /* We shift by 1 to allow for the 15 bit input to the G.722 algorithm. */
+ xlow = amp[j++] >> 1;
+ }
+ else
+ {
+ /* Apply the transmit QMF */
+ /* Shuffle the buffer down */
+ for (i = 0; i < 22; i++)
+ s->x[i] = s->x[i + 2];
+ s->x[22] = amp[j++];
+ s->x[23] = amp[j++];
+
+ /* Discard every other QMF output */
+ sumeven = 0;
+ sumodd = 0;
+ for (i = 0; i < 12; i++)
+ {
+ sumodd += s->x[2*i]*qmf_coeffs[i];
+ sumeven += s->x[2*i + 1]*qmf_coeffs[11 - i];
+ }
+ /* We shift by 12 to allow for the QMF filters (DC gain = 4096), plus 1
+ to allow for us summing two filters, plus 1 to allow for the 15 bit
+ input to the G.722 algorithm. */
+ xlow = (sumeven + sumodd) >> 14;
+ xhigh = (sumeven - sumodd) >> 14;
+
+#ifdef RUN_LIKE_REFERENCE_G722
+ /* The following lines are only used to verify bit-exactness
+ * with reference implementation of G.722. Higher precision
+ * is achieved without limiting the values.
+ */
+ xlow = limitValues(xlow);
+ xhigh = limitValues(xhigh);
+#endif
+ }
+ }
+ /* Block 1L, SUBTRA */
+ el = saturate(xlow - s->band[0].s);
+
+ /* Block 1L, QUANTL */
+ wd = (el >= 0) ? el : -(el + 1);
+
+ for (i = 1; i < 30; i++)
+ {
+ wd1 = (q6[i]*s->band[0].det) >> 12;
+ if (wd < wd1)
+ break;
+ }
+ ilow = (el < 0) ? iln[i] : ilp[i];
+
+ /* Block 2L, INVQAL */
+ ril = ilow >> 2;
+ wd2 = qm4[ril];
+ dlow = (s->band[0].det*wd2) >> 15;
+
+ /* Block 3L, LOGSCL */
+ il4 = rl42[ril];
+ wd = (s->band[0].nb*127) >> 7;
+ s->band[0].nb = wd + wl[il4];
+ if (s->band[0].nb < 0)
+ s->band[0].nb = 0;
+ else if (s->band[0].nb > 18432)
+ s->band[0].nb = 18432;
+
+ /* Block 3L, SCALEL */
+ wd1 = (s->band[0].nb >> 6) & 31;
+ wd2 = 8 - (s->band[0].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[0].det = wd3 << 2;
+
+ block4(s, 0, dlow);
+
+ if (s->eight_k)
+ {
+ /* Just leave the high bits as zero */
+ code = (0xC0 | ilow) >> (8 - s->bits_per_sample);
+ }
+ else
+ {
+ /* Block 1H, SUBTRA */
+ eh = saturate(xhigh - s->band[1].s);
+
+ /* Block 1H, QUANTH */
+ wd = (eh >= 0) ? eh : -(eh + 1);
+ wd1 = (564*s->band[1].det) >> 12;
+ mih = (wd >= wd1) ? 2 : 1;
+ ihigh = (eh < 0) ? ihn[mih] : ihp[mih];
+
+ /* Block 2H, INVQAH */
+ wd2 = qm2[ihigh];
+ dhigh = (s->band[1].det*wd2) >> 15;
+
+ /* Block 3H, LOGSCH */
+ ih2 = rh2[ihigh];
+ wd = (s->band[1].nb*127) >> 7;
+ s->band[1].nb = wd + wh[ih2];
+ if (s->band[1].nb < 0)
+ s->band[1].nb = 0;
+ else if (s->band[1].nb > 22528)
+ s->band[1].nb = 22528;
+
+ /* Block 3H, SCALEH */
+ wd1 = (s->band[1].nb >> 6) & 31;
+ wd2 = 10 - (s->band[1].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[1].det = wd3 << 2;
+
+ block4(s, 1, dhigh);
+ code = ((ihigh << 6) | ilow) >> (8 - s->bits_per_sample);
+ }
+
+ if (s->packed)
+ {
+ /* Pack the code bits */
+ s->out_buffer |= (code << s->out_bits);
+ s->out_bits += s->bits_per_sample;
+ if (s->out_bits >= 8)
+ {
+ g722_data[g722_bytes++] = (uint8_t) (s->out_buffer & 0xFF);
+ s->out_bits -= 8;
+ s->out_buffer >>= 8;
+ }
+ }
+ else
+ {
+ g722_data[g722_bytes++] = (uint8_t) code;
+ }
+ }
+ return g722_bytes;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
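For the packed mode handled at the end of the loop above: at 48 kb/s each code word is 6 bits, so a 20 ms wideband frame (320 samples, hence 160 code words) packs into 160 * 6 / 8 = 120 payload bytes instead of 160; at 64 kb/s the code words are whole bytes and packing is skipped.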
diff --git a/tinyDAV/src/codecs/g722/tdav_codec_g722.c b/tinyDAV/src/codecs/g722/tdav_codec_g722.c
new file mode 100644
index 0000000..749fa04
--- /dev/null
+++ b/tinyDAV/src/codecs/g722/tdav_codec_g722.c
@@ -0,0 +1,219 @@
+/*
+* Copyright (C) 2011-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_g722.c
+ * @brief G.722 codec plugins.
+ */
+#include "tinydav/codecs/g722/tdav_codec_g722.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+typedef struct tdav_codec_g722_s
+{
+ TMEDIA_DECLARE_CODEC_AUDIO;
+
+ g722_encode_state_t *enc_state;
+ g722_decode_state_t *dec_state;
+}
+tdav_codec_g722_t;
+
+static int tdav_codec_g722_open(tmedia_codec_t* self)
+{
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ if (!g722){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // Initialize the decoder
+ if (!g722->dec_state){
+ if (!(g722->dec_state = tsk_calloc(1, sizeof(g722_decode_state_t)))){
+ TSK_DEBUG_ERROR("Failed to create G.722 decoder state");
+ return -2;
+ }
+ // Create and/or reset the G.722 decoder
+ // Bitrate 64 kbps and wideband mode (2)
+ if (!(g722->dec_state = g722_decode_init(g722->dec_state, 64000, 2))){
+ TSK_DEBUG_ERROR("g722_decode_init failed");
+ return -3;
+ }
+ }
+
+ // Initialize the encoder
+ if (!g722->enc_state){
+ if (!(g722->enc_state = tsk_calloc(1, sizeof(g722_encode_state_t)))){
+ TSK_DEBUG_ERROR("Failed to create G.722 encoder state");
+ return -4;
+ }
+ // Create and/or reset the G.722 encoder
+ // Bitrate 64 kbps and wideband mode (2)
+ if (!(g722->enc_state = g722_encode_init(g722->enc_state, 64000, 2))){
+ TSK_DEBUG_ERROR("g722_encode_init failed");
+ return -5;
+ }
+ }
+
+ return 0;
+}
+
+static int tdav_codec_g722_close(tmedia_codec_t* self)
+{
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ (void)(g722);
+
+ /* resources will be freed by the dtor() */
+
+ return 0;
+}
+
+static tsk_size_t tdav_codec_g722_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t out_g722_size;
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ if (!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_g722_size = in_size >> 2;
+
+ if (*out_max_size < out_g722_size){
+ if (!(*out_data = tsk_realloc(*out_data, out_g722_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_g722_size;
+ }
+
+ g722_encode(g722->enc_state, (uint8_t*)*out_data, (int16_t*)in_data, (int)in_size / sizeof(int16_t));
+
+ return out_g722_size;
+}
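Worked example for the size computation above: a 20 ms frame at 16 kHz is 320 samples, i.e. in_size = 640 bytes of 16-bit PCM, so out_g722_size = 640 >> 2 = 160 bytes, which matches the 64 kb/s payload rate expected for G.722 over RTP.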
+
+static tsk_size_t tdav_codec_g722_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tdav_codec_g722_t* g722 = (tdav_codec_g722_t*)self;
+
+ if (!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* allocate new buffer */
+ if (*out_max_size < (in_size << 2)){
+ if (!(*out_data = tsk_realloc(*out_data, in_size << 2))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = in_size << 2;
+ }
+
+ g722_decode(g722->dec_state, (int16_t*)*out_data, (uint8_t*)in_data, (int)in_size);
+
+ return (in_size << 2);
+}
+
+static tsk_bool_t tdav_codec_g722_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ return tsk_true;
+}
+
+static char* tdav_codec_g722_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ return tsk_null;
+}
+
+//
+// g722 Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_g722_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_g722_t *g722 = self;
+ if (g722){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_g722_dtor(tsk_object_t * self)
+{
+ tdav_codec_g722_t *g722 = self;
+ if (g722){
+ /* deinit base */
+ tmedia_codec_audio_deinit(g722);
+ /* deinit self */
+ if (g722->enc_state){
+ g722_encode_release(g722->enc_state), g722->enc_state = tsk_null;
+ }
+ if (g722->dec_state){
+ g722_decode_release(g722->dec_state), g722->dec_state = tsk_null;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_g722_def_s =
+{
+ sizeof(tdav_codec_g722_t),
+ tdav_codec_g722_ctor,
+ tdav_codec_g722_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_g722_plugin_def_s =
+{
+ &tdav_codec_g722_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_g722,
+ "G722",
+ "g722 Codec (native)",
+ TMEDIA_CODEC_FORMAT_G722,
+ tsk_false,
+ 16000,
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ { 0 },
+
+ tsk_null, // set()
+ tdav_codec_g722_open,
+ tdav_codec_g722_close,
+ tdav_codec_g722_encode,
+ tdav_codec_g722_decode,
+ tdav_codec_g722_sdp_att_match,
+ tdav_codec_g722_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_g722_plugin_def_t = &tdav_codec_g722_plugin_def_s;
diff --git a/tinyDAV/src/codecs/g729/tdav_codec_g729.c b/tinyDAV/src/codecs/g729/tdav_codec_g729.c
new file mode 100644
index 0000000..8981687
--- /dev/null
+++ b/tinyDAV/src/codecs/g729/tdav_codec_g729.c
@@ -0,0 +1,466 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_g729.c
+ * @brief G729ab codec.
+ * Source from: http://www.itu.int/rec/T-REC-G.729-199610-S!AnnB/en
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ */
+#include "tinydav/codecs/g729/tdav_codec_g729.h"
+
+#if HAVE_G729
+
+#include "g729b/dtx.h"
+#include "g729b/octet.h"
+
+#include "tsk_string.h"
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "../thirdparties/win32/lib/g729b/g729b.a")
+#endif
+
+int16_t bad_lsf; /* bad LSF indicator */
+
+#ifndef G729_ENABLE_VAD
+# define G729_ENABLE_VAD 0 // FIXME: speexJB not prepared for such a feature
+#endif
+
+static int16_t bin2int(int16_t no_of_bits, const int16_t *bitstream);
+static void int2bin(int16_t value, int16_t no_of_bits, int16_t *bitstream);
+
+static void unpack_G729(const uint8_t bitstream[], int16_t bits[], int len);
+static void unpack_SID(const uint8_t bitstream[], int16_t bits[]);
+
+static void pack_G729(const int16_t ituBits[], uint8_t bitstream[]);
+static void pack_SID(const int16_t ituBits[], uint8_t bitstream[]);
+
+/* ============ G.729ab Plugin interface ================= */
+
+#define tdav_codec_g729ab_set tsk_null
+
+static int tdav_codec_g729ab_open(tmedia_codec_t* self)
+{
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;
+
+ // Initialize the decoder
+ bad_lsf = 0;
+ g729a->decoder.synth = (g729a->decoder.synth_buf + M);
+
+ Init_Decod_ld8a();
+ Init_Post_Filter();
+ Init_Post_Process();
+ /* for G.729B */
+ Init_Dec_cng();
+
+ // Initialize the encoder
+ Init_Pre_Process();
+ Init_Coder_ld8a();
+ Set_zero(g729a->encoder.prm, PRM_SIZE + 1);
+ /* for G.729B */
+ Init_Cod_cng();
+
+
+ return 0;
+}
+
+static int tdav_codec_g729ab_close(tmedia_codec_t* self)
+{
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;
+
+ (void)(g729a);
+
+ /* resources will be freed by the destructor (dtor()) */
+
+ return 0;
+}
+
+static tsk_size_t tdav_codec_g729ab_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t ex_size, out_size = 0;
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;
+ int i, frame_count = (in_size / 160);
+
+
+ if(!self || !in_data || !in_size || !out_data || (in_size % 160)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ ex_size = (frame_count * 10);
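+ // At 8 kbit/s each 10 ms frame (80 samples, 160 bytes of 16-bit PCM) packs into
+ // 80 bits, i.e. 10 bytes; Annex B SID frames take only 2 bytes (handled below).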
+
+ // allocate new buffer if needed
+ if(*out_max_size <ex_size){
+ if(!(*out_data = tsk_realloc(*out_data, ex_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = ex_size;
+ }
+
+ for(i=0; i<frame_count; i++){
+ extern int16_t *new_speech;
+
+ if(g729a->encoder.frame == 32767){
+ g729a->encoder.frame = 256;
+ }
+ else{
+ g729a->encoder.frame++;
+ }
+
+ memcpy(new_speech, &((uint8_t*)in_data)[i*L_FRAME*sizeof(int16_t)], sizeof(int16_t)*L_FRAME);
+
+ Pre_Process(new_speech, L_FRAME);
+ Coder_ld8a(g729a->encoder.prm, g729a->encoder.frame, g729a->encoder.vad_enable);
+ prm2bits_ld8k(g729a->encoder.prm, g729a->encoder.serial);
+
+ if(g729a->encoder.serial[1] == RATE_8000){
+ pack_G729(&g729a->encoder.serial[2], &((uint8_t*)(*out_data))[out_size]);
+ out_size += 10;
+ }
+ else if(g729a->encoder.serial[1] == RATE_SID_OCTET){
+ pack_SID(&g729a->encoder.serial[2], &((uint8_t*)(*out_data))[out_size]);
+ out_size += 2;
+ }
+ else{ // RATE_0
+ //TSK_DEBUG_INFO("G729_RATE_0 - Not transmitted");
+ if (!g729a->encoder.vad_enable) {
+ // silence
+ memset(&((uint8_t*)(*out_data))[out_size], 0, 10);
+ out_size += 10;
+ }
+ }
+ }
+
+ return out_size;
+}
+
+static tsk_size_t tdav_codec_g729ab_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size = 0;
+ int i, frame_count;
+ const uint8_t* data_start = (const uint8_t*)in_data;
+ const uint8_t* data_end;
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data || ((in_size % 10) && (in_size % 10 != 2))){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ data_end = (data_start + in_size);
+
+ frame_count = (in_size/10) + ((in_size % 10) ? 1 : 0);
+
+ out_size = 160*frame_count;
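+ // Each received frame (10-byte voice frame, or a trailing 2-byte SID frame) is
+ // synthesized back into 80 samples of 16-bit PCM, i.e. 160 bytes.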
+
+ /* allocate new buffer if needed */
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ for(i=0; i<frame_count; i++){
+ memset(g729a->decoder.synth_buf, 0, M);
+ g729a->decoder.synth = g729a->decoder.synth_buf + M;
+
+ if((data_end - data_start) == 2){
+ unpack_SID(data_start, g729a->decoder.serial);
+ data_start += 2;
+ }
+ else{
+ unpack_G729(data_start, g729a->decoder.serial, 10);
+ data_start += 10;
+ }
+
+ bits2prm_ld8k(&g729a->decoder.serial[1], g729a->decoder.parm);
+
+ /* This part was modified for version V1.3 */
+ /* for speech and SID frames, the hardware detects frame erasures
+ by checking if all bits are set to zero */
+ /* for untransmitted frames, the hardware detects frame erasures
+ by testing serial[0] */
+
+ g729a->decoder.parm[0] = 0; /* No frame erasure */
+ if(g729a->decoder.serial[1] != 0) {
+ int j;
+ for (j=0; j < g729a->decoder.serial[1]; j++){
+ if (g729a->decoder.serial[j+2] == 0){
+ g729a->decoder.parm[0] = 1; /* frame erased */
+ break;
+ }
+ }
+ }
+ else if(g729a->decoder.serial[0] != SYNC_WORD){
+ g729a->decoder.parm[0] = 1;
+ }
+ if(g729a->decoder.parm[1] == 1) {
+ /* check parity and put 1 in parm[5] if parity error */
+ g729a->decoder.parm[5] = Check_Parity_Pitch(g729a->decoder.parm[4], g729a->decoder.parm[5]);
+ }
+
+ Decod_ld8a(g729a->decoder.parm, g729a->decoder.synth, g729a->decoder.Az_dec, g729a->decoder.T2, &g729a->decoder.Vad);
+ Post_Filter(g729a->decoder.synth, g729a->decoder.Az_dec, g729a->decoder.T2, g729a->decoder.Vad); /* Post-filter */
+ Post_Process(g729a->decoder.synth, L_FRAME);
+
+ memcpy(&((uint8_t*)*out_data)[160*i], g729a->decoder.synth, 160);
+ }
+
+
+ return out_size;
+}
+
+static tsk_bool_t tdav_codec_g729ab_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)codec;
+
+ if(tsk_striequals(att_name, "fmtp")){
+ tsk_params_L_t* params = tsk_null;
+ const char* val_str;
+ if((params = tsk_params_fromstring(att_value, ";", tsk_true))){
+ if((val_str = tsk_params_get_param_value(params, "annexb"))){
+ g729a->encoder.vad_enable &= tsk_strequals(val_str, "yes") ? 1 : 0;
+ }
+ TSK_OBJECT_SAFE_FREE(params);
+ }
+ }
+ return tsk_true;
+}
+
+static char* tdav_codec_g729ab_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ tdav_codec_g729ab_t* g729a = (tdav_codec_g729ab_t*)codec;
+
+ if(tsk_striequals(att_name, "fmtp")){
+ if(g729a->encoder.vad_enable){
+ return tsk_strdup("annexb=yes");
+ }
+ else{
+ return tsk_strdup("annexb=no");
+ }
+ }
+ return tsk_null;
+}
+
+
+
+
+
+
+/* ============ Internal functions ================= */
+
+
+/**
+* Converts from bitstream (ITU bits) to int16_t value
+* @param no_of_bits number of bits to read
+* @param bitstream array containing bits
+* @retval decimal value of bit pattern
+*/
+static int16_t bin2int(int16_t no_of_bits, const int16_t *bitstream)
+{
+ int16_t value, i;
+ int16_t bit;
+
+ value = 0;
+ for(i = 0; i < no_of_bits; i++){
+ value <<= 1;
+ bit = *bitstream++;
+ if (bit == BIT_1){
+ value += 1;
+ }
+ }
+ return(value);
+}
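+// Example: with no_of_bits = 3 and bits {BIT_1, BIT_0, BIT_1}, bin2int() returns 5.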
+
+/*----------------------------------------------------------------------------
+ * int2bin convert integer to binary and write the bits bitstream array
+ *----------------------------------------------------------------------------
+ */
+
+/**
+* Writes int16_t value to bitstream
+* @param value decimal value to write
+* @param no_of_bits number of bits from value to write
+* @param bitstream pointer to the destination stream (ITU bits)
+*/
+static void int2bin(int16_t value, int16_t no_of_bits, int16_t *bitstream)
+{
+ int16_t *pt_bitstream;
+ int16_t i, bit;
+
+ pt_bitstream = bitstream + no_of_bits;
+
+ for (i = 0; i < no_of_bits; i++){
+ bit = value & (int16_t)0x0001; /* get lsb */
+ if (bit == 0){
+ *--pt_bitstream = BIT_0;
+ }
+ else{
+ *--pt_bitstream = BIT_1;
+ }
+ value >>= 1;
+ }
+}
+
+/**
+* Unpacks an RTP payload into an ITU bit stream
+* @param bitstream RTP bitstream to unpack
+* @param bits ITU bitstream used as destination (0 - BIT_0, 1 - BIT_1)
+* @param len length of the RTP bitstream
+*/
+static void unpack_G729(const uint8_t bitstream[], int16_t bits[], int len)
+{
+ int16_t i;
+ *bits++ = SYNC_WORD; /* bit[0], at receiver this bit indicates BFI */
+ switch(len){
+ case 10:
+ *bits++ = SIZE_WORD;
+ break;
+ case 8: // RATE_6400
+ case 15: //RATE_11800
+ default:
+ TSK_DEBUG_ERROR("%d is an invalid lenght value", len);
+ return;
+ }
+
+ for(i=0; i<len; i++){
+ int2bin(bitstream[i], 8, &bits[i*8]);
+ }
+}
+
+/**
+* Unpacks an RTP payload containing a SID frame into an ITU bit stream
+* @param bitstream RTP bitstream to unpack
+* @param bits ITU bitstream used as destination (0 - BIT_0, 1 - BIT_1)
+*/
+static void unpack_SID(const uint8_t bitstream[], int16_t bits[])
+{
+ *bits++ = SYNC_WORD;
+ *bits++ = RATE_SID_OCTET;
+ int2bin((int16_t)bitstream[0], 8, &bits[0]);
+ int2bin((int16_t)bitstream[1], 8, &bits[8]);
+}
+
+/**
+* Pack ITU bits into RTP stream
+* @param ituBits ITU stream to pack (80 shorts)
+* @param bitstream RTP bitstream (80 bits, 5 shorts, 10 bytes)
+*/
+static void pack_G729(const int16_t ituBits[], uint8_t bitstream[])
+{
+ int16_t word16, i;
+ for(i=0; i<5; i++){
+ word16 = bin2int(16, (int16_t*)&ituBits[i*16]);
+ bitstream[i*2] = word16>>8, bitstream[(i*2)+1] = (word16 & 0xFF);
+ }
+}
+
+/**
+* Pack ITU bits containing SID frame as RTP stream
+* @param ituBits ITU stream to pack
+* @param bitstream RTP bitstream (15 bits, 1 short, 2 bytes)
+*/
+static void pack_SID(const int16_t ituBits[], uint8_t bitstream[])
+{
+ int16_t word16 = bin2int(16, ituBits);
+ bitstream[0] = word16>>8, bitstream[1] = (word16 & 0xFF);
+}
+
+
+//
+// g729ab Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_g729ab_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_g729ab_t *g729a = self;
+ if(g729a){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ g729a->encoder.vad_enable = G729_ENABLE_VAD; // AnnexB
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_g729ab_dtor(tsk_object_t * self)
+{
+ tdav_codec_g729ab_t *g729a = self;
+ if(g729a){
+ /* deinit base */
+ tmedia_codec_audio_deinit(g729a);
+ /* deinit self */
+
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_g729ab_def_s =
+{
+ sizeof(tdav_codec_g729ab_t),
+ tdav_codec_g729ab_ctor,
+ tdav_codec_g729ab_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_g729ab_plugin_def_s =
+{
+ &tdav_codec_g729ab_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_g729ab,
+ "g729",
+ "g729ab Codec (libg729)",
+ TMEDIA_CODEC_FORMAT_G729,
+ tsk_false,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ {0},
+
+ tdav_codec_g729ab_set,
+ tdav_codec_g729ab_open,
+ tdav_codec_g729ab_close,
+ tdav_codec_g729ab_encode,
+ tdav_codec_g729ab_decode,
+ tdav_codec_g729ab_sdp_att_match,
+ tdav_codec_g729ab_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_g729ab_plugin_def_t = &tdav_codec_g729ab_plugin_def_s;
+
+#endif /* HAVE_G729 */
diff --git a/tinyDAV/src/codecs/gsm/tdav_codec_gsm.c b/tinyDAV/src/codecs/gsm/tdav_codec_gsm.c
new file mode 100644
index 0000000..8b5f1bc
--- /dev/null
+++ b/tinyDAV/src/codecs/gsm/tdav_codec_gsm.c
@@ -0,0 +1,209 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_gsm.c
+ * @brief GSM Full Rate Codec (Based on libgsm)
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ *
+ */
+#include "tinydav/codecs/gsm/tdav_codec_gsm.h"
+
+#if HAVE_LIBGSM
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_GSM_FRAME_SIZE 33
+
+/* ============ GSM Plugin interface ================= */
+
+#define tdav_codec_gsm_sdp_att_get tsk_null
+
+int tdav_codec_gsm_open(tmedia_codec_t* self)
+{
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(!gsm->encoder && !(gsm->encoder = gsm_create())){
+ TSK_DEBUG_ERROR("Failed to create GSM encoder");
+ return -2;
+ }
+ if(!gsm->decoder && !(gsm->decoder = gsm_create())){
+ TSK_DEBUG_ERROR("Failed to create GSM decoder");
+ return -3;
+ }
+
+ return 0;
+}
+
+int tdav_codec_gsm_close(tmedia_codec_t* self)
+{
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(gsm->encoder){
+ gsm_destroy(gsm->encoder);
+ gsm->encoder = tsk_null;
+ }
+ if(gsm->decoder){
+ gsm_destroy(gsm->decoder);
+ gsm->decoder = tsk_null;
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_gsm_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ tsk_size_t out_size;
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = ((in_size / (TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(self) * sizeof(short))) * TDAV_GSM_FRAME_SIZE);
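+ /* GSM 06.10 full rate: each 20 ms frame of 160 samples (320 bytes of 16-bit PCM,
+ assuming TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING() yields 160 here) compresses
+ into a 33-byte frame (TDAV_GSM_FRAME_SIZE). */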
+
+ /* allocate new buffer if needed */
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ gsm_encode(gsm->encoder, (gsm_signal*)in_data, (gsm_byte*)*out_data);
+
+ return out_size;
+}
+
+tsk_size_t tdav_codec_gsm_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tsk_size_t out_size;
+ int ret;
+ tdav_codec_gsm_t* gsm = (tdav_codec_gsm_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data || (in_size % TDAV_GSM_FRAME_SIZE)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ out_size = (in_size / TDAV_GSM_FRAME_SIZE) * (TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_DECODING(self) * sizeof(short));
+
+ /* allocate new buffer if needed */
+ if(*out_max_size <out_size){
+ if(!(*out_data = tsk_realloc(*out_data, out_size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = out_size;
+ }
+
+ ret = gsm_decode(gsm->decoder, (gsm_byte*)in_data, (gsm_signal*)*out_data);
+
+ return out_size;
+}
+
+tsk_bool_t tdav_codec_gsm_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{ /* always match */
+ return tsk_true;
+}
+
+
+//
+// GSM Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_gsm_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_gsm_t *gsm = self;
+ if(gsm){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_gsm_dtor(tsk_object_t * self)
+{
+ tdav_codec_gsm_t *gsm = self;
+ if(gsm){
+ /* deinit base */
+ tmedia_codec_audio_deinit(gsm);
+ /* deinit self */
+ if(gsm->encoder){
+ gsm_destroy(gsm->encoder);
+ }
+ if(gsm->decoder){
+ gsm_destroy(gsm->decoder);
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_gsm_def_s =
+{
+ sizeof(tdav_codec_gsm_t),
+ tdav_codec_gsm_ctor,
+ tdav_codec_gsm_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_gsm_plugin_def_s =
+{
+ &tdav_codec_gsm_def_s,
+
+ tmedia_audio,
+ tmedia_codec_id_gsm,
+ "GSM",
+ "GSM Full Rate (libgsm)",
+ TMEDIA_CODEC_FORMAT_GSM,
+ tsk_false,
+ 8000, // rate
+
+ { /* audio */
+ 1, // channels
+ 0 // ptime @deprecated
+ },
+
+ /* video */
+ {0},
+
+ tsk_null, // set()
+ tdav_codec_gsm_open,
+ tdav_codec_gsm_close,
+ tdav_codec_gsm_encode,
+ tdav_codec_gsm_decode,
+ tdav_codec_gsm_sdp_att_match,
+ tdav_codec_gsm_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_gsm_plugin_def_t = &tdav_codec_gsm_plugin_def_s;
+
+
+#endif /* HAVE_LIBGSM */
diff --git a/tinyDAV/src/codecs/h261/tdav_codec_h261.c b/tinyDAV/src/codecs/h261/tdav_codec_h261.c
new file mode 100644
index 0000000..27aaab7
--- /dev/null
+++ b/tinyDAV/src/codecs/h261/tdav_codec_h261.c
@@ -0,0 +1,536 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h261.c
+ * @brief H.261 codec plugin.
+ * RTP payloader follows RFC 4587
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ *
+ */
+#include "tinydav/codecs/h261/tdav_codec_h261.h"
+
+#if HAVE_FFMPEG
+
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tnet_endianness.h"
+
+#include "tsk_string.h"
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define RTP_PAYLOAD_SIZE 700
+#define H261_HEADER_SIZE 4
+
+static void *run(void* self);
+static void tdav_codec_h261_rtp_callback(tdav_codec_h261_t *self, const void *data, tsk_size_t size, tsk_bool_t marker);
+static void tdav_codec_h261_encap(const tdav_codec_h261_t* h261, const uint8_t* pdata, tsk_size_t size);
+
+/* ============ H.261 Plugin interface ================= */
+
+//
+// H.261 object definition
+//
+int tdav_codec_h261_open(tmedia_codec_t* self)
+{
+ int ret;
+ int size;
+
+ tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;
+
+ if(!h261){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is not opened */
+
+ //
+ // Encoder
+ //
+ if(!(h261->encoder.codec = avcodec_find_encoder(CODEC_ID_H261))){
+ TSK_DEBUG_ERROR("Failed to find H.261 encoder");
+ return -2;
+ }
+ h261->encoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(h261->encoder.context);
+
+ h261->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+ h261->encoder.context->time_base.num = 1;
+ h261->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(h261)->out.fps;
+ h261->encoder.context->width = TMEDIA_CODEC_VIDEO(h261)->out.width;
+ h261->encoder.context->height = TMEDIA_CODEC_VIDEO(h261)->out.height;
+
+ /*h261->encoder.context->mb_qmin =*/ h261->encoder.context->qmin = 4;
+ /*h261->encoder.context->mb_qmax =*/ h261->encoder.context->qmax = 31;
+ h261->encoder.context->mb_decision = FF_MB_DECISION_SIMPLE;
+
+ h261->encoder.context->thread_count = 1;
+ h261->encoder.context->rtp_payload_size = RTP_PAYLOAD_SIZE;
+ h261->encoder.context->opaque = tsk_null;
+ h261->encoder.context->bit_rate = (float) (500000) * 0.80f;
+ h261->encoder.context->bit_rate_tolerance = (int) (500000 * 0.20f);
+ h261->encoder.context->gop_size = TMEDIA_CODEC_VIDEO(h261)->out.fps*4; /* each 4 seconds */
+
+ // Picture (YUV 420)
+ if(!(h261->encoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create encoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(h261->encoder.picture);
+ //if((ret = avpicture_alloc((AVPicture*)h261->encoder.picture, PIX_FMT_YUV420P, h261->encoder.context->width, h261->encoder.context->height))){
+ // TSK_DEBUG_ERROR("Failed to allocate encoder picture");
+ // return ret;
+ //}
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, h261->encoder.context->width, h261->encoder.context->height);
+ if(!(h261->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
+ return -2;
+ }
+
+ // Open encoder
+ if((ret = avcodec_open(h261->encoder.context, h261->encoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open H.261 encoder");
+ return ret;
+ }
+
+ //
+ // Decoder
+ //
+ if(!(h261->decoder.codec = avcodec_find_decoder(CODEC_ID_H261))){
+ TSK_DEBUG_ERROR("Failed to find H.261 decoder");
+ return -3;
+ }
+ h261->decoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(h261->decoder.context);
+
+ h261->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+ h261->decoder.context->width = TMEDIA_CODEC_VIDEO(h261)->in.width;
+ h261->decoder.context->height = TMEDIA_CODEC_VIDEO(h261)->in.height;
+
+ // Picture (YUV 420)
+ if(!(h261->decoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create decoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(h261->decoder.picture);
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, h261->decoder.context->width, h261->decoder.context->height);
+ if(!(h261->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
+ return -2;
+ }
+
+ // Open decoder
+ if((ret = avcodec_open(h261->decoder.context, h261->decoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open H.261 decoder");
+ return ret;
+ }
+
+ return 0;
+}
+
+int tdav_codec_h261_close(tmedia_codec_t* self)
+{
+ tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;
+
+ if(!h261){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is opened */
+
+ //
+ // Encoder
+ //
+ if(h261->encoder.context){
+ avcodec_close(h261->encoder.context);
+ av_free(h261->encoder.context);
+ h261->encoder.context = tsk_null;
+ }
+ if(h261->encoder.picture){
+ av_free(h261->encoder.picture);
+ }
+ if(h261->encoder.buffer){
+ TSK_FREE(h261->encoder.buffer);
+ }
+
+ //
+ // Decoder
+ //
+ if(h261->decoder.context){
+ avcodec_close(h261->decoder.context);
+ av_free(h261->decoder.context);
+ h261->decoder.context = tsk_null;
+ }
+ if(h261->decoder.picture){
+ av_free(h261->decoder.picture);
+ h261->decoder.picture = tsk_null;
+ }
+ if(h261->decoder.accumulator){
+ TSK_FREE(h261->decoder.accumulator);
+ h261->decoder.accumulator_pos = 0;
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_h261_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ int ret;
+ int size;
+
+ tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ // delete old buffer
+ if(*out_data){
+ TSK_FREE(*out_data);
+ }
+
+ // wrap yuv420 buffer
+ size = avpicture_fill((AVPicture *)h261->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h261->encoder.context->width, h261->encoder.context->height);
+ if(size != in_size){
+ /* guard */
+ TSK_DEBUG_ERROR("Invalid size");
+ return 0;
+ }
+
+ // Encode data
+ h261->encoder.picture->pts = AV_NOPTS_VALUE;
+ //h261->encoder.picture->pict_type = FF_I_TYPE;
+ ret = avcodec_encode_video(h261->encoder.context, h261->encoder.buffer, size, h261->encoder.picture);
+ if(ret > 0){
+ tdav_codec_h261_encap(h261, h261->encoder.buffer, (tsk_size_t)ret);
+ }
+
+ return 0;
+}
+
+tsk_size_t tdav_codec_h261_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ uint8_t sbit, ebit;
+ const uint8_t* pdata = in_data;
+ const uint8_t* pay_ptr;
+ tsk_size_t pay_size;
+ tsk_size_t xsize, retsize = 0;
+ int got_picture_ptr;
+ int ret;
+
+ tdav_codec_h261_t* h261 = (tdav_codec_h261_t*)self;
+ const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+
+ if(!self || !in_data || !in_size || !out_data || !h261->decoder.context){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* RFC 4587
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |SBIT |EBIT |I|V| GOBN | MBAP | QUANT | HMVD | VMVD |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ sbit = *pdata >> 5;
+ ebit = (*pdata >> 2) & 0x07;
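+ /* Per RFC 4587, SBIT is the number of most-significant bits to ignore in the first
+ payload byte and EBIT the number of least-significant bits to ignore in the last
+ one; they are used below to stitch bytes split across consecutive packets. */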
+
+ /* Check size */
+ if(in_size < H261_HEADER_SIZE){
+ TSK_DEBUG_ERROR("Too short");
+ return 0;
+ }
+
+ pay_ptr = (pdata + H261_HEADER_SIZE);
+ pay_size = (in_size - H261_HEADER_SIZE);
+ xsize = avpicture_get_size(h261->decoder.context->pix_fmt, h261->decoder.context->width, h261->decoder.context->height);
+
+ /* Packet lost? */
+ if(h261->decoder.last_seq != (rtp_hdr->seq_num - 1) && h261->decoder.last_seq){
+ TSK_DEBUG_INFO("Packet lost");
+ }
+ h261->decoder.last_seq = rtp_hdr->seq_num;
+
+ if((int)(h261->decoder.accumulator_pos + pay_size) <= xsize){
+
+ if((h261->decoder.ebit + sbit) == 8){ /* Perfect one Byte to clean up */
+ if(h261->decoder.accumulator_pos){
+ ((uint8_t*)h261->decoder.accumulator)[h261->decoder.accumulator_pos-1] =
+ (((uint8_t*)h261->decoder.accumulator)[h261->decoder.accumulator_pos-1] & (0xFF << h261->decoder.ebit)) |
+ (*pay_ptr & (0xFF >> sbit));
+ }
+ pay_ptr++, pay_size--;
+ }
+ h261->decoder.ebit = ebit;
+
+ memcpy(&((uint8_t*)h261->decoder.accumulator)[h261->decoder.accumulator_pos], pay_ptr, pay_size);
+ h261->decoder.accumulator_pos += pay_size;
+ }
+ else{
+ TSK_DEBUG_WARN("Buffer overflow");
+ h261->decoder.accumulator_pos = 0;
+ return 0;
+ }
+
+ if(rtp_hdr->marker){
+ AVPacket packet;
+ /* allocate destination buffer */
+ if(*out_max_size <xsize){
+ if(!(*out_data = tsk_realloc(*out_data, xsize))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ h261->decoder.accumulator_pos = 0;
+ return 0;
+ }
+ *out_max_size = xsize;
+ }
+
+ /* decode the picture */
+ av_init_packet(&packet);
+ packet.size = (int)h261->decoder.accumulator_pos;
+ packet.data = h261->decoder.accumulator;
+ ret = avcodec_decode_video2(h261->decoder.context, h261->decoder.picture, &got_picture_ptr, &packet);
+
+ if(ret <0 || !got_picture_ptr){
+ TSK_DEBUG_WARN("Failed to decode the buffer");
+ }
+ else{
+ retsize = xsize;
+ TMEDIA_CODEC_VIDEO(h261)->in.width = h261->decoder.context->width;
+ TMEDIA_CODEC_VIDEO(h261)->in.height = h261->decoder.context->height;
+ /* copy picture into a linear buffer */
+ avpicture_layout((AVPicture *)h261->decoder.picture, h261->decoder.context->pix_fmt, (int)h261->decoder.context->width, (int)h261->decoder.context->height,
+ *out_data, (int)retsize);
+ }
+ /* in all cases: reset accumulator */
+ h261->decoder.accumulator_pos = 0;
+ }
+
+ return retsize;
+}
+
+tsk_bool_t tdav_codec_h261_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ int ret;
+ unsigned maxbr, fps, width, height;
+ tmedia_codec_video_t* h261 = (tmedia_codec_video_t*)codec;
+
+ if(tsk_striequals(att_name, "fmtp")){
+ if(!(ret = tmedia_codec_parse_fmtp(att_value, &maxbr, &fps, &width, &height))){
+ h261->in.max_br = h261->out.max_br = maxbr * 1000;
+ h261->in.fps = h261->out.fps = fps;
+ h261->in.width = h261->out.width = width;
+ h261->in.height = h261->out.height = height;
+ return tsk_true;
+ }
+ else{
+ TSK_DEBUG_WARN("Failed to match fmtp [%s]", att_value);
+ }
+ }
+ return tsk_false;
+}
+
+char* tdav_codec_h261_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+#if 0
+ return tsk_strdup("CIF=2/MaxBR=3840;QCIF=2/MaxBR=1920");
+#else
+ return tsk_strdup("QCIF=2");
+#endif
+}
+
+/* constructor */
+static tsk_object_t* tdav_codec_h261_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_h261_t *h261 = self;
+ if(h261){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h261_dtor(tsk_object_t * self)
+{
+ tdav_codec_h261_t *h261 = self;
+ if(h261){
+ /* deinit base */
+ tmedia_codec_video_deinit(h261); // will call close()
+ /* deinit self */
+ TSK_FREE(h261->rtp.ptr);
+ h261->rtp.size = 0;
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h261_def_s =
+{
+ sizeof(tdav_codec_h261_t),
+ tdav_codec_h261_ctor,
+ tdav_codec_h261_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h261_plugin_def_s =
+{
+ &tdav_codec_h261_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_h261,
+ "H261",
+ "H261 codec (FFmpeg)",
+ TMEDIA_CODEC_FORMAT_H261,
+ tsk_false,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video */
+ {176, 144, 15},
+
+ tsk_null, // set()
+ tdav_codec_h261_open,
+ tdav_codec_h261_close,
+ tdav_codec_h261_encode,
+ tdav_codec_h261_decode,
+ tdav_codec_h261_sdp_att_match,
+ tdav_codec_h261_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h261_plugin_def_t = &tdav_codec_h261_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ Callbacks ================= */
+
+static void tdav_codec_h261_encap(const tdav_codec_h261_t* h261, const uint8_t* pdata, tsk_size_t size)
+{
+ uint32_t i, last_index = 0;
+
+ if(size < RTP_PAYLOAD_SIZE){
+ goto last;
+ }
+
+ for(i = 4; i<(size - 4); i++){
+ if(pdata[i] == 0x00 && pdata[i+1] == 0x00 && pdata[i+2]>=0x80){ /* PSC or (GBSC) found */
+ if((i - last_index) >= RTP_PAYLOAD_SIZE){
+ tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata+last_index,
+ (i - last_index), (last_index == size));
+ }
+ last_index = i;
+ }
+ }
+last:
+ if(last_index < size - 3/*PSC/GBSC size*/){
+ tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata + last_index,
+ (size - last_index), tsk_true);
+ }
+}
+
+//static void *run(void* self)
+//{
+// uint32_t i, last_index;
+// tsk_list_item_t *curr;
+//
+// const uint8_t* pdata;
+// tsk_size_t size;
+//
+// const tdav_codec_h261_t* h261 = ((tdav_runnable_video_t*)self)->userdata;
+//
+// TSK_DEBUG_INFO("H261 thread === START");
+//
+// TSK_RUNNABLE_RUN_BEGIN(self);
+//
+// if((curr = TSK_RUNNABLE_POP_FIRST(self))){
+// /* 4 is sizeof(uint32_t) */
+// pdata = ((const tsk_buffer_t*)curr->data)->data;
+// size = ((const tsk_buffer_t*)curr->data)->size;
+// last_index = 0;
+//
+// if(size < RTP_PAYLOAD_SIZE){
+// goto last;
+// }
+//
+// for(i = 4; i<(size - 4); i++){
+// if(pdata[i] == 0x00 && pdata[i+1] == 0x00 && pdata[i+2]>=0x80){ /* PSC or (GBSC) found */
+// if((i - last_index) >= RTP_PAYLOAD_SIZE){
+// tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata+last_index,
+// (i - last_index), (last_index == size));
+// }
+// last_index = i;
+// }
+// }
+//last:
+// if(last_index < size - 3/*PSC/GBSC size*/){
+// tdav_codec_h261_rtp_callback((tdav_codec_h261_t*)h261, pdata + last_index,
+// (size - last_index), tsk_true);
+// }
+//
+// tsk_object_unref(curr);
+// }
+//
+// TSK_RUNNABLE_RUN_END(self);
+//
+// TSK_DEBUG_INFO("H261 thread === STOP");
+//
+// return tsk_null;
+//}
+
+static void tdav_codec_h261_rtp_callback(tdav_codec_h261_t *self, const void *data, tsk_size_t size, tsk_bool_t marker)
+{
+
+}
+
+tsk_bool_t tdav_codec_ffmpeg_h261_is_supported()
+{
+ return /*(avcodec_find_encoder(CODEC_ID_H261) && avcodec_find_decoder(CODEC_ID_H261))*/tsk_false /* @deprecated */;
+}
+
+
+#endif /* HAVE_FFMPEG */
\ No newline at end of file
diff --git a/tinyDAV/src/codecs/h263/tdav_codec_h263.c b/tinyDAV/src/codecs/h263/tdav_codec_h263.c
new file mode 100644
index 0000000..ed5d77f
--- /dev/null
+++ b/tinyDAV/src/codecs/h263/tdav_codec_h263.c
@@ -0,0 +1,1373 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h263.c
+ * @brief H.263-1996 and H.263-1998 codec plugins.
+ * RTP payloader follows RFC 4629 for H263+ and RFC 2190 for H263.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ *
+ */
+#include "tinydav/codecs/h263/tdav_codec_h263.h"
+
+#if HAVE_FFMPEG
+
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tnet_endianness.h"
+
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_string.h"
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <libavcodec/avcodec.h>
+
+#define TDAV_H263_GOP_SIZE_IN_SECONDS 25
+#define RTP_PAYLOAD_SIZE 750
+
+#define H263P_HEADER_SIZE 2
+#define H263_HEADER_MODE_A_SIZE 4
+#define H263_HEADER_MODE_B_SIZE 8
+#define H263_HEADER_MODE_C_SIZE 12
+
+#define tdav_codec_h263p_set tdav_codec_h263_set
+#define tdav_codec_h263p_open tdav_codec_h263_open
+#define tdav_codec_h263p_close tdav_codec_h263_close
+#define tdav_codec_h263p_encode tdav_codec_h263_encode
+#define tdav_codec_h263p_sdp_att_match tdav_codec_h263_sdp_att_match
+#define tdav_codec_h263p_sdp_att_get tdav_codec_h263_sdp_att_get
+
+#define tdav_codec_h263pp_set tdav_codec_h263_set
+#define tdav_codec_h263pp_open tdav_codec_h263_open
+#define tdav_codec_h263pp_close tdav_codec_h263_close
+#define tdav_codec_h263pp_encode tdav_codec_h263_encode
+#define tdav_codec_h263pp_decode tdav_codec_h263_decode
+#define tdav_codec_h263pp_sdp_att_match tdav_codec_h263_sdp_att_match
+#define tdav_codec_h263pp_sdp_att_get tdav_codec_h263_sdp_att_get
+
+#define TDAV_CODEC_H263(self) ((tdav_codec_h263_t*)(self))
+
+typedef enum tdav_codec_h263_type_e
+{
+ tdav_codec_h263_1996,
+ tdav_codec_h263_1998,
+ tdav_codec_h263_2000,
+}
+tdav_codec_h263_type_t;
+
+/** H.263-1996 codec */
+typedef struct tdav_codec_h263_s
+{
+ TMEDIA_DECLARE_CODEC_VIDEO;
+
+ tdav_codec_h263_type_t type;
+
+ struct{
+ uint8_t* ptr;
+ tsk_size_t size;
+ } rtp;
+
+ // Encoder
+ struct{
+ AVCodec* codec;
+ AVCodecContext* context;
+ AVFrame* picture;
+ void* buffer;
+ tsk_bool_t force_idr;
+ int32_t quality; // [1-31]
+ int32_t max_bw_kpbs;
+ } encoder;
+
+ // decoder
+ struct{
+ AVCodec* codec;
+ AVCodecContext* context;
+ AVFrame* picture;
+
+ void* accumulator;
+ uint8_t ebit;
+ tsk_size_t accumulator_pos;
+ uint16_t last_seq;
+ } decoder;
+}
+tdav_codec_h263_t;
+
+#define TDAV_DECLARE_CODEC_H263 tdav_codec_h263_t __codec_h263__
+
+static int tdav_codec_h263_init(tdav_codec_h263_t* self, tdav_codec_h263_type_t type, enum CodecID encoder, enum CodecID decoder);
+static int tdav_codec_h263_deinit(tdav_codec_h263_t* self);
+static int tdav_codec_h263_open_encoder(tdav_codec_h263_t* self);
+static int tdav_codec_h263_open_decoder(tdav_codec_h263_t* self);
+static int tdav_codec_h263_close_encoder(tdav_codec_h263_t* self);
+static int tdav_codec_h263_close_decoder(tdav_codec_h263_t* self);
+
+/** H.263-1998 codec */
+typedef struct tdav_codec_h263p_s
+{
+ TDAV_DECLARE_CODEC_H263;
+}
+tdav_codec_h263p_t;
+
+/** H.263-2000 codec */
+typedef struct tdav_codec_h263pp_s
+{
+ TDAV_DECLARE_CODEC_H263;
+}
+tdav_codec_h263pp_t;
+
+
+static void tdav_codec_h263_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t marker);
+static void tdav_codec_h263p_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t frag, tsk_bool_t marker);
+
+static void tdav_codec_h263_encap(const tdav_codec_h263_t* h263, const uint8_t* pdata, tsk_size_t size);
+
+
+/* ============ Common To all H263 codecs ================= */
+
+static int tdav_codec_h263_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+ tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+ if(!self->opened){
+ TSK_DEBUG_ERROR("Codec not opened");
+ return -1;
+ }
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "action")){
+ tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+ switch(action){
+ case tmedia_codec_action_encode_idr:
+ {
+ h263->encoder.force_idr = tsk_true;
+ break;
+ }
+ case tmedia_codec_action_bw_down:
+ {
+ h263->encoder.quality = TSK_CLAMP(1, (h263->encoder.quality + 1), 31);
+ h263->encoder.context->global_quality = FF_QP2LAMBDA * h263->encoder.quality;
+ break;
+ }
+ case tmedia_codec_action_bw_up:
+ {
+ h263->encoder.quality = TSK_CLAMP(1, (h263->encoder.quality - 1), 31);
+ h263->encoder.context->global_quality = FF_QP2LAMBDA * h263->encoder.quality;
+ break;
+ }
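+ // Note: a smaller qscale value means higher quality (and bitrate), so bw_up
+ // decreases encoder.quality while bw_down increases it.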
+ }
+ return 0;
+ }
+ }
+ return -1;
+}
+
+int tdav_codec_h263_init(tdav_codec_h263_t* self, tdav_codec_h263_type_t type, enum CodecID encoder, enum CodecID decoder)
+{
+ int ret = 0;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ self->type = type;
+ self->encoder.quality = 1;
+
+ if(!(self->encoder.codec = avcodec_find_encoder(encoder))){
+ TSK_DEBUG_ERROR("Failed to find [%d]encoder", encoder);
+ ret = -2;
+ }
+
+ if(!(self->decoder.codec = avcodec_find_decoder(decoder))){
+ TSK_DEBUG_ERROR("Failed to find [%d] decoder", decoder);
+ ret = -3;
+ }
+
+ self->encoder.max_bw_kpbs = tmedia_defaults_get_bandwidth_video_upload_max();
+
+ /* allocations MUST be done by open() */
+ return ret;
+}
+
+int tdav_codec_h263_deinit(tdav_codec_h263_t* self)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ self->encoder.codec = tsk_null;
+ self->decoder.codec = tsk_null;
+
+ // FFMpeg resources are destroyed by close()
+
+
+
+ TSK_FREE(self->rtp.ptr);
+ self->rtp.size = 0;
+
+ return 0;
+}
+
+
+
+/* ============ H.263-1996 Plugin interface ================= */
+
+//
+// H.263-1996 object definition
+//
+static int tdav_codec_h263_open(tmedia_codec_t* self)
+{
+ int ret;
+
+ tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+
+ if(!h263){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is not opened */
+
+ // Encoder
+ if((ret = tdav_codec_h263_open_encoder(h263))){
+ return ret;
+ }
+
+ // Decoder
+ if((ret = tdav_codec_h263_open_decoder(h263))){
+ return ret;
+ }
+
+ return ret;
+}
+
+static int tdav_codec_h263_close(tmedia_codec_t* self)
+{
+ tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+ int ret;
+
+ if(!h263){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is opened */
+
+ // Encoder
+ ret = tdav_codec_h263_close_encoder(h263);
+ // Decoder
+ ret = tdav_codec_h263_close_decoder(h263);
+
+ return ret;
+}
+
+static tsk_size_t tdav_codec_h263_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ int ret;
+ int size;
+
+ tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+
+ if(!self || !in_data || !in_size || !out_data){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ // wrap yuv420 buffer
+ size = avpicture_fill((AVPicture *)h263->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h263->encoder.context->width, h263->encoder.context->height);
+ if(size != in_size){
+ /* guard */
+ TSK_DEBUG_ERROR("Invalid size");
+ return 0;
+ }
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ h263->encoder.picture->pict_type = h263->encoder.force_idr ? FF_I_TYPE : 0;
+#else
+ h263->encoder.picture->pict_type = h263->encoder.force_idr ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
+#endif
+ h263->encoder.picture->pts = AV_NOPTS_VALUE;
+ h263->encoder.picture->quality = h263->encoder.context->global_quality;
+ ret = avcodec_encode_video(h263->encoder.context, h263->encoder.buffer, size, h263->encoder.picture);
+ if(ret > 0){
+ tdav_codec_h263_encap(h263, h263->encoder.buffer, (tsk_size_t)ret);
+ }
+ h263->encoder.force_idr = tsk_false;
+
+ return 0;
+}
+
+static tsk_size_t tdav_codec_h263_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ uint8_t F, P, sbit, ebit;
+ const uint8_t* pdata = in_data;
+ const uint8_t* pay_ptr;
+ tsk_size_t pay_size;
+ tsk_size_t hdr_size;
+ tsk_size_t xsize, retsize = 0;
+ int got_picture_ptr;
+ int ret;
+
+ tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+ const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+ tsk_bool_t is_idr = tsk_false;
+
+ if(!self || !in_data || !in_size || !out_data || !h263->decoder.context){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ /* RFC 2190
+ get F and P bits, used to determine the header Mode (A, B or C)
+ F: 1 bit
+ The flag bit indicates the mode of the payload header. F=0, mode A;
+ F=1, mode B or mode C depending on P bit defined below.
+ P: 1 bit
+ Optional PB-frames mode as defined by the H.263 [4]. "0" implies
+ normal I or P frame, "1" PB-frames. When F=1, P also indicates modes:
+ mode B if P=0, mode C if P=1.
+
+ I: 1 bit.
+ Picture coding type, bit 9 in PTYPE defined by H.263[4], "0" is
+ intra-coded, "1" is inter-coded.
+ */
+ F = *pdata >> 7;
+ P = (*pdata >> 6) & 0x01;
+
+ /* SBIT and EBIT */
+ sbit = (*pdata >> 3) & 0x0F;
+ ebit = (*pdata & 0x07);
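+ /* Per RFC 2190, SBIT/EBIT give the number of bits to ignore at the start of the
+ first payload byte and at the end of the last one; used below to merge the
+ partial byte left over from the previous packet. */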
+
+ if(F == 0){
+ /* MODE A
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |F|P|SBIT |EBIT | SRC |I|U|S|A|R |DBQ| TRB | TR |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ hdr_size = H263_HEADER_MODE_A_SIZE;
+ is_idr = (in_size >= 2) && !(pdata[1] & 0x10) /* I == 0: intra-coded */;
+ }
+ else if(P == 0){ // F=1 and P=0
+ /* MODE B
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |F|P|SBIT |EBIT | SRC | QUANT | GOBN | MBA |R |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |I|U|S|A| HMV1 | VMV1 | HMV2 | VMV2 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ hdr_size = H263_HEADER_MODE_B_SIZE;
+ is_idr = (in_size >= 5) && !(pdata[4] & 0x80) /* I == 0: intra-coded */;
+ }
+ else{ // F=1 and P=1
+ /* MODE C
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |F|P|SBIT |EBIT | SRC | QUANT | GOBN | MBA |R |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |I|U|S|A| HMV1 | VMV1 | HMV2 | VMV2 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | RR |DBQ| TRB | TR |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ hdr_size = H263_HEADER_MODE_C_SIZE;
+ is_idr = (in_size >= 5) && !(pdata[4] & 0x80) /* I == 0: intra-coded */;
+ }
+
+ /* Check size */
+ if(in_size < hdr_size){
+ TSK_DEBUG_ERROR("Too short");
+ return 0;
+ }
+
+ pay_ptr = (pdata + hdr_size);
+ pay_size = (in_size - hdr_size);
+ xsize = avpicture_get_size(h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height);
+
+ /* Packet lost? */
+ if(h263->decoder.last_seq != (rtp_hdr->seq_num - 1) && h263->decoder.last_seq){
+ if(h263->decoder.last_seq == rtp_hdr->seq_num){
+ // Could happen on some stupid emulators
+ //TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+ return 0;
+ }
+ TSK_DEBUG_INFO("[H.263] Packet loss, seq_num=%d", rtp_hdr->seq_num);
+ }
+ h263->decoder.last_seq = rtp_hdr->seq_num;
+
+ if((int)(h263->decoder.accumulator_pos + pay_size) <= xsize){
+ if((h263->decoder.ebit + sbit) == 8){ /* Perfect one Byte to clean up */
+ if(h263->decoder.accumulator_pos){
+ ((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos-1] = (((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos-1] & (0xFF << h263->decoder.ebit)) |
+ (*pay_ptr & (0xFF >> sbit));
+ }
+ pay_ptr++, pay_size--;
+ }
+ h263->decoder.ebit = ebit;
+
+ memcpy(&((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos], pay_ptr, pay_size);
+ h263->decoder.accumulator_pos += pay_size;
+ }
+ else{
+ TSK_DEBUG_WARN("Buffer overflow");
+ h263->decoder.accumulator_pos = 0;
+ return 0;
+ }
+
+ if(rtp_hdr->marker){
+ AVPacket packet;
+ /* allocate destination buffer */
+ if(*out_max_size <xsize){
+ if(!(*out_data = tsk_realloc(*out_data, xsize))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ h263->decoder.accumulator_pos = 0;
+ *out_max_size = 0;
+ return 0;
+ }
+ *out_max_size = xsize;
+ }
+
+ av_init_packet(&packet);
+ packet.size = (int)h263->decoder.accumulator_pos;
+ packet.data = h263->decoder.accumulator;
+ ret = avcodec_decode_video2(h263->decoder.context, h263->decoder.picture, &got_picture_ptr, &packet);
+
+ if(ret < 0){
+ TSK_DEBUG_WARN("Failed to decode the buffer with error code = %d", ret);
+ if(TMEDIA_CODEC_VIDEO(self)->in.callback){
+ TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+ TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+ TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+ }
+ }
+ else if(got_picture_ptr){
+ retsize = xsize;
+ // Is it IDR frame?
+ if(is_idr && TMEDIA_CODEC_VIDEO(self)->in.callback){
+ TSK_DEBUG_INFO("Decoded H.263 IDR");
+ TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr;
+ TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+ TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+ }
+ TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width;
+ TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height;
+ /* copy picture into a linear buffer */
+ avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, (int)h263->decoder.context->width, (int)h263->decoder.context->height,
+ *out_data, (int)retsize);
+ }
+ /* in all cases: reset accumulator */
+ h263->decoder.accumulator_pos = 0;
+ }
+
+ return retsize;
+}
+
+static tsk_bool_t tdav_codec_h263_sdp_att_match(const tmedia_codec_t* codec, const char* att_name, const char* att_value)
+{
+ if(tsk_striequals(att_name, "fmtp")){
+ unsigned width, height, fps;
+ if(tmedia_parse_video_fmtp(att_value, TMEDIA_CODEC_VIDEO(codec)->pref_size, &width, &height, &fps)){
+ TSK_DEBUG_ERROR("Failed to match fmtp=%s", att_value);
+ return tsk_false;
+ }
+ TMEDIA_CODEC_VIDEO(codec)->in.width = TMEDIA_CODEC_VIDEO(codec)->out.width = width;
+ TMEDIA_CODEC_VIDEO(codec)->in.height = TMEDIA_CODEC_VIDEO(codec)->out.height = height;
+ TMEDIA_CODEC_VIDEO(codec)->in.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps = fps;
+ }
+#if 0
+ else if(tsk_striequals(att_name, "imageattr")){
+ unsigned in_width, in_height, out_width, out_height;
+ if(tmedia_parse_video_imageattr(att_value, TMEDIA_CODEC_VIDEO(codec)->pref_size, &in_width, &in_height, &out_width, &out_height) != 0){
+ return tsk_false;
+ }
+ TMEDIA_CODEC_VIDEO(codec)->in.width = in_width;
+ TMEDIA_CODEC_VIDEO(codec)->in.height = in_height;
+ TMEDIA_CODEC_VIDEO(codec)->out.width = out_width;
+ TMEDIA_CODEC_VIDEO(codec)->out.height = out_height;
+ }
+#endif
+
+ return tsk_true;
+}
+
+static char* tdav_codec_h263_sdp_att_get(const tmedia_codec_t* codec, const char* att_name)
+{
+ if(tsk_striequals(att_name, "fmtp")){
+ tmedia_pref_video_size_t cif_vs;
+ if(tmedia_video_get_closest_cif_size(TMEDIA_CODEC_VIDEO(codec)->pref_size, &cif_vs)){
+ TSK_DEBUG_ERROR("Failed to get closest CIF family size");
+ return tsk_null;
+ }
+ return tmedia_get_video_fmtp(cif_vs);
+ }
+#if 0
+ else if(tsk_striequals(att_name, "imageattr")){
+ return tmedia_get_video_imageattr(TMEDIA_CODEC_VIDEO(codec)->pref_size,
+ TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height, TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height);
+ }
+#endif
+ return tsk_null;
+}
+
+/* constructor */
+static tsk_object_t* tdav_codec_h263_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_h263_t *h263 = self;
+ if(h263){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ tdav_codec_h263_init(TDAV_CODEC_H263(self), tdav_codec_h263_1996, CODEC_ID_H263, CODEC_ID_H263);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h263_dtor(tsk_object_t * self)
+{
+ tdav_codec_h263_t *h263 = self;
+ if(h263){
+ /* deinit base */
+ tmedia_codec_video_deinit(h263);
+ /* deinit self */
+ tdav_codec_h263_deinit(TDAV_CODEC_H263(self));
+
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h263_def_s =
+{
+ sizeof(tdav_codec_h263_t),
+ tdav_codec_h263_ctor,
+ tdav_codec_h263_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h263_plugin_def_s =
+{
+ &tdav_codec_h263_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_h263,
+ "H263",
+ "H263-1996 codec (FFmpeg)",
+ TMEDIA_CODEC_FORMAT_H263,
+ tsk_false,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video */
+ {176, 144, 15},
+
+ tdav_codec_h263_set,
+ tdav_codec_h263_open,
+ tdav_codec_h263_close,
+ tdav_codec_h263_encode,
+ tdav_codec_h263_decode,
+ tdav_codec_h263_sdp_att_match,
+ tdav_codec_h263_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h263_plugin_def_t = &tdav_codec_h263_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ H.263-1998 Plugin interface ================= */
+
+//
+// H.263-1998 object definition
+//
+
+static tsk_size_t tdav_codec_h263p_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ uint8_t P, V, PLEN, PEBIT;
+ uint8_t* pdata = (uint8_t*)in_data;
+ const uint8_t* pay_ptr;
+ tsk_size_t pay_size;
+ int hdr_size = H263P_HEADER_SIZE;
+ tsk_size_t xsize, retsize = 0;
+ int got_picture_ptr;
+ int ret;
+
+ tdav_codec_h263_t* h263 = (tdav_codec_h263_t*)self;
+ const trtp_rtp_header_t* rtp_hdr = proto_hdr;
+
+ if(!self || !in_data || !in_size || ((int)in_size <= hdr_size) || !out_data || !h263->decoder.context){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+/*
+ rfc4629 - 5.1. General H.263+ Payload Header
+
+ 0 1
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | RR |P|V| PLEN |PEBIT|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+ P = (pdata[0] & 0x04)>>2;
+ V = (pdata[0] & 0x02)>>1;
+ PLEN = (((pdata[0] & 0x01)<<5) | pdata[1]>>3);
+ PEBIT = pdata[1] & 0x07;
+
+ if(V){
+ /*
+ Indicates the presence of an 8-bit field containing information
+ for Video Redundancy Coding (VRC), which follows immediately after
+ the initial 16 bits of the payload header, if present. For syntax
+ and semantics of that 8-bit VRC field, see Section 5.2.
+ */
+ }
+ if(PLEN){
+ /*
+ Length, in bytes, of the extra picture header. If no extra
+ picture header is attached, PLEN is 0. If PLEN>0, the extra
+ picture header is attached immediately following the rest of the
+ payload header. Note that the length reflects the omission of the
+ first two bytes of the picture start code (PSC). See Section 6.1.
+ */
+ hdr_size += PLEN;
+ if(PEBIT){
+ /*
+ Indicates the number of bits that shall be ignored in the last
+ byte of the picture header. If PLEN is not zero, the ignored bits
+ shall be the least significant bits of the byte. If PLEN is zero,
+ then PEBIT shall also be zero.
+ */
+ TSK_DEBUG_WARN("PEBIT ignored");
+ }
+ }
+ if(P){ /* MUST be done after PLEN and PEBIT */
+ /*
+ Indicates the picture start or a picture segment (GOB/Slice) start
+ or a video sequence end (EOS or EOSBS). Two bytes of zero bits
+ then have to be prefixed to the payload of such a packet to
+ compose a complete picture/GOB/slice/EOS/EOSBS start code. This
+ bit allows the omission of the two first bytes of the start codes,
+ thus improving the compression ratio.
+ */
+ hdr_size -= 2;
+ pdata[hdr_size] = 0x00, pdata[hdr_size + 1] = 0x00;
+ }
+
+ pay_ptr = (pdata + hdr_size);
+ pay_size = (in_size - hdr_size);
+ xsize = avpicture_get_size(h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height);
+
+ /* Packet lost? */
+ if(h263->decoder.last_seq != (rtp_hdr->seq_num - 1) && h263->decoder.last_seq){
+ if(h263->decoder.last_seq == rtp_hdr->seq_num){
+ // Could happen on some stupid emulators
+ //TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
+ return 0;
+ }
+ TSK_DEBUG_INFO("[H.263+] Packet loss, seq_num=%d", rtp_hdr->seq_num);
+ }
+ h263->decoder.last_seq = rtp_hdr->seq_num;
+
+ if((int)(h263->decoder.accumulator_pos + pay_size) <= xsize){
+ /* PEBIT is ignored */
+ memcpy(&((uint8_t*)h263->decoder.accumulator)[h263->decoder.accumulator_pos], pay_ptr, pay_size);
+ h263->decoder.accumulator_pos += pay_size;
+ }
+ else{
+ TSK_DEBUG_WARN("Buffer overflow");
+ h263->decoder.accumulator_pos = 0;
+ return 0;
+ }
+
+ if(rtp_hdr->marker){
+ AVPacket packet;
+ /* allocate destination buffer */
+ if(*out_max_size < xsize){
+ if(!(*out_data = tsk_realloc(*out_data, xsize))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ *out_max_size = 0;
+ h263->decoder.accumulator_pos = 0;
+ return 0;
+ }
+ *out_max_size = xsize;
+ }
+
+ /* decode the picture */
+ av_init_packet(&packet);
+ packet.size = (int)h263->decoder.accumulator_pos;
+ packet.data = h263->decoder.accumulator;
+ ret = avcodec_decode_video2(h263->decoder.context, h263->decoder.picture, &got_picture_ptr, &packet);
+
+ if(ret <0 || !got_picture_ptr){
+ TSK_DEBUG_WARN("Failed to decode the buffer");
+ }
+ else{
+ retsize = xsize;
+ TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width;
+ TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height;
+ /* copy picture into a linear buffer */
+ avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, (int)h263->decoder.context->width, (int)h263->decoder.context->height,
+ *out_data, (int)retsize);
+ }
+ /* in all cases: reset accumulator */
+ h263->decoder.accumulator_pos = 0;
+ }
+
+ return retsize;
+}
+
+/* constructor */
+static tsk_object_t* tdav_codec_h263p_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_h263p_t *h263p = self;
+ if(h263p){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ tdav_codec_h263_init(TDAV_CODEC_H263(self), tdav_codec_h263_1998, CODEC_ID_H263P, CODEC_ID_H263);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h263p_dtor(tsk_object_t * self)
+{
+ tdav_codec_h263p_t *h263p = self;
+ if(h263p){
+ /* deinit base */
+ tmedia_codec_video_deinit(h263p);
+ /* deinit self */
+ tdav_codec_h263_deinit(TDAV_CODEC_H263(self));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h263p_def_s =
+{
+ sizeof(tdav_codec_h263p_t),
+ tdav_codec_h263p_ctor,
+ tdav_codec_h263p_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h263p_plugin_def_s =
+{
+ &tdav_codec_h263p_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_h263p,
+ "H263-1998",
+ "H263-1998 codec (FFmpeg)",
+ TMEDIA_CODEC_FORMAT_H263_1998,
+ tsk_true,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (width, height, fps) */
+ {176, 144, 0},// fps is @deprecated
+
+ tdav_codec_h263p_set,
+ tdav_codec_h263p_open,
+ tdav_codec_h263p_close,
+ tdav_codec_h263p_encode,
+ tdav_codec_h263p_decode,
+ tdav_codec_h263p_sdp_att_match,
+ tdav_codec_h263p_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h263p_plugin_def_t = &tdav_codec_h263p_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ H.263-2000 Plugin interface ================= */
+
+//
+// H.263-2000 object definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_codec_h263pp_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_h263pp_t *h263pp = self;
+ if(h263pp){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ tdav_codec_h263_init(TDAV_CODEC_H263(self), tdav_codec_h263_2000, CODEC_ID_H263P, CODEC_ID_H263);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h263pp_dtor(tsk_object_t * self)
+{
+ tdav_codec_h263pp_t *h263pp = self;
+ if(h263pp){
+ /* deinit base */
+ tmedia_codec_video_deinit(h263pp);
+ /* deinit self */
+ tdav_codec_h263_deinit(TDAV_CODEC_H263(self));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h263pp_def_s =
+{
+ sizeof(tdav_codec_h263pp_t),
+ tdav_codec_h263pp_ctor,
+ tdav_codec_h263pp_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h263pp_plugin_def_s =
+{
+ &tdav_codec_h263pp_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_h263pp,
+ "H263-2000",
+ "H263-2000 codec (FFmpeg)",
+ TMEDIA_CODEC_FORMAT_H263_2000,
+ tsk_true,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (width, height, fps)*/
+ {176, 144, 0},// fps is @deprecated
+
+ tdav_codec_h263pp_set,
+ tdav_codec_h263pp_open,
+ tdav_codec_h263pp_close,
+ tdav_codec_h263pp_encode,
+ tdav_codec_h263pp_decode,
+ tdav_codec_h263pp_sdp_att_match,
+ tdav_codec_h263pp_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h263pp_plugin_def_t = &tdav_codec_h263pp_plugin_def_s;
+
+
+
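+/* Configures and opens the FFmpeg H.263 encoder: bitrate clamped to the negotiated
+ * maximum, RTP payload size hint, GOP size derived from the output fps, and the
+ * optional annexes (D, I, J, K, S, T) enabled for the 1998/2000 flavours. */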
+int tdav_codec_h263_open_encoder(tdav_codec_h263_t* self)
+{
+ int ret;
+ int size;
+ int32_t max_bw_kpbs;
+ if(self->encoder.context){
+ TSK_DEBUG_ERROR("Encoder already opened");
+ return -1;
+ }
+
+ self->encoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(self->encoder.context);
+
+ self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+ self->encoder.context->time_base.num = 1;
+ self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
+ self->encoder.context->width = TMEDIA_CODEC_VIDEO(self)->out.width;
+ self->encoder.context->height = TMEDIA_CODEC_VIDEO(self)->out.height;
+
+ self->encoder.context->qmin = 10;
+ self->encoder.context->qmax = 51;
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->mb_qmin = self->encoder.context->qmin;
+ self->encoder.context->mb_qmax = self->encoder.context->qmax;
+#endif
+ self->encoder.context->mb_decision = FF_MB_DECISION_RD;
+ max_bw_kpbs = TSK_CLAMP(
+ 0,
+ tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
+ self->encoder.max_bw_kpbs
+ );
+ self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps
+ //self->encoder.context->rc_lookahead = 0;
+ self->encoder.context->rtp_payload_size = RTP_PAYLOAD_SIZE;
+ self->encoder.context->opaque = tsk_null;
+ self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H263_GOP_SIZE_IN_SECONDS);
+ self->encoder.context->flags |= CODEC_FLAG_QSCALE;
+ self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
+ self->encoder.context->max_b_frames = 0;
+
+ // Picture (YUV 420)
+ if(!(self->encoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create encoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(self->encoder.picture);
+ //if((ret = avpicture_alloc((AVPicture*)self->encoder.picture, PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height))){
+ // TSK_DEBUG_ERROR("Failed to allocate encoder picture");
+ // return ret;
+ //}
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
+ if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
+ return -2;
+ }
+
+
+ // RTP Callback
+ switch(self->type){
+ case tdav_codec_h263_1996:
+ { // H263 - 1996
+ break;
+ }
+ case tdav_codec_h263_1998:
+ { // H263 - 1998
+#if defined(CODEC_FLAG_H263P_UMV)
+ self->encoder.context->flags |= CODEC_FLAG_H263P_UMV; // Annex D+
+#endif
+ self->encoder.context->flags |= CODEC_FLAG_AC_PRED; // Annex I and T
+ self->encoder.context->flags |= CODEC_FLAG_LOOP_FILTER; // Annex J
+#if defined(CODEC_FLAG_H263P_SLICE_STRUCT)
+ self->encoder.context->flags |= CODEC_FLAG_H263P_SLICE_STRUCT; // Annex K
+#endif
+#if defined(CODEC_FLAG_H263P_AIV)
+ self->encoder.context->flags |= CODEC_FLAG_H263P_AIV; // Annex S
+#endif
+ break;
+ }
+ case tdav_codec_h263_2000:
+ { // H263 - 2000
+#if defined(CODEC_FLAG_H263P_UMV)
+ self->encoder.context->flags |= CODEC_FLAG_H263P_UMV; // Annex D+
+#endif
+ self->encoder.context->flags |= CODEC_FLAG_AC_PRED; // Annex I and T
+ self->encoder.context->flags |= CODEC_FLAG_LOOP_FILTER; // Annex J
+#if defined(CODEC_FLAG_H263P_SLICE_STRUCT)
+ self->encoder.context->flags |= CODEC_FLAG_H263P_SLICE_STRUCT; // Annex K
+#endif
+#if defined(CODEC_FLAG_H263P_AIV)
+ self->encoder.context->flags |= CODEC_FLAG_H263P_AIV; // Annex S
+#endif
+ break;
+ }
+ }
+ // Open encoder
+ if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
+ return ret;
+ }
+
+ TSK_DEBUG_INFO("[H.263] bitrate=%d bps", self->encoder.context->bit_rate);
+
+ return ret;
+}
+
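+/* Allocates the FFmpeg H.263 decoder context, the reusable output frame and the
+ * accumulator used to re-assemble fragmented RTP payloads before decoding. */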
+int tdav_codec_h263_open_decoder(tdav_codec_h263_t* self)
+{
+ int ret, size;
+
+ if(self->decoder.context){
+ TSK_DEBUG_ERROR("Decoder already opened");
+ return -1;
+ }
+
+ self->decoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(self->decoder.context);
+
+ self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+ self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->in.width;
+ self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->in.height;
+
+ // Picture (YUV 420)
+ if(!(self->decoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create decoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(self->decoder.picture);
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, self->decoder.context->width, self->decoder.context->height);
+ if(!(self->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
+ return -2;
+ }
+
+ // Open decoder
+ if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
+ return ret;
+ }
+
+ self->decoder.last_seq = 0;
+
+ return ret;
+}
+
+int tdav_codec_h263_close_encoder(tdav_codec_h263_t* self)
+{
+ if(self->encoder.context){
+ avcodec_close(self->encoder.context);
+ av_free(self->encoder.context);
+ self->encoder.context = tsk_null;
+ }
+ if(self->encoder.picture){
+ av_free(self->encoder.picture);
+ self->encoder.picture = tsk_null;
+ }
+ if(self->encoder.buffer){
+ TSK_FREE(self->encoder.buffer);
+ }
+ return 0;
+}
+
+int tdav_codec_h263_close_decoder(tdav_codec_h263_t* self)
+{
+ if(self->decoder.context){
+ avcodec_close(self->decoder.context);
+ av_free(self->decoder.context);
+ self->decoder.context = tsk_null;
+ }
+ if(self->decoder.picture){
+ av_free(self->decoder.picture);
+ self->decoder.picture = tsk_null;
+ }
+ if(self->decoder.accumulator){
+ TSK_FREE(self->decoder.accumulator);
+ self->decoder.accumulator_pos = 0;
+ }
+ return 0;
+}
+
+/* ============ Callbacks ================= */
+
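+/* Splits the encoded H.263 bitstream at picture/GOB start codes (0x00 0x00 0x8X)
+ * and hands each chunk to the RTP callback matching the codec flavour (RFC 2190
+ * Mode A for H.263-1996, RFC 4629 otherwise). A buffer smaller than
+ * RTP_PAYLOAD_SIZE is sent as a single, final packet. */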
+static void tdav_codec_h263_encap(const tdav_codec_h263_t* h263, const uint8_t* pdata, tsk_size_t size)
+{
+ tsk_bool_t frag = tsk_false;
+ register uint32_t i, last_index = 0;
+
+ if(size < RTP_PAYLOAD_SIZE){
+ goto last;
+ }
+
+ for(i = 4; i<(size - 4); i++){
+ if(pdata[i] == 0x00 && pdata[i+1] == 0x00 && pdata[i+2]>=0x80){ /* PSC or (GBSC) found */
+ if((i - last_index) >= RTP_PAYLOAD_SIZE || tsk_true/* FIXME */){
+ switch(h263->type){
+ case tdav_codec_h263_1996:
+ tdav_codec_h263_rtp_callback((tdav_codec_h263_t*) h263, pdata+last_index,
+ (i - last_index), (last_index == size));
+ break;
+ default:
+ tdav_codec_h263p_rtp_callback((tdav_codec_h263_t*) h263, pdata + last_index,
+ (i - last_index), frag, (last_index == size));
+ frag = tsk_true;
+ break;
+ }
+ last_index = i;
+ }
+ }
+ }
+last:
+ if(last_index < size){
+ switch(h263->type){
+ case tdav_codec_h263_1996:
+ tdav_codec_h263_rtp_callback((tdav_codec_h263_t*) h263, pdata + last_index,
+ (size - last_index), tsk_true);
+ break;
+ default:
+ tdav_codec_h263p_rtp_callback((tdav_codec_h263_t*) h263, pdata + last_index,
+ (size - last_index), frag, tsk_true);
+ break;
+ }
+ }
+}
+
+
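+/* RFC 2190 (H.263-1996) payloader: prepends a 4-byte Mode A header to the chunk.
+ * When the chunk starts with a PSC, the source format (SRC) and picture coding
+ * type bits are extracted from PTYPE and copied into the header before the packet
+ * is delivered through out.callback. */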
+static void tdav_codec_h263_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t marker)
+{
+ uint8_t* pdata = (uint8_t*)data;
+
+ if(self->rtp.size < (size + H263_HEADER_MODE_A_SIZE)){
+ if(!(self->rtp.ptr = tsk_realloc(self->rtp.ptr, (size + H263_HEADER_MODE_A_SIZE)))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return;
+ }
+ self->rtp.size = (size + H263_HEADER_MODE_A_SIZE);
+ }
+ memcpy((self->rtp.ptr + H263_HEADER_MODE_A_SIZE), data, size);
+
+ /* http://eu.sabotage.org/www/ITU/H/H0263e.pdf section 5.1
+ * 5.1.1 Picture Start Code (PSC) (22 bits)
+ * PSC is a word of 22 bits. Its value is 0000 0000 0000 0000 1 00000.
+ * 5.1.2 Temporal Reference (TR) (8 bits)
+ * 5.1.3 Type Information (PTYPE) (Variable Length)
+ * Bit 1: Always "1", in order to avoid start code emulation.
+ * Bit 2: Always "0", for distinction with Recommendation H.261.
+ * Bit 3: Split screen indicator, "0" off, "1" on.
+ * Bit 4: Document camera indicator, "0" off, "1" on.
+ * Bit 5: Full Picture Freeze Release, "0" off, "1" on.
+ * Bits 6-8: Source Format, "000" forbidden, "001" sub-QCIF, "010" QCIF, "011" CIF,
+ "100" 4CIF, "101" 16CIF, "110" reserved, "111" extended PTYPE.
+ If bits 6-8 are not equal to "111", which indicates an extended PTYPE (PLUSPTYPE), the following
+ five bits are also present in PTYPE:
+ Bit 9: Picture Coding Type, "0" INTRA (I-picture), "1" INTER (P-picture).
+ Bit 10: Optional Unrestricted Motion Vector mode (see Annex D), "0" off, "1" on.
+ Bit 11: Optional Syntax-based Arithmetic Coding mode (see Annex E), "0" off, "1" on.
+ Bit 12: Optional Advanced Prediction mode (see Annex F), "0" off, "1" on.
+ Bit 13: Optional PB-frames mode (see Annex G), "0" normal I- or P-picture, "1" PB-frame.
+ */
+ if(pdata[0] == 0x00 && pdata[1] == 0x00 && (pdata[2] & 0xfc)==0x80){ /* PSC */
+ /* RFC 2190 -5.1 Mode A
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |F|P|SBIT |EBIT | SRC |I|U|S|A|R |DBQ| TRB | TR |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ SRC : 3 bits
+ Source format, bit 6,7 and 8 in PTYPE defined by H.263 [4], specifies
+ the resolution of the current picture.
+
+ I: 1 bit.
+ Picture coding type, bit 9 in PTYPE defined by H.263[4], "0" is
+ intra-coded, "1" is inter-coded.
+ */
+
+ // PDATA[4] ======> Bits 3-10 of PTYPE
+ uint32_t rtp_hdr = 0;
+ uint8_t format, pict_type;
+
+ // Source Format = 4,5,6
+ format = (pdata[4] & 0x3C)>>2;
+ // Picture Coding Type = 7
+ pict_type = (pdata[4] & 0x02)>>1;
+ // RTP mode A header
+ ((uint8_t*)&rtp_hdr)[1] = (format << 5) | (pict_type << 4);
+ //rtp_hdr = tnet_htonl(rtp_hdr);
+ memcpy(self->rtp.ptr, &rtp_hdr, sizeof(rtp_hdr));
+ }
+
+ // Send data over the network
+ if(TMEDIA_CODEC_VIDEO(self)->out.callback){
+ TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = self->rtp.ptr;
+ TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = (size + H263_HEADER_MODE_A_SIZE);
+ TMEDIA_CODEC_VIDEO(self)->out.result.duration = (uint32_t)((1./(double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate);
+ TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = marker;
+ TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+ }
+}
+
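+/* RFC 4629 (H.263-1998/2000) payloader: the two leading bytes of the chunk are used
+ * as the payload header and P is forced to 1 (see the FIXME below), then the packet
+ * is delivered through out.callback. */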
+static void tdav_codec_h263p_rtp_callback(tdav_codec_h263_t *self, const void *data, tsk_size_t size, tsk_bool_t frag, tsk_bool_t marker)
+{
+ uint8_t* pdata = (uint8_t*)data;
+ //uint8_t rtp_hdr[2] = {0x00, 0x00};
+ //tsk_bool_t eos = tsk_false;
+
+ const void* _ptr = tsk_null;
+ tsk_size_t _size = 0;
+ //static tsk_bool_t frag = tsk_false;
+ //tsk_bool_t found_gob = tsk_false;
+
+ /* RFC 4629 - 5.1. General H.263+ Payload Header
+ The H.263+ payload header is structured as follows:
+ 0 1
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | RR |P|V| PLEN |PEBIT|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ /* http://eu.sabotage.org/www/ITU/H/H0263e.pdf
+ *
+ * 5.1.1 Picture Start Code (PSC) (22 bits)
+ * ->PSC is a word of 22 bits. Its value is 0000 0000 0000 0000 1 00000.
+ * 5.1.27 End Of Sequence (EOS) (22 bits)
+ * ->A codeword of 22 bits. Its value is 0000 0000 0000 0000 1 11111
+ * 5.2.2 Group of Block Start Code (GBSC) (17 bits)
+ * ->A word of 17 bits. Its value is 0000 0000 0000 0000 1
+ * C.4.1 End Of Sub-Bitstream code (EOSBS) (23 bits)
+ * ->The EOSBS code is a codeword of 23 bits. Its value is 0000 0000 0000 0000 1 11110 0
+ *
+ *
+ * 5.2.3 Group Number (GN) (5 bits)
+ * -> last 5 bits
+ */
+ //if(pdata[0] == 0x00 && pdata[1] == 0x00 && pdata[2] >= 0x80){ /* PSC or EOS or GBSC */
+ // uint8_t GN = ((pdata[2]>>2) & 0x1F);
+ // found_gob = tsk_true;
+ // //TSK_DEBUG_INFO("GN=%u", pdata[2]);
+ //
+ // /* RFC 4629 - 6.1.1. Packets that begin with a Picture Start Code
+ // A packet that begins at the location of a Picture, GOB, slice, EOS,
+ // or EOSBS start code shall omit the first two (all zero) bytes from
+ // the H.263+ bitstream and signify their presence by setting P=1 in the
+ // payload header.
+ // */
+
+ // if(GN == 0x00){ /* PSC 00000 */
+ // /* Use the two first bytes as RTP header */
+ // //pdata[0] |= 0x04; // P=1
+
+ // /*
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | RR |1|V|0|0|0|0|0|0|0|0|0| bitstream data without the :
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // : first two 0 bytes of the PSC
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // */
+
+ // //TSK_DEBUG_INFO("H263 - PSC");
+ // }
+ // else if(GN == 0x1F){ /* EOS 11111 */
+ // /* Use the two first bytes as RTP header */
+ // //pdata[0] |= 0x04; // P=1
+ // eos = tsk_true;
+ // /* RFC 4629 - 6.1.3. Packets that begin with an EOS or EOSBS Code
+ // 0 1 2
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | RR |1|V|0|0|0|0|0|0|0|0|0|1|1|1|1|1|1|0|0|
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // */
+ // //TSK_DEBUG_INFO("H263 - EOS");
+ // }
+ // else /*if((GN >> 4) == 0x01)*/{ /* GBSC 10000 */
+ // /* Use the two first bytes as RTP header */
+ // //pdata[0] |= 0x04; // P=1
+ //
+ // /* RFC 4629 - 6.1.2. Packets that begin with GBSC or SSC
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | RR |1|V|0 0 1 0 0 1|PEBIT|1 0 0 0 0 0| picture header :
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // : starting with TR, PTYPE ... |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ... | bitstream :
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // : data starting with GBSC/SSC without its first two 0 bytes
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // */
+ // //TSK_DEBUG_INFO("H263 - GBSC");
+ // found_gob = tsk_false;
+ // }
+ // //else if(EOSBS) -> Not Supported
+ //}
+ //else{
+ // /* 6.2. Encapsulating Follow-on Packet (P=0) */
+ // int i = 0;
+ // i++;
+ //}
+
+ //if(/*eos*/!found_gob && frag){
+ // if(self->rtp.size < (size + 2/* H263+ Header size */)){
+ // if(!(self->rtp.ptr = tsk_realloc(self->rtp.ptr, (size + 2)))){
+ // TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ // return;
+ // }
+ // self->rtp.size = (size + 2);
+ // }
+ // /* RFC 4629 - 6. Packetization Schemes */
+ // //rtp_hdr[0] |= 0x00;
+ // //memcpy(self->rtp.ptr, rtp_hdr/* zeros-> is it correct? */, 2);
+ // //memcpy((self->rtp.ptr + 2), pdata, size);
+ // //_ptr = self->rtp.ptr;
+ // //_size = (size + 2);
+
+ // pdata[0] |= pdata[2] > 0x80 ? 0x04 : 0x04;
+ // _ptr = pdata;
+ // _size = size;
+ //}
+ //else{
+ // pdata[0] |= pdata[2] > 0x80 ? 0x04 : 0x04;
+ // _ptr = pdata;
+ // _size = size;
+ //}
+
+ // FIXME: the commented-out GOB/EOS handling above is disabled; for now P=1 is always set
+ pdata[0] |= 0x04; // P=1 (the original ternary had identical 0x04 branches)
+ _ptr = pdata;
+ _size = size;
+
+
+ // Send data over the network
+ if(TMEDIA_CODEC_VIDEO(self)->out.callback){
+ TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = _ptr;
+ TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = _size;
+ TMEDIA_CODEC_VIDEO(self)->out.result.duration = (uint32_t)((1./(double)TMEDIA_CODEC_VIDEO(self)->out.fps) * TMEDIA_CODEC(self)->plugin->rate);
+ TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = marker;
+ TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
+ }
+}
+
+tsk_bool_t tdav_codec_ffmpeg_h263_is_supported()
+{
+ return (avcodec_find_encoder(CODEC_ID_H263) && avcodec_find_decoder(CODEC_ID_H263));
+}
+
+tsk_bool_t tdav_codec_ffmpeg_h263p_is_supported()
+{
+ return (avcodec_find_encoder(CODEC_ID_H263P) && avcodec_find_decoder(CODEC_ID_H263));
+}
+
+tsk_bool_t tdav_codec_ffmpeg_h263pp_is_supported()
+{
+ return tdav_codec_ffmpeg_h263p_is_supported();
+}
+
+
+#endif /* HAVE_FFMPEG */
diff --git a/tinyDAV/src/codecs/h264/tdav_codec_h264.c b/tinyDAV/src/codecs/h264/tdav_codec_h264.c
new file mode 100644
index 0000000..0ec3760
--- /dev/null
+++ b/tinyDAV/src/codecs/h264/tdav_codec_h264.c
@@ -0,0 +1,993 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_codec_h264.c
+ * @brief H.264 codec plugin using FFmpeg for decoding and x264 for encoding
+ * RTP payloader/depayloader follows RFC 3984
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/codecs/h264/tdav_codec_h264.h"
+
+#if HAVE_FFMPEG || HAVE_H264_PASSTHROUGH
+
+#include "tinydav/codecs/h264/tdav_codec_h264_rtp.h"
+#include "tinydav/video/tdav_converter_video.h"
+
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tinymedia/tmedia_params.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_params.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#if HAVE_FFMPEG
+# include <libavcodec/avcodec.h>
+# if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
+# include <libavutil/opt.h>
+# endif
+#endif
+
+typedef struct tdav_codec_h264_s
+{
+ TDAV_DECLARE_CODEC_H264_COMMON;
+
+ // Encoder
+ struct{
+#if HAVE_FFMPEG
+ AVCodec* codec;
+ AVCodecContext* context;
+ AVFrame* picture;
+#endif
+ void* buffer;
+ int64_t frame_count;
+ tsk_bool_t force_idr;
+ int32_t quality; // [1-31]
+ int rotation;
+ int32_t max_bw_kpbs;
+ tsk_bool_t passthrough; // whether to bypass encoding
+ } encoder;
+
+ // decoder
+ struct{
+#if HAVE_FFMPEG
+ AVCodec* codec;
+ AVCodecContext* context;
+ AVFrame* picture;
+#endif
+ void* accumulator;
+ tsk_size_t accumulator_pos;
+ tsk_size_t accumulator_size;
+ uint16_t last_seq;
+ tsk_bool_t passthrough; // whether to bypass decoding
+ } decoder;
+}
+tdav_codec_h264_t;
+
+#if !defined(TDAV_H264_GOP_SIZE_IN_SECONDS)
+# define TDAV_H264_GOP_SIZE_IN_SECONDS 25
+#endif
+
+#define kResetRotationTrue tsk_true
+#define kResetRotationFalse tsk_false
+
+static int tdav_codec_h264_init(tdav_codec_h264_t* self, profile_idc_t profile);
+static int tdav_codec_h264_deinit(tdav_codec_h264_t* self);
+static int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self);
+static int tdav_codec_h264_close_encoder(tdav_codec_h264_t* self, tsk_bool_t reset_rotation);
+static int tdav_codec_h264_open_decoder(tdav_codec_h264_t* self);
+static int tdav_codec_h264_close_decoder(tdav_codec_h264_t* self);
+
+/* ============ H.264 Base/Main Profile X.X Plugin interface functions ================= */
+
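+/* Runtime settings: "action" (force an IDR, adjust bandwidth through the quantizer),
+ * "bw_kbps", "bypass-encoding", "bypass-decoding" and "rotation" (which re-opens the
+ * encoder with swapped width/height). */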
+static int tdav_codec_h264_set(tmedia_codec_t* self, const tmedia_param_t* param)
+{
+ tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+ if (param->value_type == tmedia_pvt_int32) {
+ if(tsk_striequals(param->key, "action")){
+ tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
+ switch(action){
+ case tmedia_codec_action_encode_idr:
+ {
+ h264->encoder.force_idr = tsk_true;
+ break;
+ }
+ case tmedia_codec_action_bw_down:
+ {
+ h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality + 1), 31);
+#if HAVE_FFMPEG
+ if (h264->encoder.context) {
+ h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
+ }
+#endif
+ break;
+ }
+ case tmedia_codec_action_bw_up:
+ {
+ h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality - 1), 31);
+#if HAVE_FFMPEG
+ if (h264->encoder.context) {
+ h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
+ }
+#endif
+ break;
+ }
+ }
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "bw_kbps")){
+ int32_t max_bw_userdefine = self->bandwidth_max_upload;
+ int32_t max_bw_new = *((int32_t*)param->value);
+ if (max_bw_userdefine > 0) {
+ // do not use more than what the user defined in its configuration
+ h264->encoder.max_bw_kpbs = TSK_MIN(max_bw_new, max_bw_userdefine);
+ }
+ else {
+ h264->encoder.max_bw_kpbs = max_bw_new;
+ }
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "bypass-encoding")){
+ h264->encoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
+ TSK_DEBUG_INFO("[H.264] bypass-encoding = %d", h264->encoder.passthrough);
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "bypass-decoding")){
+ h264->decoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
+ TSK_DEBUG_INFO("[H.264] bypass-decoding = %d", h264->decoder.passthrough);
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "rotation")){
+ int32_t rotation = *((int32_t*)param->value);
+ if(h264->encoder.rotation != rotation){
+ h264->encoder.rotation = rotation;
+ if (self->opened) {
+ int ret;
+ if ((ret = tdav_codec_h264_close_encoder(h264, kResetRotationFalse))) {
+ return ret;
+ }
+ if ((ret = tdav_codec_h264_open_encoder(h264))) {
+ return ret;
+ }
+#if 0 // Not working
+ if((ret = avcodec_close(h264->encoder.context))){
+ TSK_DEBUG_ERROR("Failed to close [%s] codec", TMEDIA_CODEC(h264)->plugin->desc);
+ return ret;
+ }
+ h264->encoder.context->width = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(h264)->out.height : TMEDIA_CODEC_VIDEO(h264)->out.width;
+ h264->encoder.context->height = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(h264)->out.width : TMEDIA_CODEC_VIDEO(h264)->out.height;
+ if((ret = avcodec_open(h264->encoder.context, h264->encoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(h264)->plugin->desc);
+ return ret;
+ }
+ h264->encoder.force_idr = tsk_true;
+#endif
+ }
+ }
+ return 0;
+ }
+ }
+ return -1;
+}
+
+
+static int tdav_codec_h264_open(tmedia_codec_t* self)
+{
+ int ret;
+ tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+
+ if(!h264){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is not opened */
+
+ // Encoder
+ if((ret = tdav_codec_h264_open_encoder(h264))){
+ return ret;
+ }
+
+ // Decoder
+ if((ret = tdav_codec_h264_open_decoder(h264))){
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tdav_codec_h264_close(tmedia_codec_t* self)
+{
+ tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+
+ if(!h264){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* the caller (base class) already checked that the codec is opened */
+
+ // Encoder
+ tdav_codec_h264_close_encoder(h264, kResetRotationTrue);
+
+ // Decoder
+ tdav_codec_h264_close_decoder(h264);
+
+ return 0;
+}
+
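+/* Encode callback: wraps the YUV420 input in the reusable AVFrame, decides whether an
+ * IDR and the SPS/PPS headers must be sent, encodes with avcodec_encode_video() and
+ * hands the resulting bitstream to tdav_codec_h264_rtp_encap() (in passthrough mode the
+ * input is encapsulated as-is). Always returns 0: packets leave through out.callback. */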
+static tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
+{
+ int ret = 0;
+
+#if HAVE_FFMPEG
+ int size;
+ tsk_bool_t send_idr, send_hdr;
+#endif
+
+ tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+
+ if(!self || !in_data || !in_size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if(!self->opened){
+ TSK_DEBUG_ERROR("Codec not opened");
+ return 0;
+ }
+
+ if(h264->encoder.passthrough) {
+ tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), (const uint8_t*)in_data, in_size);
+ }
+ else { // !h264->encoder.passthrough
+#if HAVE_FFMPEG // wrap yuv420 buffer
+ size = avpicture_fill((AVPicture *)h264->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h264->encoder.context->width, h264->encoder.context->height);
+ if (size != in_size){
+ /* guard */
+ TSK_DEBUG_ERROR("Invalid size: %u<>%u", size, in_size);
+ return 0;
+ }
+
+ // send IDR for:
+ // - the first frame
+ // - remote peer requested an IDR
+ // - every second within the first 4 seconds (currently disabled, see below)
+ send_idr = (
+ h264->encoder.frame_count++ == 0
+ || h264->encoder.force_idr
+ //|| ( (h264->encoder.frame_count < (int)TMEDIA_CODEC_VIDEO(h264)->out.fps * 4) && ((h264->encoder.frame_count % TMEDIA_CODEC_VIDEO(h264)->out.fps)==0) )
+ );
+
+ // send SPS and PPS headers for:
+ // - IDR frames (not required but it's the easiest way to deal with pkt loss)
+ // - every 5 seconds after the first 4 seconds (currently disabled, see below)
+ send_hdr = (
+ send_idr
+ //|| ( (h264->encoder.frame_count % (TMEDIA_CODEC_VIDEO(h264)->out.fps * 5))==0 )
+ );
+ if(send_hdr){
+ tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.context->extradata, (tsk_size_t)h264->encoder.context->extradata_size);
+ }
+
+ // Encode data
+ #if LIBAVCODEC_VERSION_MAJOR <= 53
+ h264->encoder.picture->pict_type = send_idr ? FF_I_TYPE : 0;
+ #else
+ h264->encoder.picture->pict_type = send_idr ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
+ #endif
+ h264->encoder.picture->key_frame = send_idr ? 1 : 0;
+ h264->encoder.picture->pts = AV_NOPTS_VALUE;
+ h264->encoder.picture->quality = h264->encoder.context->global_quality;
+ // h264->encoder.picture->pts = h264->encoder.frame_count; MUST NOT
+ ret = avcodec_encode_video(h264->encoder.context, h264->encoder.buffer, size, h264->encoder.picture);
+ if(ret > 0){
+ tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.buffer, (tsk_size_t)ret);
+ }
+ h264->encoder.force_idr = tsk_false;
+#endif
+ }// else(!h264->encoder.passthrough)
+
+ return 0;
+}
+
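+/* Decode callback (RFC 3984 depayloader): NAL units are re-assembled in the accumulator
+ * (re-inserting start-code prefixes when needed); on the RTP marker bit the buffer is
+ * either copied as-is (passthrough) or decoded with avcodec_decode_video2() and the
+ * YUV420 picture is laid out into *out_data. */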
+static tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
+{
+ tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
+ const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+
+ const uint8_t* pay_ptr = tsk_null;
+ tsk_size_t pay_size = 0;
+ int ret;
+ tsk_bool_t sps_or_pps, append_scp, end_of_unit;
+ tsk_size_t retsize = 0, size_to_copy = 0;
+ static const tsk_size_t xmax_size = (3840 * 2160 * 3) >> 3; // >>3 instead of >>1 (not an error)
+ static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX);
+#if HAVE_FFMPEG
+ int got_picture_ptr = 0;
+#endif
+
+ if(!h264 || !in_data || !in_size || !out_data
+#if HAVE_FFMPEG
+ || !h264->decoder.context
+#endif
+ )
+ {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ //TSK_DEBUG_INFO("SeqNo=%hu", rtp_hdr->seq_num);
+
+ /* Packet lost? */
+ if((h264->decoder.last_seq + 1) != rtp_hdr->seq_num && h264->decoder.last_seq){
+ TSK_DEBUG_INFO("[H.264] Packet loss, seq_num=%d", (h264->decoder.last_seq + 1));
+ }
+ h264->decoder.last_seq = rtp_hdr->seq_num;
+
+
+ /* 5.3. NAL Unit Octet Usage
+ +---------------+
+ |0|1|2|3|4|5|6|7|
+ +-+-+-+-+-+-+-+-+
+ |F|NRI| Type |
+ +---------------+
+ */
+ if(*((uint8_t*)in_data) & 0x80){
+ TSK_DEBUG_WARN("F=1");
+ /* reset accumulator */
+ h264->decoder.accumulator_pos = 0;
+ return 0;
+ }
+
+ /* get payload */
+ if((ret = tdav_codec_h264_get_pay(in_data, in_size, (const void**)&pay_ptr, &pay_size, &append_scp, &end_of_unit)) || !pay_ptr || !pay_size){
+ TSK_DEBUG_ERROR("Depayloader failed to get H.264 content");
+ return 0;
+ }
+ //append_scp = tsk_true;
+ size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0);
+ // whether it's SPS or PPS (append_scp is false for subsequent FU-A chunks)
+ sps_or_pps = append_scp && pay_ptr && ((pay_ptr[0] & 0x1F) == 7 || (pay_ptr[0] & 0x1F) == 8);
+
+ // start-accumulator
+ if(!h264->decoder.accumulator){
+ if(size_to_copy > xmax_size){
+ TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size);
+ return 0;
+ }
+ if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return 0;
+ }
+ h264->decoder.accumulator_size = size_to_copy;
+ }
+ if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){
+ TSK_DEBUG_ERROR("BufferOverflow");
+ h264->decoder.accumulator_pos = 0;
+ return 0;
+ }
+ if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){
+ if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){
+ TSK_DEBUG_ERROR("Failed to reallocate new buffer");
+ h264->decoder.accumulator_pos = 0;
+ h264->decoder.accumulator_size = 0;
+ return 0;
+ }
+ h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy);
+ }
+
+ if(append_scp){
+ memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size);
+ h264->decoder.accumulator_pos += start_code_prefix_size;
+ }
+ memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size);
+ h264->decoder.accumulator_pos += pay_size;
+ // end-accumulator
+
+ if(sps_or_pps){
+ // http://libav-users.943685.n4.nabble.com/Decode-H264-streams-how-to-fill-AVCodecContext-from-SPS-PPS-td2484472.html
+ // SPS and PPS should be bundled with IDR
+ TSK_DEBUG_INFO("Receiving SPS or PPS ...to be tied to an IDR");
+ }
+ else if(rtp_hdr->marker){
+ if(h264->decoder.passthrough){
+ if(*out_max_size < h264->decoder.accumulator_pos){
+ if((*out_data = tsk_realloc(*out_data, h264->decoder.accumulator_pos))){
+ *out_max_size = h264->decoder.accumulator_pos;
+ }
+ else{
+ *out_max_size = 0;
+ return 0;
+ }
+ }
+ memcpy(*out_data, h264->decoder.accumulator, h264->decoder.accumulator_pos);
+ retsize = h264->decoder.accumulator_pos;
+ }
+ else { // !h264->decoder.passthrough
+#if HAVE_FFMPEG
+ AVPacket packet;
+
+ /* decode the picture */
+ av_init_packet(&packet);
+ packet.dts = packet.pts = AV_NOPTS_VALUE;
+ packet.size = (int)h264->decoder.accumulator_pos;
+ packet.data = h264->decoder.accumulator;
+ ret = avcodec_decode_video2(h264->decoder.context, h264->decoder.picture, &got_picture_ptr, &packet);
+
+ if(ret < 0){
+ TSK_DEBUG_INFO("Failed to decode the buffer with error code=%d, size=%u, append=%s", ret, h264->decoder.accumulator_pos, append_scp ? "yes" : "no");
+ if(TMEDIA_CODEC_VIDEO(self)->in.callback){
+ TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
+ TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+ TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+ }
+ }
+ else if(got_picture_ptr){
+ tsk_size_t xsize;
+
+ /* IDR ? */
+ if(((pay_ptr[0] & 0x1F) == 0x05) && TMEDIA_CODEC_VIDEO(self)->in.callback){
+ TSK_DEBUG_INFO("Decoded H.264 IDR");
+ TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr;
+ TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
+ TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
+ }
+ /* fill out */
+ xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height);
+ if(*out_max_size < xsize){
+ if((*out_data = tsk_realloc(*out_data, (xsize + FF_INPUT_BUFFER_PADDING_SIZE)))){
+ *out_max_size = xsize;
+ }
+ else{
+ *out_max_size = 0;
+ return 0;
+ }
+ }
+ retsize = xsize;
+ TMEDIA_CODEC_VIDEO(h264)->in.width = h264->decoder.context->width;
+ TMEDIA_CODEC_VIDEO(h264)->in.height = h264->decoder.context->height;
+ avpicture_layout((AVPicture *)h264->decoder.picture, h264->decoder.context->pix_fmt, (int)h264->decoder.context->width, (int)h264->decoder.context->height,
+ *out_data, (int)retsize);
+ }
+#endif /* HAVE_FFMPEG */
+ } // else(h264->decoder.passthrough)
+
+ h264->decoder.accumulator_pos = 0;
+ } // else if(rtp_hdr->marker)
+
+ return retsize;
+}
+
+static tsk_bool_t tdav_codec_h264_sdp_att_match(const tmedia_codec_t* self, const char* att_name, const char* att_value)
+{
+ return tdav_codec_h264_common_sdp_att_match((tdav_codec_h264_common_t*)self, att_name, att_value);
+}
+
+static char* tdav_codec_h264_sdp_att_get(const tmedia_codec_t* self, const char* att_name)
+{
+ char* att = tdav_codec_h264_common_sdp_att_get((const tdav_codec_h264_common_t*)self, att_name);
+ if(att && tsk_striequals(att_name, "fmtp")) {
+ tsk_strcat_2(&att, "; impl=%s",
+#if HAVE_FFMPEG
+ "FFMPEG"
+#elif HAVE_H264_PASSTHROUGH
+ "PASSTHROUGH"
+#endif
+ );
+ }
+ return att;
+}
+
+
+
+
+/* ============ H.264 Base Profile Plugin interface ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_h264_base_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+ if(h264){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ if(tdav_codec_h264_init(h264, profile_idc_baseline) != 0){
+ return tsk_null;
+ }
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h264_base_dtor(tsk_object_t * self)
+{
+ tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+ if(h264){
+ /* deinit base */
+ tdav_codec_h264_common_deinit((tdav_codec_h264_common_t*)self);
+ /* deinit self */
+ tdav_codec_h264_deinit(h264);
+
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h264_base_def_s =
+{
+ sizeof(tdav_codec_h264_t),
+ tdav_codec_h264_base_ctor,
+ tdav_codec_h264_base_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h264_base_plugin_def_s =
+{
+ &tdav_codec_h264_base_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_h264_bp,
+ "H264",
+ "H264 Base Profile (FFmpeg, x264)",
+ TMEDIA_CODEC_FORMAT_H264_BP,
+ tsk_true,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (width, height, fps) */
+ {176, 144, 0}, // fps is @deprecated
+
+ tdav_codec_h264_set,
+ tdav_codec_h264_open,
+ tdav_codec_h264_close,
+ tdav_codec_h264_encode,
+ tdav_codec_h264_decode,
+ tdav_codec_h264_sdp_att_match,
+ tdav_codec_h264_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h264_base_plugin_def_t = &tdav_codec_h264_base_plugin_def_s;
+
+/* ============ H.264 Main Profile Plugin interface ================= */
+
+/* constructor */
+static tsk_object_t* tdav_codec_h264_main_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+ if(h264){
+ /* init base: called by tmedia_codec_create() */
+ /* init self */
+ if(tdav_codec_h264_init(h264, profile_idc_main) != 0){
+ return tsk_null;
+ }
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_codec_h264_main_dtor(tsk_object_t * self)
+{
+ tdav_codec_h264_t *h264 = (tdav_codec_h264_t*)self;
+ if(h264){
+ /* deinit base */
+ tdav_codec_h264_common_deinit((tdav_codec_h264_common_t*)self);
+ /* deinit self */
+ tdav_codec_h264_deinit(h264);
+
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_codec_h264_main_def_s =
+{
+ sizeof(tdav_codec_h264_t),
+ tdav_codec_h264_main_ctor,
+ tdav_codec_h264_main_dtor,
+ tmedia_codec_cmp,
+};
+/* plugin definition*/
+static const tmedia_codec_plugin_def_t tdav_codec_h264_main_plugin_def_s =
+{
+ &tdav_codec_h264_main_def_s,
+
+ tmedia_video,
+ tmedia_codec_id_h264_mp,
+ "H264",
+ "H264 Main Profile (FFmpeg, x264)",
+ TMEDIA_CODEC_FORMAT_H264_MP,
+ tsk_true,
+ 90000, // rate
+
+ /* audio */
+ { 0 },
+
+ /* video (width, height, fps)*/
+ {176, 144, 0},// fps is @deprecated
+
+ tdav_codec_h264_set,
+ tdav_codec_h264_open,
+ tdav_codec_h264_close,
+ tdav_codec_h264_encode,
+ tdav_codec_h264_decode,
+ tdav_codec_h264_sdp_att_match,
+ tdav_codec_h264_sdp_att_get
+};
+const tmedia_codec_plugin_def_t *tdav_codec_h264_main_plugin_def_t = &tdav_codec_h264_main_plugin_def_s;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* ============ Common To all H264 codecs ================= */
+
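+/* Configures and opens the FFmpeg/x264 H.264 encoder: bitrate clamped to the
+ * user-defined maximum, low-delay flags, RTP slice size (H264_RTP_PAYLOAD_SIZE) and
+ * Baseline/Main profile selection. When built with HAVE_H264_PASSTHROUGH only, the
+ * frame counter is simply reset. */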
+int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self)
+{
+#if HAVE_FFMPEG
+ int ret;
+ tsk_size_t size;
+
+ if(self->encoder.context){
+ TSK_DEBUG_ERROR("Encoder already opened");
+ return -1;
+ }
+
+#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
+ if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){
+ avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec);
+ }
+#else
+ if((self->encoder.context = avcodec_alloc_context())){
+ avcodec_get_context_defaults(self->encoder.context);
+ }
+#endif
+
+ if(!self->encoder.context){
+ TSK_DEBUG_ERROR("Failed to allocate context");
+ return -1;
+ }
+
+#if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE);
+#endif
+
+ self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
+ self->encoder.context->time_base.num = 1;
+ self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
+ self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
+ self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
+ self->encoder.max_bw_kpbs = TSK_CLAMP(
+ 0,
+ tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
+ TMEDIA_CODEC(self)->bandwidth_max_upload
+ );
+ self->encoder.context->bit_rate = (self->encoder.max_bw_kpbs * 1024);// bps
+
+ self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3);
+ self->encoder.context->rc_max_rate = self->encoder.context->bit_rate;
+
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->rc_lookahead = 0;
+#endif
+ self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
+
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8;
+#endif
+ self->encoder.context->me_method = ME_UMH;
+ self->encoder.context->me_range = 16;
+ self->encoder.context->qmin = 10;
+ self->encoder.context->qmax = 51;
+#if LIBAVCODEC_VERSION_MAJOR <= 53
+ self->encoder.context->mb_qmin = self->encoder.context->qmin;
+ self->encoder.context->mb_qmax = self->encoder.context->qmax;
+#endif
+ /* METROPOLIS = G2J.COM TelePresence client. Check Issue 378: No video when calling "TANDBERG/4129 (X8.1.1)" */
+#if !METROPOLIS && 0
+ self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER;
+#endif
+ self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY;
+ if (self->encoder.context->profile == FF_PROFILE_H264_BASELINE) {
+ self->encoder.context->max_b_frames = 0;
+ }
+
+ switch(TDAV_CODEC_H264_COMMON(self)->profile){
+ case profile_idc_baseline:
+ default:
+ self->encoder.context->profile = FF_PROFILE_H264_BASELINE;
+ self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
+ break;
+ case profile_idc_main:
+ self->encoder.context->profile = FF_PROFILE_H264_MAIN;
+ self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
+ break;
+ }
+
+ /* Comment from libavcodec/libx264.c:
+ * Allow x264 to be instructed through AVCodecContext about the maximum
+ * size of the RTP payload. For example, this enables the production of
+ * payload suitable for the H.264 RTP packetization-mode 0 i.e. single
+ * NAL unit per RTP packet.
+ */
+ self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE;
+ self->encoder.context->opaque = tsk_null;
+ self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS);
+
+#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
+ if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE);
+ }
+ if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? "baseline" : "main"), 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 profile");
+ }
+ if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast");
+ }
+ if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0");
+ }
+ if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){
+ TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency");
+ }
+#endif
+
+ // Picture (YUV 420)
+ if(!(self->encoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create encoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(self->encoder.picture);
+
+
+ size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
+ if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
+ TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
+ return -2;
+ }
+
+ // Open encoder
+ if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
+ return ret;
+ }
+
+ self->encoder.frame_count = 0;
+
+ TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate);
+
+ return ret;
+#elif HAVE_H264_PASSTHROUGH
+ self->encoder.frame_count = 0;
+ return 0;
+#endif
+
+ TSK_DEBUG_ERROR("Unexpected code path");
+ return -1;
+}
+
+int tdav_codec_h264_close_encoder(tdav_codec_h264_t* self, tsk_bool_t reset_rotation)
+{
+#if HAVE_FFMPEG
+ if(self->encoder.context){
+ avcodec_close(self->encoder.context);
+ av_free(self->encoder.context);
+ self->encoder.context = tsk_null;
+ }
+ if(self->encoder.picture){
+ av_free(self->encoder.picture);
+ self->encoder.picture = tsk_null;
+ }
+#endif
+ if(self->encoder.buffer){
+ TSK_FREE(self->encoder.buffer);
+ }
+ self->encoder.frame_count = 0;
+ if (reset_rotation) {
+ self->encoder.rotation = 0; // reset rotation
+ }
+
+ return 0;
+}
+
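+/* Allocates the FFmpeg H.264 decoder context (CODEC_FLAG2_FAST, dimensions taken from
+ * the negotiated "in" size) and the reusable output frame. */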
+int tdav_codec_h264_open_decoder(tdav_codec_h264_t* self)
+{
+#if HAVE_FFMPEG
+ int ret;
+
+ if(self->decoder.context){
+ TSK_DEBUG_ERROR("Decoder already opened");
+ return -1;
+ }
+
+ self->decoder.context = avcodec_alloc_context();
+ avcodec_get_context_defaults(self->decoder.context);
+
+ self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
+ self->decoder.context->flags2 |= CODEC_FLAG2_FAST;
+ self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->in.width;
+ self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->in.height;
+
+ // Picture (YUV 420)
+ if(!(self->decoder.picture = avcodec_alloc_frame())){
+ TSK_DEBUG_ERROR("Failed to create decoder picture");
+ return -2;
+ }
+ avcodec_get_frame_defaults(self->decoder.picture);
+
+ // Open decoder
+ if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin-