path: root/tinyDAV/src/audio
Diffstat (limited to 'tinyDAV/src/audio')
-rw-r--r--  tinyDAV/src/audio/alsa/tdav_common_alsa.c               275
-rw-r--r--  tinyDAV/src/audio/alsa/tdav_consumer_alsa.c             288
-rw-r--r--  tinyDAV/src/audio/alsa/tdav_producer_alsa.c             261
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_audiounit.c            425
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c  268
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c   447
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c  253
-rw-r--r--  tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c   422
-rw-r--r--  tinyDAV/src/audio/directsound/tdav_consumer_dsound.c    458
-rw-r--r--  tinyDAV/src/audio/directsound/tdav_producer_dsound.c    402
-rw-r--r--  tinyDAV/src/audio/oss/tdav_consumer_oss.c               397
-rw-r--r--  tinyDAV/src/audio/oss/tdav_producer_oss.c               369
-rw-r--r--  tinyDAV/src/audio/tdav_consumer_audio.c                 272
-rw-r--r--  tinyDAV/src/audio/tdav_jitterbuffer.c                   1036
-rw-r--r--  tinyDAV/src/audio/tdav_producer_audio.c                 133
-rw-r--r--  tinyDAV/src/audio/tdav_session_audio.c                  991
-rw-r--r--  tinyDAV/src/audio/tdav_speakup_jitterbuffer.c           281
-rw-r--r--  tinyDAV/src/audio/tdav_speex_denoise.c                  312
-rw-r--r--  tinyDAV/src/audio/tdav_speex_jitterbuffer.c             319
-rw-r--r--  tinyDAV/src/audio/tdav_speex_resampler.c                254
-rw-r--r--  tinyDAV/src/audio/tdav_webrtc_denoise.c                 627
-rw-r--r--  tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx       676
-rw-r--r--  tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx       681
-rw-r--r--  tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c       402
-rw-r--r--  tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c       393
25 files changed, 10642 insertions, 0 deletions
diff --git a/tinyDAV/src/audio/alsa/tdav_common_alsa.c b/tinyDAV/src/audio/alsa/tdav_common_alsa.c
new file mode 100644
index 0000000..d1deec8
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_common_alsa.c
@@ -0,0 +1,275 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Common] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Common] " FMT, ##__VA_ARGS__)
+
+#define ALSA_PLAYBACK_PERIODS 6
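+// Number of periods (fragments) requested for the ALSA playback buffer; the total
+// playback buffer is period_size * ALSA_PLAYBACK_PERIODS (see tdav_common_alsa_prepare()).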
+
+int tdav_common_alsa_init(tdav_common_alsa_t* p_self)
+{
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (p_self->b_initialized) {
+ ALSA_DEBUG_WARN("Already initialized");
+ return 0;
+ }
+ tsk_safeobj_init(p_self);
+ p_self->b_initialized = tsk_true;
+ return 0;
+}
+
+int tdav_common_alsa_lock(tdav_common_alsa_t* p_self)
+{
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ return tsk_safeobj_lock(p_self);
+}
+
+int tdav_common_alsa_unlock(tdav_common_alsa_t* p_self)
+{
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ return tsk_safeobj_unlock(p_self);
+}
+
+int tdav_common_alsa_prepare(tdav_common_alsa_t* p_self, tsk_bool_t is_capture, int ptime, int channels, int sample_rate)
+{
+ int err = 0, val;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_prepared) {
+ ALSA_DEBUG_WARN("Already prepared");
+ goto bail;
+ }
+ if (!p_self->p_device_name) {
+ p_self->p_device_name = strdup("default");
+ }
+ p_self->b_capture = is_capture;
+
+ if ((err = snd_pcm_open(&p_self->p_handle, p_self->p_device_name, is_capture ? SND_PCM_STREAM_CAPTURE : SND_PCM_STREAM_PLAYBACK, /*SND_PCM_NONBLOCK | SND_PCM_ASYNC*/0)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to open audio device %s (%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("device('%s') opened", p_self->p_device_name);
+
+ if ((err = snd_pcm_hw_params_malloc(&p_self->p_params)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to allocate hardware parameter structure(%s)", snd_strerror(err));
+ goto bail;
+ }
+
+ if ((err = snd_pcm_hw_params_any(p_self->p_handle, p_self->p_params)) < 0) {
+ ALSA_DEBUG_ERROR("Failed to initialize hardware parameter structure (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ if ((err = snd_pcm_hw_params_set_access(p_self->p_handle, p_self->p_params, SND_PCM_ACCESS_RW_INTERLEAVED)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set access type (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ if ((err = snd_pcm_hw_params_set_format(p_self->p_handle, p_self->p_params, SND_PCM_FORMAT_S16_LE)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set sample format (device=%s, err=%s)", p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ val = sample_rate;
+ if ((err = snd_pcm_hw_params_set_rate_near(p_self->p_handle, p_self->p_params, &val, 0)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set sample rate (rate=%d, device=%s, err=%s)", p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("sample_rate: req=%d, resp=%d", sample_rate, val);
+ p_self->sample_rate = val;
+
+ val = channels;
+ if ((err = snd_pcm_hw_params_set_channels_near(p_self->p_handle, p_self->p_params, &val)) != 0) {
+ ALSA_DEBUG_ERROR("Failed to set channels (channels=%d, device=%s, err=%s)", p_self->channels, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("channels: req=%d, resp=%d", channels, val);
+ p_self->channels = val;
+
+ if (!is_capture) {
+ unsigned int periods = ALSA_PLAYBACK_PERIODS;
+ snd_pcm_uframes_t periodSize = (ptime * p_self->sample_rate * p_self->channels) / 1000;
+ if ((err = snd_pcm_hw_params_set_periods_near(p_self->p_handle, p_self->p_params, &periods, 0)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to set periods (val=%u, device=%s, err=%s)", periods, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ snd_pcm_uframes_t bufferSize = (periodSize * periods);
+ if ((err = snd_pcm_hw_params_set_buffer_size(p_self->p_handle, p_self->p_params, bufferSize)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to set buffer size (val=%lu, device=%s, err=%s)", bufferSize, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ ALSA_DEBUG_INFO("periods=%u, buffersize=%lu", periods, bufferSize);
+ }
+
+ if ((err = snd_pcm_hw_params (p_self->p_handle, p_self->p_params)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to set parameters (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+ if ((err = snd_pcm_prepare(p_self->p_handle)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to prepare device (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ /*if (is_capture)*/ {
+ p_self->n_buff_size_in_bytes = (ptime * p_self->sample_rate * (2/*SND_PCM_FORMAT_S16_LE*/ * p_self->channels)) / 1000;
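+ // e.g. ptime=20ms, rate=8000Hz, 1 channel: (20 * 8000 * 2) / 1000 = 320 bytes (160 16-bit samples) per chunk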
+ if (!(p_self->p_buff_ptr = tsk_realloc(p_self->p_buff_ptr, p_self->n_buff_size_in_bytes))) {
+ ALSA_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_self->n_buff_size_in_bytes);
+ err = -4;
+ goto bail;
+ }
+ p_self->n_buff_size_in_samples = (p_self->n_buff_size_in_bytes >> 1/*SND_PCM_FORMAT_S16_LE*/);
+ ALSA_DEBUG_INFO("n_buff_size_in_bytes=%u", p_self->n_buff_size_in_bytes);
+ }
+
+ ALSA_DEBUG_INFO("device('%s') prepared", p_self->p_device_name);
+
+ // everything is OK
+ p_self->b_prepared = tsk_true;
+bail:
+ if (err) {
+ tdav_common_alsa_unprepare(p_self);
+ }
+ tdav_common_alsa_unlock(p_self);
+ return err;
+
+}
+
+int tdav_common_alsa_unprepare(tdav_common_alsa_t* p_self)
+{
+ int err = 0;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_started) {
+ ALSA_DEBUG_ERROR("Must stop the capture device before unpreparing");
+ err = -2;
+ goto bail;
+ }
+
+ if (p_self->p_params) {
+ snd_pcm_hw_params_free(p_self->p_params);
+ p_self->p_params = tsk_null;
+ }
+ if (p_self->p_handle) {
+ snd_pcm_close(p_self->p_handle);
+ p_self->p_handle = tsk_null;
+ }
+ p_self->b_prepared = tsk_false;
+
+ ALSA_DEBUG_INFO("device('%s') unprepared", p_self->p_device_name);
+
+bail:
+ tdav_common_alsa_unlock(p_self);
+ return err;
+}
+
+int tdav_common_alsa_start(tdav_common_alsa_t* p_self)
+{
+ int err = 0;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_started) {
+ ALSA_DEBUG_WARN("Already started");
+ err = -3;
+ goto bail;
+ }
+ if (!p_self->b_prepared) {
+ ALSA_DEBUG_ERROR("Not prepared");
+ err = -2;
+ goto bail;
+ }
+
+ if ((err = snd_pcm_start(p_self->p_handle)) != 0) {
+ ALSA_DEBUG_ERROR ("Failed to start device (channels=%d, rate=%d, device=%s, err=%s)", p_self->channels, p_self->sample_rate, p_self->p_device_name, snd_strerror(err));
+ goto bail;
+ }
+
+ p_self->b_started = tsk_true;
+ ALSA_DEBUG_INFO("device('%s') started", p_self->p_device_name);
+bail:
+ tdav_common_alsa_unlock(p_self);
+ return err;
+}
+
+int tdav_common_alsa_stop(tdav_common_alsa_t* p_self)
+{
+ int err = 0;
+ if (!p_self) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(p_self);
+
+ if (p_self->b_started) {
+ p_self->b_started = tsk_false;
+ //err = snd_pcm_drain(p_self->p_handle);
+ ALSA_DEBUG_INFO("device('%s') stopped", p_self->p_device_name);
+ }
+ if (p_self->b_prepared) {
+ tdav_common_alsa_unprepare(p_self);
+ }
+bail:
+ tdav_common_alsa_unlock(p_self);
+ return err;
+}
+
+int tdav_common_alsa_deinit(tdav_common_alsa_t* p_self)
+{
+ if (p_self && p_self->b_initialized) {
+ tdav_common_alsa_stop(p_self);
+ tdav_common_alsa_unprepare(p_self);
+ TSK_FREE(p_self->p_device_name);
+ TSK_FREE(p_self->p_buff_ptr);
+ tsk_safeobj_deinit(p_self);
+ p_self->b_initialized = tsk_false;
+ }
+ return 0;
+}
+
+#endif /* HAVE_ALSA_ASOUNDLIB_H */
+
diff --git a/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c b/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c
new file mode 100644
index 0000000..65bfcd8
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_consumer_alsa.c
@@ -0,0 +1,288 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_consumer_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Consumer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_consumer_alsa_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ tsk_bool_t b_muted;
+ tsk_bool_t b_started;
+ tsk_bool_t b_paused;
+
+ tsk_thread_handle_t* tid[1];
+
+ struct tdav_common_alsa_s alsa_common;
+}
+tdav_consumer_alsa_t;
+
+static void* TSK_STDCALL _tdav_consumer_alsa_playback_thread(void *param)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)param;
+ int err;
+
+ ALSA_DEBUG_INFO("__playback_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_alsa->b_started) {
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+ //snd_pcm_wait(p_alsa->alsa_common.p_handle, 20);
+ //ALSA_DEBUG_INFO ("get (%d)", p_alsa->alsa_common.n_buff_size_in_bytes);
+ err = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(p_alsa), p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_bytes); // requires 16bits, thread-safe
+ //ALSA_DEBUG_INFO ("get returned %d", err);
+ if (err < p_alsa->alsa_common.n_buff_size_in_bytes) {
+ memset(((uint8_t*)p_alsa->alsa_common.p_buff_ptr) + err, 0, (p_alsa->alsa_common.n_buff_size_in_bytes - err));
+
+ }
+ if ((err = snd_pcm_writei(p_alsa->alsa_common.p_handle, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_samples)) != p_alsa->alsa_common.n_buff_size_in_samples) {
+ if (err == -EPIPE) { // broken pipe (playback underrun)
+ err = snd_pcm_recover(p_alsa->alsa_common.p_handle, err, 0);
+ if (err == 0) {
+ ALSA_DEBUG_INFO ("recovered");
+ goto next;
+ }
+ }
+ ALSA_DEBUG_ERROR ("Failed to read data from audio interface failed (%d->%s)", err, snd_strerror(err));
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ goto bail;
+ }
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(p_alsa));
+next:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ }
+bail:
+ ALSA_DEBUG_INFO("__playback_thread -- STOP");
+ return tsk_null;
+}
+
+
+/* ============ Media Consumer Interface ================= */
+static int tdav_consumer_alsa_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int ret = 0;
+
+ ret = tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+
+ return ret;
+}
+
+static int tdav_consumer_alsa_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int err = 0;
+ ALSA_DEBUG_INFO("******* tdav_consumer_alsa_prepare ******");
+
+ if (!p_alsa || !codec || !codec->plugin) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ // Set using requested
+ TMEDIA_CONSUMER(p_alsa)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_alsa)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_alsa)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ // Prepare
+ err = tdav_common_alsa_prepare(&p_alsa->alsa_common, tsk_false/*is_record*/, TMEDIA_CONSUMER( p_alsa)->audio.ptime, TMEDIA_CONSUMER( p_alsa)->audio.in.channels, TMEDIA_CONSUMER( p_alsa)->audio.in.rate);
+ if (err) {
+ goto bail;
+ }
+
+ ALSA_DEBUG_INFO("prepared: req_channels=%d; req_rate=%d, resp_channels=%d; resp_rate=%d",
+ TMEDIA_CONSUMER(p_alsa)->audio.in.channels, TMEDIA_CONSUMER(p_alsa)->audio.in.rate,
+ p_alsa->alsa_common.channels, p_alsa->alsa_common.sample_rate);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_CONSUMER(p_alsa)->audio.out.channels = p_alsa->alsa_common.channels;
+ TMEDIA_CONSUMER(p_alsa)->audio.out.rate = p_alsa->alsa_common.sample_rate;
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_consumer_alsa_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int err = 0;
+
+ ALSA_DEBUG_INFO("******* tdav_consumer_alsa_start ******");
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ if (p_alsa->b_started) {
+ ALSA_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start device */
+ err = tdav_common_alsa_start(&p_alsa->alsa_common);
+ if (err) {
+ goto bail;
+ }
+
+ /* start thread */
+ p_alsa->b_started = tsk_true;
+ tsk_thread_create(&p_alsa->tid[0], _tdav_consumer_alsa_playback_thread, p_alsa);
+
+ ALSA_DEBUG_INFO("started");
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_consumer_alsa_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ int err = 0;
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+
+ if (!p_alsa || !buffer || !size) {
+ ALSA_DEBUG_ERROR("Invalid paramter");
+ return -1;
+ }
+
+ //tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ if (!p_alsa->b_started) {
+ ALSA_DEBUG_WARN("Not started");
+ err = -2;
+ goto bail;
+ }
+
+ if ((err = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(p_alsa), buffer, size, proto_hdr))) {//thread-safe
+ ALSA_DEBUG_WARN("Failed to put audio data to the jitter buffer");
+ goto bail;
+ }
+
+bail:
+ //tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_consumer_alsa_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+static int tdav_consumer_alsa_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_alsa_t* p_alsa = (tdav_consumer_alsa_t*)self;
+ int err;
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* should be done here */
+ p_alsa->b_started = tsk_false;
+
+ err = tdav_common_alsa_stop(&p_alsa->alsa_common);
+
+ /* stop thread */
+ if (p_alsa->tid[0]) {
+ tsk_thread_join(&(p_alsa->tid[0]));
+ }
+
+ ALSA_DEBUG_INFO("stopped");
+
+ return 0;
+}
+
+
+//
+// ALSA consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_alsa_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_alsa_t *p_alsa = self;
+ if (p_alsa) {
+ ALSA_DEBUG_INFO("create");
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(p_alsa));
+ /* init self */
+ tdav_common_alsa_init(&p_alsa->alsa_common);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_alsa_dtor(tsk_object_t * self)
+{
+ tdav_consumer_alsa_t *p_alsa = self;
+ if (p_alsa) {
+ /* stop */
+ if (p_alsa->b_started) {
+ tdav_consumer_alsa_stop((tmedia_consumer_t*)p_alsa);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(p_alsa));
+ /* deinit self */
+ tdav_common_alsa_deinit(&p_alsa->alsa_common);
+
+ ALSA_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_alsa_def_s =
+{
+ sizeof(tdav_consumer_alsa_t),
+ tdav_consumer_alsa_ctor,
+ tdav_consumer_alsa_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_alsa_plugin_def_s =
+{
+ &tdav_consumer_alsa_def_s,
+
+ tmedia_audio,
+ "Linux ALSA consumer",
+
+ tdav_consumer_alsa_set,
+ tdav_consumer_alsa_prepare,
+ tdav_consumer_alsa_start,
+ tdav_consumer_alsa_consume,
+ tdav_consumer_alsa_pause,
+ tdav_consumer_alsa_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_alsa_plugin_def_t = &tdav_consumer_alsa_plugin_def_s;
+
+#endif /* #if HAVE_ALSA_ASOUNDLIB_H */
diff --git a/tinyDAV/src/audio/alsa/tdav_producer_alsa.c b/tinyDAV/src/audio/alsa/tdav_producer_alsa.c
new file mode 100644
index 0000000..d5c4021
--- /dev/null
+++ b/tinyDAV/src/audio/alsa/tdav_producer_alsa.c
@@ -0,0 +1,261 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/alsa/tdav_producer_alsa.h"
+
+#if HAVE_ALSA_ASOUNDLIB_H
+
+#include "tinydav/audio/alsa/tdav_common_alsa.h"
+
+#define ALSA_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[ALSA Producer] " FMT, ##__VA_ARGS__)
+#define ALSA_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[ALSA Producer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_producer_alsa_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ tsk_bool_t b_muted;
+ tsk_bool_t b_started;
+ tsk_bool_t b_paused;
+
+ tsk_thread_handle_t* tid[1];
+
+ struct tdav_common_alsa_s alsa_common;
+}
+tdav_producer_alsa_t;
+
+static void* TSK_STDCALL _tdav_producer_alsa_record_thread(void *param)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)param;
+ int err;
+
+ ALSA_DEBUG_INFO("__record_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_alsa->b_started) {
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+ if ((err = snd_pcm_readi(p_alsa->alsa_common.p_handle, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_samples)) != p_alsa->alsa_common.n_buff_size_in_samples) {
+ ALSA_DEBUG_ERROR ("Failed to read data from audio interface failed (%d->%s)", err, snd_strerror(err));
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ goto bail;
+ }
+ if (!p_alsa->b_muted && TMEDIA_PRODUCER(p_alsa)->enc_cb.callback) {
+ TMEDIA_PRODUCER(p_alsa)->enc_cb.callback(TMEDIA_PRODUCER(p_alsa)->enc_cb.callback_data, p_alsa->alsa_common.p_buff_ptr, p_alsa->alsa_common.n_buff_size_in_bytes);
+ }
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ }
+bail:
+ ALSA_DEBUG_INFO("__record_thread -- STOP");
+ return tsk_null;
+}
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_alsa_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "mute")){
+ p_alsa->b_muted = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+static int tdav_producer_alsa_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ int err = 0;
+ ALSA_DEBUG_INFO("******* tdav_producer_alsa_prepare ******");
+
+ if (!p_alsa || !codec || !codec->plugin) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ // Set using requested
+ TMEDIA_PRODUCER( p_alsa)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER( p_alsa)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER( p_alsa)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+ // Prepare
+ err = tdav_common_alsa_prepare(&p_alsa->alsa_common, tsk_true/*is_capture*/, TMEDIA_PRODUCER( p_alsa)->audio.ptime, TMEDIA_PRODUCER( p_alsa)->audio.channels, TMEDIA_PRODUCER( p_alsa)->audio.rate);
+ if (err) {
+ goto bail;
+ }
+
+ ALSA_DEBUG_INFO("prepared: req_channels=%d; req_rate=%d, resp_channels=%d; resp_rate=%d",
+ TMEDIA_PRODUCER(p_alsa)->audio.channels, TMEDIA_PRODUCER(p_alsa)->audio.rate,
+ p_alsa->alsa_common.channels, p_alsa->alsa_common.sample_rate);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_PRODUCER(p_alsa)->audio.channels = p_alsa->alsa_common.channels;
+ TMEDIA_PRODUCER(p_alsa)->audio.rate = p_alsa->alsa_common.sample_rate;
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_producer_alsa_start(tmedia_producer_t* self)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ int err = 0;
+
+ ALSA_DEBUG_INFO("******* tdav_producer_alsa_start ******");
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tdav_common_alsa_lock(&p_alsa->alsa_common);
+
+ if (p_alsa->b_started) {
+ ALSA_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start device */
+ err = tdav_common_alsa_start(&p_alsa->alsa_common);
+ if (err) {
+ goto bail;
+ }
+
+ /* start thread */
+ p_alsa->b_started = tsk_true;
+ tsk_thread_create(&p_alsa->tid[0], _tdav_producer_alsa_record_thread, p_alsa);
+
+ ALSA_DEBUG_INFO("started");
+
+bail:
+ tdav_common_alsa_unlock(&p_alsa->alsa_common);
+ return err;
+}
+
+static int tdav_producer_alsa_pause(tmedia_producer_t* self)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ALSA_DEBUG_INFO("paused");
+
+ return 0;
+}
+
+static int tdav_producer_alsa_stop(tmedia_producer_t* self)
+{
+ tdav_producer_alsa_t* p_alsa = (tdav_producer_alsa_t*)self;
+ int err;
+
+ if (!p_alsa) {
+ ALSA_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* should be done here */
+ p_alsa->b_started = tsk_false;
+
+ err = tdav_common_alsa_stop(&p_alsa->alsa_common);
+
+ /* stop thread */
+ if (p_alsa->tid[0]) {
+ tsk_thread_join(&(p_alsa->tid[0]));
+ }
+
+ ALSA_DEBUG_INFO("stopped");
+
+ return 0;
+}
+
+
+//
+// ALSA producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_alsa_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_alsa_t *p_alsa = (tdav_producer_alsa_t*)self;
+ if (p_alsa) {
+ ALSA_DEBUG_INFO("create");
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(p_alsa));
+ /* init self */
+ tdav_common_alsa_init(&p_alsa->alsa_common);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_alsa_dtor(tsk_object_t * self)
+{
+ tdav_producer_alsa_t *p_alsa = (tdav_producer_alsa_t *)self;
+ if (p_alsa) {
+ /* stop */
+ if (p_alsa->b_started) {
+ tdav_producer_alsa_stop((tmedia_producer_t*)p_alsa);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(p_alsa));
+ /* deinit self */
+ tdav_common_alsa_deinit(&p_alsa->alsa_common);
+
+ ALSA_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_alsa_def_s =
+{
+ sizeof(tdav_producer_alsa_t),
+ tdav_producer_alsa_ctor,
+ tdav_producer_alsa_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_alsa_plugin_def_s =
+{
+ &tdav_producer_alsa_def_s,
+
+ tmedia_audio,
+ "Linux ALSA producer",
+
+ tdav_producer_alsa_set,
+ tdav_producer_alsa_prepare,
+ tdav_producer_alsa_start,
+ tdav_producer_alsa_pause,
+ tdav_producer_alsa_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_alsa_plugin_def_t = &tdav_producer_alsa_plugin_def_s;
+
+#endif /* #if HAVE_ALSA_ASOUNDLIB_H */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_audiounit.c
new file mode 100644
index 0000000..dc11f10
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_audiounit.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_audiounit.h"
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#include "tinydav/tdav_apple.h"
+
+#include "tsk_string.h"
+#include "tsk_list.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#if TARGET_OS_IPHONE
+static UInt32 kOne = 1;
+static UInt32 kZero = 0;
+#endif /* TARGET_OS_IPHONE */
+
+#if TARGET_OS_IPHONE
+ #if TARGET_IPHONE_SIMULATOR // VoiceProcessingIO gives unexpected results on the simulator when using iOS 5
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_RemoteIO
+ #else // Echo cancellation, AGC, ...
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_VoiceProcessingIO
+ #endif
+#elif TARGET_OS_MAC
+ #define kDoubangoAudioUnitSubType kAudioUnitSubType_HALOutput
+#else
+ #error "Unknown target"
+#endif
+
+#undef kInputBus
+#define kInputBus 1
+#undef kOutputBus
+#define kOutputBus 0
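+// AudioUnit I/O element convention: bus 1 is the input (microphone) side, bus 0 is the output (speaker) side.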
+
+typedef struct tdav_audiounit_instance_s
+{
+ TSK_DECLARE_OBJECT;
+ uint64_t session_id;
+ uint32_t frame_duration;
+ AudioComponentInstance audioUnit;
+ struct{
+ unsigned consumer:1;
+ unsigned producer:1;
+ } prepared;
+ unsigned started:1;
+ unsigned interrupted:1;
+
+ TSK_DECLARE_SAFEOBJ;
+
+}
+tdav_audiounit_instance_t;
+TINYDAV_GEXTERN const tsk_object_def_t *tdav_audiounit_instance_def_t;
+typedef tsk_list_t tdav_audiounit_instances_L_t;
+
+
+static AudioComponent __audioSystem = tsk_null;
+static tdav_audiounit_instances_L_t* __audioUnitInstances = tsk_null;
+
+static int _tdav_audiounit_handle_signal_xxx_prepared(tdav_audiounit_handle_t* self, tsk_bool_t consumer)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+
+ if(consumer){
+ inst->prepared.consumer = tsk_true;
+ }
+ else {
+ inst->prepared.producer = tsk_true;
+ }
+
+ OSStatus status;
+
+ // On iOS we use a single full-duplex AudioUnit and wait for both the consumer and the producer to be prepared
+#if TARGET_OS_IPHONE
+ if(inst->prepared.consumer && inst->prepared.producer)
+#endif
+ {
+ status = AudioUnitInitialize(inst->audioUnit);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitInitialize failed with status =%ld", (signed long)status);
+ tsk_safeobj_unlock(inst);
+ return -2;
+ }
+ }
+
+ tsk_safeobj_unlock(inst);
+ return 0;
+}
+
+tdav_audiounit_handle_t* tdav_audiounit_handle_create(uint64_t session_id)
+{
+ tdav_audiounit_instance_t* inst = tsk_null;
+
+ // create audio unit component
+ if(!__audioSystem){
+ AudioComponentDescription audioDescription;
+ audioDescription.componentType = kAudioUnitType_Output;
+ audioDescription.componentSubType = kDoubangoAudioUnitSubType;
+ audioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
+ audioDescription.componentFlags = 0;
+ audioDescription.componentFlagsMask = 0;
+ if((__audioSystem = AudioComponentFindNext(NULL, &audioDescription))){
+ // leave blank
+ }
+ else {
+ TSK_DEBUG_ERROR("Failed to find new audio component");
+ goto done;
+ }
+
+ }
+ // create list used to hold instances
+ if(!__audioUnitInstances && !(__audioUnitInstances = tsk_list_create())){
+ TSK_DEBUG_ERROR("Failed to create new list");
+ goto done;
+ }
+
+ //= lock the list
+ tsk_list_lock(__audioUnitInstances);
+
+ // On iOS we use a single full-duplex AudioUnit; to keep it unique for both
+ // the consumer and the producer, instances are keyed by session id.
+#if TARGET_OS_IPHONE
+ // find the instance from the list
+ const tsk_list_item_t* item;
+ tsk_list_foreach(item,__audioUnitInstances){
+ if(((tdav_audiounit_instance_t*)item->data)->session_id == session_id){
+ inst = tsk_object_ref(item->data);
+ goto done;
+ }
+ }
+#endif
+
+ // create instance object and put it into the list
+ if((inst = tsk_object_new(tdav_audiounit_instance_def_t))){
+ OSStatus status = noErr;
+ tdav_audiounit_instance_t* _inst;
+
+ // create new instance
+ if((status= AudioComponentInstanceNew(__audioSystem, &inst->audioUnit)) != noErr){
+ TSK_DEBUG_ERROR("AudioComponentInstanceNew() failed with status=%ld", (signed long)status);
+ TSK_OBJECT_SAFE_FREE(inst);
+ goto done;
+ }
+ _inst = inst, _inst->session_id = session_id;
+ tsk_list_push_back_data(__audioUnitInstances, (void**)&_inst);
+ }
+
+done:
+ //= unlock the list
+ tsk_list_unlock(__audioUnitInstances);
+ return (tdav_audiounit_handle_t*)inst;
+}
+
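+// Typical usage of the handle API defined in this file (a sketch, not prescriptive):
+//   tdav_audiounit_handle_t* h = tdav_audiounit_handle_create(session_id);
+//   AudioComponentInstance au = tdav_audiounit_handle_get_instance(h);
+//   /* ... AudioUnitSetProperty(au, ...) by the consumer/producer ... */
+//   tdav_audiounit_handle_configure(h, is_consumer, ptime, &audioFormat);
+//   tdav_audiounit_handle_signal_consumer_prepared(h); // and/or ..._producer_prepared(h)
+//   tdav_audiounit_handle_start(h);
+//   /* ... */
+//   tdav_audiounit_handle_stop(h);
+//   tdav_audiounit_handle_destroy(&h);
+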
+AudioComponentInstance tdav_audiounit_handle_get_instance(tdav_audiounit_handle_t* self)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+ return ((tdav_audiounit_instance_t*)self)->audioUnit;
+}
+
+int tdav_audiounit_handle_signal_consumer_prepared(tdav_audiounit_handle_t* self)
+{
+ return _tdav_audiounit_handle_signal_xxx_prepared(self, tsk_true);
+}
+
+int tdav_audiounit_handle_signal_producer_prepared(tdav_audiounit_handle_t* self)
+{
+ return _tdav_audiounit_handle_signal_xxx_prepared(self, tsk_false);
+}
+
+int tdav_audiounit_handle_start(tdav_audiounit_handle_t* self)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ OSStatus status = noErr;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+ status = (OSStatus)tdav_apple_enable_audio();
+ if (status == noErr) {
+ if ((!inst->started || inst->interrupted) && (status = AudioOutputUnitStart(inst->audioUnit))) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStart failed with status=%ld", (signed long)status);
+ }
+ }
+ else {
+ TSK_DEBUG_ERROR("tdav_apple_enable_audio() failed with status=%ld", (signed long)status);
+ }
+ inst->started = (status == noErr) ? tsk_true : tsk_false;
+ if (inst->started) inst->interrupted = 0;
+ tsk_safeobj_unlock(inst);
+ return status ? -2 : 0;
+}
+
+uint32_t tdav_audiounit_handle_get_frame_duration(tdav_audiounit_handle_t* self)
+{
+ if(self){
+ return ((tdav_audiounit_instance_t*)self)->frame_duration;
+ }
+ return 0;
+}
+
+int tdav_audiounit_handle_configure(tdav_audiounit_handle_t* self, tsk_bool_t consumer, uint32_t ptime, AudioStreamBasicDescription* audioFormat)
+{
+ OSStatus status = noErr;
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+
+ if(!inst || !audioFormat){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+#if TARGET_OS_IPHONE
+ // set preferred buffer size
+ Float32 preferredBufferSize = ((Float32)ptime / 1000.f); // in seconds
+ UInt32 size = sizeof(preferredBufferSize);
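+ // e.g. ptime=20 -> request 0.02 s; the duration actually granted is read back below and stored as frame_duration (in ms)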
+ status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration) failed with status=%d", (int)status);
+ TSK_OBJECT_SAFE_FREE(inst);
+ goto done;
+ }
+ status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &preferredBufferSize);
+ if(status == noErr){
+ inst->frame_duration = (preferredBufferSize * 1000);
+ TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
+ }
+ else {
+ TSK_DEBUG_ERROR("AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, %f) failed", preferredBufferSize);
+ }
+
+
+ UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
+ status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_AudioCategory) failed with status code=%d", (int)status);
+ goto done;
+ }
+
+#elif TARGET_OS_MAC
+#if 1
+ // set preferred buffer size
+ UInt32 preferredBufferSize = ((ptime * audioFormat->mSampleRate)/1000); // in frames
+ UInt32 size = sizeof(preferredBufferSize);
+ status = AudioUnitSetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, size);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ }
+ status = AudioUnitGetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, &size);
+ if(status == noErr){
+ inst->frame_duration = ((preferredBufferSize * 1000)/audioFormat->mSampleRate);
+ TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
+ }
+ else {
+ TSK_DEBUG_ERROR("AudioUnitGetProperty(kAudioDevicePropertyBufferFrameSize, %lu) failed", (unsigned long)preferredBufferSize);
+ }
+#endif
+
+#endif
+
+done:
+ return (status == noErr) ? 0 : -2;
+}
+
+int tdav_audiounit_handle_mute(tdav_audiounit_handle_t* self, tsk_bool_t mute)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if(!inst || !inst->audioUnit){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+#if TARGET_OS_IPHONE
+ OSStatus status = noErr;
+ status = AudioUnitSetProperty(inst->audioUnit, kAUVoiceIOProperty_MuteOutput,
+ kAudioUnitScope_Output, kOutputBus, mute ? &kOne : &kZero, mute ? sizeof(kOne) : sizeof(kZero));
+
+ return (status == noErr) ? 0 : -2;
+#else
+ return 0;
+#endif
+}
+
+int tdav_audiounit_handle_interrupt(tdav_audiounit_handle_t* self, tsk_bool_t interrupt)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ if (!inst){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ OSStatus status = noErr;
+ if (inst->interrupted != interrupt && inst->started) {
+ if (interrupt) {
+ status = AudioOutputUnitStop(inst->audioUnit);
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStop failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+ }
+ else {
+#if TARGET_OS_IPHONE
+ status = (OSStatus)tdav_apple_enable_audio();
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioSessionSetActive failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+#endif
+ status = AudioOutputUnitStart(inst->audioUnit);
+ if (status != noErr) {
+ TSK_DEBUG_ERROR("AudioOutputUnitStart failed with status=%ld", (signed long)status);
+ goto bail;
+ }
+ }
+ }
+ inst->interrupted = interrupt ? 1: 0;
+bail:
+ return (status != noErr) ? -2 : 0;
+}
+
+int tdav_audiounit_handle_stop(tdav_audiounit_handle_t* self)
+{
+ tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
+ OSStatus status = noErr;
+ if(!inst || (inst->started && !inst->audioUnit)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(inst);
+ if(inst->started && (status = AudioOutputUnitStop(inst->audioUnit))){
+ TSK_DEBUG_ERROR("AudioOutputUnitStop failed with status=%ld", (signed long)status);
+ }
+ inst->started = (status == noErr ? tsk_false : tsk_true);
+ tsk_safeobj_unlock(inst);
+ return (status != noErr) ? -2 : 0;
+}
+
+int tdav_audiounit_handle_destroy(tdav_audiounit_handle_t** self){
+ if(!self || !*self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ tsk_list_lock(__audioUnitInstances);
+ if(tsk_object_get_refcount(*self)==1){
+ tsk_list_remove_item_by_data(__audioUnitInstances, *self);
+ }
+ else {
+ tsk_object_unref(*self);
+ }
+ tsk_list_unlock(__audioUnitInstances);
+ *self = tsk_null;
+ return 0;
+}
+
+//
+// Object definition for an AudioUnit instance
+//
+static tsk_object_t* tdav_audiounit_instance_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_audiounit_instance_t* inst = self;
+ if(inst){
+ tsk_safeobj_init(inst);
+ }
+ return self;
+}
+static tsk_object_t* tdav_audiounit_instance_dtor(tsk_object_t * self)
+{
+ tdav_audiounit_instance_t* inst = self;
+ if(inst){
+ tsk_safeobj_lock(inst);
+ if(inst->audioUnit){
+ AudioUnitUninitialize(inst->audioUnit);
+ AudioComponentInstanceDispose(inst->audioUnit);
+ inst->audioUnit = tsk_null;
+ }
+ tsk_safeobj_unlock(inst);
+
+ tsk_safeobj_deinit(inst);
+ TSK_DEBUG_INFO("*** AudioUnit Instance destroyed ***");
+ }
+ return self;
+}
+static int tdav_audiounit_instance_cmp(const tsk_object_t *_ai1, const tsk_object_t *_ai2)
+{
+ return (int)(_ai1 - _ai2);
+}
+static const tsk_object_def_t tdav_audiounit_instance_def_s =
+{
+ sizeof(tdav_audiounit_instance_t),
+ tdav_audiounit_instance_ctor,
+ tdav_audiounit_instance_dtor,
+ tdav_audiounit_instance_cmp,
+};
+const tsk_object_def_t *tdav_audiounit_instance_def_t = &tdav_audiounit_instance_def_s;
+
+
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c b/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c
new file mode 100644
index 0000000..2f5fd90
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_consumer_audioqueue.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_consumer_audioqueue.c
+ * @brief Audio Consumer for MacOSX and iOS platforms.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_consumer_audioqueue.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+#if HAVE_COREAUDIO_AUDIO_QUEUE
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+static void __handle_output_buffer(void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer) {
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)userdata;
+
+ if (!consumer->started) {
+ return;
+ }
+
+ if(!tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer), buffer->mAudioData, consumer->buffer_size)){
+ // Put silence
+ memset(buffer->mAudioData, 0, consumer->buffer_size);
+ }
+
+ // Re-enqueue the buffer
+ AudioQueueEnqueueBuffer(consumer->queue, buffer, 0, NULL);
+ // alert the jitter buffer
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(consumer));
+}
+
+/* ============ Media Consumer Interface ================= */
+#define tdav_consumer_audioqueue_set tsk_null
+
+int tdav_consumer_audioqueue_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+ /* codec should have ptime */
+
+ // Set audio category
+#if TARGET_OS_IPHONE
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+#endif
+
+ // Create the audio stream description
+ AudioStreamBasicDescription *description = &(consumer->description);
+ description->mSampleRate = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
+ int packetperbuffer = 1000 / TMEDIA_CONSUMER(consumer)->audio.ptime;
+ consumer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
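+ // e.g. with ptime=20ms, 8000 Hz mono 16-bit: 8000 * 2 / (1000/20) = 320 bytes per queue buffer (one ptime of audio)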
+
+ // Create the playback audio queue
+ ret = AudioQueueNewOutput(&(consumer->description),
+ __handle_output_buffer,
+ consumer,
+ NULL,
+ NULL,
+ 0,
+ &(consumer->queue));
+
+ for(i = 0; i < CoreAudioPlayBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(consumer->queue, consumer->buffer_size, &(consumer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(consumer->buffers[i]->mAudioData, 0, consumer->buffer_size);
+ consumer->buffers[i]->mAudioDataByteSize = consumer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(consumer->queue, consumer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int tdav_consumer_audioqueue_start(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->started){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ consumer->started = tsk_true;
+ ret = AudioQueueStart(consumer->queue, NULL);
+
+ return ret;
+}
+
+int tdav_consumer_audioqueue_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ // buffer is already decoded
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+}
+
+int tdav_consumer_audioqueue_pause(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ret = AudioQueuePause(consumer->queue);
+
+ return ret;
+}
+
+int tdav_consumer_audioqueue_stop(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!consumer->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+
+ consumer->started = tsk_false;
+ ret = AudioQueueStop(consumer->queue, false);
+
+ return ret;
+}
+
+//
+// coreaudio consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_audioqueue_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_audioqueue_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_audioqueue_dtor(tsk_object_t * self)
+{
+ tdav_consumer_audioqueue_t *consumer = self;
+ if(consumer){
+ // Stop the consumer if not done
+ if(consumer->started){
+ tdav_consumer_audioqueue_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (consumer->queue) {
+ tsk_size_t i;
+
+ for(i=0; i<CoreAudioPlayBuffers; i++){
+ AudioQueueFreeBuffer(consumer->queue, consumer->buffers[i]);
+ }
+
+ AudioQueueDispose(consumer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ }
+
+ return self;
+}
+
+/* object definition */
+static const tsk_object_def_t tdav_consumer_audioqueue_def_s =
+{
+ sizeof(tdav_consumer_audioqueue_t),
+ tdav_consumer_audioqueue_ctor,
+ tdav_consumer_audioqueue_dtor,
+ tdav_consumer_audio_cmp,
+};
+
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_audioqueue_plugin_def_s =
+{
+ &tdav_consumer_audioqueue_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio consumer(AudioQueue)",
+
+ tdav_consumer_audioqueue_set,
+ tdav_consumer_audioqueue_prepare,
+ tdav_consumer_audioqueue_start,
+ tdav_consumer_audioqueue_consume,
+ tdav_consumer_audioqueue_pause,
+ tdav_consumer_audioqueue_stop
+};
+
+const tmedia_consumer_plugin_def_t *tdav_consumer_audioqueue_plugin_def_t = &tdav_consumer_audioqueue_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_QUEUE */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c
new file mode 100644
index 0000000..947d782
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_consumer_audiounit.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_consumer_audiounit.h"
+
+// http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/Introduction/Introduction.html%23//apple_ref/doc/uid/TP40009492-CH1-SW1
+// Resampler: http://developer.apple.com/library/mac/#technotes/tn2097/_index.html
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#undef DISABLE_JITTER_BUFFER
+#define DISABLE_JITTER_BUFFER 0
+
+#include "tsk_debug.h"
+#include "tsk_memory.h"
+#include "tsk_string.h"
+
+#define kNoDataError -1
+#define kRingPacketCount +10
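+// Ring buffer capacity, expressed in ptime-sized chunks (see the ring setup in tdav_consumer_audiounit_prepare()).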
+
+static tsk_size_t tdav_consumer_audiounit_get(tdav_consumer_audiounit_t* self, void* data, tsk_size_t size);
+
+static OSStatus __handle_output_buffer(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ OSStatus status = noErr;
+ // tsk_size_t out_size;
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t* )inRefCon;
+
+ if(!consumer->started || consumer->paused){
+ goto done;
+ }
+
+ if(!ioData){
+ TSK_DEBUG_ERROR("Invalid argument");
+ status = kNoDataError;
+ goto done;
+ }
+ // read from jitter buffer and fill ioData buffers
+ tsk_mutex_lock(consumer->ring.mutex);
+ for(int i=0; i<ioData->mNumberBuffers; i++){
+ /* int ret = */ tdav_consumer_audiounit_get(consumer, ioData->mBuffers[i].mData, ioData->mBuffers[i].mDataByteSize);
+ }
+ tsk_mutex_unlock(consumer->ring.mutex);
+
+done:
+ return status;
+}
+
+static tsk_size_t tdav_consumer_audiounit_get(tdav_consumer_audiounit_t* self, void* data, tsk_size_t size)
+{
+ tsk_ssize_t retSize = 0;
+
+#if DISABLE_JITTER_BUFFER
+ retSize = speex_buffer_read(self->ring.buffer, data, size);
+ if(retSize < size){
+ memset(((uint8_t*)data)+retSize, 0, (size - retSize));
+ }
+#else
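+ // Pull whole ptime-sized chunks from the jitter buffer into the speex ring, then serve the
+ // render callback from the ring so arbitrary callback sizes can be handled.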
+ self->ring.leftBytes += size;
+ while (self->ring.leftBytes >= self->ring.chunck.size) {
+ self->ring.leftBytes -= self->ring.chunck.size;
+ retSize = (tsk_ssize_t)tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(self), self->ring.chunck.buffer, self->ring.chunck.size);
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(self));
+ speex_buffer_write(self->ring.buffer, self->ring.chunck.buffer, retSize);
+ }
+ // IMPORTANT: looks like there is a bug in speex: continuously trying to read more than avail
+ // many times can corrupt the buffer. At least on OS X 1.5
+ if(speex_buffer_get_available(self->ring.buffer) >= size){
+ retSize = (tsk_ssize_t)speex_buffer_read(self->ring.buffer, data, (int)size);
+ }
+ else{
+ memset(data, 0, size);
+ }
+#endif
+
+ return retSize;
+}
+
+/* ============ Media Consumer Interface ================= */
+int tdav_consumer_audiounit_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if (param->plugin_type == tmedia_ppt_consumer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "interrupt")) {
+ int32_t interrupt = *((uint8_t*)param->value) ? 1 : 0;
+ return tdav_audiounit_handle_interrupt(consumer->audioUnitHandle, interrupt);
+ }
+ }
+ }
+ return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+static int tdav_consumer_audiounit_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ static UInt32 flagOne = 1;
+ AudioStreamBasicDescription audioFormat;
+#define kOutputBus 0
+
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ OSStatus status = noErr;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!consumer->audioUnitHandle){
+ if(!(consumer->audioUnitHandle = tdav_audiounit_handle_create(TMEDIA_CONSUMER(consumer)->session_id))){
+ TSK_DEBUG_ERROR("Failed to get audio unit instance for session with id=%lld", TMEDIA_CONSUMER(consumer)->session_id);
+ return -3;
+ }
+ }
+
+ // enable
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ kOutputBus,
+ &flagOne,
+ sizeof(flagOne));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%d", (int32_t)status);
+ return -4;
+ }
+ else {
+
+#if !TARGET_OS_IPHONE // strange: TARGET_OS_MAC is equal to '1' on the Simulator
+ UInt32 param;
+
+ // disable input
+ param = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle), kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &param, sizeof(UInt32));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set default audio device
+ param = sizeof(AudioDeviceID);
+ AudioDeviceID outputDeviceID;
+ status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &param, &outputDeviceID);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set the current device to the default input unit
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &outputDeviceID,
+ sizeof(AudioDeviceID));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_CurrentDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+#endif
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ TSK_DEBUG_INFO("AudioUnit consumer: in.channels=%d, out.channles=%d, in.rate=%d, out.rate=%d, ptime=%d",
+ TMEDIA_CONSUMER(consumer)->audio.in.channels,
+ TMEDIA_CONSUMER(consumer)->audio.out.channels,
+ TMEDIA_CONSUMER(consumer)->audio.in.rate,
+ TMEDIA_CONSUMER(consumer)->audio.out.rate,
+ TMEDIA_CONSUMER(consumer)->audio.ptime);
+
+ audioFormat.mSampleRate = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ audioFormat.mFormatID = kAudioFormatLinearPCM;
+ audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ audioFormat.mChannelsPerFrame = TMEDIA_CONSUMER(consumer)->audio.in.channels;
+ audioFormat.mFramesPerPacket = 1;
+ audioFormat.mBitsPerChannel = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ audioFormat.mBytesPerPacket = audioFormat.mBitsPerChannel / 8 * audioFormat.mChannelsPerFrame;
+ audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket;
+ audioFormat.mReserved = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ kOutputBus,
+ &audioFormat,
+ sizeof(audioFormat));
+
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed with status=%ld", (signed long)status);
+ return -5;
+ }
+ else {
+ // configure
+ if(tdav_audiounit_handle_configure(consumer->audioUnitHandle, tsk_true, TMEDIA_CONSUMER(consumer)->audio.ptime, &audioFormat)){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_set_rate(%d) failed", TMEDIA_CONSUMER(consumer)->audio.out.rate);
+ return -4;
+ }
+
+ // set callback function
+ AURenderCallbackStruct callback;
+ callback.inputProc = __handle_output_buffer;
+ callback.inputProcRefCon = consumer;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ kOutputBus,
+ &callback,
+ sizeof(callback));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ return -6;
+ }
+ }
+ }
+
+ // allocate the chunck buffer and create the ring
+ consumer->ring.chunck.size = (TMEDIA_CONSUMER(consumer)->audio.ptime * audioFormat.mSampleRate * audioFormat.mBytesPerFrame) / 1000;
+ consumer->ring.size = kRingPacketCount * consumer->ring.chunck.size;
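+ // Illustrative sizing (assumed values, not taken from this code): with ptime = 20 ms,
+ // mSampleRate = 16000 and 16-bit mono (mBytesPerFrame = 2), one chunk is
+ // (20 * 16000 * 2) / 1000 = 640 bytes and the ring holds kRingPacketCount chunks
+ // (10, assuming the same value as in tdav_producer_audiounit.c), i.e. 6400 bytes (~200 ms).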
+ if(!(consumer->ring.chunck.buffer = tsk_realloc(consumer->ring.chunck.buffer, consumer->ring.chunck.size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return -7;
+ }
+ if(!consumer->ring.buffer){
+ consumer->ring.buffer = speex_buffer_init((int)consumer->ring.size);
+ }
+ else {
+ int ret;
+ if((ret = (int)speex_buffer_resize(consumer->ring.buffer, (int)consumer->ring.size)) < 0){
+ TSK_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", (int)consumer->ring.size, ret);
+ return ret;
+ }
+ }
+ if(!consumer->ring.buffer){
+ TSK_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", (int)consumer->ring.size);
+ return -8;
+ }
+ if(!consumer->ring.mutex && !(consumer->ring.mutex = tsk_mutex_create_2(tsk_false))){
+ TSK_DEBUG_ERROR("Failed to create mutex");
+ return -9;
+ }
+
+ // set maximum frames per slice as buffer size
+ //UInt32 numFrames = (UInt32)consumer->ring.chunck.size;
+ //status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(consumer->audioUnitHandle),
+ // kAudioUnitProperty_MaximumFramesPerSlice,
+ // kAudioUnitScope_Global,
+ // 0,
+ // &numFrames,
+ // sizeof(numFrames));
+ //if(status){
+ // TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_MaximumFramesPerSlice, %u) failed with status=%d", (unsigned)numFrames, (int32_t)status);
+ // return -6;
+ //}
+
+ TSK_DEBUG_INFO("AudioUnit consumer prepared");
+ return tdav_audiounit_handle_signal_consumer_prepared(consumer->audioUnitHandle);
+}
+
+static int tdav_consumer_audiounit_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(consumer->paused){
+ consumer->paused = tsk_false;
+ }
+ if(consumer->started){
+ TSK_DEBUG_WARN("Already started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_start(consumer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_start failed with error code=%d", ret);
+ return ret;
+ }
+ }
+ consumer->started = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit consumer started");
+ return 0;
+}
+
+static int tdav_consumer_audiounit_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if(!consumer || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+#if DISABLE_JITTER_BUFFER
+ {
+ if(consumer->ring.buffer){
+ tsk_mutex_lock(consumer->ring.mutex);
+ speex_buffer_write(consumer->ring.buffer, (void*)buffer, size);
+ tsk_mutex_unlock(consumer->ring.mutex);
+ return 0;
+ }
+ return -2;
+ }
+#else
+ {
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+ }
+#endif
+}
+
+static int tdav_consumer_audiounit_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ consumer->paused = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit consumer paused");
+ return 0;
+}
+
+static int tdav_consumer_audiounit_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_audiounit_t* consumer = (tdav_consumer_audiounit_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!consumer->started){
+ TSK_DEBUG_INFO("Not started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_stop(consumer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_stop failed with error code=%d", ret);
+ return ret;
+ }
+ }
+#if TARGET_OS_IPHONE
+ //https://devforums.apple.com/thread/118595
+ if(consumer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&consumer->audioUnitHandle);
+ }
+#endif
+
+ consumer->started = tsk_false;
+ TSK_DEBUG_INFO("AudioUnit consumer stoppped");
+ return 0;
+
+}
+
+//
+// CoreAudio consumer (AudioUnit) object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_audiounit_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_audiounit_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_audiounit_dtor(tsk_object_t * self)
+{
+ tdav_consumer_audiounit_t *consumer = self;
+ if(consumer){
+ /* deinit self */
+ // Stop the consumer if not done
+ if(consumer->started){
+ tdav_consumer_audiounit_stop(self);
+ }
+ // destroy handle
+ if(consumer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&consumer->audioUnitHandle);
+ }
+ TSK_FREE(consumer->ring.chunck.buffer);
+ if(consumer->ring.buffer){
+ speex_buffer_destroy(consumer->ring.buffer);
+ }
+ if(consumer->ring.mutex){
+ tsk_mutex_destroy(&consumer->ring.mutex);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ TSK_DEBUG_INFO("*** AudioUnit Consumer destroyed ***");
+ }
+
+ return self;
+}
+
+/* object definition */
+static const tsk_object_def_t tdav_consumer_audiounit_def_s =
+{
+ sizeof(tdav_consumer_audiounit_t),
+ tdav_consumer_audiounit_ctor,
+ tdav_consumer_audiounit_dtor,
+ tdav_consumer_audio_cmp,
+};
+
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_audiounit_plugin_def_s =
+{
+ &tdav_consumer_audiounit_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio consumer(AudioUnit)",
+
+ tdav_consumer_audiounit_set,
+ tdav_consumer_audiounit_prepare,
+ tdav_consumer_audiounit_start,
+ tdav_consumer_audiounit_consume,
+ tdav_consumer_audiounit_pause,
+ tdav_consumer_audiounit_stop
+};
+
+const tmedia_consumer_plugin_def_t *tdav_consumer_audiounit_plugin_def_t = &tdav_consumer_audiounit_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c b/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c
new file mode 100644
index 0000000..d96fd67
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_producer_audioqueue.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+
+/**@file tdav_producer_audioqueue.c
+ * @brief Audio Producer for MacOSX and iOS platforms using AudioQueue.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_producer_audioqueue.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+
+#if HAVE_COREAUDIO_AUDIO_QUEUE
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+static void __handle_input_buffer (void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer, const AudioTimeStamp *start_time, UInt32 number_packet_descriptions, const AudioStreamPacketDescription *packet_descriptions ) {
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)userdata;
+
+ if (!producer->started) {
+ return;
+ }
+
+ // Alert the session that there is new data to send
+ if(TMEDIA_PRODUCER(producer)->enc_cb.callback) {
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data, buffer->mAudioData, buffer->mAudioDataByteSize);
+ }
+
+ // Re-enqueue the buffer
+ AudioQueueEnqueueBuffer(producer->queue, buffer, 0, NULL);
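+ // The queue cycles through the fixed pool of CoreAudioRecordBuffers buffers allocated in
+ // prepare(): each time one fills with ~ptime milliseconds of samples, this callback hands
+ // it to the encoder callback and immediately re-enqueues it so capture never starves.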
+}
+
+/* ============ Media Producer Interface ================= */
+#define tdav_producer_audioqueue_set tsk_null
+
+static int tdav_producer_audioqueue_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+ /* codec should have ptime */
+
+
+ // Set audio category
+#if TARGET_OS_IPHONE
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+#endif
+ // Create the audio stream description
+ AudioStreamBasicDescription *description = &(producer->description);
+ description->mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
+ int packetperbuffer = 1000 / TMEDIA_PRODUCER(producer)->audio.ptime;
+ producer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
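+ // Worked example (assumed values): ptime = 20 ms gives packetperbuffer = 1000 / 20 = 50,
+ // so at 8000 Hz with 16-bit mono (mBytesPerFrame = 2) each queue buffer holds
+ // 8000 * 2 / 50 = 320 bytes, i.e. exactly one 20 ms frame.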
+
+ // Create the record audio queue
+ ret = AudioQueueNewInput(&(producer->description),
+ __handle_input_buffer,
+ producer,
+ NULL,
+ kCFRunLoopCommonModes,
+ 0,
+ &(producer->queue));
+ if (ret) {
+ TSK_DEBUG_ERROR("AudioQueueNewInput() failed with status=%d", (int)ret);
+ return -2;
+ }
+
+ for(i = 0; i < CoreAudioRecordBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(producer->queue, producer->buffer_size, &(producer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(producer->buffers[i]->mAudioData, 0, producer->buffer_size);
+ producer->buffers[i]->mAudioDataByteSize = producer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(producer->queue, producer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int tdav_producer_audioqueue_start(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->started){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ producer->started = tsk_true;
+ ret = AudioQueueStart(producer->queue, NULL);
+
+ return ret;
+}
+
+static int tdav_producer_audioqueue_pause(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ret = AudioQueuePause(producer->queue);
+
+ return ret;
+}
+
+static int tdav_producer_audioqueue_stop(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!producer->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ producer->started = tsk_false;
+ ret = AudioQueueStop(producer->queue, false);
+
+ return ret;
+}
+
+
+//
+// CoreAudio producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_audioqueue_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_audioqueue_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ // TODO
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_audioqueue_dtor(tsk_object_t * self)
+{
+ tdav_producer_audioqueue_t *producer = self;
+ if(producer){
+ // Stop the producer if not done
+ if(producer->started){
+ tdav_producer_audioqueue_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (producer->queue) {
+ tsk_size_t i;
+
+ for(i=0; i<CoreAudioRecordBuffers; i++){
+ AudioQueueFreeBuffer(producer->queue, producer->buffers[i]);
+ }
+ AudioQueueDispose(producer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_audioqueue_def_s =
+{
+ sizeof(tdav_producer_audioqueue_t),
+ tdav_producer_audioqueue_ctor,
+ tdav_producer_audioqueue_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_audioqueue_plugin_def_s =
+{
+ &tdav_producer_audioqueue_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio producer (AudioQueue)",
+
+ tdav_producer_audioqueue_set,
+ tdav_producer_audioqueue_prepare,
+ tdav_producer_audioqueue_start,
+ tdav_producer_audioqueue_pause,
+ tdav_producer_audioqueue_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_audioqueue_plugin_def_t = &tdav_producer_audioqueue_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO_AUDIO_QUEUE */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c b/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c
new file mode 100644
index 0000000..a88261e
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_producer_audiounit.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2010-2011 Mamadou Diop.
+ *
+ * Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * This file is part of Open Source Doubango Framework.
+ *
+ * DOUBANGO is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * DOUBANGO is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with DOUBANGO.
+ *
+ */
+#include "tinydav/audio/coreaudio/tdav_producer_audiounit.h"
+
+// http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/Introduction/Introduction.html%23//apple_ref/doc/uid/TP40009492-CH1-SW1
+
+#if HAVE_COREAUDIO_AUDIO_UNIT
+
+#include <mach/mach.h>
+#import <sys/sysctl.h>
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_thread.h"
+#include "tsk_debug.h"
+
+#define kRingPacketCount 10
+
+static OSStatus __handle_input_buffer(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ OSStatus status = noErr;
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)inRefCon;
+
+ // holder
+ AudioBuffer buffer;
+ buffer.mData = tsk_null;
+ buffer.mDataByteSize = 0;
+ buffer.mNumberChannels = TMEDIA_PRODUCER(producer)->audio.channels;
+
+ // list of holders
+ AudioBufferList buffers;
+ buffers.mNumberBuffers = 1;
+ buffers.mBuffers[0] = buffer;
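+ // mData is left as tsk_null on purpose: AudioUnitRender() below is expected to supply
+ // (and own) the backing sample buffer; this list only describes the channel layout.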
+
+ // render to get frames from the system
+ status = AudioUnitRender(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ ioActionFlags,
+ inTimeStamp,
+ inBusNumber,
+ inNumberFrames,
+ &buffers);
+ if(status == 0){
+ // must not be done on an async thread: doing so gives bad audio quality when an audio+video call uses a CPU-consuming codec (e.g. Speex or G.729)
+ speex_buffer_write(producer->ring.buffer, buffers.mBuffers[0].mData, buffers.mBuffers[0].mDataByteSize);
+ int avail = speex_buffer_get_available(producer->ring.buffer);
+ while (producer->started && avail >= producer->ring.chunck.size) {
+ avail -= speex_buffer_read(producer->ring.buffer, (void*)producer->ring.chunck.buffer, (int)producer->ring.chunck.size);
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data,
+ producer->ring.chunck.buffer, producer->ring.chunck.size);
+ }
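+ // AudioUnitRender() delivers whatever frame count the hardware produced, which rarely
+ // matches the codec ptime; buffering through the speex ring above lets the encoder
+ // callback always receive exact chunck-sized (i.e. ptime-sized) packets.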
+ }
+
+ return status;
+}
+
+/* ============ Media Producer Interface ================= */
+int tdav_producer_audiounit_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ if(param->plugin_type == tmedia_ppt_producer){
+ if(param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "mute")) {
+ producer->muted = TSK_TO_INT32((uint8_t*)param->value);
+ return tdav_audiounit_handle_mute(producer->audioUnitHandle, producer->muted);
+ }
+ else if (tsk_striequals(param->key, "interrupt")) {
+ int32_t interrupt = *((uint8_t*)param->value) ? 1 : 0;
+ return tdav_audiounit_handle_interrupt(producer->audioUnitHandle, interrupt);
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+static int tdav_producer_audiounit_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ static UInt32 flagOne = 1;
+ UInt32 param;
+ // static UInt32 flagZero = 0;
+#define kInputBus 1
+
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ OSStatus status = noErr;
+ AudioStreamBasicDescription audioFormat;
+ AudioStreamBasicDescription deviceFormat;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!producer->audioUnitHandle){
+ if(!(producer->audioUnitHandle = tdav_audiounit_handle_create(TMEDIA_PRODUCER(producer)->session_id))){
+ TSK_DEBUG_ERROR("Failed to get audio unit instance for session with id=%lld", TMEDIA_PRODUCER(producer)->session_id);
+ return -3;
+ }
+ }
+
+ // enable
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ kInputBus,
+ &flagOne,
+ sizeof(flagOne));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+ else {
+#if !TARGET_OS_IPHONE // strange: TARGET_OS_MAC is equal to '1' on the Simulator
+ // disable output
+ param = 0;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0,
+ &param,
+ sizeof(UInt32));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_EnableIO) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set default audio device
+ param = sizeof(AudioDeviceID);
+ AudioDeviceID inputDeviceID;
+ status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &param, &inputDeviceID);
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+
+ // set the current device to the default input unit
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Output,
+ 0,
+ &inputDeviceID,
+ sizeof(AudioDeviceID));
+ if(status != noErr){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_CurrentDevice) failed with status=%ld", (signed long)status);
+ return -4;
+ }
+#endif /* !TARGET_OS_IPHONE */
+
+ /* codec should have ptime */
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+ TSK_DEBUG_INFO("AudioUnit producer: channels=%d, rate=%d, ptime=%d",
+ TMEDIA_PRODUCER(producer)->audio.channels,
+ TMEDIA_PRODUCER(producer)->audio.rate,
+ TMEDIA_PRODUCER(producer)->audio.ptime);
+
+ // get device format
+ param = sizeof(AudioStreamBasicDescription);
+ status = AudioUnitGetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ kInputBus,
+ &deviceFormat, &param);
+ if(status == noErr && deviceFormat.mSampleRate){
+#if TARGET_OS_IPHONE
+ // iOS supports 8kHz, 16kHz and 32kHz => do not override the sample rate
+#elif TARGET_OS_MAC
+ // For example, iSight supports only 48kHz
+ TMEDIA_PRODUCER(producer)->audio.rate = deviceFormat.mSampleRate;
+#endif
+ }
+
+ // set format
+ audioFormat.mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
+ audioFormat.mFormatID = kAudioFormatLinearPCM;
+ audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
+ audioFormat.mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
+ audioFormat.mFramesPerPacket = 1;
+ audioFormat.mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ audioFormat.mBytesPerPacket = audioFormat.mBitsPerChannel / 8 * audioFormat.mChannelsPerFrame;
+ audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket;
+ audioFormat.mReserved = 0;
+ if(audioFormat.mFormatID == kAudioFormatLinearPCM && audioFormat.mChannelsPerFrame == 1){
+ audioFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
+ }
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ kInputBus,
+ &audioFormat,
+ sizeof(audioFormat));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed with status=%ld", (signed long)status);
+ return -5;
+ }
+ else {
+
+ // configure
+ if(tdav_audiounit_handle_configure(producer->audioUnitHandle, tsk_false, TMEDIA_PRODUCER(producer)->audio.ptime, &audioFormat)){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_set_rate(%d) failed", TMEDIA_PRODUCER(producer)->audio.rate);
+ return -4;
+ }
+
+ // set callback function
+ AURenderCallbackStruct callback;
+ callback.inputProc = __handle_input_buffer;
+ callback.inputProcRefCon = producer;
+ status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Output,
+ kInputBus,
+ &callback,
+ sizeof(callback));
+ if(status){
+ TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
+ return -6;
+ }
+ else {
+ // disable buffer allocation as we will provide ours
+ //status = AudioUnitSetProperty(tdav_audiounit_handle_get_instance(producer->audioUnitHandle),
+ // kAudioUnitProperty_ShouldAllocateBuffer,
+ // kAudioUnitScope_Output,
+ // kInputBus,
+ // &flagZero,
+ // sizeof(flagZero));
+
+ producer->ring.chunck.size = (TMEDIA_PRODUCER(producer)->audio.ptime * audioFormat.mSampleRate * audioFormat.mBytesPerFrame) / 1000;
+ // allocate our chunck buffer
+ if(!(producer->ring.chunck.buffer = tsk_realloc(producer->ring.chunck.buffer, producer->ring.chunck.size))){
+ TSK_DEBUG_ERROR("Failed to allocate new buffer");
+ return -7;
+ }
+ // create ringbuffer
+ producer->ring.size = kRingPacketCount * producer->ring.chunck.size;
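+ // With kRingPacketCount = 10 the ring holds roughly ten ptime packets (about 200 ms at
+ // an assumed ptime of 20 ms), giving the render callback headroom to accumulate partial
+ // frames before they are drained.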
+ if(!producer->ring.buffer){
+ producer->ring.buffer = speex_buffer_init((int)producer->ring.size);
+ }
+ else {
+ int ret;
+ if((ret = speex_buffer_resize(producer->ring.buffer, producer->ring.size)) < 0){
+ TSK_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", (int)producer->ring.size, ret);
+ return ret;
+ }
+ }
+ if(!producer->ring.buffer){
+ TSK_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", (int)producer->ring.size);
+ return -9;
+ }
+ }
+
+ }
+ }
+
+ TSK_DEBUG_INFO("AudioUnit producer prepared");
+ return tdav_audiounit_handle_signal_producer_prepared(producer->audioUnitHandle);
+}
+
+static int tdav_producer_audiounit_start(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(producer->paused){
+ producer->paused = tsk_false;
+ return 0;
+ }
+
+ int ret;
+ if(producer->started){
+ TSK_DEBUG_WARN("Already started");
+ return 0;
+ }
+ else {
+ ret = tdav_audiounit_handle_start(producer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_start failed with error code=%d", ret);
+ return ret;
+ }
+ }
+ producer->started = tsk_true;
+
+ // apply parameters (because could be lost when the producer is restarted -handle recreated-)
+ ret = tdav_audiounit_handle_mute(producer->audioUnitHandle, producer->muted);
+
+ TSK_DEBUG_INFO("AudioUnit producer started");
+ return 0;
+}
+
+static int tdav_producer_audiounit_pause(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ producer->paused = tsk_true;
+ TSK_DEBUG_INFO("AudioUnit producer paused");
+ return 0;
+}
+
+static int tdav_producer_audiounit_stop(tmedia_producer_t* self)
+{
+ tdav_producer_audiounit_t* producer = (tdav_producer_audiounit_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if(!producer->started){
+ TSK_DEBUG_INFO("Not started");
+ return 0;
+ }
+ else {
+ int ret = tdav_audiounit_handle_stop(producer->audioUnitHandle);
+ if(ret){
+ TSK_DEBUG_ERROR("tdav_audiounit_handle_stop failed with error code=%d", ret);
+ // do not return even if failed => we MUST stop the thread!
+ }
+#if TARGET_OS_IPHONE
+ //https://devforums.apple.com/thread/118595
+ if(producer->audioUnitHandle){
+ tdav_audiounit_handle_destroy(&producer->audioUnitHandle);
+ }
+#endif
+ }
+ producer->started = tsk_false;
+ TSK_DEBUG_INFO("AudioUnit producer stoppped");
+ return 0;
+}
+
+
+//
+// CoreAudio producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_audiounit_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_audiounit_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_audiounit_dtor(tsk_object_t * self)
+{
+ tdav_producer_audiounit_t *producer = self;
+ if(producer){
+ // Stop the producer if not done
+ if(producer->started){
+ tdav_producer_audiounit_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (producer->audioUnitHandle) {
+ tdav_audiounit_handle_destroy(&producer->audioUnitHandle);
+ }
+ TSK_FREE(producer->ring.chunck.buffer);
+ if(producer->ring.buffer){
+ speex_buffer_destroy(producer->ring.buffer);
+ }
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+
+ TSK_DEBUG_INFO("*** AudioUnit Producer destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_audiounit_def_s =
+{
+ sizeof(tdav_producer_audiounit_t),
+ tdav_producer_audiounit_ctor,
+ tdav_producer_audiounit_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_audiounit_plugin_def_s =
+{
+ &tdav_producer_audiounit_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio producer (AudioUnit)",
+
+ tdav_producer_audiounit_set,
+ tdav_producer_audiounit_prepare,
+ tdav_producer_audiounit_start,
+ tdav_producer_audiounit_pause,
+ tdav_producer_audiounit_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_audiounit_plugin_def_t = &tdav_producer_audiounit_plugin_def_s;
+
+
+#endif /* HAVE_COREAUDIO_AUDIO_UNIT */
diff --git a/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
new file mode 100644
index 0000000..82e125b
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
@@ -0,0 +1,458 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_dsound.c
+ * @brief Microsoft DirectSound consumer.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ */
+#include "tinydav/audio/directsound/tdav_consumer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+// # pragma comment(lib, "dxguid.lib")
+#endif
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+#include <dsound.h>
+
+extern void tdav_win32_print_error(const char* func, HRESULT hr);
+
+#if !defined(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT)
+# define TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT 20
+#endif /* TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT */
+
+typedef struct tdav_consumer_dsound_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ tsk_bool_t started;
+ tsk_size_t bytes_per_notif_size;
+ uint8_t* bytes_per_notif_ptr;
+ tsk_thread_handle_t* tid[1];
+
+ LPDIRECTSOUND device;
+ LPDIRECTSOUNDBUFFER primaryBuffer;
+ LPDIRECTSOUNDBUFFER secondaryBuffer;
+ HANDLE notifEvents[TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT];
+}
+tdav_consumer_dsound_t;
+
+static _inline int32_t __convert_volume(int32_t volume)
+{
+ static const int32_t __step = (DSBVOLUME_MAX - DSBVOLUME_MIN) / 100;
+ return (volume * __step) + DSBVOLUME_MIN;
+}
+
+static void* TSK_STDCALL _tdav_consumer_dsound_playback_thread(void *param)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)param;
+
+ HRESULT hr;
+ LPVOID lpvAudio1, lpvAudio2;
+ DWORD dwBytesAudio1, dwBytesAudio2, dwEvent;
+ static const DWORD dwWriteCursor = 0;
+ tsk_size_t out_size;
+
+ TSK_DEBUG_INFO("_tdav_consumer_dsound_playback_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+ while (dsound->started) {
+ dwEvent = WaitForMultipleObjects(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT, dsound->notifEvents, FALSE, INFINITE);
+ if (!dsound->started) {
+ break;
+ }
+
+ // lock
+ hr = IDirectSoundBuffer_Lock(
+ dsound->secondaryBuffer,
+ dwWriteCursor/* Ignored because of DSBLOCK_FROMWRITECURSOR */,
+ (DWORD)dsound->bytes_per_notif_size,
+ &lpvAudio1, &dwBytesAudio1,
+ &lpvAudio2, &dwBytesAudio2,
+ DSBLOCK_FROMWRITECURSOR);
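+ // The secondary buffer is circular, so a lock spanning its end comes back as two regions
+ // (lpvAudio1/lpvAudio2); both halves are filled below before unlocking.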
+ if (hr != DS_OK) {
+ tdav_win32_print_error("IDirectSoundBuffer_Lock", hr);
+ goto next;
+ }
+
+ out_size = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(dsound), dsound->bytes_per_notif_ptr, dsound->bytes_per_notif_size);
+ if (out_size < dsound->bytes_per_notif_size) {
+ // fill with silence
+ memset(&dsound->bytes_per_notif_ptr[out_size], 0, (dsound->bytes_per_notif_size - out_size));
+ }
+ if ((dwBytesAudio1 + dwBytesAudio2) == dsound->bytes_per_notif_size) {
+ memcpy(lpvAudio1, dsound->bytes_per_notif_ptr, dwBytesAudio1);
+ if (lpvAudio2 && dwBytesAudio2) {
+ memcpy(lpvAudio2, &dsound->bytes_per_notif_ptr[dwBytesAudio1], dwBytesAudio2);
+ }
+ }
+ else {
+ TSK_DEBUG_ERROR("Not expected: %d+%d#%d", dwBytesAudio1, dwBytesAudio2, dsound->bytes_per_notif_size);
+ }
+#if 0
+ memset(lpvAudio1, rand(), dwBytesAudio1);
+#endif
+ // unlock
+ if ((hr = IDirectSoundBuffer_Unlock(dsound->secondaryBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundBuffer_UnLock", hr);
+ goto next;
+ }
+next:
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(dsound));
+ }
+
+ TSK_DEBUG_INFO("_tdav_consumer_dsound_playback_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+static int _tdav_consumer_dsound_unprepare(tdav_consumer_dsound_t *dsound)
+{
+ if(dsound){
+ tsk_size_t i;
+ if(dsound->primaryBuffer){
+ IDirectSoundBuffer_Release(dsound->primaryBuffer);
+ dsound->primaryBuffer = NULL;
+ }
+ if(dsound->secondaryBuffer){
+ IDirectSoundBuffer_Release(dsound->secondaryBuffer);
+ dsound->secondaryBuffer = NULL;
+ }
+ if(dsound->device){
+ IDirectSound_Release(dsound->device);
+ dsound->device = NULL;
+ }
+ for(i = 0; i<sizeof(dsound->notifEvents)/sizeof(dsound->notifEvents[0]); i++){
+ if(dsound->notifEvents[i]){
+ CloseHandle(dsound->notifEvents[i]);
+ dsound->notifEvents[i] = NULL;
+ }
+ }
+ }
+ return 0;
+}
+
+
+
+/* ============ Media Consumer Interface ================= */
+static int tdav_consumer_dsound_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+ int ret = tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+
+ if(ret == 0){
+ if(dsound->secondaryBuffer && tsk_striequals(param->key, "volume")){
+ if(IDirectSoundBuffer_SetVolume(dsound->secondaryBuffer, __convert_volume(TMEDIA_CONSUMER(self)->audio.volume)) != DS_OK){
+ TSK_DEBUG_ERROR("IDirectSoundBuffer_SetVolume() failed");
+ ret = -1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int tdav_consumer_dsound_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ HRESULT hr;
+ HWND hWnd;
+
+ WAVEFORMATEX wfx = {0};
+ DSBUFFERDESC dsbd = {0};
+
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ if(!dsound){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(dsound->device || dsound->primaryBuffer || dsound->secondaryBuffer){
+ TSK_DEBUG_ERROR("Consumer already prepared");
+ return -2;
+ }
+
+ TMEDIA_CONSUMER(dsound)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(dsound)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(dsound)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+#if 0
+ TMEDIA_CONSUMER(dsound)->audio.out.rate = 48000;
+ TMEDIA_CONSUMER(dsound)->audio.out.channels = 2;
+#endif
+
+ /* Create sound device */
+ if((hr = DirectSoundCreate(NULL, &dsound->device, NULL)) != DS_OK){
+ tdav_win32_print_error("DirectSoundCreate", hr);
+ return -3;
+ }
+
+ /* Set CooperativeLevel */
+ if((hWnd = GetForegroundWindow()) || (hWnd = GetDesktopWindow()) || (hWnd = GetConsoleWindow())){
+ if((hr = IDirectSound_SetCooperativeLevel(dsound->device, hWnd, DSSCL_PRIORITY)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_SetCooperativeLevel", hr);
+ return -2;
+ }
+ }
+
+ /* Creates the primary buffer and apply format */
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_CONSUMER(dsound)->audio.out.channels ? TMEDIA_CONSUMER(dsound)->audio.out.channels : TMEDIA_CONSUMER(dsound)->audio.in.channels;
+ wfx.nSamplesPerSec = TMEDIA_CONSUMER(dsound)->audio.out.rate ? TMEDIA_CONSUMER(dsound)->audio.out.rate : TMEDIA_CONSUMER(dsound)->audio.in.rate;
+ wfx.wBitsPerSample = TMEDIA_CONSUMER(dsound)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ dsound->bytes_per_notif_size = ((wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(dsound)->audio.ptime)/1000);
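+ /* Example (assumed format): 8000 Hz, 16-bit mono => nAvgBytesPerSec = 16000; with
+ ptime = 20 ms each notification chunk is (16000 * 20) / 1000 = 320 bytes, and the
+ secondary buffer created below holds TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT (20 by
+ default) such chunks, i.e. 6400 bytes (~400 ms of audio). */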
+ if(!(dsound->bytes_per_notif_ptr = tsk_realloc(dsound->bytes_per_notif_ptr, dsound->bytes_per_notif_size))){
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size = %u", dsound->bytes_per_notif_size);
+ return -3;
+ }
+
+ dsbd.dwSize = sizeof(DSBUFFERDESC);
+ dsbd.dwFlags = DSBCAPS_PRIMARYBUFFER;
+ dsbd.dwBufferBytes = 0;
+ dsbd.lpwfxFormat = NULL;
+
+ if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->primaryBuffer, NULL)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+ return -4;
+ }
+ if((hr = IDirectSoundBuffer_SetFormat(dsound->primaryBuffer, &wfx)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_SetFormat", hr);
+ return -5;
+ }
+
+ /* Creates the secondary buffer and apply format */
+ dsbd.dwFlags = (DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GLOBALFOCUS | DSBCAPS_CTRLVOLUME);
+ dsbd.dwBufferBytes = (DWORD)(TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT * dsound->bytes_per_notif_size);
+ dsbd.lpwfxFormat = &wfx;
+
+ if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->secondaryBuffer, NULL)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+ return -6;
+ }
+
+ /* Set Volume */
+ if(IDirectSoundBuffer_SetVolume(dsound->secondaryBuffer, __convert_volume(TMEDIA_CONSUMER(self)->audio.volume)) != DS_OK){
+ TSK_DEBUG_ERROR("IDirectSoundBuffer_SetVolume() failed");
+ }
+
+ return 0;
+}
+
+static int tdav_consumer_dsound_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ tsk_size_t i;
+ HRESULT hr;
+ LPDIRECTSOUNDNOTIFY lpDSBNotify;
+ DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT] = {0};
+
+ static DWORD dwMajorVersion = -1;
+
+ // Get OS version
+ if(dwMajorVersion == -1){
+ OSVERSIONINFO osvi;
+ ZeroMemory(&osvi, sizeof(OSVERSIONINFO));
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&osvi);
+ dwMajorVersion = osvi.dwMajorVersion;
+ }
+
+ if(!dsound){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->device || !dsound->primaryBuffer || !dsound->secondaryBuffer){
+ TSK_DEBUG_ERROR("Consumer not prepared");
+ return -2;
+ }
+
+ if(dsound->started){
+ return 0;
+ }
+
+ if((hr = IDirectSoundBuffer_QueryInterface(dsound->secondaryBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -3;
+ }
+
+ /* Events associated to notification points */
+ for(i = 0; i<TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT; i++){
+ dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ // set the notification point near the middle of each chunk for Windows Vista and later, and just after the start of each chunk for XP and earlier
+ pPosNotify[i].dwOffset = (DWORD)((dsound->bytes_per_notif_size * i) + (dwMajorVersion > 5 ? (dsound->bytes_per_notif_size >> 1) : 1));
+ pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+ }
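+ // Illustrative offsets, assuming the 320-byte chunks from the example in prepare(): on
+ // Vista and later the events fire mid-chunk (160, 480, 800, ...), on XP and earlier just
+ // past each chunk boundary (1, 321, 641, ...).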
+ if((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUND_CONSUMER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK){
+ IDirectSoundNotify_Release(lpDSBNotify);
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -4;
+ }
+
+ if((hr = IDirectSoundNotify_Release(lpDSBNotify))){
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ }
+
+ /* Start the buffer */
+ if((hr = IDirectSoundBuffer_Play(dsound->secondaryBuffer, 0, 0, DSBPLAY_LOOPING)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ return -5;
+ }
+
+ /* start the reader thread */
+ dsound->started = tsk_true;
+ tsk_thread_create(&dsound->tid[0], _tdav_consumer_dsound_playback_thread, dsound);
+
+ return 0;
+}
+
+static int tdav_consumer_dsound_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ if(!dsound || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* buffer is already decoded */
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(dsound), buffer, size, proto_hdr);
+}
+
+static int tdav_consumer_dsound_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+static int tdav_consumer_dsound_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ HRESULT hr;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->started){
+ return 0;
+ }
+
+ /* should be done here */
+ dsound->started = tsk_false;
+
+ /* stop thread */
+ if(dsound->tid[0]){
+ tsk_thread_join(&(dsound->tid[0]));
+ }
+
+ if((hr = IDirectSoundBuffer_Stop(dsound->secondaryBuffer)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_Stop", hr);
+ }
+ if((hr = IDirectSoundBuffer_SetCurrentPosition(dsound->secondaryBuffer, 0)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_SetCurrentPosition", hr);
+ }
+
+ // unprepare
+ // will be prepared again before calling next start()
+ _tdav_consumer_dsound_unprepare(dsound);
+
+ return 0;
+}
+
+
+//
+// DirectSound consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_dsound_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_dsound_dtor(tsk_object_t * self)
+{
+ tdav_consumer_dsound_t *dsound = self;
+ if(dsound){
+ /* stop */
+ if(dsound->started){
+ tdav_consumer_dsound_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(dsound));
+ /* deinit self */
+ _tdav_consumer_dsound_unprepare(dsound);
+ TSK_FREE(dsound->bytes_per_notif_ptr);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_dsound_def_s =
+{
+ sizeof(tdav_consumer_dsound_t),
+ tdav_consumer_dsound_ctor,
+ tdav_consumer_dsound_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_dsound_plugin_def_s =
+{
+ &tdav_consumer_dsound_def_s,
+
+ tmedia_audio,
+ "Microsoft DirectSound consumer",
+
+ tdav_consumer_dsound_set,
+ tdav_consumer_dsound_prepare,
+ tdav_consumer_dsound_start,
+ tdav_consumer_dsound_consume,
+ tdav_consumer_dsound_pause,
+ tdav_consumer_dsound_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_dsound_plugin_def_t = &tdav_consumer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/directsound/tdav_producer_dsound.c b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
new file mode 100644
index 0000000..c5ae167
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
@@ -0,0 +1,402 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_dsound.c
+ * @brief Microsoft DirectSound producer.
+ *
+ */
+#include "tinydav/audio/directsound/tdav_producer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+// # pragma comment(lib, "dxguid.lib")
+#endif
+
+#if !defined(SEND_SILENCE_ON_MUTE)
+# if METROPOLIS
+# define SEND_SILENCE_ON_MUTE 1
+# else
+# define SEND_SILENCE_ON_MUTE 0
+# endif
+#endif /* SEND_SILENCE_ON_MUTE */
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+#include <dsound.h>
+
+extern void tdav_win32_print_error(const char* func, HRESULT hr);
+
+#if !defined(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT)
+# define TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT 10
+#endif /* TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT */
+
+typedef struct tdav_producer_dsound_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ tsk_bool_t started;
+ tsk_bool_t mute;
+ tsk_size_t bytes_per_notif_size;
+ tsk_thread_handle_t* tid[1];
+
+ LPDIRECTSOUNDCAPTURE device;
+ LPDIRECTSOUNDCAPTUREBUFFER captureBuffer;
+ HANDLE notifEvents[TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT];
+}
+tdav_producer_dsound_t;
+
+static void* TSK_STDCALL _tdav_producer_dsound_record_thread(void *param)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)param;
+
+ HRESULT hr;
+ LPVOID lpvAudio1, lpvAudio2;
+ DWORD dwBytesAudio1, dwBytesAudio2, dwEvent, dwIndex;
+
+ TSK_DEBUG_INFO("_tdav_producer_dsound_record_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (dsound->started) {
+ dwEvent = WaitForMultipleObjects(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT, dsound->notifEvents, FALSE, INFINITE);
+ if (!dsound->started) {
+ break;
+ }
+ if (dwEvent < WAIT_OBJECT_0 || dwEvent >(WAIT_OBJECT_0 + TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT)) {
+ TSK_DEBUG_ERROR("Invalid dwEvent(%d)", dwEvent);
+ break;
+ }
+ dwIndex = (dwEvent - WAIT_OBJECT_0);
+
+ // lock
+ if ((hr = IDirectSoundCaptureBuffer_Lock(dsound->captureBuffer, (DWORD)(dwIndex * dsound->bytes_per_notif_size), (DWORD)dsound->bytes_per_notif_size, &lpvAudio1, &dwBytesAudio1, &lpvAudio2, &dwBytesAudio2, 0)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Lock", hr);
+ continue;
+ }
+
+ if (TMEDIA_PRODUCER(dsound)->enc_cb.callback) {
+#if SEND_SILENCE_ON_MUTE
+ if (dsound->mute) {
+ memset(lpvAudio1, 0, dwBytesAudio1);
+ if(lpvAudio2){
+ memset(lpvAudio2, 0, dwBytesAudio2);
+ }
+ }
+#endif
+ TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio1, dwBytesAudio1);
+ if (lpvAudio2) {
+ TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio2, dwBytesAudio2);
+ }
+ }
+
+ // unlock
+ if ((hr = IDirectSoundCaptureBuffer_Unlock(dsound->captureBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Unlock", hr);
+ continue;
+ }
+ }
+
+ TSK_DEBUG_INFO("_tdav_producer_dsound_record_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+static int _tdav_producer_dsound_unprepare(tdav_producer_dsound_t* dsound)
+{
+ if (dsound) {
+ tsk_size_t i;
+ if (dsound->captureBuffer) {
+ IDirectSoundCaptureBuffer_Release(dsound->captureBuffer);
+ dsound->captureBuffer = NULL;
+ }
+ if (dsound->device) {
+ IDirectSoundCapture_Release(dsound->device);
+ dsound->device = NULL;
+ }
+ for (i = 0; i < (sizeof(dsound->notifEvents) / sizeof(dsound->notifEvents[0])); i++){
+ if (dsound->notifEvents[i]) {
+ CloseHandle(dsound->notifEvents[i]);
+ dsound->notifEvents[i] = NULL;
+ }
+ }
+ }
+ return 0;
+}
+
+
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_dsound_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if (tsk_striequals(param->key, "mute")) {
+ dsound->mute = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+#if !SEND_SILENCE_ON_MUTE
+ if (dsound->started) {
+ if (dsound->mute) {
+ IDirectSoundCaptureBuffer_Stop(dsound->captureBuffer);
+ }
+ else {
+ IDirectSoundCaptureBuffer_Start(dsound->captureBuffer, DSBPLAY_LOOPING);
+ }
+ }
+#endif
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+static int tdav_producer_dsound_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ HRESULT hr;
+
+ WAVEFORMATEX wfx = { 0 };
+ DSCBUFFERDESC dsbd = { 0 };
+
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ if (!dsound || !codec) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (dsound->device || dsound->captureBuffer) {
+ TSK_DEBUG_ERROR("Producer already prepared");
+ return -2;
+ }
+
+ TMEDIA_PRODUCER(dsound)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(dsound)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(dsound)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+#if 0
+ TMEDIA_PRODUCER(dsound)->audio.rate = 48000;
+ TMEDIA_PRODUCER(dsound)->audio.channels = 1;
+#endif
+
+ /* Create capture device */
+ if ((hr = DirectSoundCaptureCreate(NULL, &dsound->device, NULL)) != DS_OK) {
+ tdav_win32_print_error("DirectSoundCaptureCreate", hr);
+ return -3;
+ }
+
+ /* Creates the capture buffer */
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_PRODUCER(dsound)->audio.channels;
+ wfx.nSamplesPerSec = TMEDIA_PRODUCER(dsound)->audio.rate;
+ wfx.wBitsPerSample = TMEDIA_PRODUCER(dsound)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ dsound->bytes_per_notif_size = ((wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(dsound)->audio.ptime) / 1000);
+
+ dsbd.dwSize = sizeof(DSCBUFFERDESC);
+ dsbd.dwBufferBytes = (DWORD)(TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT * dsound->bytes_per_notif_size);
+ dsbd.lpwfxFormat = &wfx;
+
+ if ((hr = IDirectSoundCapture_CreateCaptureBuffer(dsound->device, &dsbd, &dsound->captureBuffer, NULL)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCapture_CreateCaptureBuffer", hr);
+ return -4;
+ }
+
+ return 0;
+}
+
+static int tdav_producer_dsound_start(tmedia_producer_t* self)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ tsk_size_t i;
+ DWORD dwOffset;
+ HRESULT hr;
+ LPDIRECTSOUNDNOTIFY lpDSBNotify;
+ DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT] = { 0 };
+
+ if (!dsound) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!dsound->device || !dsound->captureBuffer) {
+ TSK_DEBUG_ERROR("Producer not prepared");
+ return -2;
+ }
+
+ if (dsound->started) {
+ return 0;
+ }
+
+ if ((hr = IDirectSoundCaptureBuffer_QueryInterface(dsound->captureBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_QueryInterface", hr);
+ return -3;
+ }
+
+ /* Events associated to notification points */
+ dwOffset = (DWORD)(dsound->bytes_per_notif_size - 1);
+ for (i = 0; i < (sizeof(dsound->notifEvents) / sizeof(dsound->notifEvents[0])); i++){
+ dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ pPosNotify[i].dwOffset = dwOffset;
+ pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+ dwOffset += (DWORD)dsound->bytes_per_notif_size;
+ }
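+ // Assuming 320-byte notification chunks (e.g. 8000 Hz, 16-bit mono, 20 ms ptime), the
+ // capture events land at offsets 319, 639, 959, ..., i.e. each one fires once its chunk
+ // has been completely filled.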
+ if ((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUND_PRODUCER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK) {
+ IDirectSoundNotify_Release(lpDSBNotify);
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -4;
+ }
+
+ if ((hr = IDirectSoundNotify_Release(lpDSBNotify))) {
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ }
+
+ /* Start the buffer */
+ if ((hr = IDirectSoundCaptureBuffer_Start(dsound->captureBuffer, DSBPLAY_LOOPING)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Start", hr);
+ return -5;
+ }
+
+ /* start the reader thread */
+ dsound->started = tsk_true;
+ tsk_thread_create(&dsound->tid[0], _tdav_producer_dsound_record_thread, dsound);
+
+ return 0;
+}
+
+static int tdav_producer_dsound_pause(tmedia_producer_t* self)
+{
+ return 0;
+}
+
+static int tdav_producer_dsound_stop(tmedia_producer_t* self)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ HRESULT hr;
+
+ if (!self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!dsound->started) {
+ return 0;
+ }
+
+ // should be done here
+ dsound->started = tsk_false;
+
+#if !SEND_SILENCE_ON_MUTE
+ if (dsound->mute && dsound->notifEvents[0]) {
+ // thread is paused -> raise event now that "started" is equal to false
+ SetEvent(dsound->notifEvents[0]);
+ }
+#endif
+
+ // stop thread
+ if (dsound->tid[0]) {
+ tsk_thread_join(&(dsound->tid[0]));
+ }
+
+ if ((hr = IDirectSoundCaptureBuffer_Stop(dsound->captureBuffer)) != DS_OK) {
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Stop", hr);
+ }
+
+ // unprepare
+ // will be prepared again before next start()
+ _tdav_producer_dsound_unprepare(dsound);
+
+ return 0;
+}
+
+
+//
+// DirectSound producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_dsound_t *producer = self;
+ if (producer) {
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_dsound_dtor(tsk_object_t * self)
+{
+ tdav_producer_dsound_t *dsound = self;
+ if (dsound) {
+ /* stop */
+ if (dsound->started) {
+ tdav_producer_dsound_stop(self);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(dsound));
+ /* deinit self */
+ _tdav_producer_dsound_unprepare(dsound);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_dsound_def_s =
+{
+ sizeof(tdav_producer_dsound_t),
+ tdav_producer_dsound_ctor,
+ tdav_producer_dsound_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_dsound_plugin_def_s =
+{
+ &tdav_producer_dsound_def_s,
+
+ tmedia_audio,
+ "Microsoft DirectSound producer",
+
+ tdav_producer_dsound_set,
+ tdav_producer_dsound_prepare,
+ tdav_producer_dsound_start,
+ tdav_producer_dsound_pause,
+ tdav_producer_dsound_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_dsound_plugin_def_t = &tdav_producer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/oss/tdav_consumer_oss.c b/tinyDAV/src/audio/oss/tdav_consumer_oss.c
new file mode 100644
index 0000000..0370210
--- /dev/null
+++ b/tinyDAV/src/audio/oss/tdav_consumer_oss.c
@@ -0,0 +1,397 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/oss/tdav_consumer_oss.h"
+
+#if HAVE_LINUX_SOUNDCARD_H
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/soundcard.h>
+
+#define OSS_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[OSS Consumer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[OSS Consumer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_consumer_oss_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ tsk_bool_t b_started;
+ tsk_bool_t b_prepared;
+ tsk_bool_t b_muted;
+ int n_bits_per_sample;
+
+ int fd;
+ tsk_thread_handle_t* tid[1];
+
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ uint8_t* p_buff_ptr;
+
+ tsk_size_t n_buff16_size_in_bytes;
+ tsk_size_t n_buff16_size_in_samples;
+ uint16_t* p_buff16_ptr;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_consumer_oss_t;
+
+static int __oss_from_16bits_to_8bits(const void* p_src, void* p_dst, tsk_size_t n_samples)
+{
+ tsk_size_t i;
+ uint16_t *_p_src = (uint16_t*)p_src;
+ uint8_t *_p_dst = (uint8_t*)p_dst;
+
+ if (!p_src || !p_dst || !n_samples) {
+ OSS_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+ for (i = 0; i < n_samples; ++i) {
+ _p_dst[i] = _p_src[i];
+ }
+ return 0;
+}
+
+static void* TSK_STDCALL _tdav_consumer_oss_playback_thread(void *param)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)param;
+ int err;
+ void* p_buffer = ((p_oss->n_bits_per_sample == 8) ? (void*)p_oss->p_buff16_ptr: (void*)p_oss->p_buff_ptr);
+ tsk_size_t n_buffer_in_bytes = (p_oss->n_bits_per_sample == 8) ? p_oss->n_buff16_size_in_bytes : p_oss->n_buff_size_in_bytes;
+ tsk_size_t n_buffer_in_samples = p_oss->n_buff_size_in_samples;
+
+ const void* _p_buffer;
+ tsk_size_t _n_buffer_in_bytes;
+
+ OSS_DEBUG_INFO("__playback_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_oss->b_started) {
+ tsk_safeobj_lock(p_oss);
+ err = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(p_oss), p_buffer, n_buffer_in_bytes); // requires 16bits, thread-safe
+ if (err >= 0) {
+ _p_buffer = p_buffer;
+ _n_buffer_in_bytes = n_buffer_in_bytes;
+ if (err < n_buffer_in_bytes) {
+ memset(((uint8_t*)p_buffer) + err, 0, (n_buffer_in_bytes - err));
+ }
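+ // Device opened in 8-bit mode: squeeze the 16-bit jitter-buffer output down to 8-bit before writing.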
+ if (p_oss->n_bits_per_sample == 8) {
+ __oss_from_16bits_to_8bits(p_buffer, p_oss->p_buff_ptr, n_buffer_in_samples);
+ _p_buffer = p_oss->p_buff_ptr;
+ _n_buffer_in_bytes >>= 1;
+ }
+ if ((err = write(p_oss->fd, _p_buffer, _n_buffer_in_bytes)) != _n_buffer_in_bytes) {
+ OSS_DEBUG_ERROR ("Failed to read data from audio interface failed (%d -> %s)", err , strerror(errno));
+ tsk_safeobj_unlock(p_oss);
+ goto bail;
+ }
+ }
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(p_oss));
+
+ tsk_safeobj_unlock(p_oss);
+ }
+bail:
+ OSS_DEBUG_INFO("__playback_thread -- STOP");
+ return tsk_null;
+}
+
+/* ============ Media Consumer Interface ================= */
+static int tdav_consumer_oss_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int ret = 0;
+
+ ret = tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+
+ return ret;
+}
+
+static int tdav_consumer_oss_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int err = 0, channels, sample_rate, bits_per_sample;
+
+ if (!p_oss || !codec || !codec->plugin) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (p_oss->fd == -1) {
+ if ((p_oss->fd = open("/dev/dsp", O_WRONLY)) < 0) {
+ OSS_DEBUG_ERROR("open('/dev/dsp') failed: %s", strerror(errno));
+ err = -2;
+ goto bail;
+ }
+ }
+
+ TMEDIA_CONSUMER(p_oss)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_oss)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(p_oss)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ // Set using requested
+ channels = TMEDIA_CONSUMER(p_oss)->audio.in.channels;
+ sample_rate = TMEDIA_CONSUMER(p_oss)->audio.in.rate;
+ bits_per_sample = TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample; // 16
+
+ // Prepare
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_BITS, &bits_per_sample)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_BITS, %d) failed: %d->%s", bits_per_sample, err, strerror(errno));
+ goto bail;
+ }
+ if (bits_per_sample != 16 && bits_per_sample != 8) {
+ OSS_DEBUG_ERROR("bits_per_sample=%d not supported", bits_per_sample);
+ err = -3;
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_CHANNELS, &channels)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_CHANNELS, %d) failed: %d->%s", channels, err, strerror(errno));
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_RATE, &sample_rate)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_RATE, %d) failed: %d->%s", sample_rate, err, strerror(errno));
+ goto bail;
+ }
+
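+ // One ptime worth of playback data: e.g. 20 ms at 8 kHz, 16-bit mono gives (20 * 8000 * 2 * 1) / 1000 = 320 bytes.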
+ p_oss->n_buff_size_in_bytes = (TMEDIA_CONSUMER(p_oss)->audio.ptime * sample_rate * ((bits_per_sample >> 3) * channels)) / 1000;
+ if (!(p_oss->p_buff_ptr = tsk_realloc(p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -4;
+ goto bail;
+ }
+ p_oss->n_buff_size_in_samples = (p_oss->n_buff_size_in_bytes / (bits_per_sample >> 3));
+ if (bits_per_sample == 8) {
+ p_oss->n_buff16_size_in_bytes = p_oss->n_buff_size_in_bytes << 1;
+ if (!(p_oss->p_buff16_ptr = tsk_realloc(p_oss->p_buff16_ptr, p_oss->n_buff16_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -5;
+ goto bail;
+ }
+ p_oss->n_buff16_size_in_samples = p_oss->n_buff_size_in_samples;
+ }
+
+ OSS_DEBUG_INFO("prepared: req_bits_per_sample=%d; req_channels=%d; req_rate=%d, resp_bits_per_sample=%d; resp_channels=%d; resp_rate=%d /// n_buff_size_in_samples=%u;n_buff_size_in_bytes=%u",
+ TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample, TMEDIA_CONSUMER(p_oss)->audio.in.channels, TMEDIA_CONSUMER(p_oss)->audio.in.rate,
+ bits_per_sample, channels, sample_rate,
+ p_oss->n_buff_size_in_samples, p_oss->n_buff_size_in_bytes);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_CONSUMER(p_oss)->audio.out.channels = channels;
+ TMEDIA_CONSUMER(p_oss)->audio.out.rate = sample_rate;
+ // TMEDIA_CONSUMER(p_oss)->audio.bits_per_sample = bits_per_sample;
+
+ p_oss->n_bits_per_sample = bits_per_sample;
+ p_oss->b_prepared = tsk_true;
+
+bail:
+ if (err) {
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ }
+ tsk_safeobj_unlock(p_oss);
+
+ return err;
+}
+
+static int tdav_consumer_oss_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int err = 0;
+
+ if (! p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (!p_oss->b_prepared) {
+ OSS_DEBUG_WARN("Not prepared");
+ err = -2;
+ goto bail;
+ }
+
+ if (p_oss->b_started) {
+ OSS_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start thread */
+ p_oss->b_started = tsk_true;
+ tsk_thread_create(&p_oss->tid[0], _tdav_consumer_oss_playback_thread, p_oss);
+
+ OSS_DEBUG_INFO("started");
+
+bail:
+ tsk_safeobj_unlock(p_oss);
+ return err;
+}
+
+static int tdav_consumer_oss_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ int err = 0;
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+
+ if (!p_oss || !buffer || !size) {
+ OSS_DEBUG_ERROR("Invalid paramter");
+ return -1;
+ }
+
+ //tsk_safeobj_lock(p_oss);
+
+ if (!p_oss->b_started) {
+ OSS_DEBUG_WARN("Not started");
+ err = -2;
+ goto bail;
+ }
+ if ((err = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(p_oss), buffer, size, proto_hdr))/*thread-safe*/) {
+ OSS_DEBUG_WARN("Failed to put audio data to the jitter buffer");
+ goto bail;
+ }
+
+bail:
+ //tsk_safeobj_unlock(p_oss);
+ return err;
+}
+
+static int tdav_consumer_oss_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+static int tdav_consumer_oss_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_oss_t* p_oss = (tdav_consumer_oss_t*)self;
+ int err;
+
+ if (!p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ /* must be cleared here so the playback thread exits its loop before joining */
+ p_oss->b_started = tsk_false;
+
+ /* stop thread */
+ if (p_oss->tid[0]) {
+ tsk_thread_join(&(p_oss->tid[0]));
+ }
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ p_oss->b_prepared = tsk_false;
+
+ OSS_DEBUG_INFO("stopped");
+
+ tsk_safeobj_unlock(p_oss);
+
+ return 0;
+}
+
+
+//
+// Linux OSS consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_oss_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_oss_t *p_oss = self;
+ if (p_oss) {
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(p_oss));
+ /* init self */
+
+ p_oss->fd = -1;
+ tsk_safeobj_init(p_oss);
+
+ OSS_DEBUG_INFO("created");
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_oss_dtor(tsk_object_t * self)
+{
+ tdav_consumer_oss_t *p_oss = self;
+ if (p_oss) {
+
+ /* stop */
+ if (p_oss->b_started) {
+ tdav_consumer_oss_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(p_oss));
+ /* deinit self */
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ TSK_FREE(p_oss->p_buff_ptr);
+ TSK_FREE(p_oss->p_buff16_ptr);
+ tsk_safeobj_deinit(p_oss);
+
+ OSS_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_oss_def_s =
+{
+ sizeof(tdav_consumer_oss_t),
+ tdav_consumer_oss_ctor,
+ tdav_consumer_oss_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_oss_plugin_def_s =
+{
+ &tdav_consumer_oss_def_s,
+
+ tmedia_audio,
+ "Linux OSS consumer",
+
+ tdav_consumer_oss_set,
+ tdav_consumer_oss_prepare,
+ tdav_consumer_oss_start,
+ tdav_consumer_oss_consume,
+ tdav_consumer_oss_pause,
+ tdav_consumer_oss_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_oss_plugin_def_t = &tdav_consumer_oss_plugin_def_s;
+
+#endif /* HAVE_LINUX_SOUNDCARD_H */
diff --git a/tinyDAV/src/audio/oss/tdav_producer_oss.c b/tinyDAV/src/audio/oss/tdav_producer_oss.c
new file mode 100644
index 0000000..d61fb96
--- /dev/null
+++ b/tinyDAV/src/audio/oss/tdav_producer_oss.c
@@ -0,0 +1,369 @@
+/* Copyright (C) 2014 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+#include "tinydav/audio/oss/tdav_producer_oss.h"
+
+#if HAVE_LINUX_SOUNDCARD_H
+
+#include "tsk_string.h"
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_safeobj.h"
+#include "tsk_debug.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/soundcard.h>
+
+#define OSS_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[OSS Producer] " FMT, ##__VA_ARGS__)
+#define OSS_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[OSS Producer] " FMT, ##__VA_ARGS__)
+
+typedef struct tdav_producer_oss_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ tsk_bool_t b_started;
+ tsk_bool_t b_prepared;
+ tsk_bool_t b_muted;
+ int n_bits_per_sample;
+
+ int fd;
+ tsk_thread_handle_t* tid[1];
+
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ uint8_t* p_buff_ptr;
+
+ tsk_size_t n_buff16_size_in_bytes;
+ tsk_size_t n_buff16_size_in_samples;
+ uint16_t* p_buff16_ptr;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_producer_oss_t;
+
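+// Widen 8-bit samples to 16-bit by zero-extension
+// (used when the OSS device only delivers 8 bits per sample).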
+static int __oss_from_8bits_to_16bits(const void* p_src, void* p_dst, tsk_size_t n_samples)
+{
+ tsk_size_t i;
+ const uint8_t *_p_src = (const uint8_t*)p_src;
+ uint16_t *_p_dst = (uint16_t*)p_dst;
+
+ if (!p_src || !p_dst || !n_samples) {
+ OSS_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+ for (i = 0; i < n_samples; ++i) {
+ _p_dst[i] = _p_src[i];
+ }
+ return 0;
+}
+
+static void* TSK_STDCALL _tdav_producer_oss_record_thread(void *param)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)param;
+ int err;
+ const void* p_buffer = ((p_oss->n_bits_per_sample == 8) ? (const void*)p_oss->p_buff16_ptr: (const void*)p_oss->p_buff_ptr);
+ tsk_size_t n_buffer_in_bytes = (p_oss->n_bits_per_sample == 8) ? p_oss->n_buff16_size_in_bytes : p_oss->n_buff_size_in_bytes;
+
+ OSS_DEBUG_INFO("__record_thread -- START");
+
+ tsk_thread_set_priority_2(TSK_THREAD_PRIORITY_TIME_CRITICAL);
+
+ while (p_oss->b_started) {
+ tsk_safeobj_lock(p_oss);
+ if ((err = read(p_oss->fd, p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes)) != p_oss->n_buff_size_in_bytes) {
+ OSS_DEBUG_ERROR ("Failed to read data from audio interface failed (%d -> %s)", err , strerror(errno));
+ tsk_safeobj_unlock(p_oss);
+ goto bail;
+ }
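+ // Device opened in 8-bit mode: widen the captured samples to 16-bit before handing them to the encoder callback.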
+ if (p_oss->n_bits_per_sample == 8) {
+ if ((err = __oss_from_8bits_to_16bits(p_oss->p_buff_ptr, p_oss->p_buff16_ptr, p_oss->n_buff_size_in_samples))) {
+ tsk_safeobj_unlock(p_oss);
+ goto bail;
+ }
+ }
+ if (!p_oss->b_muted && TMEDIA_PRODUCER(p_oss)->enc_cb.callback) {
+ TMEDIA_PRODUCER(p_oss)->enc_cb.callback(TMEDIA_PRODUCER(p_oss)->enc_cb.callback_data, p_buffer, n_buffer_in_bytes);
+ }
+ tsk_safeobj_unlock(p_oss);
+ }
+bail:
+ OSS_DEBUG_INFO("__record_thread -- STOP");
+ return tsk_null;
+}
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_oss_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if(tsk_striequals(param->key, "mute")){
+ p_oss->b_muted = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+static int tdav_producer_oss_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ int err = 0, channels, sample_rate, bits_per_sample;
+
+ if (!p_oss || !codec || !codec->plugin) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (p_oss->fd == -1) {
+ if ((p_oss->fd = open("/dev/dsp", O_RDONLY)) < 0) {
+ OSS_DEBUG_ERROR("open('/dev/dsp') failed: %s", strerror(errno));
+ err = -2;
+ goto bail;
+ }
+ }
+
+ // Set using requested
+ channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ sample_rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ bits_per_sample = TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample; // 16
+
+ // Prepare
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_BITS, &bits_per_sample)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_BITS, %d) failed: %d->%s", bits_per_sample, err, strerror(errno));
+ goto bail;
+ }
+ if (bits_per_sample != 16 && bits_per_sample != 8) {
+ OSS_DEBUG_ERROR("bits_per_sample=%d not supported", bits_per_sample);
+ err = -3;
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_CHANNELS, &channels)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_CHANNELS, %d) failed: %d->%s", channels, err, strerror(errno));
+ goto bail;
+ }
+ if ((err = ioctl(p_oss->fd, SOUND_PCM_WRITE_RATE, &sample_rate)) != 0) {
+ OSS_DEBUG_ERROR("ioctl(SOUND_PCM_WRITE_RATE, %d) failed: %d->%s", sample_rate, err, strerror(errno));
+ goto bail;
+ }
+
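+ // One ptime worth of capture data: e.g. 20 ms at 8 kHz, 16-bit mono gives (20 * 8000 * 2 * 1) / 1000 = 320 bytes.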
+ p_oss->n_buff_size_in_bytes = (TMEDIA_PRODUCER(p_oss)->audio.ptime * sample_rate * ((bits_per_sample >> 3) * channels)) / 1000;
+ if (!(p_oss->p_buff_ptr = tsk_realloc(p_oss->p_buff_ptr, p_oss->n_buff_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -4;
+ goto bail;
+ }
+ p_oss->n_buff_size_in_samples = (p_oss->n_buff_size_in_bytes / (bits_per_sample >> 3));
+ if (bits_per_sample == 8) {
+ p_oss->n_buff16_size_in_bytes = p_oss->n_buff_size_in_bytes << 1;
+ if (!(p_oss->p_buff16_ptr = tsk_realloc(p_oss->p_buff16_ptr, p_oss->n_buff16_size_in_bytes))) {
+ OSS_DEBUG_ERROR("Failed to allocate buffer with size = %u", p_oss->n_buff_size_in_bytes);
+ err = -5;
+ goto bail;
+ }
+ p_oss->n_buff16_size_in_samples = p_oss->n_buff_size_in_samples;
+ }
+
+ OSS_DEBUG_INFO("prepared: req_bits_per_sample=%d; req_channels=%d; req_rate=%d, resp_bits_per_sample=%d; resp_channels=%d; resp_rate=%d /// n_buff_size_in_samples=%u;n_buff_size_in_bytes=%u",
+ TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample, TMEDIA_PRODUCER(p_oss)->audio.channels, TMEDIA_PRODUCER(p_oss)->audio.rate,
+ bits_per_sample, channels, sample_rate,
+ p_oss->n_buff_size_in_samples, p_oss->n_buff_size_in_bytes);
+
+ // Set using supported (up to the resampler to convert to requested)
+ TMEDIA_PRODUCER(p_oss)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(p_oss)->audio.channels = channels;
+ TMEDIA_PRODUCER(p_oss)->audio.rate = sample_rate;
+ // TMEDIA_PRODUCER(p_oss)->audio.bits_per_sample = bits_per_sample;
+
+ p_oss->n_bits_per_sample = bits_per_sample;
+ p_oss->b_prepared = tsk_true;
+
+bail:
+ if (err) {
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ }
+ tsk_safeobj_unlock(p_oss);
+
+ return err;
+}
+
+static int tdav_producer_oss_start(tmedia_producer_t* self)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ int err = 0;
+
+ if (! p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ if (!p_oss->b_prepared) {
+ OSS_DEBUG_WARN("Not prepared");
+ err = -2;
+ goto bail;
+ }
+
+ if (p_oss->b_started) {
+ OSS_DEBUG_WARN("Already started");
+ goto bail;
+ }
+
+ /* start thread */
+ p_oss->b_started = tsk_true;
+ tsk_thread_create(&p_oss->tid[0], _tdav_producer_oss_record_thread, p_oss);
+
+ OSS_DEBUG_INFO("started");
+
+bail:
+ tsk_safeobj_unlock(p_oss);
+ return err;
+}
+
+static int tdav_producer_oss_pause(tmedia_producer_t* self)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+
+ if (!p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ OSS_DEBUG_INFO("paused");
+
+ return 0;
+}
+
+static int tdav_producer_oss_stop(tmedia_producer_t* self)
+{
+ tdav_producer_oss_t* p_oss = (tdav_producer_oss_t*)self;
+ int err;
+
+ if (!p_oss) {
+ OSS_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(p_oss);
+
+ /* must be cleared here so the record thread exits its loop before joining */
+ p_oss->b_started = tsk_false;
+
+ /* stop thread */
+ if (p_oss->tid[0]) {
+ tsk_thread_join(&(p_oss->tid[0]));
+ }
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ p_oss->b_prepared = tsk_false;
+
+ OSS_DEBUG_INFO("stopped");
+
+ tsk_safeobj_unlock(p_oss);
+
+ return 0;
+}
+
+
+//
+// Linux OSS producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_oss_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_oss_t *p_oss = (tdav_producer_oss_t*)self;
+ if (p_oss) {
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(p_oss));
+ /* init self */
+ p_oss->fd = -1;
+ tsk_safeobj_init(p_oss);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_oss_dtor(tsk_object_t * self)
+{
+ tdav_producer_oss_t *p_oss = (tdav_producer_oss_t *)self;
+ if (p_oss) {
+ /* stop */
+ if (p_oss->b_started) {
+ tdav_producer_oss_stop((tmedia_producer_t*)p_oss);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(p_oss));
+ /* deinit self */
+ if (p_oss->fd != -1) {
+ close(p_oss->fd);
+ p_oss->fd = -1;
+ }
+ TSK_FREE(p_oss->p_buff_ptr);
+ TSK_FREE(p_oss->p_buff16_ptr);
+ tsk_safeobj_deinit(p_oss);
+
+ OSS_DEBUG_INFO("*** destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_oss_def_s =
+{
+ sizeof(tdav_producer_oss_t),
+ tdav_producer_oss_ctor,
+ tdav_producer_oss_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_oss_plugin_def_s =
+{
+ &tdav_producer_oss_def_s,
+
+ tmedia_audio,
+ "Linux OSS producer",
+
+ tdav_producer_oss_set,
+ tdav_producer_oss_prepare,
+ tdav_producer_oss_start,
+ tdav_producer_oss_pause,
+ tdav_producer_oss_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_oss_plugin_def_t = &tdav_producer_oss_plugin_def_s;
+
+#endif /* HAVE_LINUX_SOUNDCARD_H */
diff --git a/tinyDAV/src/audio/tdav_consumer_audio.c b/tinyDAV/src/audio/tdav_consumer_audio.c
new file mode 100644
index 0000000..73d9688
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_consumer_audio.c
@@ -0,0 +1,272 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+
+/**@file tdav_consumer_audio.c
+* @brief Base class for all Audio consumers.
+*/
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_defaults.h"
+#include "tinymedia/tmedia_denoise.h"
+#include "tinymedia/tmedia_resampler.h"
+#include "tinymedia/tmedia_jitterbuffer.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_time.h"
+#include "tsk_debug.h"
+
+#if TSK_UNDER_WINDOWS
+# include <Winsock2.h> // timeval
+#elif defined(__SYMBIAN32__)
+# include <_timeval.h>
+#else
+# include <sys/time.h>
+#endif
+
+#define TDAV_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_CHANNELS_DEFAULT 2
+#define TDAV_RATE_DEFAULT 8000
+#define TDAV_PTIME_DEFAULT 20
+
+#define TDAV_AUDIO_GAIN_MAX 15
+
+/** Initialize audio consumer */
+int tdav_consumer_audio_init(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ TSK_DEBUG_INFO("tdav_consumer_audio_init()");
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if ((ret = tmedia_consumer_init(TMEDIA_CONSUMER(self)))){
+ return ret;
+ }
+
+ /* self (should be updated by prepare() using the codec's info) */
+ TMEDIA_CONSUMER(self)->audio.bits_per_sample = TDAV_BITS_PER_SAMPLE_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.ptime = TDAV_PTIME_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.in.channels = TDAV_CHANNELS_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.in.rate = TDAV_RATE_DEFAULT;
+ TMEDIA_CONSUMER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_consumer_gain(), TDAV_AUDIO_GAIN_MAX);
+
+ tsk_safeobj_init(self);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two consumers.
+* @param consumer1 The first consumer to compare.
+* @param consumer2 The second consumer to compare.
+* @retval Returns an integral value indicating the relationship between the two consumers:
+* <0 : @a consumer1 less than @a consumer2.<br>
+* 0 : @a consumer1 identical to @a consumer2.<br>
+* >0 : @a consumer1 greater than @a consumer2.<br>
+*/
+int tdav_consumer_audio_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(consumer1, consumer2, &ret);
+ return ret;
+}
+
+int tdav_consumer_audio_set(tdav_consumer_audio_t* self, const tmedia_param_t* param)
+{
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (param->plugin_type == tmedia_ppt_consumer){
+ if (param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "gain")){
+ int32_t gain = *((int32_t*)param->value);
+ if (gain < TDAV_AUDIO_GAIN_MAX && gain >= 0){
+ TMEDIA_CONSUMER(self)->audio.gain = (uint8_t)gain;
+ TSK_DEBUG_INFO("audio consumer gain=%u", gain);
+ }
+ else{
+ TSK_DEBUG_ERROR("%u is invalid as gain value", gain);
+ return -2;
+ }
+ }
+ else if (tsk_striequals(param->key, "volume")){
+ TMEDIA_CONSUMER(self)->audio.volume = TSK_TO_INT32((uint8_t*)param->value);
+ TMEDIA_CONSUMER(self)->audio.volume = TSK_CLAMP(0, TMEDIA_CONSUMER(self)->audio.volume, 100);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* put data (bytes not shorts) into the jitter buffer (consumers always have ptime of 20ms) */
+int tdav_consumer_audio_put(tdav_consumer_audio_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ int ret;
+
+ if (!self || !data || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+
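+ // open the jitter buffer lazily, on first put, using the consumer's current ptime, rate and channel count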
+ if (!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ uint32_t rate = TMEDIA_CONSUMER(self)->audio.out.rate ? TMEDIA_CONSUMER(self)->audio.out.rate : TMEDIA_CONSUMER(self)->audio.in.rate;
+ uint32_t channels = TMEDIA_CONSUMER(self)->audio.out.channels ? TMEDIA_CONSUMER(self)->audio.out.channels : tmedia_defaults_get_audio_channels_playback();
+ if ((ret = tmedia_jitterbuffer_open(self->jitterbuffer, TMEDIA_CONSUMER(self)->audio.ptime, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return ret;
+ }
+ }
+
+ ret = tmedia_jitterbuffer_put(self->jitterbuffer, (void*)data, data_size, proto_hdr);
+
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* get data from the jitter buffer (consumers should always have ptime of 20ms) */
+tsk_size_t tdav_consumer_audio_get(tdav_consumer_audio_t* self, void* out_data, tsk_size_t out_size)
+{
+ tsk_size_t ret_size = 0;
+ if (!self || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ tsk_safeobj_lock(self);
+
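+ // same lazy open as in put(): make sure the jitter buffer is ready before pulling from it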
+ if (!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
+ int ret;
+ uint32_t frame_duration = TMEDIA_CONSUMER(self)->audio.ptime;
+ uint32_t rate = TMEDIA_CONSUMER(self)->audio.out.rate ? TMEDIA_CONSUMER(self)->audio.out.rate : TMEDIA_CONSUMER(self)->audio.in.rate;
+ uint32_t channels = TMEDIA_CONSUMER(self)->audio.out.channels ? TMEDIA_CONSUMER(self)->audio.out.channels : tmedia_defaults_get_audio_channels_playback();
+ if ((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate, channels))){
+ TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
+ tsk_safeobj_unlock(self);
+ return 0;
+ }
+ }
+ ret_size = tmedia_jitterbuffer_get(TMEDIA_JITTER_BUFFER(self->jitterbuffer), out_data, out_size);
+
+ tsk_safeobj_unlock(self);
+
+ // denoiser
+ if (self->denoise && self->denoise->opened && (self->denoise->echo_supp_enabled || self->denoise->noise_supp_enabled)) {
+ if (self->denoise->echo_supp_enabled) {
+ // Echo process last frame
+ if (self->denoise->playback_frame && self->denoise->playback_frame->size) {
+ tmedia_denoise_echo_playback(self->denoise, self->denoise->playback_frame->data, (uint32_t)self->denoise->playback_frame->size);
+ }
+ if (ret_size){
+ // save
+ tsk_buffer_copy(self->denoise->playback_frame, 0, out_data, ret_size);
+ }
+ }
+
+#if 1 // suppress noise if not supported by remote party's encoder
+ // suppress noise
+ if (self->denoise->noise_supp_enabled && ret_size) {
+ tmedia_denoise_process_playback(self->denoise, out_data, (uint32_t)ret_size);
+ }
+#endif
+ }
+
+ return ret_size;
+}
+
+int tdav_consumer_audio_tick(tdav_consumer_audio_t* self)
+{
+ if (!self || !self->jitterbuffer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ return tmedia_jitterbuffer_tick(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+}
+
+/* set denoiser */
+void tdav_consumer_audio_set_denoise(tdav_consumer_audio_t* self, struct tmedia_denoise_s* denoise)
+{
+ tsk_safeobj_lock(self);
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+ self->denoise = (struct tmedia_denoise_s*)tsk_object_ref(denoise);
+ tsk_safeobj_unlock(self);
+}
+
+void tdav_consumer_audio_set_jitterbuffer(tdav_consumer_audio_t* self, struct tmedia_jitterbuffer_s* jitterbuffer)
+{
+ tsk_safeobj_lock(self);
+ TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
+ self->jitterbuffer = (struct tmedia_jitterbuffer_s*)tsk_object_ref(jitterbuffer);
+ tsk_safeobj_unlock(self);
+}
+
+/** Reset jitterbuffer */
+int tdav_consumer_audio_reset(tdav_consumer_audio_t* self){
+ int ret;
+ if (!self) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+ ret = tmedia_jitterbuffer_reset(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
+ tsk_safeobj_unlock(self);
+
+ return ret;
+}
+
+/* tsk_safeobj_lock(self); */
+/* tsk_safeobj_unlock(self); */
+
+/** DeInitialize audio consumer */
+int tdav_consumer_audio_deinit(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if ((ret = tmedia_consumer_deinit(TMEDIA_CONSUMER(self)))){
+ /* return ret; */
+ }
+
+ /* self */
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+ TSK_OBJECT_SAFE_FREE(self->resampler);
+ TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
+
+ tsk_safeobj_deinit(self);
+
+ return 0;
+}
+
diff --git a/tinyDAV/src/audio/tdav_jitterbuffer.c b/tinyDAV/src/audio/tdav_jitterbuffer.c
new file mode 100644
index 0000000..4fd1010
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_jitterbuffer.c
@@ -0,0 +1,1036 @@
+/* File from: http://cms.speakup.nl/tech/opensource/jitterbuffer/verslag-20051209.pdf/ */
+
+/*******************************************************
+* jitterbuffer:
+* an application-independent jitterbuffer, which tries
+* to achieve the maximum user perception during a call.
+* For more information look at:
+* http://www.speakup.nl/opensource/jitterbuffer/
+*
+* Copyright on this file is held by:
+* - Jesse Kaijen <jesse@speakup.nl>
+* - SpeakUp <info@speakup.nl>
+*
+* Contributors:
+* Jesse Kaijen <jesse@speakup.nl>
+*
+* This program is free software, distributed under the terms of:
+* - the GNU Lesser (Library) General Public License
+* - the Mozilla Public License
+*
+* if you are interested in a different licence type, please contact us.
+*
+* How to use the jitterbuffer, please look at the comments
+* in the headerfile.
+*
+* Further details on specific implementations,
+* please look at the comments in the code file.
+*/
+#include "tinydav/audio/tdav_jitterbuffer.h"
+
+#if !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
+
+#include "tsk_memory.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#define jb_warn(...) (warnf ? warnf(__VA_ARGS__) : (void)0)
+#define jb_err(...) (errf ? errf(__VA_ARGS__) : (void)0)
+#define jb_dbg(...) (dbgf ? dbgf(__VA_ARGS__) : (void)0)
+
+//public functions
+jitterbuffer *jb_new();
+void jb_reset(jitterbuffer *jb);
+void jb_reset_all(jitterbuffer *jb);
+void jb_destroy(jitterbuffer *jb);
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings);
+
+void jb_get_info(jitterbuffer *jb, jb_info *stats);
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings);
+float jb_guess_mos(float p, long d, int codec);
+int jb_has_frames(jitterbuffer *jb);
+
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec);
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl);
+
+
+
+//private functions
+static void set_default_settings(jitterbuffer *jb);
+static void reset(jitterbuffer *jb);
+static long find_pointer(long *array, long max_index, long value);
+static void frame_free(jb_frame *frame);
+
+static void put_control(jitterbuffer *jb, void *data, int type, long ts);
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec);
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec);
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec);
+
+static int get_control(jitterbuffer *jb, void **data);
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl);
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff);
+
+static int get_next_frametype(jitterbuffer *jb, long ts);
+static long get_next_framets(jitterbuffer *jb);
+static jb_frame *get_frame(jitterbuffer *jb, long ts);
+static jb_frame *get_all_frames(jitterbuffer *jb);
+
+//debug...
+static jb_output_function_t warnf, errf, dbgf;
+void jb_setoutput(jb_output_function_t warn, jb_output_function_t err, jb_output_function_t dbg) {
+ warnf = warn;
+ errf = err;
+ dbgf = dbg;
+}
+
+
+/***********
+ * create a new jitterbuffer
+ * return NULL if malloc doesn't work
+ * else return jb with default_settings.
+ */
+jitterbuffer *jb_new()
+{
+ jitterbuffer *jb;
+
+ jb_dbg("N");
+ jb = tsk_calloc(1, sizeof(jitterbuffer));
+ if (!jb) {
+ jb_err("cannot allocate jitterbuffer\n");
+ return NULL;
+ }
+ set_default_settings(jb);
+ reset(jb);
+ return jb;
+}
+
+
+/***********
+ * empty voice messages
+ * reset statistics
+ * keep the settings
+ */
+void jb_reset(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("R");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset()\n");
+ return;
+ }
+
+ //free voice
+ while(jb->voiceframes) {
+ frame = get_all_frames(jb);
+ frame_free(frame);
+ }
+ //reset stats
+ memset(&(jb->info),0,sizeof(jb_info) );
+ // set default settings
+ reset(jb);
+}
+
+
+/***********
+ * empty nonvoice messages
+ * empty voice messages
+ * reset statistics
+ * reset settings to default
+ */
+void jb_reset_all(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("r");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset_all()\n");
+ return;
+ }
+
+ // free nonvoice
+ while(jb->controlframes) {
+ frame = jb->controlframes;
+ jb->controlframes = frame->next;
+ frame_free(frame);
+ }
+ // free voice and reset statistics is done by jb_reset
+ jb_reset(jb);
+ set_default_settings(jb);
+}
+
+
+/***********
+ * destroy the jitterbuffer
+ * free all the [non]voice frames with reset_all
+ * free the jitterbuffer
+ */
+void jb_destroy(jitterbuffer *jb)
+{
+ jb_dbg("D");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_destroy()\n");
+ return;
+ }
+
+ jb_reset_all(jb);
+ free(jb);
+}
+
+
+/***********
+ * Set settings for the jitterbuffer.
+ * Only if a setting is defined it will be written
+ * in the jb->settings.
+ * This means that no setting can be set to zero
+ */
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings)
+{
+ jb_dbg("S");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_set_settings()\n");
+ return;
+ }
+
+ if (settings->min_jb) {
+ jb->settings.min_jb = settings->min_jb;
+ }
+ if (settings->max_jb) {
+ jb->settings.max_jb = settings->max_jb;
+ }
+ if (settings->max_successive_interp) {
+ jb->settings.max_successive_interp = settings->max_successive_interp;
+ }
+ if (settings->extra_delay) {
+ jb->settings.extra_delay = settings->extra_delay;
+ }
+ if (settings->wait_grow) {
+ jb->settings.wait_grow = settings->wait_grow;
+ }
+ if (settings->wait_shrink) {
+ jb->settings.wait_shrink = settings->wait_shrink;
+ }
+ if (settings->max_diff) {
+ jb->settings.max_diff = settings->max_diff;
+ }
+}
+
+
+/***********
+ * validates the statistics
+ * the losspct due the jitterbuffer will be calculated.
+ * delay and delay_target will be calculated
+ * *stats = info
+ */
+void jb_get_info(jitterbuffer *jb, jb_info *stats)
+{
+ long max_index, pointer;
+
+ jb_dbg("I");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get_info()\n");
+ return;
+ }
+
+ jb->info.delay = jb->current - jb->min;
+ jb->info.delay_target = jb->target - jb->min;
+
+ //calculate the losspct...
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ if (max_index>1) {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index, jb->current);
+ jb->info.losspct = ((max_index - pointer)*100/max_index);
+ if (jb->info.losspct < 0) {
+ jb->info.losspct = 0;
+ }
+ } else {
+ jb->info.losspct = 0;
+ }
+
+ *stats = jb->info;
+}
+
+
+/***********
+ * gives the settings for this jitterbuffer
+ * *settings = settings
+ */
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings)
+{
+ jb_dbg("S");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get_settings()\n");
+ return;
+ }
+
+ *settings = jb->settings;
+}
+
+
+/***********
+ * returns an estimate on the MOS with given loss, delay and codec
+ * if the formula is not present the default will be used
+ * please use the JB_CODEC_OTHER if you want to define your own formula
+ *
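+ * e.g. G.711 with PLC (JB_CODEC_G711x_PLC), 1% loss and 60 ms delay: 4.42 - 0.087*1 - 0.0071*60 = ~3.9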
+ */
+float jb_guess_mos(float p, long d, int codec)
+{
+ float result;
+
+ switch (codec) {
+ case JB_CODEC_GSM_EFR:
+ result = (4.31f - 0.23f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G723_1:
+ result = (3.99f - 0.16f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G729:
+ case JB_CODEC_G729A:
+ result = (4.13f - 0.14f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G711x_PLC:
+ result = (4.42f - 0.087f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G711x:
+ result = (4.42f - 0.63f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_OTHER:
+ default:
+ result = (4.42f - 0.63f*p - 0.0071f*d);
+
+ }
+ return result;
+}
+
+
+/***********
+ * if there are any frames left in JB returns JB_OK, otherwise returns JB_EMPTY
+ */
+int jb_has_frames(jitterbuffer *jb)
+{
+ jb_dbg("H");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_has_frames()\n");
+ return JB_NOJB;
+ }
+
+ if(jb->controlframes || jb->voiceframes) {
+ return JB_OK;
+ } else {
+ return JB_EMPTY;
+ }
+}
+
+
+/***********
+ * Put a packet into the jitterbuffers
+ * Only the timestamps of voicepackets are put in the history
+ * because the jitterbuffer only works for voicepackets
+ * don't put packets twice in history and queue (e.g. transmitting every frame twice)
+ * keep track of statistics
+ */
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec)
+{
+ long pointer, max_index;
+
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_put()\n");
+ return;
+ }
+
+ jb->info.frames_received++;
+
+ if (type == JB_TYPE_CONTROL) {
+ //put the packet into the control-queue of the jitterbuffer
+ jb_dbg("pC");
+ put_control(jb,data,type,ts);
+
+ } else if (type == JB_TYPE_VOICE) {
+ // only add voice frames that aren't already in the buffer
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, ts);
+ if (jb->hist_sorted_timestamp[pointer]==ts) { //timestamp already in queue
+ jb_dbg("pT");
+ free(data);
+ jb->info.frames_dropped_twice++;
+ } else { //add
+ jb_dbg("pV");
+ /* add voicepacket to history */
+ put_history(jb,ts,now,ms,codec);
+ /*calculate jitterbuffer size*/
+ calculate_info(jb, ts, now, codec);
+ /*put the packet into the queue of the jitterbuffer*/
+ put_voice(jb,data,type,ms,ts,codec);
+ }
+
+ } else if (type == JB_TYPE_SILENCE){ //silence
+ jb_dbg("pS");
+ put_voice(jb,data,type,ms,ts,codec);
+
+ } else {//should NEVER happen
+ jb_err("jb_put(): type not known\n");
+ free(data);
+ }
+}
+
+
+/***********
+ * control frames have a higher priority than voice frames
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it's not time to play voice and no control frame is available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ */
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl)
+{
+ int result;
+
+ jb_dbg("A");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get()\n");
+ return JB_NOJB;
+ }
+
+ result = get_control(jb, data);
+ if (result != JB_OK ) { //no control message available maybe there is voice...
+ result = get_voice(jb, data, now, interpl);
+ }
+ return result;
+}
+
+
+/***********
+ * set all the settings to default
+ */
+static void set_default_settings(jitterbuffer *jb)
+{
+ jb->settings.min_jb = JB_MIN_SIZE;
+ jb->settings.max_jb = JB_MAX_SIZE;
+ jb->settings.max_successive_interp = JB_MAX_SUCCESSIVE_INTERP;
+ jb->settings.extra_delay = JB_ALLOW_EXTRA_DELAY;
+ jb->settings.wait_grow = JB_WAIT_GROW;
+ jb->settings.wait_shrink = JB_WAIT_SHRINK;
+ jb->settings.max_diff = JB_MAX_DIFF;
+}
+
+
+/***********
+ * reset the jitterbuffer so we can start in silence and
+ * we start with a new history
+ */
+static void reset(jitterbuffer *jb)
+{
+ jb->hist_pointer = 0; //start over
+ jb->silence_begin_ts = 0; //no begin_ts defined
+ jb->info.silence =1; //we always start in silence
+}
+
+
+/***********
+ * Search algorithm
+ * @REQUIRE max_index is within array
+ *
+ * Find the position of value in hist_sorted_delay
+ * if value doesn't exist return first pointer where array[low]>value
+ * int low; //the lowest index being examined
+ * int max_index; //the highest index being examined
+ * int mid; //the middle index between low and max_index.
+ * mid ==(low+max_index)/2
+ * at the end low is the position of value or where array[low]>value
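+ * e.g. find_pointer({5,7,7,9}, 3, 7) returns 2 (the last of the duplicate 7s)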
+ */
+static long find_pointer(long *array, long max_index, long value)
+{
+ register long low, mid, high;
+ low = 0;
+ high = max_index;
+ while (low<=high) {
+ mid= (low+high)/2;
+ if (array[mid] < value) {
+ low = mid+1;
+ } else {
+ high = mid-1;
+ }
+ }
+ while(low < max_index && (array[low]==array[(low+1)]) ) {
+ low++;
+ }
+ return low;
+}
+
+
+/***********
+ * free the given frame, afterwards the framepointer is undefined
+ */
+static void frame_free(jb_frame *frame)
+{
+ if (frame->data) {
+ free(frame->data);
+ }
+ free(frame);
+}
+
+
+/***********
+ * put a nonvoice frame into the nonvoice queue
+ */
+static void put_control(jitterbuffer *jb, void *data, int type, long ts)
+{
+ jb_frame *frame, *p;
+
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ return;
+ }
+ frame->data = data;
+ frame->ts = ts;
+ frame->type = type;
+ frame->next = NULL;
+ data = NULL;//to avoid stealing memory
+
+ p = jb->controlframes;
+ if (p) { //there are already control messages
+ if (ts < p->ts) {
+ jb->controlframes = frame;
+ frame->next = p;
+ } else {
+ while (p->next && (ts >=p->next->ts)) {//sort on timestamps! so find place to put...
+ p = p->next;
+ }
+ if (p->next) {
+ frame->next = p->next;
+ }
+ p->next = frame;
+ }
+ } else {
+ jb->controlframes = frame;
+ }
+}
+
+
+/***********
+ * put a voice or silence frame into the jitterbuffer
+ */
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec)
+{
+ jb_frame *frame, *p;
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ return;
+ }
+
+ frame->data = data;
+ frame->ts = ts;
+ frame->ms = ms;
+ frame->type = type;
+ frame->codec = codec;
+
+ data = NULL; //to avoid stealing the memory location
+ /*
+ * frames are a circular list, jb->voiceframes points to the lowest ts,
+ * jb->voiceframes->prev points to the highest ts
+ */
+ if(!jb->voiceframes) { /* queue is empty */
+ jb->voiceframes = frame;
+ frame->next = frame;
+ frame->prev = frame;
+ } else {
+ p = jb->voiceframes;
+ if(ts < p->prev->ts) { //frame is out of order
+ jb->info.frames_ooo++;
+ }
+ if (ts < p->ts) { //frame is lowest, let voiceframes point to it!
+ jb->voiceframes = frame;
+ } else {
+ while(ts < p->prev->ts ) {
+ p = p->prev;
+ }
+ }
+ frame->next = p;
+ frame->prev = p->prev;
+ frame->next->prev = frame;
+ frame->prev->next = frame;
+ }
+}
+
+
+/***********
+ * puts the timestamps of a received packet in the history of *jb
+ * for later calculations of the size of jitterbuffer *jb.
+ *
+ * summary of function:
+ * - calculate delay difference
+ * - delete old value from hist & sorted_history_delay & sorted_history_timestamp if needed
+ * - add new value to history & sorted_history_delay & sorted_history_timestamp
+ * - we keep sorted_history_delay for calculations
+ * - we keep sorted_history_timestamp for ensuring each timestamp isn't put twice in the buffer.
+ */
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec)
+{
+ jb_hist_element out, in;
+ long max_index, pointer, location;
+
+ // max_index is the highest possible index
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ location = (jb->hist_pointer % JB_HISTORY_SIZE);
+
+ // we want to delete a value from the jitterbuffer
+ // only when we are through the history.
+ if (jb->hist_pointer > JB_HISTORY_SIZE-1) {
+ /* the value we need to delete from sorted histories */
+ out = jb->hist[location];
+ //delete delay from hist_sorted_delay
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index, out.delay);
+ /* move over; pointer is the position of the kicked value */
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_delay[pointer]),
+ &(jb->hist_sorted_delay[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+
+ //delete timestamp from hist_sorted_timestamp
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, out.ts);
+ /* move over; pointer is the position of the kicked value */
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_timestamp[pointer]),
+ &(jb->hist_sorted_timestamp[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+ }
+
+ in.delay = now - ts; //delay of current packet
+ in.ts = ts; //timestamp of current packet
+ in.ms = ms; //length of current packet
+ in.codec = codec; //codec of current packet
+
+ /* adding the new delay to the sorted history
+ * first special cases:
+ * - delay is the first history stamp
+ * - delay > highest history stamp
+ */
+ if (max_index==0 || in.delay >= jb->hist_sorted_delay[max_index-1]) {
+ jb->hist_sorted_delay[max_index] = in.delay;
+ } else {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], (max_index-1), in.delay);
+ /* move over and add delay */
+ memmove( &(jb->hist_sorted_delay[pointer+1]),
+ &(jb->hist_sorted_delay[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_delay[pointer] = in.delay;
+ }
+
+ /* adding the new timestamp to the sorted history
+ * first special cases:
+ * - timestamp is the first history stamp
+ * - timestamp > highest history stamp
+ */
+ if (max_index==0 || in.ts >= jb->hist_sorted_timestamp[max_index-1]) {
+ jb->hist_sorted_timestamp[max_index] = in.ts;
+ } else {
+
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], (max_index-1), in.ts);
+ /* move over and add timestamp */
+ memmove( &(jb->hist_sorted_timestamp[pointer+1]),
+ &(jb->hist_sorted_timestamp[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_timestamp[pointer] = in.ts;
+ }
+
+ /* put the jb_hist_element in the history
+ * then increase hist_pointer for next time
+ */
+ jb->hist[location] = in;
+ jb->hist_pointer++;
+}
+
+
+/***********
+ * this tries to make a jitterbuffer that behaves like
+ * the jitterbuffer proposed in this article:
+ * Adaptive Playout Buffer Algorithm for Enhancing Perceived Quality of Streaming Applications
+ * by: Kouhei Fujimoto & Shingo Ata & Masayuki Murata
+ * http://www.nal.ics.es.osaka-u.ac.jp/achievements/web2002/pdf/journal/k-fujimo02TSJ-AdaptivePlayoutBuffer.pdf
+ *
+ * it calculates the jitter and the minimum delay, and
+ * gets the best delay for the specified codec
+ */
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec)
+{
+ long diff, size, max_index, d, d1, d2, n;
+ float p, p1, p2, A, B;
+ //size = how many items there are in the history
+ size = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE;
+ max_index = size-1;
+
+ /*
+ * the Inter-Quartile Range can be used for estimating jitter
+ * http://www.slac.stanford.edu/comp/net/wan-mon/tutorial.html#variable
+ * just take the square root of the iqr for jitter
+ */
+ jb->info.iqr = jb->hist_sorted_delay[max_index*3/4] - jb->hist_sorted_delay[max_index/4];
+
+
+ /*
+ * The RTP way of calculating jitter.
+ * This one is used at the moment, although it is not correct.
+ * But in this way the other side understands us.
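+ * (this is the interarrival jitter estimator from RFC 3550: J += (|D| - J) / 16)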
+ */
+ diff = now - ts - jb->last_delay;
+ if (!jb->last_delay) {
+ diff = 0; //this makes sure we won't get odd jitter due to the first ts.
+ }
+ jb->last_delay = now - ts;
+ if (diff <0){
+ diff = -diff;
+ }
+ jb->info.jitter = jb->info.jitter + (diff - jb->info.jitter)/16;
+
+ /* jb->min is minimum delay in hist_sorted_delay, we don't look at the lowest 2% */
+ /* because sometimes there are odd delays in there */
+ jb->min = jb->hist_sorted_delay[(max_index*2/100)];
+
+ /*
+ * calculating the preferred size of the jitterbuffer:
+ * instead of calculating the optimum delay using the Pareto equation
+ * I look at the array of sorted delays and choose my optimum from there.
+ * We always walk through a percentage of the history, because imagine the following tail:
+ * [...., 12, 300, 301, 302]
+ * here we want to discard the last three, but that won't happen unless we walk the array.
+ * the number of frames we walk depends on how scattered the sorted delays are.
+ * For that we look at the iqr. The dependencies of the iqr are based on
+ * tests we've done here in the lab. But are not optimized.
+ */
+ //init:
+ //the highest delay..
+ d = d1= d2 = jb->hist_sorted_delay[max_index]- jb->min;
+ A=B=LONG_MIN;
+ p = p2 =0;
+ n=0;
+ p1 = 5; //always look at the top 5%
+ if (jb->info.iqr >200) { //with more jitter look at more delays
+ p1=25;
+ } else if (jb->info.iqr >100) {
+ p1=20;
+ } else if (jb->info.iqr >50){
+ p1=11;
+ }
+
+ //find the optimum delay..
+ while(max_index>10 && (B > A ||p2<p1)) { // By MDI: from ">=" to ">"
+ //the packetloss with this delay
+ p2 =(n*100.0f/size);
+ // estimate MOS-value
+ B = jb_guess_mos(p2,d2,codec);
+ if (B > A) {
+ p = p2;
+ d = d2;
+ A = B;
+ }
+ d1 = d2;
+ //find next delay != delay so the same delay isn't calculated twice
+ //don't look further if we have seen half of the history
+ while((d2>=d1) && ((n*2)<max_index) ) {
+ n++;
+ d2 = jb->hist_sorted_delay[(max_index-n)] - jb->min;
+ }
+ }
+ //the targeted size of the jitterbuffer
+ if (jb->settings.min_jb && (jb->settings.min_jb > d) ) {
+ jb->target = jb->min + jb->settings.min_jb;
+ } else if (jb->settings.max_jb && (jb->settings.max_jb > d) ){
+ jb->target = jb->min + jb->settings.max_jb;
+ } else {
+ jb->target = jb->min + d;
+ }
+}
+
+
+/***********
+ * if there is a nonvoice frame it will be returned [*data] and the frame
+ * will be made free
+ */
+static int get_control(jitterbuffer *jb, void **data)
+{
+ jb_frame *frame;
+ int result;
+
+ frame = jb->controlframes;
+ if (frame) {
+ jb_dbg("gC");
+ *data = frame->data;
+ frame->data = NULL;
+ jb->controlframes = frame->next;
+ frame_free(frame);
+ result = JB_OK;
+ } else {
+ result = JB_NOFRAME;
+ }
+ return result;
+}
+
+
+/***********
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it's not time to play voice and/or no frame is available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ *
+ * if the next frame is a silence frame we will go in silence-mode
+ * each new instance of the jitterbuffer will start in silence mode
+ * in silence mode we will set the jitterbuffer to the size we want
+ * when we are not in silence mode get_voicecase will handle the rest.
+ */
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl)
+{
+ jb_frame *frame;
+ long diff;
+ int result;
+
+ diff = jb->target - jb->current;
+
+ //if the next frame is a silence frame, go in silence mode...
+ if((get_next_frametype(jb, now - jb->current) == JB_TYPE_SILENCE) ) {
+ jb_dbg("gs");
+ frame = get_frame(jb, now - jb->current);
+ *data = frame->data;
+ frame->data = NULL;
+ jb->info.silence =1;
+ jb->silence_begin_ts = frame->ts;
+ frame_free(frame);
+ result = JB_OK;
+ } else {
+ if(jb->info.silence) { // we are in silence
+ /*
+ * During silence we can set the jitterbuffer size to the size
+ * we want...
+ */
+ if (diff) {
+ jb->current = jb->target;
+ }
+ frame = get_frame(jb, now - jb->current);
+ if (frame) {
+ if (jb->silence_begin_ts && frame->ts < jb->silence_begin_ts) {
+ jb_dbg("gL");
+ /* voice frame is late, next!*/
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("gP");
+ /* voice frame */
+ jb->info.silence = 0;
+ jb->silence_begin_ts = 0;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->info.last_voice_ms = frame->ms;
+ *data = frame->data;
+ frame->data = NULL;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { //no frame
+ jb_dbg("gS");
+ result = JB_EMPTY;
+ }
+ } else { //voice case
+ result = get_voicecase(jb,data,now,interpl,diff);
+ }
+ }
+ return result;
+}
+
+
+/***********
+ * The voicecase has four 'options'
+ * - difference is way off, reset
+ * - diff > 0, we may need to grow
+ * - diff < 0, we may need to shrink
+ * - everything else
+ */
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff)
+{
+ jb_frame *frame;
+ int result;
+
+ // * - difference is way off, reset
+ if (diff > jb->settings.max_diff || -diff > jb->settings.max_diff) {
+ jb_err("wakko diff in get_voicecase\n");
+ reset(jb); //reset hist because the timestamps are wakko.
+ result = JB_NOFRAME;
+ //- diff > 0, we may need to grow
+ } else if ((diff > 0) &&
+ (now > (jb->last_adjustment + jb->settings.wait_grow)
+ || (now + jb->current + interpl) < get_next_framets(jb) ) ) { //grow
+ /* first try to grow */
+ if (diff<interpl/2) {
+ jb_dbg("ag");
+ jb->current +=diff;
+ } else {
+ jb_dbg("aG");
+ /* grow by interp frame len */
+ jb->current += interpl;
+ }
+ jb->last_adjustment = now;
+ result = get_voice(jb, data, now, interpl);
+ //- diff < 0, we may need to shrink
+ } else if ( (diff < 0)
+ && (now > (jb->last_adjustment + jb->settings.wait_shrink))
+ && ((-diff) > jb->settings.extra_delay) ) {
+ /* now try to shrink
+ * if there is a frame shrink by frame length
+ * otherwise shrink by interpl
+ */
+ jb->last_adjustment = now;
+
+ frame = get_frame(jb, now - jb->current);
+ if(frame) {
+ jb_dbg("as");
+ /* shrink by frame size we're throwing out */
+ jb->info.frames_dropped++;
+ jb->current -= frame->ms;
+ frame_free(frame);
+ } else {
+ jb_dbg("aS");
+ /* shrink by interpl */
+ jb->current -= interpl;
+ }
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ /* if it is not yet time to play, result = JB_NOFRAME
+ * else we try to play a frame: if a frame is available
+ * and not late, it is played; otherwise,
+ * if one is available (but late), it is dropped and the next one is tried;
+ * the last option is interpolating
+ */
+ if (now - jb->current < jb->next_voice_time) {
+ jb_dbg("aN");
+ result = JB_NOFRAME;
+ } else {
+ frame = get_frame(jb, now - jb->current);
+ if (frame) { //there is a frame
+ /* voice frame is late */
+ if(frame->ts < jb->next_voice_time) { //late
+ jb_dbg("aL");
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("aP");
+ /* normal case; return the frame, increment stuff */
+ *data = frame->data;
+ frame->data = NULL;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->cnt_successive_interp = 0;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { // no frame, thus interpolate
+ jb->cnt_successive_interp++;
+ /* assume silence instead of continuing to interpolate */
+ if (jb->settings.max_successive_interp && jb->cnt_successive_interp >= jb->settings.max_successive_interp) {
+ jb->info.silence = 1;
+ jb->silence_begin_ts = jb->next_voice_time;
+ }
+ jb_dbg("aI");
+ jb->next_voice_time += interpl;
+ result = JB_INTERP;
+ }
+ }
+ }
+ return result;
+
+}
+
+
+/***********
+ * if there are frames and the next frame->ts is smaller than or equal to ts
+ * return type of next frame.
+ * else return 0
+ */
+static int get_next_frametype(jitterbuffer *jb, long ts)
+{
+ jb_frame *frame;
+ int result;
+
+ result = 0;
+ frame = jb->voiceframes;
+ if (frame && frame->ts <= ts) {
+ result = frame->type;
+ }
+ return result;
+}
+
+
+/***********
+ * returns ts from next frame in jb->voiceframes
+ * or returns LONG_MAX if there is no frame
+ */
+static long get_next_framets(jitterbuffer *jb)
+{
+ if (jb->voiceframes) {
+ return jb->voiceframes->ts;
+ }
+ return LONG_MAX;
+}
+
+
+/***********
+ * if there is a frame in jb->voiceframes and
+ * it has a timestamp smaller than or equal to ts
+ * this frame will be returned and
+ * removed from the queue
+ */
+static jb_frame *get_frame(jitterbuffer *jb, long ts)
+{
+ jb_frame *frame;
+
+ frame = jb->voiceframes;
+ if (frame && frame->ts <= ts) {
+ if(frame->next == frame) {
+ jb->voiceframes = NULL;
+ } else {
+ /* remove this frame */
+ frame->prev->next = frame->next;
+ frame->next->prev = frame->prev;
+ jb->voiceframes = frame->next;
+ }
+ return frame;
+ }
+ return NULL;
+}
+
+/***********
+ * if there is a frame in jb->voiceframes
+ * this frame will be unconditionally returned and
+ * removed from the queue
+ */
+static jb_frame *get_all_frames(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ frame = jb->voiceframes;
+ if (frame) {
+ if(frame->next == frame) {
+ jb->voiceframes = NULL;
+ } else {
+ /* remove this frame */
+ frame->prev->next = frame->next;
+ frame->next->prev = frame->prev;
+ jb->voiceframes = frame->next;
+ }
+ return frame;
+ }
+ return NULL;
+}
+
+
+#endif // !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
diff --git a/tinyDAV/src/audio/tdav_producer_audio.c b/tinyDAV/src/audio/tdav_producer_audio.c
new file mode 100644
index 0000000..8c73c9f
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_producer_audio.c
@@ -0,0 +1,133 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_audio.c
+ * @brief Base class for all Audio producers.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+
+ */
+#include "tinydav/audio/tdav_producer_audio.h"
+
+#include "tinymedia/tmedia_defaults.h"
+
+#define TDAV_PRODUCER_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_PRODUCER_CHANNELS_DEFAULT 1
+#define TDAV_PRODUCER_RATE_DEFAULT 8000
+#define TDAV_PRODUCER_PTIME_DEFAULT 20
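+// Note: the gain value is applied as a left bit-shift on the PCM samples (see _tdav_session_audio_apply_gain() in tdav_session_audio.c), hence the small maximum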
+#define TDAV_PRODUCER_AUDIO_GAIN_MAX 15
+
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+/** Initialize Audio producer
+* @param self The producer to initialize
+*/
+int tdav_producer_audio_init(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if((ret = tmedia_producer_init(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ /* self (should be updated by prepare() using the codec's info) */
+ TMEDIA_PRODUCER(self)->audio.bits_per_sample = TDAV_PRODUCER_BITS_PER_SAMPLE_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.channels = TDAV_PRODUCER_CHANNELS_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.rate = TDAV_PRODUCER_RATE_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.ptime = TDAV_PRODUCER_PTIME_DEFAULT;
+ TMEDIA_PRODUCER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_producer_gain(), TDAV_PRODUCER_AUDIO_GAIN_MAX);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two producers.
+* @param producer1 The first producer to compare.
+* @param producer2 The second producer to compare.
+* @retval Returns an integral value indicating the relationship between the two producers:
+* <0 : @a producer1 less than @a producer2.<br>
+* 0 : @a producer1 identical to @a producer2.<br>
+* >0 : @a producer1 greater than @a producer2.<br>
+*/
+int tdav_producer_audio_cmp(const tsk_object_t* producer1, const tsk_object_t* producer2)
+{
+ int ret;
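+ /* default comparison: compare the two producers by object address */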
+ tsk_subsat_int32_ptr(producer1, producer2, &ret);
+ return ret;
+}
+
+int tdav_producer_audio_set(tdav_producer_audio_t* self, const tmedia_param_t* param)
+{
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(param->plugin_type == tmedia_ppt_producer){
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "gain")){
+ int32_t gain = *((int32_t*)param->value);
+ if(gain<TDAV_PRODUCER_AUDIO_GAIN_MAX && gain>=0){
+ TMEDIA_PRODUCER(self)->audio.gain = (uint8_t)gain;
+ TSK_DEBUG_INFO("audio producer gain=%u", gain);
+ }
+ else{
+ TSK_DEBUG_ERROR("%u is invalid as gain value", gain);
+ return -2;
+ }
+ }
+ else if(tsk_striequals(param->key, "volume")){
+ TMEDIA_PRODUCER(self)->audio.volume = TSK_TO_INT32((uint8_t*)param->value);
+ TMEDIA_PRODUCER(self)->audio.volume = TSK_CLAMP(0, TMEDIA_PRODUCER(self)->audio.volume, 100);
+ TSK_DEBUG_INFO("audio producer volume=%u", TMEDIA_PRODUCER(self)->audio.volume);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/** Deinitialize a producer
+*/
+int tdav_producer_audio_deinit(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if((ret = tmedia_producer_deinit(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ return ret;
+}
\ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_session_audio.c b/tinyDAV/src/audio/tdav_session_audio.c
new file mode 100644
index 0000000..f12e801
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_session_audio.c
@@ -0,0 +1,991 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_audio.c
+* @brief Audio Session plugin.
+*
+* @author Mamadou Diop <diopmamadou(at)doubango.org>
+* @contributors: See $(DOUBANGO_HOME)\contributors.txt
+*/
+#include "tinydav/audio/tdav_session_audio.h"
+
+//#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_resampler.h"
+#include "tinymedia/tmedia_denoise.h"
+#include "tinymedia/tmedia_jitterbuffer.h"
+#include "tinymedia/tmedia_consumer.h"
+#include "tinymedia/tmedia_producer.h"
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tinyrtp/trtp_manager.h"
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_timer.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY 5
+
+static int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id);
+static struct tdav_session_audio_dtmfe_s* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E);
+static void _tdav_session_audio_apply_gain(void* buffer, int len, int bps, int gain);
+static tmedia_resampler_t* _tdav_session_audio_resampler_create(int32_t bytes_per_sample, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, void** resampler_buffer, tsk_size_t *resampler_buffer_size);
+
+/* DTMF event object */
+typedef struct tdav_session_audio_dtmfe_s
+{
+ TSK_DECLARE_OBJECT;
+
+ tsk_timer_id_t timer_id;
+ trtp_rtp_packet_t* packet;
+
+ const tdav_session_audio_t* session;
+}
+tdav_session_audio_dtmfe_t;
+extern const tsk_object_def_t *tdav_session_audio_dtmfe_def_t;
+
+// RTP/RTCP callback (From the network to the consumer)
+static int tdav_session_audio_rtp_cb(const void* callback_data, const struct trtp_rtp_packet_s* packet)
+{
+ tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
+ tmedia_codec_t* codec = tsk_null;
+ tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+ int ret = -1;
+
+ if (!audio || !packet || !packet->header) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ goto bail;
+ }
+
+ if (audio->is_started && base->consumer && base->consumer->is_started) {
+ tsk_size_t out_size = 0;
+
+ // Find the codec to use to decode the RTP payload
+ if (!audio->decoder.codec || audio->decoder.payload_type != packet->header->payload_type) {
+ tsk_istr_t format;
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+ tsk_itoa(packet->header->payload_type, &format);
+ if (!(audio->decoder.codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->neg_codecs, format)) || !audio->decoder.codec->plugin || !audio->decoder.codec->plugin->decode){
+ TSK_DEBUG_ERROR("%s is not a valid payload for this session", format);
+ ret = -2;
+ goto bail;
+ }
+ audio->decoder.payload_type = packet->header->payload_type;
+ }
+ // ref() the codec so it can still be used for a short time after stop() (which does SAFE_FREE(codec))
+ if (!(codec = tsk_object_ref(TSK_OBJECT(audio->decoder.codec)))) {
+ TSK_DEBUG_ERROR("Failed to get decoder codec");
+ goto bail;
+ }
+
+ // Open codec if not already done
+ if (!TMEDIA_CODEC(codec)->opened) {
+ tsk_safeobj_lock(base);
+ if ((ret = tmedia_codec_open(codec))) {
+ tsk_safeobj_unlock(base);
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", codec->plugin->desc);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+ goto bail;
+ }
+ tsk_safeobj_unlock(base);
+ }
+ // Decode data
+ out_size = codec->plugin->decode(codec, packet->payload.data, packet->payload.size, &audio->decoder.buffer, &audio->decoder.buffer_size, packet->header);
+ if (out_size && audio->is_started) { // check "is_started" again to be sure stop() was not called by another thread
+ void* buffer = audio->decoder.buffer;
+ tsk_size_t size = out_size;
+
+ // resample if needed
+ if ((base->consumer->audio.out.rate && base->consumer->audio.out.rate != codec->in.rate) || (base->consumer->audio.out.channels && base->consumer->audio.out.channels != TMEDIA_CODEC_AUDIO(codec)->in.channels)) {
+ tsk_size_t resampler_result_size = 0;
+ int bytesPerSample = (base->consumer->audio.bits_per_sample >> 3);
+
+ if (!audio->decoder.resampler.instance) {
+ TSK_DEBUG_INFO("Create audio resampler(%s) for consumer: rate=%d->%d, channels=%d->%d, bytesPerSample=%d",
+ codec->plugin->desc,
+ codec->in.rate, base->consumer->audio.out.rate,
+ TMEDIA_CODEC_AUDIO(codec)->in.channels, base->consumer->audio.out.channels,
+ bytesPerSample);
+ audio->decoder.resampler.instance = _tdav_session_audio_resampler_create(
+ bytesPerSample,
+ codec->in.rate, base->consumer->audio.out.rate,
+ base->consumer->audio.ptime,
+ TMEDIA_CODEC_AUDIO(codec)->in.channels, base->consumer->audio.out.channels,
+ TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY,
+ &audio->decoder.resampler.buffer, &audio->decoder.resampler.buffer_size
+ );
+ }
+ if (!audio->decoder.resampler.instance) {
+ TSK_DEBUG_ERROR("No resampler to handle data");
+ ret = -5;
+ goto bail;
+ }
+ if (!(resampler_result_size = tmedia_resampler_process(audio->decoder.resampler.instance, buffer, size / bytesPerSample, audio->decoder.resampler.buffer, audio->decoder.resampler.buffer_size / bytesPerSample))){
+ TSK_DEBUG_ERROR("Failed to process audio resampler input buffer");
+ ret = -6;
+ goto bail;
+ }
+
+ buffer = audio->decoder.resampler.buffer;
+ size = audio->decoder.resampler.buffer_size;
+ }
+
+ // adjust the gain
+ if (base->consumer->audio.gain) {
+ _tdav_session_audio_apply_gain(buffer, (int)size, base->consumer->audio.bits_per_sample, base->consumer->audio.gain);
+ }
+ // consume the frame
+ tmedia_consumer_consume(base->consumer, buffer, size, packet->header);
+ }
+ }
+ else {
+ TSK_DEBUG_INFO("Session audio not ready");
+ }
+
+ // everything is ok
+ ret = 0;
+
+bail:
+ tsk_object_unref(TSK_OBJECT(codec));
+ return ret;
+}
+
+// Producer callback (From the producer to the network). Will encode() data before sending
+static int tdav_session_audio_producer_enc_cb(const void* callback_data, const void* buffer, tsk_size_t size)
+{
+ int ret = 0;
+
+ tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
+ tdav_session_av_t* base = (tdav_session_av_t*)callback_data;
+
+ if (!audio) {
+ TSK_DEBUG_ERROR("Null session");
+ return 0;
+ }
+
+ // do nothing if the session is held
+ // when the session is held the end user gets feedback and can also put the consumer and producer on pause
+ if (TMEDIA_SESSION(audio)->lo_held) {
+ return 0;
+ }
+
+ // get best negotiated codec if not already done
+ // the encoder codec could be null when session is renegotiated without re-starting (e.g. hold/resume)
+ if (!audio->encoder.codec) {
+ const tmedia_codec_t* codec;
+ tsk_safeobj_lock(base);
+ if (!(codec = tdav_session_av_get_best_neg_codec(base))) {
+ TSK_DEBUG_ERROR("No codec matched");
+ tsk_safeobj_unlock(base);
+ return -2;
+ }
+ audio->encoder.codec = tsk_object_ref(TSK_OBJECT(codec));
+ tsk_safeobj_unlock(base);
+ }
+
+ if (audio->is_started && base->rtp_manager && base->rtp_manager->is_started) {
+ /* encode */
+ tsk_size_t out_size = 0;
+
+ // Open codec if not already done
+ if (!audio->encoder.codec->opened) {
+ tsk_safeobj_lock(base);
+ if ((ret = tmedia_codec_open(audio->encoder.codec))) {
+ tsk_safeobj_unlock(base);
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", audio->encoder.codec->plugin->desc);
+ return -4;
+ }
+ tsk_safeobj_unlock(base);
+ }
+ // check if we're sending DTMF or not
+ if (audio->is_sending_dtmf_events) {
+ if (base->rtp_manager) {
+ // increment the timestamp
+ base->rtp_manager->rtp.timestamp += TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec)/*duration*/;
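+ // keeping the timestamp advancing ensures the audio timeline stays consistent once the DTMF burst is over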
+ }
+ TSK_DEBUG_INFO("Skiping audio frame as we're sending DTMF...");
+ return 0;
+ }
+
+ // resample if needed
+ if (base->producer->audio.rate != audio->encoder.codec->out.rate || base->producer->audio.channels != TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels){
+ tsk_size_t resampler_result_size = 0;
+ int bytesPerSample = (base->producer->audio.bits_per_sample >> 3);
+
+ if (!audio->encoder.resampler.instance){
+ TSK_DEBUG_INFO("Create audio resampler(%s) for producer: rate=%d->%d, channels=%d->%d, bytesPerSample=%d",
+ audio->encoder.codec->plugin->desc,
+ base->producer->audio.rate, audio->encoder.codec->out.rate,
+ base->producer->audio.channels, TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels,
+ bytesPerSample);
+ audio->encoder.resampler.instance = _tdav_session_audio_resampler_create(
+ bytesPerSample,
+ base->producer->audio.rate, audio->encoder.codec->out.rate,
+ base->producer->audio.ptime,
+ base->producer->audio.channels, TMEDIA_CODEC_AUDIO(audio->encoder.codec)->out.channels,
+ TDAV_AUDIO_RESAMPLER_DEFAULT_QUALITY,
+ &audio->encoder.resampler.buffer, &audio->encoder.resampler.buffer_size
+ );
+ }
+ if (!audio->encoder.resampler.instance){
+ TSK_DEBUG_ERROR("No resampler to handle data");
+ ret = -1;
+ goto done;
+ }
+ if (!(resampler_result_size = tmedia_resampler_process(audio->encoder.resampler.instance, buffer, size / bytesPerSample, audio->encoder.resampler.buffer, audio->encoder.resampler.buffer_size / bytesPerSample))){
+ TSK_DEBUG_ERROR("Failed to process audio resampler input buffer");
+ ret = -1;
+ goto done;
+ }
+
+ buffer = audio->encoder.resampler.buffer;
+ size = audio->encoder.resampler.buffer_size;
+ }
+
+ // Denoise (VAD, AGC, Noise suppression, ...)
+ // Must be done after resampling
+ if (audio->denoise){
+ tsk_bool_t silence_or_noise = tsk_false;
+ if (audio->denoise->echo_supp_enabled){
+ ret = tmedia_denoise_process_record(TMEDIA_DENOISE(audio->denoise), (void*)buffer, (uint32_t)size, &silence_or_noise);
+ }
+ }
+ // adjust the gain
+ // Must be done after resampling
+ if (base->producer->audio.gain){
+ _tdav_session_audio_apply_gain((void*)buffer, (int)size, base->producer->audio.bits_per_sample, base->producer->audio.gain);
+ }
+
+ // Encode data
+ if ((audio->encoder.codec = tsk_object_ref(audio->encoder.codec))){ /* Thread safeness (SIP reINVITE or UPDATE could update the encoder) */
+ out_size = audio->encoder.codec->plugin->encode(audio->encoder.codec, buffer, size, &audio->encoder.buffer, &audio->encoder.buffer_size);
+ if (out_size){
+ trtp_manager_send_rtp(base->rtp_manager, audio->encoder.buffer, out_size, TMEDIA_CODEC_FRAME_DURATION_AUDIO_ENCODING(audio->encoder.codec), tsk_false/*Marker*/, tsk_true/*lastPacket*/);
+ }
+ tsk_object_unref(audio->encoder.codec);
+ }
+ else{
+ TSK_DEBUG_WARN("No encoder");
+ }
+ }
+
+done:
+ return ret;
+}
+
+
+/* ============ Plugin interface ================= */
+
+static int tdav_session_audio_set(tmedia_session_t* self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_audio_t* audio;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (tdav_session_av_set(TDAV_SESSION_AV(self), param) == tsk_true){
+ return 0;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ if (param->plugin_type == tmedia_ppt_consumer){
+ TSK_DEBUG_ERROR("Not expected consumer_set(%s)", param->key);
+ }
+ else if (param->plugin_type == tmedia_ppt_producer){
+ TSK_DEBUG_ERROR("Not expected producer_set(%s)", param->key);
+ }
+ else{
+ if (param->value_type == tmedia_pvt_int32){
+ if (tsk_striequals(param->key, "echo-supp")){
+ if (audio->denoise){
+ audio->denoise->echo_supp_enabled = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+ }
+ }
+ else if (tsk_striequals(param->key, "echo-tail")){
+ if (audio->denoise){
+ return tmedia_denoise_set(audio->denoise, param);
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int tdav_session_audio_get(tmedia_session_t* self, tmedia_param_t* param)
+{
+ if (!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ // try with the base class to see if this option is supported or not
+ if (tdav_session_av_get(TDAV_SESSION_AV(self), param) == tsk_true){
+ return 0;
+ }
+ else {
+ // the codec information is held by the session even though the user is allowed to request it through the consumer/producer
+ if (param->value_type == tmedia_pvt_pobject){
+ if (param->plugin_type == tmedia_ppt_consumer){
+ TSK_DEBUG_ERROR("Not implemented");
+ return -4;
+ }
+ else if (param->plugin_type == tmedia_ppt_producer){
+ if (tsk_striequals("codec", param->key)) {
+ const tmedia_codec_t* codec;
+ if (!(codec = TDAV_SESSION_AUDIO(self)->encoder.codec)){
+ codec = tdav_session_av_get_best_neg_codec((const tdav_session_av_t*)self); // up to the caller to release the object
+ }
+ *((tsk_object_t**)param->value) = tsk_object_ref(TSK_OBJECT(codec));
+ return 0;
+ }
+ }
+ else if (param->plugin_type == tmedia_ppt_session) {
+ if (tsk_striequals(param->key, "codec-encoder")) {
+ *((tsk_object_t**)param->value) = tsk_object_ref(TDAV_SESSION_AUDIO(self)->encoder.codec); // up to the caller to release the object
+ return 0;
+ }
+ }
+ }
+ }
+
+ TSK_DEBUG_WARN("This session doesn't support get(%s)", param->key);
+ return -2;
+}
+
+static int tdav_session_audio_prepare(tmedia_session_t* self)
+{
+ tdav_session_av_t* base = (tdav_session_av_t*)(self);
+ int ret;
+
+ if ((ret = tdav_session_av_prepare(base))){
+ TSK_DEBUG_ERROR("tdav_session_av_prepare(audio) failed");
+ return ret;
+ }
+
+ if (base->rtp_manager){
+ ret = trtp_manager_set_rtp_callback(base->rtp_manager, tdav_session_audio_rtp_cb, base);
+ }
+
+ return ret;
+}
+
+static int tdav_session_audio_start(tmedia_session_t* self)
+{
+ int ret;
+ tdav_session_audio_t* audio;
+ const tmedia_codec_t* codec;
+ tdav_session_av_t* base;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ if (audio->is_started) {
+ TSK_DEBUG_INFO("Audio session already started");
+ return 0;
+ }
+
+ if (!(codec = tdav_session_av_get_best_neg_codec(base))){
+ TSK_DEBUG_ERROR("No codec matched");
+ return -2;
+ }
+
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ audio->encoder.codec = tsk_object_ref((tsk_object_t*)codec);
+
+ if ((ret = tdav_session_av_start(base, codec))){
+ TSK_DEBUG_ERROR("tdav_session_av_start(audio) failed");
+ return ret;
+ }
+
+ if (base->rtp_manager){
+ /* Denoise (AEC, Noise Suppression, AGC)
+ * tmedia_denoise_process_record() is called after resampling and before encoding which means sampling rate is equal to codec's rate
+ * tmedia_denoise_echo_playback() is called before playback which means sampling rate is equal to consumer's rate
+ */
+ if (audio->denoise){
+ uint32_t record_frame_size_samples = TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec);
+ uint32_t record_sampling_rate = TMEDIA_CODEC_RATE_ENCODING(audio->encoder.codec);
+ uint32_t record_channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(audio->encoder.codec);
+
+ uint32_t playback_frame_size_samples = (base->consumer && base->consumer->audio.ptime && base->consumer->audio.out.rate && base->consumer->audio.out.channels)
+ ? ((base->consumer->audio.ptime * base->consumer->audio.out.rate) / 1000) * base->consumer->audio.out.channels
+ : TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_DECODING(audio->encoder.codec);
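+ // e.g. 20 ms of 48 kHz stereo playback => ((20 * 48000) / 1000) * 2 = 1920 samples per frame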
+ uint32_t playback_sampling_rate = (base->consumer && base->consumer->audio.out.rate)
+ ? base->consumer->audio.out.rate
+ : TMEDIA_CODEC_RATE_DECODING(audio->encoder.codec);
+ uint32_t playback_channels = (base->consumer && base->consumer->audio.out.channels)
+ ? base->consumer->audio.out.channels
+ : TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(audio->encoder.codec);
+
+ TSK_DEBUG_INFO("Audio denoiser to be opened(record_frame_size_samples=%u, record_sampling_rate=%u, record_channels=%u, playback_frame_size_samples=%u, playback_sampling_rate=%u, playback_channels=%u)",
+ record_frame_size_samples, record_sampling_rate, record_channels, playback_frame_size_samples, playback_sampling_rate, playback_channels);
+
+ // close()
+ tmedia_denoise_close(audio->denoise);
+ // open() with new values
+ tmedia_denoise_open(audio->denoise,
+ record_frame_size_samples, record_sampling_rate, TSK_CLAMP(1, record_channels, 2),
+ playback_frame_size_samples, playback_sampling_rate, TSK_CLAMP(1, playback_channels, 2));
+ }
+ }
+
+ audio->is_started = (ret == 0);
+
+ return ret;
+}
+
+static int tdav_session_audio_stop(tmedia_session_t* self)
+{
+ tdav_session_audio_t* audio = TDAV_SESSION_AUDIO(self);
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+ int ret = tdav_session_av_stop(base);
+ audio->is_started = tsk_false;
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+
+ // close the jitter buffer and denoiser to be sure they will be reopened and reinitialized on reINVITE or UPDATE
+ // this is a "must" when the initial and updated sessions use codecs with different rates
+ if (audio->jitterbuffer && audio->jitterbuffer->opened) {
+ ret = tmedia_jitterbuffer_close(audio->jitterbuffer);
+ }
+ if (audio->denoise && audio->denoise->opened) {
+ ret = tmedia_denoise_close(audio->denoise);
+ }
+ return ret;
+}
+
+static int tdav_session_audio_send_dtmf(tmedia_session_t* self, uint8_t event)
+{
+ tdav_session_audio_t* audio;
+ tdav_session_av_t* base;
+ tmedia_codec_t* codec;
+ int ret, rate = 8000, ptime = 20;
+ uint16_t duration;
+ tdav_session_audio_dtmfe_t *dtmfe, *copy;
+ int format = 101;
+
+ if (!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+ base = (tdav_session_av_t*)self;
+
+ // Find the DTMF codec to use for the RTP payload
+ if ((codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->codecs, TMEDIA_CODEC_FORMAT_DTMF))){
+ rate = (int)codec->out.rate;
+ format = atoi(codec->neg_format ? codec->neg_format : codec->format);
+ TSK_OBJECT_SAFE_FREE(codec);
+ }
+
+ /* do we have an RTP manager? */
+ if (!base->rtp_manager){
+ TSK_DEBUG_ERROR("No RTP manager associated to this session");
+ return -2;
+ }
+
+ /* Create Events list */
+ if (!audio->dtmf_events){
+ audio->dtmf_events = tsk_list_create();
+ }
+
+ /* Create global reference to the timer manager */
+ if (!audio->timer.handle_mgr_global){
+ if (!(audio->timer.handle_mgr_global = tsk_timer_mgr_global_ref())){
+ TSK_DEBUG_ERROR("Failed to create Global Timer Manager");
+ return -3;
+ }
+ }
+
+ /* Start the timer manager */
+ if (!audio->timer.started){
+ if ((ret = tsk_timer_manager_start(audio->timer.handle_mgr_global))){
+ TSK_DEBUG_ERROR("Failed to start Global Timer Manager");
+ return ret;
+ }
+ audio->timer.started = tsk_true;
+ }
+
+
+ /* RFC 4733 - 5. Examples
+
+ +-------+-----------+------+--------+------+--------+--------+------+
+ | Time | Event | M | Time- | Seq | Event | Dura- | E |
+ | (ms) | | bit | stamp | No | Code | tion | bit |
+ +-------+-----------+------+--------+------+--------+--------+------+
+ | 0 | "9" | | | | | | |
+ | | starts | | | | | | |
+ | 50 | RTP | "1" | 0 | 1 | 9 | 400 | "0" |
+ | | packet 1 | | | | | | |
+ | | sent | | | | | | |
+ | 100 | RTP | "0" | 0 | 2 | 9 | 800 | "0" |
+ | | packet 2 | | | | | | |
+ | | sent | | | | | | |
+ | 150 | RTP | "0" | 0 | 3 | 9 | 1200 | "0" |
+ | | packet 3 | | | | | | |
+ | | sent | | | | | | |
+ | 200 | RTP | "0" | 0 | 4 | 9 | 1600 | "0" |
+ | | packet 4 | | | | | | |
+ | | sent | | | | | | |
+ | 200 | "9" ends | | | | | | |
+ | 250 | RTP | "0" | 0 | 5 | 9 | 1600 | "1" |
+ | | packet 4 | | | | | | |
+ | | first | | | | | | |
+ | | retrans- | | | | | | |
+ | | mission | | | | | | |
+ | 300 | RTP | "0" | 0 | 6 | 9 | 1600 | "1" |
+ | | packet 4 | | | | | | |
+ | | second | | | | | | |
+ | | retrans- | | | | | | |
+ | | mission | | | | | | |
+ =====================================================================
+ | 880 | First "1" | | | | | | |
+ | | starts | | | | | | |
+ | 930 | RTP | "1" | 7040 | 7 | 1 | 400 | "0" |
+ | | packet 5 | | | | | | |
+ | | sent | | | | | | |
+ */
+
+ // ref() (thread safety)
+ audio = tsk_object_ref(audio);
+
+ // say we're sending DTMF digits to avoid mixing them with audio (SRTP wouldn't allow it anyway because of sequence numbers)
+ // the flag will be turned OFF when the list is empty
+ audio->is_sending_dtmf_events = tsk_true;
+
+ duration = TMEDIA_CODEC_PCM_FRAME_SIZE_AUDIO_ENCODING(audio->encoder.codec);
+
+ // lock() list
+ tsk_list_lock(audio->dtmf_events);
+
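+ // Following the RFC 4733 example above: four packets for the ongoing event (M bit set on the first,
+ // cumulative duration growing by one frame each time), then the end-of-event packet (E bit set)
+ // sent twice, all spaced "ptime" ms apart.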
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 1, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_true, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 0, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 2, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 1, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 3, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 2, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 3, _tdav_session_audio_dtmfe_timercb, copy);
+
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_true);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 4, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration * 4, ++base->rtp_manager->rtp.seq_num, base->rtp_manager->rtp.timestamp, (uint8_t)format, tsk_false, tsk_true);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime * 5, _tdav_session_audio_dtmfe_timercb, copy);
+
+ // unlock() list
+ tsk_list_unlock(audio->dtmf_events);
+
+ // increment timestamp
+ base->rtp_manager->rtp.timestamp += duration;
+
+ // unref() (thread safety)
+ audio = tsk_object_unref(audio);
+
+ return 0;
+}
+
+static int tdav_session_audio_pause(tmedia_session_t* self)
+{
+ return tdav_session_av_pause(TDAV_SESSION_AV(self));
+}
+
+static const tsdp_header_M_t* tdav_session_audio_get_lo(tmedia_session_t* self)
+{
+ tsk_bool_t updated = tsk_false;
+ const tsdp_header_M_t* ret;
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+
+ if (!(ret = tdav_session_av_get_lo(base, &updated))){
+ TSK_DEBUG_ERROR("tdav_session_av_get_lo(audio) failed");
+ return tsk_null;
+ }
+
+ if (updated){
+ tsk_safeobj_lock(base);
+ TSK_OBJECT_SAFE_FREE(TDAV_SESSION_AUDIO(self)->encoder.codec);
+ tsk_safeobj_unlock(base);
+ }
+
+ return ret;
+}
+
+static int tdav_session_audio_set_ro(tmedia_session_t* self, const tsdp_header_M_t* m)
+{
+ int ret;
+ tsk_bool_t updated = tsk_false;
+ tdav_session_av_t* base = TDAV_SESSION_AV(self);
+
+ if ((ret = tdav_session_av_set_ro(base, m, &updated))){
+ TSK_DEBUG_ERROR("tdav_session_av_set_ro(audio) failed");
+ return ret;
+ }
+
+ if (updated) {
+ tsk_safeobj_lock(base);
+ // reset audio jitter buffer (new Offer probably comes with new seq_nums or timestamps)
+ if (base->consumer) {
+ ret = tdav_consumer_audio_reset(TDAV_CONSUMER_AUDIO(base->consumer));
+ }
+ // destroy encoder to force requesting new one
+ TSK_OBJECT_SAFE_FREE(TDAV_SESSION_AUDIO(self)->encoder.codec);
+ tsk_safeobj_unlock(base);
+ }
+
+ return ret;
+}
+
+/* apply gain */
+static void _tdav_session_audio_apply_gain(void* buffer, int len, int bps, int gain)
+{
+ register int i;
+ int max_val;
+
+ max_val = (1 << (bps - 1 - gain)) - 1;
+
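+ // samples are boosted by a left shift of "gain" bits; only samples whose magnitude
+ // is below 2^(bps - 1 - gain) are shifted, so the boosted value cannot overflow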
+ if (bps == 8) {
+ int8_t *buff = buffer;
+ for (i = 0; i < len; i++) {
+ if (buff[i] > -max_val && buff[i] < max_val)
+ buff[i] = buff[i] << gain;
+ }
+ }
+ else if (bps == 16) {
+ int16_t *buff = buffer;
+ for (i = 0; i < len / 2; i++) {
+ if (buff[i] > -max_val && buff[i] < max_val)
+ buff[i] = buff[i] << gain;
+ }
+ }
+}
+
+
+/* Internal function used to create new DTMF event */
+static tdav_session_audio_dtmfe_t* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E)
+{
+ tdav_session_audio_dtmfe_t* dtmfe;
+ const tdav_session_av_t* base = (const tdav_session_av_t*)session;
+ static uint8_t volume = 10;
+ static uint32_t ssrc = 0x5234A8;
+
+ uint8_t pay[4] = { 0 };
+
+ /* RFC 4733 - 2.3. Payload Format
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | event |E|R| volume | duration |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ if (!(dtmfe = tsk_object_new(tdav_session_audio_dtmfe_def_t))){
+ TSK_DEBUG_ERROR("Failed to create new DTMF event");
+ return tsk_null;
+ }
+ dtmfe->session = session;
+
+ if (!(dtmfe->packet = trtp_rtp_packet_create((session && base->rtp_manager) ? base->rtp_manager->rtp.ssrc.local : ssrc, seq, timestamp, format, M))){
+ TSK_DEBUG_ERROR("Failed to create DTMF RTP packet");
+ TSK_OBJECT_SAFE_FREE(dtmfe);
+ return tsk_null;
+ }
+
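+ /* pay[0] = event code, pay[1] = E bit (MSB) | 6-bit volume, pay[2..3] = duration in network byte order */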
+ pay[0] = event;
+ pay[1] |= ((E << 7) | (volume & 0x3F));
+ pay[2] = (duration >> 8);
+ pay[3] = (duration & 0xFF);
+
+ /* set data */
+ if ((dtmfe->packet->payload.data = tsk_calloc(sizeof(pay), sizeof(uint8_t)))){
+ memcpy(dtmfe->packet->payload.data, pay, sizeof(pay));
+ dtmfe->packet->payload.size = sizeof(pay);
+ }
+
+ return dtmfe;
+}
+
+static int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id)
+{
+ tdav_session_audio_dtmfe_t* dtmfe = (tdav_session_audio_dtmfe_t*)arg;
+ tdav_session_audio_t *audio;
+
+ if (!dtmfe || !dtmfe->session || !dtmfe->session->dtmf_events){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* Send the data */
+ TSK_DEBUG_INFO("Sending DTMF event...");
+ trtp_manager_send_rtp_packet(TDAV_SESSION_AV(dtmfe->session)->rtp_manager, dtmfe->packet, tsk_false);
+
+
+ audio = tsk_object_ref(TSK_OBJECT(dtmfe->session));
+ tsk_list_lock(audio->dtmf_events);
+ /* Remove and delete the event from the queue */
+ tsk_list_remove_item_by_data(audio->dtmf_events, dtmfe);
+ /* Check if there are pending events */
+ audio->is_sending_dtmf_events = !TSK_LIST_IS_EMPTY(audio->dtmf_events);
+ tsk_list_unlock(audio->dtmf_events);
+ tsk_object_unref(audio);
+
+ return 0;
+}
+
+static tmedia_resampler_t* _tdav_session_audio_resampler_create(int32_t bytes_per_sample, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, void** resampler_buffer, tsk_size_t *resampler_buffer_size)
+{
+ uint32_t resampler_buff_size;
+ tmedia_resampler_t* resampler;
+ int ret;
+
+ if (out_channels > 2 || in_channels > 2) {
+ TSK_DEBUG_ERROR("Invalid parameter: out_channels=%u, in_channels=%u", out_channels, in_channels);
+ return tsk_null;
+ }
+
+ resampler_buff_size = (((out_freq * frame_duration) / 1000) * bytes_per_sample) << (out_channels == 2 ? 1 : 0);
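+ // output samples per frame ((out_freq * frame_duration) / 1000) times bytes per sample, doubled for stereo output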
+
+ if (!(resampler = tmedia_resampler_create())) {
+ TSK_DEBUG_ERROR("Failed to create audio resampler");
+ return tsk_null;
+ }
+ else {
+ if ((ret = tmedia_resampler_open(resampler, in_freq, out_freq, frame_duration, in_channels, out_channels, quality, 16))) {
+ TSK_DEBUG_ERROR("Failed to open audio resampler (%d, %d, %d, %d, %d,%d) with retcode=%d", in_freq, out_freq, frame_duration, in_channels, out_channels, quality, ret);
+ TSK_OBJECT_SAFE_FREE(resampler);
+ goto done;
+ }
+ }
+ // create temp resampler buffer
+ if ((*resampler_buffer = tsk_realloc(*resampler_buffer, resampler_buff_size))) {
+ *resampler_buffer_size = resampler_buff_size;
+ }
+ else {
+ *resampler_buffer_size = 0;
+ TSK_DEBUG_ERROR("Failed to allocate resampler buffer with size = %d", resampler_buff_size);
+ TSK_OBJECT_SAFE_FREE(resampler);
+ goto done;
+ }
+done:
+ return resampler;
+}
+
+//=================================================================================================
+// Session Audio Plugin object definition
+//
+/* constructor */
+static tsk_object_t* tdav_session_audio_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_audio_t *audio = self;
+ if (audio){
+ int ret;
+ tdav_session_av_t *base = TDAV_SESSION_AV(self);
+
+ /* init() base */
+ if ((ret = tdav_session_av_init(base, tmedia_audio)) != 0){
+ TSK_DEBUG_ERROR("tdav_session_av_init(audio) failed");
+ return tsk_null;
+ }
+
+ /* init() self */
+ if (base->producer){
+ tmedia_producer_set_enc_callback(base->producer, tdav_session_audio_producer_enc_cb, audio);
+ }
+ if (base->consumer){
+ // It's important to create the denoiser and jitter buffer here as dynamic plugins (from shared libs) don't have access to the registry
+ if (!(audio->denoise = tmedia_denoise_create())){
+ TSK_DEBUG_WARN("No Audio denoiser found");
+ }
+ else{
+ // IMPORTANT: This means that the consumer must be child of "tdav_consumer_audio_t" object
+ tdav_consumer_audio_set_denoise(TDAV_CONSUMER_AUDIO(base->consumer), audio->denoise);
+ }
+
+ if (!(audio->jitterbuffer = tmedia_jitterbuffer_create(tmedia_audio))){
+ TSK_DEBUG_ERROR("Failed to create jitter buffer");
+ }
+ else{
+ ret = tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(audio->jitterbuffer));
+ tdav_consumer_audio_set_jitterbuffer(TDAV_CONSUMER_AUDIO(base->consumer), audio->jitterbuffer);
+ }
+ }
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_session_audio_dtor(tsk_object_t * self)
+{
+ tdav_session_audio_t *audio = self;
+ TSK_DEBUG_INFO("*** tdav_session_audio_t destroyed ***");
+ if (audio){
+ tdav_session_audio_stop((tmedia_session_t*)audio);
+ // Do it in this order (deinit self first)
+
+ /* Timer manager */
+ if (audio->timer.started){
+ if (audio->dtmf_events){
+ /* Cancel all events */
+ tsk_list_item_t* item;
+ tsk_list_foreach(item, audio->dtmf_events){
+ tsk_timer_mgr_global_cancel(((tdav_session_audio_dtmfe_t*)item->data)->timer_id);
+ }
+ }
+ }
+
+ tsk_timer_mgr_global_unref(&audio->timer.handle_mgr_global);
+
+ /* CleanUp the DTMF events */
+ TSK_OBJECT_SAFE_FREE(audio->dtmf_events);
+
+ TSK_OBJECT_SAFE_FREE(audio->denoise);
+ TSK_OBJECT_SAFE_FREE(audio->jitterbuffer);
+
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ TSK_FREE(audio->encoder.buffer);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.codec);
+ TSK_FREE(audio->decoder.buffer);
+
+ // free resamplers
+ TSK_FREE(audio->encoder.resampler.buffer);
+ TSK_OBJECT_SAFE_FREE(audio->encoder.resampler.instance);
+ TSK_FREE(audio->decoder.resampler.buffer);
+ TSK_OBJECT_SAFE_FREE(audio->decoder.resampler.instance);
+
+ /* deinit base */
+ tdav_session_av_deinit(TDAV_SESSION_AV(self));
+
+ TSK_DEBUG_INFO("*** Audio session destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_session_audio_def_s =
+{
+ sizeof(tdav_session_audio_t),
+ tdav_session_audio_ctor,
+ tdav_session_audio_dtor,
+ tmedia_session_cmp,
+};
+/* plugin definition*/
+static const tmedia_session_plugin_def_t tdav_session_audio_plugin_def_s =
+{
+ &tdav_session_audio_def_s,
+
+ tmedia_audio,
+ "audio",
+
+ tdav_session_audio_set,
+ tdav_session_audio_get,
+ tdav_session_audio_prepare,
+ tdav_session_audio_start,
+ tdav_session_audio_pause,
+ tdav_session_audio_stop,
+
+ /* Audio part */
+ {
+ tdav_session_audio_send_dtmf
+ },
+
+ tdav_session_audio_get_lo,
+ tdav_session_audio_set_ro
+};
+const tmedia_session_plugin_def_t *tdav_session_audio_plugin_def_t = &tdav_session_audio_plugin_def_s;
+static const tmedia_session_plugin_def_t tdav_session_bfcpaudio_plugin_def_s =
+{
+ &tdav_session_audio_def_s,
+
+ tmedia_bfcp_audio,
+ "audio",
+
+ tdav_session_audio_set,
+ tdav_session_audio_get,
+ tdav_session_audio_prepare,
+ tdav_session_audio_start,
+ tdav_session_audio_pause,
+ tdav_session_audio_stop,
+
+ /* Audio part */
+ {
+ tdav_session_audio_send_dtmf
+ },
+
+ tdav_session_audio_get_lo,
+ tdav_session_audio_set_ro
+};
+const tmedia_session_plugin_def_t *tdav_session_bfcpaudio_plugin_def_t = &tdav_session_bfcpaudio_plugin_def_s;
+
+
+
+//=================================================================================================
+// DTMF event object definition
+//
+static tsk_object_t* tdav_session_audio_dtmfe_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if (event){
+ event->timer_id = TSK_INVALID_TIMER_ID;
+ }
+ return self;
+}
+
+static tsk_object_t* tdav_session_audio_dtmfe_dtor(tsk_object_t * self)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if (event){
+ TSK_OBJECT_SAFE_FREE(event->packet);
+ }
+
+ return self;
+}
+
+static int tdav_session_audio_dtmfe_cmp(const tsk_object_t *_e1, const tsk_object_t *_e2)
+{
+ int ret;
+ tsk_subsat_int32_ptr(_e1, _e2, &ret);
+ return ret;
+}
+
+static const tsk_object_def_t tdav_session_audio_dtmfe_def_s =
+{
+ sizeof(tdav_session_audio_dtmfe_t),
+ tdav_session_audio_dtmfe_ctor,
+ tdav_session_audio_dtmfe_dtor,
+ tdav_session_audio_dtmfe_cmp,
+};
+const tsk_object_def_t *tdav_session_audio_dtmfe_def_t = &tdav_session_audio_dtmfe_def_s;
diff --git a/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c b/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c
new file mode 100644
index 0000000..cccc235
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c
@@ -0,0 +1,281 @@
+/*
+* Copyright (C) 2011 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speakup_jitterbuffer.c
+ * @brief Speakup Audio jitterbuffer Plugin
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+
+ */
+#include "tinydav/audio/tdav_speakup_jitterbuffer.h"
+
+#if !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB)
+
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_time.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <string.h>
+
+#if TSK_UNDER_WINDOWS
+# include <Winsock2.h> // timeval
+#elif defined(__SYMBIAN32__)
+# include <_timeval.h>
+#else
+# include <sys/time.h>
+#endif
+
+#define TDAV_SPEAKUP_10MS 10
+#define TDAV_SPEAKUP_10MS_FRAME_SIZE(self) (((self)->rate * TDAV_SPEAKUP_10MS)/1000)
+#define TDAV_SPEAKUP_PTIME_FRAME_SIZE(self) (((self)->rate * (self)->framesize)/1000)
+
+static int tdav_speakup_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
+{
+ TSK_DEBUG_ERROR("Not implemented");
+ return -2;
+}
+
+static int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate, uint32_t channels)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(!jitterbuffer->jbuffer){
+ if(!(jitterbuffer->jbuffer = jb_new())){
+ TSK_DEBUG_ERROR("Failed to create new buffer");
+ return -1;
+ }
+ jitterbuffer->jcodec = JB_CODEC_OTHER;
+ }
+ jitterbuffer->ref_timestamp = 0;
+ jitterbuffer->frame_duration = frame_duration;
+ jitterbuffer->rate = rate;
+ jitterbuffer->channels = channels;
+ jitterbuffer->_10ms_size_bytes = 160 * (rate/8000);
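+ // 160 bytes = 10 ms of 16-bit mono PCM at 8 kHz (80 samples * 2 bytes), scaled linearly with the sampling rate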
+
+ return 0;
+}
+
+static int tdav_speakup_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
+{
+ return 0;
+}
+
+static int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+ int i;
+ long now, ts;
+ void* _10ms_buf;
+ uint8_t* pdata;
+
+ if(!self || !data || !data_size || !jitterbuffer->jbuffer || !rtp_hdr){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* synchronize the reference timestamp */
+ if(!jitterbuffer->ref_timestamp){
+ uint64_t now = tsk_time_now();
+ struct timeval tv;
+ long ts = (rtp_hdr->timestamp/(jitterbuffer->rate/1000));
+ //=> Do not use (see clock_gettime() on linux): tsk_gettimeofday(&tv, tsk_null);
+ tv.tv_sec = (long)(now)/1000;
+ tv.tv_usec = (long)(now - (tv.tv_sec*1000))*1000;
+
+ tv.tv_sec -= (ts / jitterbuffer->rate);
+ tv.tv_usec -= (ts % jitterbuffer->rate) * 125;
+ if((tv.tv_usec -= (tv.tv_usec % (TDAV_SPEAKUP_10MS * 10000))) <0){
+ tv.tv_usec += 1000000;
+ tv.tv_sec -= 1;
+ }
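+ /* ref_timestamp approximates the wall-clock time (in ms) corresponding to RTP timestamp 0;
+ put() and get() below express their times relative to it */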
+ jitterbuffer->ref_timestamp = tsk_time_get_ms(&tv);
+
+ switch(rtp_hdr->payload_type){
+ case 8: /*TMEDIA_CODEC_FORMAT_G711a*/
+ case 0: /* TMEDIA_CODEC_FORMAT_G711u */
+ jitterbuffer->jcodec = JB_CODEC_G711x;
+ break;
+ case 18: /* TMEDIA_CODEC_FORMAT_G729 */
+ jitterbuffer->jcodec = JB_CODEC_G729A;
+ break;
+ case 3: /* TMEDIA_CODEC_FORMAT_GSM */
+ jitterbuffer->jcodec = JB_CODEC_GSM_EFR;
+ break;
+
+ default:
+ jitterbuffer->jcodec = JB_CODEC_OTHER;
+ break;
+ }
+ }
+
+ // split into several 10ms frames
+ now = (long) (tsk_time_now()-jitterbuffer->ref_timestamp);
+ ts = (long)(rtp_hdr->timestamp/(jitterbuffer->rate/1000));
+ pdata = (uint8_t*)data;
+ for(i=0; i<(int)(data_size/jitterbuffer->_10ms_size_bytes);i++){
+ if((_10ms_buf = tsk_calloc(jitterbuffer->_10ms_size_bytes, 1))){
+ memcpy(_10ms_buf, &pdata[i*jitterbuffer->_10ms_size_bytes], jitterbuffer->_10ms_size_bytes);
+ jb_put(jitterbuffer->jbuffer, _10ms_buf, JB_TYPE_VOICE, TDAV_SPEAKUP_10MS, ts, now, jitterbuffer->jcodec);
+ _10ms_buf = tsk_null;
+ }
+ ts += TDAV_SPEAKUP_10MS;
+ }
+
+ return 0;
+}
+
+static tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ int jret;
+
+ int i, _10ms_count;
+ long now;
+ short* _10ms_buf = tsk_null;
+ uint8_t* pout_data = (uint8_t*)out_data;
+
+ if(!out_data || (out_size % jitterbuffer->_10ms_size_bytes)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ _10ms_count = (out_size/jitterbuffer->_10ms_size_bytes);
+ now = (long) (tsk_time_now() - jitterbuffer->ref_timestamp);
+ for(i=0; i<_10ms_count; i++){
+
+ jret = jb_get(jitterbuffer->jbuffer, (void**)&_10ms_buf, now, TDAV_SPEAKUP_10MS);
+ switch(jret){
+ case JB_INTERP:
+ TSK_DEBUG_INFO("JB_INTERP");
+ jb_reset_all(jitterbuffer->jbuffer);
+ memset(&pout_data[i*jitterbuffer->_10ms_size_bytes], 0, (_10ms_count*jitterbuffer->_10ms_size_bytes)-(i*jitterbuffer->_10ms_size_bytes));
+ i = _10ms_count; // for exit
+ break;
+ case JB_OK:
+ case JB_EMPTY:
+ case JB_NOFRAME:
+ case JB_NOJB:
+ {
+ if(_10ms_buf && (jret == JB_OK)){
+ /* copy data */
+ memcpy(&pout_data[i*jitterbuffer->_10ms_size_bytes], _10ms_buf, jitterbuffer->_10ms_size_bytes);
+ }
+ else{
+ /* copy silence */
+ memset(&pout_data[i*jitterbuffer->_10ms_size_bytes], 0, jitterbuffer->_10ms_size_bytes);
+ }
+ }
+
+ default:
+ break;
+ }
+ TSK_FREE(_10ms_buf);
+ }
+
+ return (_10ms_count * jitterbuffer->_10ms_size_bytes);
+}
+
+static int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(jitterbuffer->jbuffer){
+ jb_reset_all(jitterbuffer->jbuffer);
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("invalid parameter");
+ return -1;
+ }
+}
+
+static int tdav_speakup_jitterbuffer_close(tmedia_jitterbuffer_t* self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
+ if(jitterbuffer->jbuffer){
+ jb_destroy(jitterbuffer->jbuffer);
+ jitterbuffer->jbuffer = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speakup jitterbuffer Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speakup_jitterbuffer_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = self;
+ TSK_DEBUG_INFO("Create speekup jitter buffer");
+ if(jitterbuffer){
+ /* init base */
+ tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speakup_jitterbuffer_dtor(tsk_object_t * self)
+{
+ tdav_speakup_jitterbuffer_t *jitterbuffer = self;
+ if(jitterbuffer){
+ /* deinit base */
+ tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* deinit self */
+ if(jitterbuffer->jbuffer){
+ jb_destroy(jitterbuffer->jbuffer);
+ jitterbuffer->jbuffer = tsk_null;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speakup_jitterbuffer_def_s =
+{
+ sizeof(tdav_speakup_jitterbuffer_t),
+ tdav_speakup_jitterbuffer_ctor,
+ tdav_speakup_jitterbuffer_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_jitterbuffer_plugin_def_t tdav_speakup_jitterbuffer_plugin_def_s =
+{
+ &tdav_speakup_jitterbuffer_def_s,
+ tmedia_audio,
+ "Audio/video JitterBuffer based on Speakup",
+
+ tdav_speakup_jitterbuffer_set,
+ tdav_speakup_jitterbuffer_open,
+ tdav_speakup_jitterbuffer_tick,
+ tdav_speakup_jitterbuffer_put,
+ tdav_speakup_jitterbuffer_get,
+ tdav_speakup_jitterbuffer_reset,
+ tdav_speakup_jitterbuffer_close,
+};
+const tmedia_jitterbuffer_plugin_def_t *tdav_speakup_jitterbuffer_plugin_def_t = &tdav_speakup_jitterbuffer_plugin_def_s;
+
+#endif /* !(HAVE_SPEEX_DSP && HAVE_SPEEX_JB) */
diff --git a/tinyDAV/src/audio/tdav_speex_denoise.c b/tinyDAV/src/audio/tdav_speex_denoise.c
new file mode 100644
index 0000000..4f344dd
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_denoise.c
@@ -0,0 +1,312 @@
+/*
+* Copyright (C) 2010-2011 Mamadou Diop.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speex_denoise.c
+* @brief Speex Denoiser (Noise suppression, AGC, AEC) Plugin
+*/
+#include "tinydav/audio/tdav_speex_denoise.h"
+
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_DENOISE) || HAVE_SPEEX_DENOISE)
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include <string.h>
+
+#include <speex/speex_preprocess.h>
+#include <speex/speex_echo.h>
+
+/** Speex denoiser*/
+typedef struct tdav_speex_denoise_s
+{
+ TMEDIA_DECLARE_DENOISE;
+
+ SpeexPreprocessState *preprocess_state_record;
+ SpeexPreprocessState *preprocess_state_playback;
+ SpeexEchoState *echo_state;
+
+ spx_int16_t* echo_output_frame;
+ uint32_t record_frame_size_samples, record_frame_size_bytes;
+ uint32_t playback_frame_size_samples, playback_frame_size_bytes;
+}
+tdav_speex_denoise_t;
+
+static int tdav_speex_denoise_set(tmedia_denoise_t* _self, const tmedia_param_t* param)
+{
+ tdav_speex_denoise_t *self = (tdav_speex_denoise_t *)_self;
+ if(!self || !param){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(param->value_type == tmedia_pvt_int32){
+ if(tsk_striequals(param->key, "echo-tail")){
+ int32_t echo_tail = *((int32_t*)param->value);
+ TSK_DEBUG_INFO("speex_set_echo_tail(%d) ignore", echo_tail); // because Speex AEC just do not work (use WebRTC)
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int tdav_speex_denoise_open(tmedia_denoise_t* self, uint32_t record_frame_size_samples, uint32_t record_sampling_rate, uint32_t record_channels, uint32_t playback_frame_size_samples, uint32_t playback_sampling_rate, uint32_t playback_channels)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ float f;
+ int i;
+
+ if (!denoiser->echo_state && TMEDIA_DENOISE(denoiser)->echo_supp_enabled) {
+ TSK_DEBUG_INFO("Init Aec frame_size[%u] filter_length[%u] SampleRate[%u]",
+ (uint32_t)(record_frame_size_samples),TMEDIA_DENOISE(denoiser)->echo_tail*record_frame_size_samples, record_sampling_rate);
+ if((denoiser->echo_state = speex_echo_state_init(record_frame_size_samples, TMEDIA_DENOISE(denoiser)->echo_tail))){
+ speex_echo_ctl(denoiser->echo_state, SPEEX_ECHO_SET_SAMPLING_RATE, &record_sampling_rate);
+ }
+ }
+
+ if (!denoiser->preprocess_state_record && !denoiser->preprocess_state_playback) {
+ denoiser->record_frame_size_samples = record_frame_size_samples;
+ denoiser->record_frame_size_bytes = (record_frame_size_samples << 1);
+ denoiser->playback_frame_size_samples = playback_frame_size_samples;
+ denoiser->playback_frame_size_bytes = (playback_frame_size_samples << 1);
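+ // (the << 1 converts sample counts to byte counts for 16-bit PCM)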
+
+ if((denoiser->preprocess_state_record = speex_preprocess_state_init(record_frame_size_samples, record_sampling_rate))
+ && (denoiser->preprocess_state_playback = speex_preprocess_state_init(playback_frame_size_samples, playback_sampling_rate))
+ ){
+
+ // Echo suppression
+ if(denoiser->echo_state){
+ int echo_supp , echo_supp_active = 0;
+
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_STATE, denoiser->echo_state);
+
+ TSK_FREE(denoiser->echo_output_frame);
+ denoiser->echo_output_frame = tsk_calloc(denoiser->record_frame_size_samples, sizeof(spx_int16_t));
+
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ TSK_DEBUG_INFO("AEC echo_supp level [%d] echo_supp_active level[%d] ", echo_supp , echo_supp_active);
+ echo_supp = -60 ;
+ echo_supp_active = -60 ;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ // TRACES
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS , &echo_supp );
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_GET_ECHO_SUPPRESS_ACTIVE , &echo_supp_active );
+ TSK_DEBUG_INFO("New aec echo_supp level [%d] echo_supp_active level[%d] ", echo_supp , echo_supp_active);
+ }
+
+ // Noise suppression
+ if(TMEDIA_DENOISE(denoiser)->noise_supp_enabled){
+ TSK_DEBUG_INFO("SpeexDSP: Noise supp enabled");
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ i = TMEDIA_DENOISE(denoiser)->noise_supp_level;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ }
+ else{
+ i = 0;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_playback, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ }
+
+ // Automatic gain control
+ if(TMEDIA_DENOISE(denoiser)->agc_enabled){
+ float agc_level = TMEDIA_DENOISE(denoiser)->agc_level;
+ TSK_DEBUG_INFO("SpeexDSP: AGC enabled");
+
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC_LEVEL, &agc_level);
+ }
+ else{
+ i = 0, f = 8000.0f;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_AGC_LEVEL, &f);
+ }
+
+ // Voice Activity detection
+ i = TMEDIA_DENOISE(denoiser)->vad_enabled ? 1 : 0;
+ speex_preprocess_ctl(denoiser->preprocess_state_record, SPEEX_PREPROCESS_SET_VAD, &i);
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to create Speex preprocessor state");
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+static int tdav_speex_denoise_echo_playback(tmedia_denoise_t* self, const void* echo_frame, uint32_t echo_frame_size_bytes)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->record_frame_size_bytes != echo_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->record_frame_size_bytes, echo_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->echo_state){
+ speex_echo_playback(denoiser->echo_state, echo_frame);
+ }
+ return 0;
+}
+
+
+
+static int tdav_speex_denoise_process_record(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes, tsk_bool_t* silence_or_noise)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ int vad;
+
+ if(denoiser->record_frame_size_bytes != audio_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->record_frame_size_bytes, audio_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->preprocess_state_record){
+ if(denoiser->echo_state && denoiser->echo_output_frame){
+ speex_echo_capture(denoiser->echo_state, audio_frame, denoiser->echo_output_frame);
+ memcpy(audio_frame, denoiser->echo_output_frame, denoiser->record_frame_size_bytes);
+ }
+ vad = speex_preprocess_run(denoiser->preprocess_state_record, audio_frame);
+ if(!vad && TMEDIA_DENOISE(denoiser)->vad_enabled){
+ *silence_or_noise = tsk_true;
+ }
+ }
+
+ return 0;
+}
+
+static int tdav_speex_denoise_process_playback(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->playback_frame_size_bytes != audio_frame_size_bytes){
+ TSK_DEBUG_ERROR("Size mismatch: %u<>%u", denoiser->playback_frame_size_bytes, audio_frame_size_bytes);
+ return -1;
+ }
+
+ if(denoiser->preprocess_state_playback){
+ speex_preprocess_run(denoiser->preprocess_state_playback, audio_frame);
+ }
+ return 0;
+}
+
+static int tdav_speex_denoise_close(tmedia_denoise_t* self)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->preprocess_state_record){
+ speex_preprocess_state_destroy(denoiser->preprocess_state_record);
+ denoiser->preprocess_state_record = tsk_null;
+ }
+ if(denoiser->preprocess_state_playback){
+ speex_preprocess_state_destroy(denoiser->preprocess_state_playback);
+ denoiser->preprocess_state_playback = tsk_null;
+ }
+ if(denoiser->echo_state){
+ speex_echo_state_destroy(denoiser->echo_state);
+ denoiser->echo_state = tsk_null;
+ }
+ TSK_FREE(denoiser->echo_output_frame);
+
+ return 0;
+}
+
+
+
+//
+// Speex denoiser Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speex_denoise_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* init base */
+ tmedia_denoise_init(TMEDIA_DENOISE(denoise));
+ /* init self */
+
+ TSK_DEBUG_INFO("Create SpeexDSP denoiser");
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speex_denoise_dtor(tsk_object_t * self)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* deinit base */
+ tmedia_denoise_deinit(TMEDIA_DENOISE(denoise));
+ /* deinit self */
+ if(denoise->preprocess_state_record){
+ speex_preprocess_state_destroy(denoise->preprocess_state_record);
+ denoise->preprocess_state_record = tsk_null;
+ }
+ if(denoise->preprocess_state_playback){
+ speex_preprocess_state_destroy(denoise->preprocess_state_playback);
+ denoise->preprocess_state_playback = tsk_null;
+ }
+ if(denoise->echo_state){
+ speex_echo_state_destroy(denoise->echo_state);
+ denoise->echo_state = tsk_null;
+ }
+ TSK_FREE(denoise->echo_output_frame);
+
+ TSK_DEBUG_INFO("*** SpeexDSP denoiser destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speex_denoise_def_s =
+{
+ sizeof(tdav_speex_denoise_t),
+ tdav_speex_denoise_ctor,
+ tdav_speex_denoise_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_denoise_plugin_def_t tdav_speex_denoise_plugin_def_s =
+{
+ &tdav_speex_denoise_def_s,
+
+ "Audio Denoiser based on SpeexDSP",
+
+ tdav_speex_denoise_set,
+ tdav_speex_denoise_open,
+ tdav_speex_denoise_echo_playback,
+ tdav_speex_denoise_process_record,
+ tdav_speex_denoise_process_playback,
+ tdav_speex_denoise_close,
+};
+const tmedia_denoise_plugin_def_t *tdav_speex_denoise_plugin_def_t = &tdav_speex_denoise_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_speex_jitterbuffer.c b/tinyDAV/src/audio/tdav_speex_jitterbuffer.c
new file mode 100644
index 0000000..d4639b9
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_jitterbuffer.c
@@ -0,0 +1,319 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speex_jitterbuffer.c
+ * @brief Speex Audio jitterbuffer Plugin
+ */
+#include "tinydav/audio/tdav_speex_jitterbuffer.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#if HAVE_SPEEX_DSP && HAVE_SPEEX_JB
+
+// rfc3551 - 4.5 Audio Encodings: all frame lengths are multiples of 10ms
+
+#include "tinymedia/tmedia_defaults.h"
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <speex/speex_jitter.h>
+
+/** Speex JitterBuffer*/
+typedef struct tdav_speex_jitterBuffer_s
+{
+ TMEDIA_DECLARE_JITTER_BUFFER;
+
+ JitterBuffer* state;
+ uint32_t rate;
+ uint32_t frame_duration;
+ uint32_t channels;
+ uint32_t x_data_size; // expected data size
+ uint16_t fake_seqnum; // on ptime mismatch, reassembled pkts will carry fake (invalid) seqnums
+ struct {
+ uint8_t* ptr;
+ tsk_size_t size;
+ tsk_size_t index;
+ } buff;
+
+ uint64_t num_pkt_in; // Number of incoming pkts since the last reset
+ uint64_t num_pkt_miss; // Number of times we got consecutive "JITTER_BUFFER_MISSING" results
+ uint64_t num_pkt_miss_max; // Max value for "num_pkt_miss" before reset()ing the jitter buffer
+}
+tdav_speex_jitterbuffer_t;
+
+static int tdav_speex_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
+{
+ TSK_DEBUG_ERROR("Not implemented");
+ return -2;
+}
+
+static int tdav_speex_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate, uint32_t channels)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ spx_int32_t tmp;
+
+ TSK_DEBUG_INFO("Open speex jb (ptime=%u, rate=%u)", frame_duration, rate);
+
+ if (!(jitterbuffer->state = jitter_buffer_init((int)frame_duration))) {
+ TSK_DEBUG_ERROR("jitter_buffer_init() failed");
+ return -2;
+ }
+ jitterbuffer->rate = rate;
+ jitterbuffer->frame_duration = frame_duration;
+ jitterbuffer->channels = channels;
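+ // x_data_size is the expected frame size in bytes, assuming 16-bit samples:
+ // (ptime_ms * rate) / 1000 samples * 2 bytes = (ptime_ms * rate) / 500, doubled for stereo.
+ // e.g. 20ms at 8000Hz mono -> (20 * 8000) / 500 = 320 bytes (160 samples).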
+ jitterbuffer->x_data_size = ((frame_duration * jitterbuffer->rate) / 500) << (channels == 2 ? 1 : 0);
+
+ jitterbuffer->num_pkt_in = 0;
+ jitterbuffer->num_pkt_miss = 0;
+ jitterbuffer->num_pkt_miss_max = (1000 / frame_duration) * 2; // 2 seconds missing --> "Houston, we have a problem"
+
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_GET_MARGIN, &tmp);
+ TSK_DEBUG_INFO("Default Jitter buffer margin=%d", tmp);
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_GET_MAX_LATE_RATE, &tmp);
+ TSK_DEBUG_INFO("Default Jitter max late rate=%d", tmp);
+
+ if ((tmp = tmedia_defaults_get_jb_margin()) >= 0) {
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_SET_MARGIN, &tmp);
+ TSK_DEBUG_INFO("New Jitter buffer margin=%d", tmp);
+ }
+ if ((tmp = tmedia_defaults_get_jb_max_late_rate()) >= 0) {
+ jitter_buffer_ctl(jitterbuffer->state, JITTER_BUFFER_SET_MAX_LATE_RATE, &tmp);
+ TSK_DEBUG_INFO("New Jitter buffer max late rate=%d", tmp);
+ }
+
+ return 0;
+}
+
+static int tdav_speex_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ if (!jitterbuffer->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return -1;
+ }
+ jitter_buffer_tick(jitterbuffer->state);
+ return 0;
+}
+
+static int tdav_speex_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ const trtp_rtp_header_t* rtp_hdr;
+ JitterBufferPacket jb_packet;
+ static uint16_t seq_num = 0;
+
+ if (!data || !data_size || !proto_hdr) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (!jb->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return -2;
+ }
+
+ rtp_hdr = TRTP_RTP_HEADER(proto_hdr);
+
+ jb_packet.user_data = 0;
+ jb_packet.span = jb->frame_duration;
+ jb_packet.len = jb->x_data_size;
+
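+ // The Speex jitter buffer works in abstract timestamp units: here both span and timestamp
+ // are expressed in milliseconds, so the timestamp is synthesized from the RTP sequence
+ // number scaled by the frame duration rather than taken from the RTP timestamp (sample units).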
+ if (jb->x_data_size == data_size) { /* ptime match */
+ jb_packet.data = data;
+ jb_packet.sequence = rtp_hdr->seq_num;
+ jb_packet.timestamp = (rtp_hdr->seq_num * jb_packet.span);
+ jitter_buffer_put(jb->state, &jb_packet);
+ }
+ else { /* ptime mismatch */
+ tsk_size_t i;
+ jb_packet.sequence = 0; // Ignore
+ if ((jb->buff.index + data_size) > jb->buff.size) {
+ if (!(jb->buff.ptr = tsk_realloc(jb->buff.ptr, (jb->buff.index + data_size)))) {
+ jb->buff.size = 0;
+ jb->buff.index = 0;
+ return 0;
+ }
+ jb->buff.size = (jb->buff.index + data_size);
+ }
+
+ memcpy(&jb->buff.ptr[jb->buff.index], data, data_size);
+ jb->buff.index += data_size;
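+ // Accumulate until at least one full expected-size (ptime) frame is available, then split
+ // the buffer into x_data_size chunks below and keep any remainder for the next packet.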
+
+ if (jb->buff.index >= jb->x_data_size) {
+ tsk_size_t copied = 0;
+ for (i = 0; (i + jb->x_data_size) <= jb->buff.index; i += jb->x_data_size) {
+ jb_packet.data = (char*)&jb->buff.ptr[i];
+ jb_packet.timestamp = (++jb->fake_seqnum * jb_packet.span);// reassembled pkt will have fake seqnum
+ jitter_buffer_put(jb->state, &jb_packet);
+ copied += jb->x_data_size;
+ }
+ if (copied == jb->buff.index) {
+ // all copied
+ jb->buff.index = 0;
+ }
+ else {
+ memmove(&jb->buff.ptr[0], &jb->buff.ptr[copied], (jb->buff.index - copied));
+ jb->buff.index -= copied;
+ }
+ }
+ }
+ ++jb->num_pkt_in;
+
+ return 0;
+}
+
+static tsk_size_t tdav_speex_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ JitterBufferPacket jb_packet;
+ int ret, miss = 0;
+ tsk_size_t ret_size = 0;
+
+ if (!out_data || !out_size) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+ if (!jb->state) {
+ TSK_DEBUG_ERROR("Invalid state");
+ return 0;
+ }
+ if (jb->x_data_size != out_size) { // consumer must request PTIME data
+ TSK_DEBUG_WARN("%d not expected as frame size. %u<>%u", out_size, jb->frame_duration, (out_size * 500) / jb->rate);
+ return 0;
+ }
+
+ jb_packet.data = out_data;
+ jb_packet.len = (spx_uint32_t)out_size;
+
+ if ((ret = jitter_buffer_get(jb->state, &jb_packet, jb->frame_duration/*(out_size * 500)/jb->rate*/, tsk_null)) != JITTER_BUFFER_OK) {
+ ++jb->num_pkt_miss;
+ switch (ret) {
+ case JITTER_BUFFER_MISSING:
+ /*TSK_DEBUG_INFO("JITTER_BUFFER_MISSING - %d", ret);*/
+ if (jb->num_pkt_miss > jb->num_pkt_miss_max /*too much missing pkts*/ && jb->num_pkt_in > jb->num_pkt_miss_max/*we're really receiving pkts*/) {
+ jb->num_pkt_miss = 0;
+ self->plugin->reset(self);
+ TSK_DEBUG_WARN("Too much missing audio pkts");
+ }
+ break;
+ case JITTER_BUFFER_INSERTION:
+ /*TSK_DEBUG_INFO("JITTER_BUFFER_INSERTION - %d", ret);*/
+ break;
+ default:
+ TSK_DEBUG_INFO("jitter_buffer_get() failed - %d", ret);
+ break;
+ }
+ // jitter_buffer_update_delay(jb->state, &jb_packet, NULL);
+ //return 0;
+ }
+ else {
+ jb->num_pkt_miss = 0; // reset
+ ret_size = jb_packet.len;
+ }
+ //jitter_buffer_update_delay(jb->state, &jb_packet, NULL);
+
+ return ret_size;
+}
+
+static int tdav_speex_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jb = (tdav_speex_jitterbuffer_t *)self;
+ if (jb->state) {
+ jitter_buffer_reset(jb->state);
+ }
+ jb->num_pkt_in = 0;
+ jb->num_pkt_miss = 0;
+ return 0;
+}
+
+static int tdav_speex_jitterbuffer_close(tmedia_jitterbuffer_t* self)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = (tdav_speex_jitterbuffer_t *)self;
+ if (jitterbuffer->state) {
+ jitter_buffer_destroy(jitterbuffer->state);
+ jitterbuffer->state = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speex jitterbuffer Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speex_jitterbuffer_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_jitterbuffer_t *jitterbuffer = self;
+ TSK_DEBUG_INFO("Create SpeexDSP jitter buffer");
+ if (jitterbuffer){
+ /* init base */
+ tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(jitterbuffer));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speex_jitterbuffer_dtor(tsk_object_t * self)
+{
+ tdav_speex_jitterbuffer_t *jb = self;
+ if (jb){
+ /* deinit base */
+ tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jb));
+ /* deinit self */
+ if (jb->state){
+ jitter_buffer_destroy(jb->state);
+ jb->state = tsk_null;
+ }
+ TSK_FREE(jb->buff.ptr);
+
+ TSK_DEBUG_INFO("*** SpeexDSP jb destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speex_jitterbuffer_def_s =
+{
+ sizeof(tdav_speex_jitterbuffer_t),
+ tdav_speex_jitterbuffer_ctor,
+ tdav_speex_jitterbuffer_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_jitterbuffer_plugin_def_t tdav_speex_jitterbuffer_plugin_def_s =
+{
+ &tdav_speex_jitterbuffer_def_s,
+ tmedia_audio,
+ "Audio JitterBuffer based on Speex",
+
+ tdav_speex_jitterbuffer_set,
+ tdav_speex_jitterbuffer_open,
+ tdav_speex_jitterbuffer_tick,
+ tdav_speex_jitterbuffer_put,
+ tdav_speex_jitterbuffer_get,
+ tdav_speex_jitterbuffer_reset,
+ tdav_speex_jitterbuffer_close,
+};
+const tmedia_jitterbuffer_plugin_def_t *tdav_speex_jitterbuffer_plugin_def_t = &tdav_speex_jitterbuffer_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP && HAVE_SPEEX_JB */
diff --git a/tinyDAV/src/audio/tdav_speex_resampler.c b/tinyDAV/src/audio/tdav_speex_resampler.c
new file mode 100644
index 0000000..f71ddd2
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_resampler.c
@@ -0,0 +1,254 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+#include "tinydav/audio/tdav_speex_resampler.h"
+
+#if HAVE_SPEEX_DSP && (!defined(HAVE_SPEEX_RESAMPLER) || HAVE_SPEEX_RESAMPLER)
+
+#include <speex/speex_resampler.h>
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_SPEEX_RESAMPLER_MAX_QUALITY 10
+
+/** Speex resampler*/
+typedef struct tdav_speex_resampler_s
+{
+ TMEDIA_DECLARE_RESAMPLER;
+
+ tsk_size_t in_size;
+ tsk_size_t out_size;
+ uint32_t in_channels;
+ uint32_t out_channels;
+ uint32_t bytes_per_sample;
+
+ struct{
+ void* ptr;
+ tsk_size_t size_in_samples;
+ } tmp_buffer;
+
+ SpeexResamplerState *state;
+}
+tdav_speex_resampler_t;
+
+static int tdav_speex_resampler_open(tmedia_resampler_t* self, uint32_t in_freq, uint32_t out_freq, uint32_t frame_duration, uint32_t in_channels, uint32_t out_channels, uint32_t quality, uint32_t bits_per_sample)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ int ret = 0;
+ uint32_t bytes_per_sample = (bits_per_sample >> 3);
+
+ if (in_channels != 1 && in_channels != 2) {
+ TSK_DEBUG_ERROR("%d not valid as input channel", in_channels);
+ return -1;
+ }
+ if (out_channels != 1 && out_channels != 2) {
+ TSK_DEBUG_ERROR("%d not valid as output channel", out_channels);
+ return -1;
+ }
+ if (bytes_per_sample != sizeof(spx_int16_t) && bytes_per_sample != sizeof(float)) {
+ TSK_DEBUG_ERROR("%d not valid as bits_per_sample", bits_per_sample);
+ return -1;
+ }
+
+ if (!(resampler->state = speex_resampler_init(in_channels, in_freq, out_freq, TSK_CLAMP(0, quality, TDAV_SPEEX_RESAMPLER_MAX_QUALITY), &ret))) {
+ TSK_DEBUG_ERROR("speex_resampler_init() returned %d", ret);
+ return -2;
+ }
+
+ resampler->bytes_per_sample = bytes_per_sample;
+ resampler->in_size = ((in_freq * frame_duration) / 1000) << (in_channels == 2 ? 1 : 0);
+ resampler->out_size = ((out_freq * frame_duration) / 1000) << (out_channels == 2 ? 1 : 0);
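+ // in_size/out_size are frame sizes in samples counted across all channels:
+ // (rate * ptime_ms) / 1000 samples per channel, doubled for stereo.
+ // e.g. 20ms at 16000Hz stereo -> 320 samples per channel, 640 total.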
+ resampler->in_channels = in_channels;
+ resampler->out_channels = out_channels;
+
+ if (in_channels != out_channels) {
+ resampler->tmp_buffer.size_in_samples = ((TSK_MAX(in_freq, out_freq) * frame_duration) / 1000) << (TSK_MAX(in_channels, out_channels) == 2 ? 1 : 0);
+ if (!(resampler->tmp_buffer.ptr = tsk_realloc(resampler->tmp_buffer.ptr, resampler->tmp_buffer.size_in_samples * resampler->bytes_per_sample))) {
+ resampler->tmp_buffer.size_in_samples = 0;
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+
+static tsk_size_t tdav_speex_resampler_process(tmedia_resampler_t* self, const void* in_data, tsk_size_t in_size_in_sample, void* out_data, tsk_size_t out_size_in_sample)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ int err = RESAMPLER_ERR_SUCCESS;
+ spx_uint32_t _out_size_in_sample = (spx_uint32_t)out_size_in_sample;
+ if (!resampler->state || !out_data) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return 0;
+ }
+
+ if (in_size_in_sample != resampler->in_size) {
+ TSK_DEBUG_ERROR("Input data has wrong size");
+ return 0;
+ }
+
+ if (out_size_in_sample < resampler->out_size) {
+ TSK_DEBUG_ERROR("Output data is too short");
+ return 0;
+ }
+
+ if (resampler->in_channels == resampler->out_channels) {
+ if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+ err = speex_resampler_process_int(resampler->state, 0,
+ (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (spx_int16_t *)out_data, &_out_size_in_sample);
+ }
+ else {
+ err = speex_resampler_process_float(resampler->state, 0,
+ (const float *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (float *)out_data, &_out_size_in_sample);
+ }
+ }
+ else {
+ spx_uint32_t i, j;
+ // in_channels = 1, out_channels = 2
+ if (resampler->in_channels == 1) {
+ if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+ err = speex_resampler_process_int(resampler->state, 0, (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample, resampler->tmp_buffer.ptr, &_out_size_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ spx_int16_t* pout_data = (spx_int16_t*)(out_data);
+ for (i = 0, j = 0; i < _out_size_in_sample; ++i, j += 2) {
+ pout_data[j] = pout_data[j + 1] = *(((const spx_int16_t*)resampler->tmp_buffer.ptr) + i);
+ }
+ }
+ }
+ else {
+ err = speex_resampler_process_float(resampler->state, 0, (const float *)in_data, (spx_uint32_t *)&in_size_in_sample, resampler->tmp_buffer.ptr, &_out_size_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ float* pout_data = (float*)(out_data);
+ for (i = 0, j = 0; i < _out_size_in_sample; ++i, j += 2) {
+ pout_data[j] = pout_data[j + 1] = *(((const float*)resampler->tmp_buffer.ptr) + i);
+ }
+ }
+ }
+
+ }
+ else {
+ // in_channels = 2, out_channels = 1
+ spx_uint32_t _out_size2_in_sample = (_out_size_in_sample << 1);
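+ // Stereo input, mono output: resample into the temporary buffer using twice the mono
+ // output sample count, then downmix by keeping only the first sample of each interleaved
+ // pair when copying to out_data.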
+ if (resampler->bytes_per_sample == sizeof(spx_int16_t)) {
+ err = speex_resampler_process_int(resampler->state, 0,
+ (const spx_int16_t *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (spx_int16_t *)resampler->tmp_buffer.ptr, &_out_size2_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ spx_int16_t* pout_data = (spx_int16_t*)(out_data);
+ _out_size_in_sample = (spx_uint32_t)resampler->out_size;
+ for (i = 0, j = 0; j < _out_size2_in_sample; ++i, j += 2) {
+ pout_data[i] = *(((const spx_int16_t*)resampler->tmp_buffer.ptr) + j);
+ }
+ }
+ }
+ else {
+ err = speex_resampler_process_float(resampler->state, 0,
+ (const float *)in_data, (spx_uint32_t *)&in_size_in_sample,
+ (float *)resampler->tmp_buffer.ptr, &_out_size2_in_sample);
+ if (err == RESAMPLER_ERR_SUCCESS) {
+ float* pout_data = (float*)(out_data);
+ for (i = 0, j = 0; j < _out_size2_in_sample; ++i, j += 2) {
+ pout_data[i] = *(((const float*)resampler->tmp_buffer.ptr) + j);
+ }
+ }
+ }
+ }
+ }
+
+ if (err != RESAMPLER_ERR_SUCCESS) {
+ TSK_DEBUG_ERROR("speex_resampler_process_int() failed with error code %d", err);
+ return 0;
+ }
+ return (tsk_size_t)_out_size_in_sample;
+}
+
+static int tdav_speex_resampler_close(tmedia_resampler_t* self)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+
+ if (resampler->state) {
+ speex_resampler_destroy(resampler->state);
+ resampler->state = tsk_null;
+ }
+ return 0;
+}
+
+
+
+//
+// Speex resampler Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speex_resampler_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ if (resampler){
+ /* init base */
+ tmedia_resampler_init(TMEDIA_RESAMPLER(resampler));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speex_resampler_dtor(tsk_object_t * self)
+{
+ tdav_speex_resampler_t *resampler = (tdav_speex_resampler_t *)self;
+ if (resampler){
+ /* deinit base */
+ tmedia_resampler_deinit(TMEDIA_RESAMPLER(resampler));
+ /* deinit self */
+ if (resampler->state) {
+ speex_resampler_destroy(resampler->state);
+ resampler->state = tsk_null;
+ }
+ TSK_FREE(resampler->tmp_buffer.ptr);
+
+ TSK_DEBUG_INFO("*** SpeexDSP resampler (plugin) destroyed ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speex_resampler_def_s =
+{
+ sizeof(tdav_speex_resampler_t),
+ tdav_speex_resampler_ctor,
+ tdav_speex_resampler_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_resampler_plugin_def_t tdav_speex_resampler_plugin_def_s =
+{
+ &tdav_speex_resampler_def_s,
+
+ "Audio Resampler based on Speex",
+
+ tdav_speex_resampler_open,
+ tdav_speex_resampler_process,
+ tdav_speex_resampler_close,
+};
+const tmedia_resampler_plugin_def_t *tdav_speex_resampler_plugin_def_t = &tdav_speex_resampler_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */
diff --git a/tinyDAV/src/audio/tdav_webrtc_denoise.c b/tinyDAV/src/audio/tdav_webrtc_denoise.c
new file mode 100644
index 0000000..598470a
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_webrtc_denoise.c
@@ -0,0 +1,627 @@
+/*
+* Copyright (C) 2011-2015 Mamadou DIOP
+* Copyright (C) 2011-2015 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_webrtc_denoise.c
+* @brief Google WebRTC Denoiser (Noise suppression, AGC, AEC) Plugin
+*/
+#include "tinydav/audio/tdav_webrtc_denoise.h"
+
+#if HAVE_WEBRTC && (!defined(HAVE_WEBRTC_DENOISE) || HAVE_WEBRTC_DENOISE)
+
+#include "tsk_string.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include "tinymedia/tmedia_defaults.h"
+#include "tinymedia/tmedia_resampler.h"
+
+#include <string.h>
+
+#if !defined(WEBRTC_AEC_AGGRESSIVE)
+# define WEBRTC_AEC_AGGRESSIVE 0
+#endif
+#if !defined(WEBRTC_MAX_ECHO_TAIL)
+# define WEBRTC_MAX_ECHO_TAIL 500
+#endif
+#if !defined(WEBRTC_MIN_ECHO_TAIL)
+# define WEBRTC_MIN_ECHO_TAIL 20 // 0 will cause random crashes
+#endif
+
+#if TDAV_UNDER_MOBILE || 1 // FIXME
+typedef int16_t sample_t;
+#else
+typedef float sample_t;
+#endif
+
+typedef struct tdav_webrtc_pin_xs
+{
+ uint32_t n_duration;
+ uint32_t n_rate;
+ uint32_t n_channels;
+ uint32_t n_sample_size;
+}
+tdav_webrtc_pin_xt;
+
+typedef struct tdav_webrtc_resampler_s
+{
+ TSK_DECLARE_OBJECT;
+
+ tmedia_resampler_t* p_resampler;
+ void* p_bufftmp_ptr; // used to convert float <->int16
+ tsk_size_t n_bufftmp_size_in_bytes;
+
+ struct {
+ tdav_webrtc_pin_xt x_pin;
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ } in;
+ struct {
+ tdav_webrtc_pin_xt x_pin;
+ void* p_buff_ptr;
+ tsk_size_t n_buff_size_in_bytes;
+ tsk_size_t n_buff_size_in_samples;
+ } out;
+}
+tdav_webrtc_resampler_t;
+
+static int _tdav_webrtc_resampler_create(const tdav_webrtc_pin_xt* p_pin_in, const tdav_webrtc_pin_xt* p_pin_out, tdav_webrtc_resampler_t **pp_resampler);
+static int _tdav_webrtc_resampler_process(tdav_webrtc_resampler_t* p_self, const void* p_buff_ptr, tsk_size_t n_buff_size_in_bytes);
+
+/** WebRTC denoiser (AEC, NS, AGC...) */
+typedef struct tdav_webrtc_denoise_s
+{
+ TMEDIA_DECLARE_DENOISE;
+
+ void *AEC_inst;
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ SpeexPreprocessState *SpeexDenoiser_proc;
+#else
+ TDAV_NsHandle *NS_inst;
+#endif
+
+ uint32_t echo_tail;
+ uint32_t echo_skew;
+
+ struct {
+ tdav_webrtc_resampler_t* p_rpl_in2den; // input -> denoiser
+ tdav_webrtc_resampler_t* p_rpl_den2in; // denoiser -> input
+ } record;
+ struct {
+ tdav_webrtc_resampler_t* p_rpl_in2den; // input -> denoiser
+ tdav_webrtc_resampler_t* p_rpl_den2in; // denoiser -> input
+ } playback;
+
+ struct {
+ uint32_t nb_samples_per_process;
+ uint32_t sampling_rate;
+ uint32_t channels; // always "1"
+ } neg;
+
+ TSK_DECLARE_SAFEOBJ;
+}
+tdav_webrtc_denoise_t;
+
+static int tdav_webrtc_denoise_set(tmedia_denoise_t* _self, const tmedia_param_t* param)
+{
+ tdav_webrtc_denoise_t *self = (tdav_webrtc_denoise_t *)_self;
+ if (!self || !param) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "echo-tail")) {
+ int32_t echo_tail = *((int32_t*)param->value);
+ self->echo_tail = TSK_CLAMP(WEBRTC_MIN_ECHO_TAIL, echo_tail, WEBRTC_MAX_ECHO_TAIL);
+ TSK_DEBUG_INFO("set_echo_tail (%d->%d)", echo_tail, self->echo_tail);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int tdav_webrtc_denoise_open(tmedia_denoise_t* self, uint32_t record_frame_size_samples, uint32_t record_sampling_rate, uint32_t record_channels, uint32_t playback_frame_size_samples, uint32_t playback_sampling_rate, uint32_t playback_channels)
+{
+ tdav_webrtc_denoise_t *denoiser = (tdav_webrtc_denoise_t *)self;
+ int ret;
+ tdav_webrtc_pin_xt pin_record_in = { 0 }, pin_record_den = { 0 }, pin_playback_in = { 0 }, pin_playback_den = { 0 };
+
+ if (!denoiser) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if (denoiser->AEC_inst ||
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ denoiser->SpeexDenoiser_proc
+#else
+ denoiser->NS_inst
+#endif
+ ){
+ TSK_DEBUG_ERROR("Denoiser already initialized");
+ return -2;
+ }
+
+ denoiser->echo_tail = TSK_CLAMP(WEBRTC_MIN_ECHO_TAIL, TMEDIA_DENOISE(denoiser)->echo_tail, WEBRTC_MAX_ECHO_TAIL);
+ denoiser->echo_skew = TMEDIA_DENOISE(denoiser)->echo_skew;
+ TSK_DEBUG_INFO("echo_tail=%d, echo_skew=%d, echo_supp_enabled=%d, noise_supp_enabled=%d", denoiser->echo_tail, denoiser->echo_skew, self->echo_supp_enabled, self->noise_supp_enabled);
+
+ //
+ // DENOISER
+ //
+#if TDAV_UNDER_MOBILE // AECM= [8-16]k, AEC=[8-32]k
+ denoiser->neg.sampling_rate = TSK_MIN(TSK_MAX(record_sampling_rate, playback_sampling_rate), 16000);
+#else
+ denoiser->neg.sampling_rate = TSK_MIN(TSK_MAX(record_sampling_rate, playback_sampling_rate), 16000); // FIXME: 32000, although accepted, makes echo_process() fail
+#endif
+ denoiser->neg.nb_samples_per_process = /*TSK_CLAMP(80,*/ ((denoiser->neg.sampling_rate * 10) / 1000)/*, 160)*/; // Supported by the module: "80"(10ms) and "160"(20ms)
+ denoiser->neg.channels = 1;
+
+ //
+ // RECORD
+ //
+ TSK_OBJECT_SAFE_FREE(denoiser->record.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(denoiser->record.p_rpl_in2den);
+ pin_record_in.n_sample_size = sizeof(int16_t);
+ pin_record_in.n_rate = record_sampling_rate;
+ pin_record_in.n_channels = record_channels;
+ pin_record_in.n_duration = (((record_frame_size_samples * 1000) / record_sampling_rate)) / record_channels;
+ pin_record_den.n_sample_size = sizeof(sample_t);
+ pin_record_den.n_rate = denoiser->neg.sampling_rate;
+
+ pin_record_den.n_channels = 1;
+ pin_record_den.n_duration = pin_record_in.n_duration;
+ if (pin_record_in.n_sample_size != pin_record_den.n_sample_size || pin_record_in.n_rate != pin_record_den.n_rate || pin_record_in.n_channels != pin_record_den.n_channels) {
+ if ((ret = _tdav_webrtc_resampler_create(&pin_record_in, &pin_record_den, &denoiser->record.p_rpl_in2den))) {
+ return ret;
+ }
+ if ((ret = _tdav_webrtc_resampler_create(&pin_record_den, &pin_record_in, &denoiser->record.p_rpl_den2in))) {
+ return ret;
+ }
+ }
+ //
+ // PLAYBACK
+ //
+ TSK_OBJECT_SAFE_FREE(denoiser->playback.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(denoiser->playback.p_rpl_in2den);
+ pin_playback_in.n_sample_size = sizeof(int16_t);
+ pin_playback_in.n_rate = playback_sampling_rate;
+ pin_playback_in.n_channels = playback_channels;
+ pin_playback_in.n_duration = (((playback_frame_size_samples * 1000) / playback_sampling_rate)) / playback_channels;
+ pin_playback_den.n_sample_size = sizeof(sample_t);
+ pin_playback_den.n_rate = denoiser->neg.sampling_rate;
+ pin_playback_den.n_channels = 1;
+ pin_playback_den.n_duration = pin_playback_in.n_duration;
+ if (pin_playback_in.n_sample_size != pin_playback_den.n_sample_size || pin_playback_in.n_rate != pin_playback_den.n_rate || pin_playback_in.n_channels != pin_playback_den.n_channels) {
+ if ((ret = _tdav_webrtc_resampler_create(&pin_playback_in, &pin_playback_den, &denoiser->playback.p_rpl_in2den))) {
+ return ret;
+ }
+ if ((ret = _tdav_webrtc_resampler_create(&pin_playback_den, &pin_playback_in, &denoiser->playback.p_rpl_den2in))) {
+ return ret;
+ }
+ }
+
+ //
+ // AEC instance
+ //
+ if ((ret = TDAV_WebRtcAec_Create(&denoiser->AEC_inst))) {
+ TSK_DEBUG_ERROR("WebRtcAec_Create failed with error code = %d", ret);
+ return ret;
+ }
+ if ((ret = TDAV_WebRtcAec_Init(denoiser->AEC_inst, denoiser->neg.sampling_rate, denoiser->neg.sampling_rate))) {
+ TSK_DEBUG_ERROR("WebRtcAec_Init failed with error code = %d", ret);
+ return ret;
+ }
+
+#if TDAV_UNDER_MOBILE
+#else
+ {
+ AecConfig aecConfig;
+#if WEBRTC_AEC_AGGRESSIVE
+ aecConfig.nlpMode = kAecNlpAggressive;
+#else
+ aecConfig.nlpMode = kAecNlpModerate;
+#endif
+ aecConfig.skewMode = kAecFalse;
+ aecConfig.metricsMode = kAecTrue;
+ aecConfig.delay_logging = kAecFalse;
+ if ((ret = WebRtcAec_set_config(denoiser->AEC_inst, aecConfig))) {
+ TSK_DEBUG_ERROR("WebRtcAec_set_config failed with error code = %d", ret);
+ }
+ }
+#endif
+
+
+ //
+ // Noise Suppression instance
+ //
+ if (TMEDIA_DENOISE(denoiser)->noise_supp_enabled) {
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if ((denoiser->SpeexDenoiser_proc = speex_preprocess_state_init((pin_record_den.n_rate / 1000) * pin_record_den.n_duration, pin_record_den.n_rate))) {
+ int i = 1;
+ speex_preprocess_ctl(denoiser->SpeexDenoiser_proc, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ i = TMEDIA_DENOISE(denoiser)->noise_supp_level;
+ speex_preprocess_ctl(denoiser->SpeexDenoiser_proc, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ }
+#else
+ if ((ret = TDAV_WebRtcNs_Create(&denoiser->NS_inst))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Create failed with error code = %d", ret);
+ return ret;
+ }
+ if ((ret = TDAV_WebRtcNs_Init(denoiser->NS_inst, 80))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Init failed with error code = %d", ret);
+ return ret;
+ }
+#endif
+ }
+
+ TSK_DEBUG_INFO("WebRTC denoiser opened: record:%uHz,%uchannels // playback:%uHz,%uchannels // neg:%uHz,%uchannels",
+ record_sampling_rate, record_channels,
+ playback_sampling_rate, playback_channels,
+ denoiser->neg.sampling_rate, denoiser->neg.channels);
+
+ return ret;
+}
+
+static int tdav_webrtc_denoise_echo_playback(tmedia_denoise_t* self, const void* echo_frame, uint32_t echo_frame_size_bytes)
+{
+ tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)self;
+ int ret = 0;
+
+ tsk_safeobj_lock(p_self);
+ if (p_self->AEC_inst && echo_frame && echo_frame_size_bytes) {
+ const sample_t* _echo_frame = (const sample_t*)echo_frame;
+ tsk_size_t _echo_frame_size_bytes = echo_frame_size_bytes;
+ tsk_size_t _echo_frame_size_samples = (_echo_frame_size_bytes / sizeof(int16_t));
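+ // The far-end (playback) signal is first resampled to the negotiated denoiser rate, then
+ // buffered into the AEC below in blocks of neg.nb_samples_per_process samples (10ms each).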
+ // IN -> DEN
+ if (p_self->playback.p_rpl_in2den) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->playback.p_rpl_in2den, _echo_frame, _echo_frame_size_bytes))) {
+ goto bail;
+ }
+ _echo_frame = p_self->playback.p_rpl_in2den->out.p_buff_ptr;
+ _echo_frame_size_bytes = p_self->playback.p_rpl_in2den->out.n_buff_size_in_bytes;
+ _echo_frame_size_samples = p_self->playback.p_rpl_in2den->out.n_buff_size_in_samples;
+ }
+ // PROCESS
+ if (_echo_frame_size_samples && _echo_frame) {
+ uint32_t _samples;
+ for (_samples = 0; _samples < _echo_frame_size_samples; _samples += p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcAec_BufferFarend(p_self->AEC_inst, &_echo_frame[_samples], p_self->neg.nb_samples_per_process))){
+ TSK_DEBUG_ERROR("WebRtcAec_BufferFarend failed with error code = %d, nb_samples_per_process=%u", ret, p_self->neg.nb_samples_per_process);
+ goto bail;
+ }
+ }
+ }
+ }
+bail:
+ tsk_safeobj_unlock(p_self);
+ return ret;
+}
+
+static int tdav_webrtc_denoise_process_record(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes, tsk_bool_t* silence_or_noise)
+{
+ tdav_webrtc_denoise_t *p_self = (tdav_webrtc_denoise_t *)self;
+ int ret = 0;
+
+ *silence_or_noise = tsk_false;
+
+ tsk_safeobj_lock(p_self);
+
+ if (p_self->AEC_inst && audio_frame && audio_frame_size_bytes) {
+ tsk_size_t _samples;
+ const sample_t* _audio_frame = (const sample_t*)audio_frame;
+ tsk_size_t _audio_frame_size_bytes = audio_frame_size_bytes;
+ tsk_size_t _audio_frame_size_samples = (_audio_frame_size_bytes / sizeof(int16_t));
+ // IN -> DEN
+ if (p_self->record.p_rpl_in2den) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->record.p_rpl_in2den, _audio_frame, _audio_frame_size_bytes))) {
+ goto bail;
+ }
+ _audio_frame = p_self->record.p_rpl_in2den->out.p_buff_ptr;
+ _audio_frame_size_bytes = p_self->record.p_rpl_in2den->out.n_buff_size_in_bytes;
+ _audio_frame_size_samples = p_self->record.p_rpl_in2den->out.n_buff_size_in_samples;
+ }
+ // NOISE SUPPRESSION
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if (p_self->SpeexDenoiser_proc) {
+ speex_preprocess_run(p_self->SpeexDenoiser_proc, (spx_int16_t*)_audio_frame);
+ }
+#else
+ // WebRTC NoiseSupp only accepts 10ms frames
+ // Our encoder will always output 20ms frames ==> execute 2x noise_supp
+ if (p_self->NS_inst) {
+ for (_samples = 0; _samples < _audio_frame_size_samples; _samples+= p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcNs_Process(p_self->NS_inst, &_audio_frame[_samples], tsk_null, _audio_frame, tsk_null))) {
+ TSK_DEBUG_ERROR("WebRtcNs_Process with error code = %d", ret);
+ goto bail;
+ }
+ }
+ }
+#endif
+ // PROCESS
+ if (_audio_frame_size_samples && _audio_frame) {
+ for (_samples = 0; _samples < _audio_frame_size_samples; _samples += p_self->neg.nb_samples_per_process) {
+ if ((ret = TDAV_WebRtcAec_Process(p_self->AEC_inst, &_audio_frame[_samples], tsk_null, (sample_t*)&_audio_frame[_samples], tsk_null, p_self->neg.nb_samples_per_process, p_self->echo_tail, p_self->echo_skew))){
+ TSK_DEBUG_ERROR("WebRtcAec_Process with error code = %d, nb_samples_per_process=%u", ret, p_self->neg.nb_samples_per_process);
+ goto bail;
+ }
+ }
+ }
+ // DEN -> IN
+ if (p_self->record.p_rpl_den2in) {
+ if ((ret = _tdav_webrtc_resampler_process(p_self->record.p_rpl_den2in, _audio_frame, _audio_frame_size_bytes))) {
+ goto bail;
+ }
+ _audio_frame = p_self->record.p_rpl_den2in->out.p_buff_ptr;
+ _audio_frame_size_bytes = p_self->record.p_rpl_den2in->out.n_buff_size_in_bytes;
+ _audio_frame_size_samples = p_self->record.p_rpl_den2in->out.n_buff_size_in_samples;
+ }
+ // Sanity check
+ if (_audio_frame_size_bytes != audio_frame_size_bytes) {
+ TSK_DEBUG_ERROR("Size mismatch: %u <> %u", _audio_frame_size_bytes, audio_frame_size_bytes);
+ ret = -3;
+ goto bail;
+ }
+ if (audio_frame != (const void*)_audio_frame) {
+ memcpy(audio_frame, _audio_frame, _audio_frame_size_bytes);
+ }
+ }
+
+bail:
+ tsk_safeobj_unlock(p_self);
+ return ret;
+}
+
+static int tdav_webrtc_denoise_process_playback(tmedia_denoise_t* self, void* audio_frame, uint32_t audio_frame_size_bytes)
+{
+ tdav_webrtc_denoise_t *denoiser = (tdav_webrtc_denoise_t *)self;
+
+ (void)(denoiser);
+
+ // Not mandatory to denoise audio before playback.
+ // All Doubango clients support noise suppression.
+ return 0;
+}
+
+static int tdav_webrtc_denoise_close(tmedia_denoise_t* self)
+{
+ tdav_webrtc_denoise_t *denoiser = (tdav_webrtc_denoise_t *)self;
+
+ tsk_safeobj_lock(denoiser);
+ if (denoiser->AEC_inst) {
+ TDAV_WebRtcAec_Free(denoiser->AEC_inst);
+ denoiser->AEC_inst = tsk_null;
+ }
+#if HAVE_SPEEX_DSP && PREFER_SPEEX_DENOISER
+ if (denoiser->SpeexDenoiser_proc) {
+ speex_preprocess_state_destroy(denoiser->SpeexDenoiser_proc);
+ denoiser->SpeexDenoiser_proc = tsk_null;
+ }
+#else
+ if (denoiser->NS_inst) {
+ TDAV_WebRtcNs_Free(denoiser->NS_inst);
+ denoiser->NS_inst = tsk_null;
+ }
+#endif
+ tsk_safeobj_unlock(denoiser);
+
+ return 0;
+}
+
+static int _tdav_webrtc_resampler_create(const tdav_webrtc_pin_xt* p_pin_in, const tdav_webrtc_pin_xt* p_pin_out, tdav_webrtc_resampler_t **pp_resampler)
+{
+ extern const tsk_object_def_t *tdav_webrtc_resampler_def_t;
+ int ret = 0;
+ if (!p_pin_in || !p_pin_out || !pp_resampler || *pp_resampler) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (!(*pp_resampler = tsk_object_new(tdav_webrtc_resampler_def_t))) {
+ TSK_DEBUG_ERROR("Failed to create resampler object");
+ ret = -3;
+ goto bail;
+ }
+ if (!((*pp_resampler)->p_resampler = tmedia_resampler_create())) {
+ ret = -3;
+ goto bail;
+ }
+ ret = tmedia_resampler_open((*pp_resampler)->p_resampler,
+ p_pin_in->n_rate, p_pin_out->n_rate,
+ p_pin_in->n_duration,
+ p_pin_in->n_channels, p_pin_out->n_channels,
+ TMEDIA_RESAMPLER_QUALITY,
+ (p_pin_out->n_sample_size << 3));
+ if (ret) {
+ TSK_DEBUG_ERROR("Failed to open resampler: in_rate=%u,in_duration=%u,in_channels=%u /// out_rate=%u,out_duration=%u,out_channels=%u",
+ p_pin_in->n_rate, p_pin_in->n_duration, p_pin_in->n_channels,
+ p_pin_out->n_rate, p_pin_out->n_duration, p_pin_out->n_channels);
+ goto bail;
+ }
+
+ (*pp_resampler)->out.n_buff_size_in_bytes = ((((p_pin_out->n_rate * p_pin_out->n_duration) / 1000)) * p_pin_out->n_channels) * p_pin_out->n_sample_size;
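+ // Output buffer size in bytes: (rate * duration_ms / 1000) samples per channel * channels * bytes per sample.
+ // e.g. 20ms at 16000Hz, mono, 2-byte samples -> 320 * 1 * 2 = 640 bytes.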
+ (*pp_resampler)->out.p_buff_ptr = tsk_malloc((*pp_resampler)->out.n_buff_size_in_bytes);
+ if (!(*pp_resampler)->out.p_buff_ptr) {
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size=%u", (*pp_resampler)->out.n_buff_size_in_bytes);
+ ret = -3;
+ goto bail;
+ }
+ (*pp_resampler)->out.n_buff_size_in_samples = (*pp_resampler)->out.n_buff_size_in_bytes / p_pin_out->n_sample_size;
+ (*pp_resampler)->in.n_buff_size_in_bytes = ((((p_pin_in->n_rate * p_pin_in->n_duration) / 1000)) * p_pin_in->n_channels) * p_pin_in->n_sample_size;
+ (*pp_resampler)->in.n_buff_size_in_samples = (*pp_resampler)->in.n_buff_size_in_bytes / p_pin_in->n_sample_size;
+
+ (*pp_resampler)->n_bufftmp_size_in_bytes = (((48000 * TSK_MAX(p_pin_in->n_duration, p_pin_out->n_duration)) / 1000) * 2/*channels*/) * sizeof(float); // Max
+ (*pp_resampler)->p_bufftmp_ptr = tsk_malloc((*pp_resampler)->n_bufftmp_size_in_bytes);
+ if (!(*pp_resampler)->p_bufftmp_ptr) {
+ TSK_DEBUG_ERROR("Failed to allocate buffer with size:%u", (*pp_resampler)->n_bufftmp_size_in_bytes);
+ ret = -3;
+ goto bail;
+ }
+
+ memcpy(&(*pp_resampler)->in.x_pin, p_pin_in, sizeof(tdav_webrtc_pin_xt));
+ memcpy(&(*pp_resampler)->out.x_pin, p_pin_out, sizeof(tdav_webrtc_pin_xt));
+bail:
+ if (ret) {
+ TSK_OBJECT_SAFE_FREE((*pp_resampler));
+ }
+ return ret;
+}
+
+static int _tdav_webrtc_resampler_process(tdav_webrtc_resampler_t *p_self, const void* p_buff_ptr, tsk_size_t n_buff_size_in_bytes)
+{
+ tsk_size_t n_out_size;
+ const void* _p_buff_ptr = p_buff_ptr;
+ tsk_size_t _n_buff_size_in_bytes = n_buff_size_in_bytes;
+ tsk_size_t _n_buff_size_in_samples;
+
+ if (!p_self || !p_buff_ptr || !n_buff_size_in_bytes) {
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ if (p_self->in.n_buff_size_in_bytes != n_buff_size_in_bytes) {
+ TSK_DEBUG_ERROR("Invalid input size: %u <> %u", p_self->in.n_buff_size_in_bytes, n_buff_size_in_bytes);
+ return -2;
+ }
+ _n_buff_size_in_samples = p_self->in.n_buff_size_in_samples;
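+ // When the input and output pins use different sample formats, convert through the scratch
+ // buffer before resampling. Values stay in the int16 range, so the conversion below is a
+ // plain cast (int16 widened to float, or float truncated back to int16) with no scaling.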
+ if (p_self->in.x_pin.n_sample_size != p_self->out.x_pin.n_sample_size) {
+ tsk_size_t index;
+ if (p_self->in.x_pin.n_sample_size == sizeof(int16_t)) {
+ // int16_t -> float
+ const int16_t* p_src = (const int16_t*)p_buff_ptr;
+ float* p_dst = (float*)p_self->p_bufftmp_ptr;
+ for (index = 0; index < _n_buff_size_in_samples; ++index) {
+ p_dst[index] = (float)p_src[index];
+ }
+ }
+ else {
+ // float -> int16_t
+ const float* p_src = (const float*)p_buff_ptr;
+ int16_t* p_dst = (int16_t*)p_self->p_bufftmp_ptr;
+ for (index = 0; index < _n_buff_size_in_samples; ++index) {
+ p_dst[index] = (int16_t)p_src[index];
+ }
+ }
+ _p_buff_ptr = p_self->p_bufftmp_ptr;
+ _n_buff_size_in_bytes = p_self->in.n_buff_size_in_bytes;
+ }
+ n_out_size = tmedia_resampler_process(p_self->p_resampler, _p_buff_ptr, _n_buff_size_in_samples, (int16_t*)p_self->out.p_buff_ptr, p_self->out.n_buff_size_in_samples);
+ if (n_out_size != p_self->out.n_buff_size_in_samples) {
+ TSK_DEBUG_ERROR("Invalid output size: %u <> %u", n_out_size, p_self->out.n_buff_size_in_bytes);
+ return -4;
+ }
+ return 0;
+}
+
+//
+// WEBRTC resampler object definition
+//
+static tsk_object_t* tdav_webrtc_resampler_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_webrtc_resampler_t *p_resampler = (tdav_webrtc_resampler_t*)self;
+ if (p_resampler) {
+
+ }
+ return self;
+}
+static tsk_object_t* tdav_webrtc_resampler_dtor(tsk_object_t * self)
+{
+ tdav_webrtc_resampler_t *p_resampler = (tdav_webrtc_resampler_t*)self;
+ if (p_resampler) {
+ TSK_OBJECT_SAFE_FREE(p_resampler->p_resampler);
+ TSK_FREE(p_resampler->out.p_buff_ptr);
+ TSK_FREE(p_resampler->p_bufftmp_ptr);
+ }
+ return self;
+}
+static const tsk_object_def_t tdav_webrtc_resampler_def_s =
+{
+ sizeof(tdav_webrtc_resampler_t),
+ tdav_webrtc_resampler_ctor,
+ tdav_webrtc_resampler_dtor,
+ tsk_object_cmp,
+};
+const tsk_object_def_t *tdav_webrtc_resampler_def_t = &tdav_webrtc_resampler_def_s;
+
+
+//
+// WEBRTC denoiser Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_webrtc_denoise_ctor(tsk_object_t * _self, va_list * app)
+{
+ tdav_webrtc_denoise_t *self = _self;
+ if (self){
+ /* init base */
+ tmedia_denoise_init(TMEDIA_DENOISE(self));
+ /* init self */
+ tsk_safeobj_init(self);
+ self->neg.channels = 1;
+
+ TSK_DEBUG_INFO("Create WebRTC denoiser");
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_webrtc_denoise_dtor(tsk_object_t * _self)
+{
+ tdav_webrtc_denoise_t *self = _self;
+ if (self){
+ /* deinit base (will close the denoise if not done yet) */
+ tmedia_denoise_deinit(TMEDIA_DENOISE(self));
+ /* deinit self */
+ tdav_webrtc_denoise_close(TMEDIA_DENOISE(self));
+ TSK_OBJECT_SAFE_FREE(self->record.p_rpl_in2den);
+ TSK_OBJECT_SAFE_FREE(self->record.p_rpl_den2in);
+ TSK_OBJECT_SAFE_FREE(self->playback.p_rpl_in2den);
+ TSK_OBJECT_SAFE_FREE(self->playback.p_rpl_den2in);
+ tsk_safeobj_deinit(self);
+
+ TSK_DEBUG_INFO("*** Destroy WebRTC denoiser ***");
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_webrtc_denoise_def_s =
+{
+ sizeof(tdav_webrtc_denoise_t),
+ tdav_webrtc_denoise_ctor,
+ tdav_webrtc_denoise_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_denoise_plugin_def_t tdav_webrtc_denoise_plugin_def_s =
+{
+ &tdav_webrtc_denoise_def_s,
+
+ "Audio Denoiser based on Google WebRTC",
+
+ tdav_webrtc_denoise_set,
+ tdav_webrtc_denoise_open,
+ tdav_webrtc_denoise_echo_playback,
+ tdav_webrtc_denoise_process_record,
+ tdav_webrtc_denoise_process_playback,
+ tdav_webrtc_denoise_close,
+};
+const tmedia_denoise_plugin_def_t *tdav_webrtc_denoise_plugin_def_t = &tdav_webrtc_denoise_plugin_def_s;
+
+
+#endif /* HAVE_WEBRTC */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx b/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx
new file mode 100644
index 0000000..c3a88e3
--- /dev/null
+++ b/tinyDAV/src/audio/wasapi/tdav_consumer_wasapi.cxx
@@ -0,0 +1,676 @@
+/*Copyright (C) 2013 Mamadou DIOP
+* Copyright (C) 2013-2014 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_consumer_wasapi.cxx
+ * @brief Microsoft Windows Audio Session API (WASAPI) consumer.
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
+ */
+#include "tinydav/audio/wasapi/tdav_consumer_wasapi.h"
+
+#if HAVE_WASAPI
+
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_condwait.h"
+#include "tsk_debug.h"
+
+#include <windows.h>
+#include <audioclient.h>
+#include <phoneaudioclient.h>
+
+#include <speex/speex_buffer.h>
+
+#if !defined(TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT)
+# define TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT 4
+#endif
+#define WASAPI_MILLIS_TO_100NS(MILLIS) (((LONGLONG)(MILLIS)) * 10000ui64)
+#define WASAPI_100NS_TO_MILLIS(NANOS) (((LONGLONG)(NANOS)) / 10000ui64)
+
+#define WASAPI_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[WASAPI Consumer] " FMT, ##__VA_ARGS__)
+
+struct tdav_consumer_wasapi_s;
+
+namespace Doubango
+{
+ namespace VoIP
+ {
+ ref class AudioRender sealed
+ {
+ public:
+ virtual ~AudioRender();
+ internal:
+ AudioRender();
+
+ int Prepare(struct tdav_consumer_wasapi_s* wasapi, const tmedia_codec_t* codec);
+ int UnPrepare();
+ int Start();
+ int Stop();
+ int Pause();
+ int Consume(const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr);
+ private:
+ tsk_size_t Read(void* data, tsk_size_t size);
+ void AsyncThread(Windows::Foundation::IAsyncAction^ operation);
+
+ private:
+ tsk_mutex_handle_t* m_hMutex;
+ const struct tdav_consumer_wasapi_s* m_pWrappedConsumer; // Must not take a ref(), otherwise dtor() will never be called (circular reference)
+ IAudioClient2* m_pDevice;
+ IAudioRenderClient* m_pClient;
+ HANDLE m_hEvent;
+ Windows::Foundation::IAsyncAction^ m_pAsyncThread;
+ INT32 m_nBytesPerNotif;
+ INT32 m_nSourceFrameSizeInBytes;
+ UINT32 m_nMaxFrameCount;
+ UINT32 m_nPtime;
+
+ struct {
+ struct {
+ void* buffer;
+ tsk_size_t size;
+ } chunck;
+ tsk_ssize_t leftBytes;
+ SpeexBuffer* buffer;
+ tsk_size_t size;
+ } m_ring;
+
+ bool m_bStarted;
+ bool m_bPrepared;
+ bool m_bPaused;
+ };
+ }
+}
+
+typedef struct tdav_consumer_wasapi_s
+{
+ TDAV_DECLARE_CONSUMER_AUDIO;
+
+ Doubango::VoIP::AudioRender ^AudioRender;
+}
+tdav_consumer_wasapi_t;
+
+extern "C" void tdav_win32_print_error(const char* func, HRESULT hr);
+
+
+/* ============ Media consumer Interface ================= */
+
+static int tdav_consumer_wasapi_set(tmedia_consumer_t* self, const tmedia_param_t* param)
+{
+ return tdav_consumer_audio_set(TDAV_CONSUMER_AUDIO(self), param);
+}
+
+static int tdav_consumer_wasapi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ if (!wasapi || !codec || !wasapi->AudioRender) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_CONSUMER(wasapi)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(wasapi)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(wasapi)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ WASAPI_DEBUG_INFO("in.channels=%d, out.channles=%d, in.rate=%d, out.rate=%d, ptime=%d",
+ TMEDIA_CONSUMER(wasapi)->audio.in.channels,
+ TMEDIA_CONSUMER(wasapi)->audio.out.channels,
+ TMEDIA_CONSUMER(wasapi)->audio.in.rate,
+ TMEDIA_CONSUMER(wasapi)->audio.out.rate,
+ TMEDIA_CONSUMER(wasapi)->audio.ptime);
+
+ return wasapi->AudioRender->Prepare(wasapi, codec);
+}
+
+static int tdav_consumer_wasapi_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_consumer_wasapi_start()");
+
+ if (!wasapi || !wasapi->AudioRender) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Start();
+}
+
+
+static int tdav_consumer_wasapi_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+ if (!wasapi || !wasapi->AudioRender || !buffer || !size) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Consume(buffer, size, proto_hdr);
+}
+
+static int tdav_consumer_wasapi_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ if (!wasapi || !wasapi->AudioRender){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Pause();
+}
+
+static int tdav_consumer_wasapi_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_wasapi_t* wasapi = (tdav_consumer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_consumer_wasapi_stop()");
+
+ if (!wasapi || !wasapi->AudioRender) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->AudioRender->Stop();
+}
+
+
+
+
+
+
+
+Doubango::VoIP::AudioRender::AudioRender()
+ : m_pDevice(nullptr)
+ , m_hMutex(nullptr)
+ , m_pClient(nullptr)
+ , m_hEvent(nullptr)
+ , m_pAsyncThread(nullptr)
+ , m_pWrappedConsumer(nullptr)
+ , m_nBytesPerNotif(0)
+ , m_nSourceFrameSizeInBytes(0)
+ , m_nMaxFrameCount(0)
+ , m_nPtime(0)
+ , m_bStarted(false)
+ , m_bPrepared(false)
+ , m_bPaused(false)
+{
+ memset(&m_ring, 0, sizeof(m_ring));
+
+ if (!(m_hMutex = tsk_mutex_create())) {
+ throw ref new Platform::FailureException(L"Failed to create mutex");
+ }
+}
+
+Doubango::VoIP::AudioRender::~AudioRender()
+{
+ Stop();
+ UnPrepare();
+
+ tsk_mutex_destroy(&m_hMutex);
+}
+
+int Doubango::VoIP::AudioRender::Prepare(tdav_consumer_wasapi_t* wasapi, const tmedia_codec_t* codec)
+{
+ HRESULT hr = E_FAIL;
+ int ret = 0;
+ WAVEFORMATEX wfx = {0};
+ AudioClientProperties properties = {0};
+ LPCWSTR pwstrRenderId = nullptr;
+
+ #define WASAPI_SET_ERROR(code) ret = (code); goto bail;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_bPrepared) {
+ WASAPI_DEBUG_INFO("Already prepared");
+ goto bail;
+ }
+
+ if (!wasapi || !codec) {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ WASAPI_SET_ERROR(-1);
+ }
+
+ if (m_pDevice || m_pClient) {
+ WASAPI_DEBUG_ERROR("consumer already prepared");
+ WASAPI_SET_ERROR(-2);
+ }
+
+ pwstrRenderId = GetDefaultAudioRenderId(AudioDeviceRole::Communications);
+
+ if (NULL == pwstrRenderId) {
+ tdav_win32_print_error("GetDefaultAudioRenderId", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-3);
+ }
+
+ hr = ActivateAudioInterface(pwstrRenderId, __uuidof(IAudioClient2), (void**)&m_pDevice);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-4);
+ }
+
+ if (SUCCEEDED(hr)) {
+ properties.cbSize = sizeof(AudioClientProperties);
+ properties.eCategory = AudioCategory_Communications;
+ hr = m_pDevice->SetClientProperties(&properties);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("SetClientProperties", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-5);
+ }
+ }
+ else {
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-6);
+ }
+
+ /* Set best format */
+ {
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_CONSUMER(wasapi)->audio.in.channels;
+ wfx.nSamplesPerSec = TMEDIA_CONSUMER(wasapi)->audio.in.rate;
+ wfx.wBitsPerSample = TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ PWAVEFORMATEX pwfxClosestMatch = NULL;
+ hr = m_pDevice->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &wfx, &pwfxClosestMatch);
+ if (hr != S_OK && hr != S_FALSE) {
+ tdav_win32_print_error("IsFormatSupported", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-8);
+ }
+
+ if (hr == S_FALSE) {
+ if (!pwfxClosestMatch) {
+ WASAPI_DEBUG_ERROR("malloc(%d) failed", sizeof(WAVEFORMATEX));
+ WASAPI_SET_ERROR(-7);
+ }
+
+ wfx.nSamplesPerSec = pwfxClosestMatch->nSamplesPerSec;
+ wfx.nChannels = pwfxClosestMatch->nChannels;
+#if 0
+ wfx.wBitsPerSample = pwfxClosestMatch->wBitsPerSample;
+#endif
+ wfx.nBlockAlign = wfx.nChannels * (wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
+ // Request resampler
+ TMEDIA_CONSUMER(wasapi)->audio.out.rate = (uint32_t)wfx.nSamplesPerSec;
+ TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample = (uint8_t)wfx.wBitsPerSample;
+ TMEDIA_CONSUMER(wasapi)->audio.out.channels = (uint8_t)wfx.nChannels;
+
+ WASAPI_DEBUG_INFO("Audio device format fallback: rate=%d, bps=%d, channels=%d", wfx.nSamplesPerSec, wfx.wBitsPerSample, wfx.nChannels);
+ }
+ if (pwfxClosestMatch) {
+ CoTaskMemFree(pwfxClosestMatch);
+ }
+ }
+
+ m_nSourceFrameSizeInBytes = (wfx.wBitsPerSample >> 3) * wfx.nChannels;
+ m_nBytesPerNotif = ((wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(wasapi)->audio.ptime) / 1000);
+
+ // Initialize
+ hr = m_pDevice->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+ WASAPI_MILLIS_TO_100NS(TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT * TMEDIA_CONSUMER(wasapi)->audio.ptime),
+ 0,
+ &wfx,
+ NULL);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("#WASAPI: Render::Initialize", hr);
+ WASAPI_SET_ERROR(-9);
+ }
+
+ REFERENCE_TIME DefaultDevicePeriod, MinimumDevicePeriod;
+ hr = m_pDevice->GetDevicePeriod(&DefaultDevicePeriod, &MinimumDevicePeriod);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetDevicePeriod", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+ hr = m_pDevice->GetBufferSize(&m_nMaxFrameCount);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetBufferSize", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+
+ WASAPI_DEBUG_INFO("#WASAPI (Playback): BufferSize=%u, DefaultDevicePeriod=%lld ms, MinimumDevicePeriod=%lldms", m_nMaxFrameCount, WASAPI_100NS_TO_MILLIS(DefaultDevicePeriod), WASAPI_100NS_TO_MILLIS(MinimumDevicePeriod));
+
+ if (!m_hEvent) {
+ if (!(m_hEvent = CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE))) {
+ tdav_win32_print_error("CreateEventEx(EVENT_MODIFY_STATE | SYNCHRONIZE)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-11);
+ }
+ }
+
+ hr = m_pDevice->SetEventHandle(m_hEvent);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("SetEventHandle", hr);
+ WASAPI_SET_ERROR(-12);
+ }
+
+ hr = m_pDevice->GetService(__uuidof(IAudioRenderClient), (void**)&m_pClient);
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("GetService", hr);
+ WASAPI_SET_ERROR(-14);
+ }
+
+ m_ring.chunck.size = (TMEDIA_CONSUMER(wasapi)->audio.ptime * TMEDIA_CONSUMER(wasapi)->audio.out.rate * ((TMEDIA_CONSUMER(wasapi)->audio.bits_per_sample >> 3) * TMEDIA_CONSUMER(wasapi)->audio.out.channels)) / 1000;
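+ // One chunk holds exactly one ptime worth of playback data:
+ // ptime_ms * rate * (bytes per sample * channels) / 1000.
+ // e.g. 20ms at 48000Hz, 16-bit stereo -> 20 * 48000 * 4 / 1000 = 3840 bytes; the ring keeps
+ // TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT (4) such chunks.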
+ m_ring.size = TDAV_WASAPI_CONSUMER_NOTIF_POS_COUNT * m_ring.chunck.size;
+ if (!(m_ring.chunck.buffer = tsk_realloc(m_ring.chunck.buffer, m_ring.chunck.size))) {
+ m_ring.size = 0;
+ WASAPI_DEBUG_ERROR("Failed to allocate new buffer");
+ WASAPI_SET_ERROR(-15);
+ }
+ if (!m_ring.buffer) {
+ m_ring.buffer = speex_buffer_init(m_ring.size);
+ }
+ else {
+ int sret;
+ if ((sret = speex_buffer_resize(m_ring.buffer, m_ring.size)) < 0) {
+ WASAPI_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", m_ring.size, sret);
+ WASAPI_SET_ERROR(-16);
+ }
+ }
+ if (!m_ring.buffer) {
+ WASAPI_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", m_ring.size);
+ WASAPI_SET_ERROR(-17);
+ }
+
+bail:
+ if (pwstrRenderId) {
+ CoTaskMemFree((LPVOID)pwstrRenderId);
+ }
+ if (ret != 0) {
+ UnPrepare();
+ }
+
+ if ((m_bPrepared = (ret == 0))) {
+ m_pWrappedConsumer = wasapi;
+ m_nPtime = TMEDIA_CONSUMER(wasapi)->audio.ptime;
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return ret;
+}
+
+int Doubango::VoIP::AudioRender::UnPrepare()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_hEvent) {
+ CloseHandle(m_hEvent), m_hEvent = nullptr;
+ }
+ if (m_pDevice) {
+ m_pDevice->Release(), m_pDevice = nullptr;
+ }
+ if (m_pClient) {
+ m_pClient->Release(), m_pClient = nullptr;
+ }
+
+ TSK_FREE(m_ring.chunck.buffer);
+ if (m_ring.buffer) {
+ speex_buffer_destroy(m_ring.buffer);
+ m_ring.buffer = nullptr;
+ }
+
+ m_pWrappedConsumer = nullptr;
+
+ m_bPrepared = false;
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioRender::Start()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_bStarted) {
+ WASAPI_DEBUG_INFO("already started");
+ goto bail;
+ }
+ if (!m_bPrepared) {
+ WASAPI_DEBUG_ERROR("not prepared");
+ goto bail;
+ }
+
+ m_pAsyncThread = Windows::System::Threading::ThreadPool::RunAsync(ref new Windows::System::Threading::WorkItemHandler(this, &Doubango::VoIP::AudioRender::AsyncThread),
+ Windows::System::Threading::WorkItemPriority::High,
+ Windows::System::Threading::WorkItemOptions::TimeSliced);
+
+ if ((m_bStarted = (m_pAsyncThread != nullptr))) {
+ HRESULT hr = m_pDevice->Start();
+ if(!SUCCEEDED(hr)) {
+ tdav_win32_print_error("Device::Start", hr);
+ Stop();
+ }
+ m_bPaused = false;
+ }
+
+bail:
+ tsk_mutex_unlock(m_hMutex);
+
+ return (m_bStarted ? 0 : -2);
+}
+
+int Doubango::VoIP::AudioRender::Stop()
+{
+ m_bStarted = false;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_hEvent) {
+ SetEvent(m_hEvent);
+ }
+
+ if (m_pAsyncThread) {
+ m_pAsyncThread->Cancel();
+ m_pAsyncThread->Close();
+ m_pAsyncThread = nullptr;
+ }
+
+ if (m_pDevice) {
+ m_pDevice->Stop();
+ }
+
+ // will be prepared again before next start()
+ UnPrepare();
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioRender::Pause()
+{
+ m_bPaused = true;
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioRender::Consume(const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ int ret;
+ // tsk_mutex_lock(m_hMutex);
+ ret = tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer), buffer, size, proto_hdr); // thread-safe
+ // tsk_mutex_unlock(m_hMutex);
+ return ret;
+}
+
+tsk_size_t Doubango::VoIP::AudioRender::Read(void* data, tsk_size_t size)
+{
+ tsk_ssize_t retSize = 0, availSize;
+
+ m_ring.leftBytes += size;
+ while (m_ring.leftBytes >= (tsk_ssize_t)m_ring.chunck.size) {
+ m_ring.leftBytes -= m_ring.chunck.size;
+ retSize = (tsk_ssize_t)tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer), m_ring.chunck.buffer, m_ring.chunck.size);
+ tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(m_pWrappedConsumer));
+ speex_buffer_write(m_ring.buffer, m_ring.chunck.buffer, retSize);
+ }
+ // IMPORTANT: there appears to be a bug in speex: repeatedly trying to read more than is
+ // available can corrupt the buffer. At least on OS X 1.5
+#if 0
+ if (speex_buffer_get_available(m_ring.buffer) >= (tsk_ssize_t)size) {
+ retSize = speex_buffer_read(m_ring.buffer, data, size);
+ }
+ else{
+ memset(data, 0, size);
+ }
+#else
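+ // Read whatever is available and pad the remainder with silence rather than
+ // polling the ring repeatedly (see the speex note above).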
+ availSize = speex_buffer_get_available(m_ring.buffer);
+ if (availSize == 0) {
+ memset(data, 0, size);
+ }
+ else {
+ retSize = speex_buffer_read(m_ring.buffer, data, min(availSize, (tsk_ssize_t)size));
+ if (availSize < (tsk_ssize_t)size) {
+ memset(((uint8_t*)data) + availSize, 0, (size - availSize));
+ }
+ }
+
+#endif
+
+ return retSize;
+}
+
+void Doubango::VoIP::AudioRender::AsyncThread(Windows::Foundation::IAsyncAction^ operation)
+{
+ HRESULT hr = S_OK;
+ INT32 nFramesToWrite;
+ UINT32 nPadding, nRead;
+ DWORD retval;
+
+ WASAPI_DEBUG_INFO("#WASAPI: __playback_thread -- START");
+
+ #define BREAK_WHILE tsk_mutex_unlock(m_hMutex); break;
+
+ while (m_bStarted && SUCCEEDED(hr)) {
+ retval = WaitForSingleObjectEx(m_hEvent, /*m_nPtime*/INFINITE, FALSE);
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (!m_bStarted) {
+ BREAK_WHILE;
+ }
+
+ if (retval == WAIT_OBJECT_0) {
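+ // Render event fired: ask WASAPI how much of the shared buffer is still queued
+ // (padding) and fill the remaining free space with decoded audio or silence.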
+ hr = m_pDevice->GetCurrentPadding(&nPadding);
+ if (SUCCEEDED(hr)) {
+ BYTE* pRenderBuffer = NULL;
+ nFramesToWrite = m_nMaxFrameCount - nPadding;
+
+ if (nFramesToWrite > 0) {
+ hr = m_pClient->GetBuffer(nFramesToWrite, &pRenderBuffer);
+ if (SUCCEEDED(hr)) {
+ nRead = Read(pRenderBuffer, (nFramesToWrite * m_nSourceFrameSizeInBytes));
+
+ // Release the buffer
+ hr = m_pClient->ReleaseBuffer(nFramesToWrite, (nRead == 0) ? AUDCLNT_BUFFERFLAGS_SILENT : 0);
+ }
+ }
+ }
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+ }// end-of-while
+
+ if (!SUCCEEDED(hr)) {
+ tdav_win32_print_error("AsyncThread: ", hr);
+ }
+
+
+ WASAPI_DEBUG_INFO("__playback_thread(%s) -- STOP", (SUCCEEDED(hr) && retval == WAIT_OBJECT_0) ? "OK" : "NOK");
+}
+
+
+
+
+
+
+
+//
+// WASAPI consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_wasapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_wasapi_t *wasapi = (tdav_consumer_wasapi_t*)self;
+ if (wasapi) {
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(wasapi));
+ /* init self */
+
+ wasapi->AudioRender = ref new Doubango::VoIP::AudioRender();
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_wasapi_dtor(tsk_object_t * self)
+{
+ tdav_consumer_wasapi_t *wasapi = (tdav_consumer_wasapi_t*)self;
+ if (wasapi) {
+ /* stop */
+ tdav_consumer_wasapi_stop((tmedia_consumer_t*)self);
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(wasapi));
+ /* deinit self */
+ if (wasapi->AudioRender) {
+ delete wasapi->AudioRender;
+ wasapi->AudioRender = nullptr;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_wasapi_def_s =
+{
+ sizeof(tdav_consumer_wasapi_t),
+ tdav_consumer_wasapi_ctor,
+ tdav_consumer_wasapi_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_wasapi_plugin_def_s =
+{
+ &tdav_consumer_wasapi_def_s,
+
+ tmedia_audio,
+ "Microsoft Windows Audio Session API (WASAPI) consumer",
+
+ tdav_consumer_wasapi_set,
+ tdav_consumer_wasapi_prepare,
+ tdav_consumer_wasapi_start,
+ tdav_consumer_wasapi_consume,
+ tdav_consumer_wasapi_pause,
+ tdav_consumer_wasapi_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_wasapi_plugin_def_t = &tdav_consumer_wasapi_plugin_def_s;
+
+
+
+
+#endif /* HAVE_WASAPI */
diff --git a/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx b/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx
new file mode 100644
index 0000000..7d172a2
--- /dev/null
+++ b/tinyDAV/src/audio/wasapi/tdav_producer_wasapi.cxx
@@ -0,0 +1,681 @@
+/*Copyright (C) 2013 Mamadou DIOP
+* Copyright (C) 2013-2014 Doubango Telecom <http://www.doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*/
+/**@file tdav_producer_wasapi.cxx
+ * @brief Microsoft Windows Audio Session API (WASAPI) producer.
+ * http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
+ */
+#include "tinydav/audio/wasapi/tdav_producer_wasapi.h"
+
+#if HAVE_WASAPI
+
+#include "tinydav/audio/tdav_producer_audio.h"
+
+#include "tsk_memory.h"
+#include "tsk_string.h"
+#include "tsk_debug.h"
+
+#include <windows.h>
+#include <audioclient.h>
+#include <phoneaudioclient.h>
+
+#include <speex/speex_buffer.h>
+
+#if !defined(TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT)
+# define TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT 10
+#endif
+#define WASAPI_MILLIS_TO_100NS(MILLIS) (((LONGLONG)(MILLIS)) * 10000ui64)
+#define WASAPI_100NS_TO_MILLIS(NANOS) (((LONGLONG)(NANOS)) / 10000ui64)
+
+#define WASAPI_DEBUG_INFO(FMT, ...) TSK_DEBUG_INFO("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_WARN(FMT, ...) TSK_DEBUG_WARN("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_ERROR(FMT, ...) TSK_DEBUG_ERROR("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+#define WASAPI_DEBUG_FATAL(FMT, ...) TSK_DEBUG_FATAL("[WASAPI Producer] " FMT, ##__VA_ARGS__)
+
+struct tdav_producer_wasapi_s;
+
+namespace Doubango
+{
+ namespace VoIP
+ {
+ ref class AudioCapture sealed
+ {
+ public:
+ virtual ~AudioCapture();
+ internal:
+ AudioCapture();
+
+ int Prepare(struct tdav_producer_wasapi_s* wasapi, const tmedia_codec_t* codec);
+ int UnPrepare();
+ int Start();
+ int Stop();
+ int Pause();
+
+ private:
+ void AsyncThread(Windows::Foundation::IAsyncAction^ operation);
+
+ private:
+ tsk_mutex_handle_t* m_hMutex;
+ IAudioClient2* m_pDevice;
+ IAudioCaptureClient* m_pClient;
+ HANDLE m_hCaptureEvent;
+ HANDLE m_hShutdownEvent;
+ Windows::Foundation::IAsyncAction^ m_pAsyncThread;
+ INT32 m_nBytesPerNotif;
+ INT32 m_nSourceFrameSizeInBytes;
+
+ struct{
+ tmedia_producer_enc_cb_f fn;
+ const void* pcData;
+ } m_callback;
+
+ struct {
+ struct {
+ void* buffer;
+ tsk_size_t size;
+ } chunck;
+ SpeexBuffer* buffer;
+ tsk_size_t size;
+ } m_ring;
+ bool m_bStarted;
+ bool m_bPrepared;
+ bool m_bPaused;
+ };
+ }
+}
+
+typedef struct tdav_producer_wasapi_s
+{
+ TDAV_DECLARE_PRODUCER_AUDIO;
+
+ Doubango::VoIP::AudioCapture ^audioCapture;
+}
+tdav_producer_wasapi_t;
+
+extern "C" void tdav_win32_print_error(const char* func, HRESULT hr);
+
+
+/* ============ Media Producer Interface ================= */
+static int tdav_producer_wasapi_set(tmedia_producer_t* self, const tmedia_param_t* param)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+ if (param->plugin_type == tmedia_ppt_producer) {
+ if (param->value_type == tmedia_pvt_int32) {
+ if (tsk_striequals(param->key, "volume")) {
+ return 0;
+ }
+ else if (tsk_striequals(param->key, "mute")) {
+ //wasapi->mute = (TSK_TO_INT32((uint8_t*)param->value) != 0);
+#if !FIXME_SEND_SILENCE_ON_MUTE
+ //if(wasapi->started){
+ // if(wasapi->mute){
+ //IDirectSoundCaptureBuffer_Stop(wasapi->captureBuffer);
+ // }
+ // else{
+ //IDirectSoundCaptureBuffer_Start(wasapi->captureBuffer, DSBPLAY_LOOPING);
+ // }
+ //}
+#endif
+ return 0;
+ }
+ }
+ }
+ return tdav_producer_audio_set(TDAV_PRODUCER_AUDIO(self), param);
+}
+
+
+
+static int tdav_producer_wasapi_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ if(!wasapi || !codec || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* codec should have ptime */
+ TMEDIA_PRODUCER(wasapi)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(wasapi)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(wasapi)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+
+ WASAPI_DEBUG_INFO("channels=%d, rate=%d, ptime=%d",
+ TMEDIA_PRODUCER(wasapi)->audio.channels,
+ TMEDIA_PRODUCER(wasapi)->audio.rate,
+ TMEDIA_PRODUCER(wasapi)->audio.ptime);
+
+ return wasapi->audioCapture->Prepare(wasapi, codec);
+}
+
+static int tdav_producer_wasapi_start(tmedia_producer_t* self)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_producer_wasapi_start()");
+
+ if(!wasapi || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->audioCapture->Start();
+}
+
+static int tdav_producer_wasapi_pause(tmedia_producer_t* self)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ if(!wasapi || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->audioCapture->Pause();
+}
+
+static int tdav_producer_wasapi_stop(tmedia_producer_t* self)
+{
+ tdav_producer_wasapi_t* wasapi = (tdav_producer_wasapi_t*)self;
+
+ WASAPI_DEBUG_INFO("tdav_producer_wasapi_stop()");
+
+ if(!wasapi || !wasapi->audioCapture){
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return wasapi->audioCapture->Stop();
+}
+
+
+
+
+
+
+
+Doubango::VoIP::AudioCapture::AudioCapture()
+ : m_pDevice(nullptr)
+ , m_hMutex(nullptr)
+ , m_pClient(nullptr)
+ , m_hCaptureEvent(nullptr)
+ , m_hShutdownEvent(nullptr)
+ , m_pAsyncThread(nullptr)
+ , m_nBytesPerNotif(0)
+ , m_nSourceFrameSizeInBytes(0)
+ , m_bStarted(false)
+ , m_bPrepared(false)
+ , m_bPaused(false)
+{
+ m_callback.fn = nullptr, m_callback.pcData = nullptr;
+ memset(&m_ring, 0, sizeof(m_ring));
+
+ if(!(m_hMutex = tsk_mutex_create())){
+ throw ref new Platform::FailureException(L"Failed to create mutex");
+ }
+}
+
+Doubango::VoIP::AudioCapture::~AudioCapture()
+{
+ Stop();
+ UnPrepare();
+
+ tsk_mutex_destroy(&m_hMutex);
+}
+
+int Doubango::VoIP::AudioCapture::Prepare(tdav_producer_wasapi_t* wasapi, const tmedia_codec_t* codec)
+{
+ HRESULT hr = E_FAIL;
+ int ret = 0;
+ WAVEFORMATEX wfx = {0};
+ AudioClientProperties properties = {0};
+ LPCWSTR pwstrCaptureId = nullptr;
+
+ #define WASAPI_SET_ERROR(code) ret = (code); goto bail;
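+ // Error-handling convention for Prepare(): record the error code and jump to the
+ // common cleanup label so pwstrCaptureId is always freed and partial setup is undone.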
+
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_bPrepared)
+ {
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer already prepared");
+ goto bail;
+ }
+
+ if(!wasapi || !codec)
+ {
+ WASAPI_DEBUG_ERROR("Invalid parameter");
+ WASAPI_SET_ERROR(-1);
+ }
+
+ if(m_pDevice || m_pClient){
+ WASAPI_DEBUG_ERROR("Producer already prepared");
+ WASAPI_SET_ERROR(-2);
+ }
+
+ pwstrCaptureId = GetDefaultAudioCaptureId(AudioDeviceRole::Communications);
+
+ if (NULL == pwstrCaptureId){
+ tdav_win32_print_error("GetDefaultAudioCaptureId", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-3);
+ }
+
+ hr = ActivateAudioInterface(pwstrCaptureId, __uuidof(IAudioClient2), (void**)&m_pDevice);
+ if(!SUCCEEDED(hr)){
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-4);
+ }
+
+ if (SUCCEEDED(hr)){
+ properties.cbSize = sizeof AudioClientProperties;
+ properties.eCategory = AudioCategory_Communications;
+ hr = m_pDevice->SetClientProperties(&properties);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("SetClientProperties", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-5);
+ }
+ }
+ else{
+ tdav_win32_print_error("ActivateAudioInterface", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-6);
+ }
+
+ /* Set best format */
+ {
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TMEDIA_PRODUCER(wasapi)->audio.channels;
+ wfx.nSamplesPerSec = TMEDIA_PRODUCER(wasapi)->audio.rate;
+ wfx.wBitsPerSample = TMEDIA_PRODUCER(wasapi)->audio.bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ PWAVEFORMATEX pwfxClosestMatch = NULL;
+ hr = m_pDevice->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &wfx, &pwfxClosestMatch);
+ if(hr != S_OK && hr != S_FALSE)
+ {
+ tdav_win32_print_error("IsFormatSupported", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-8);
+ }
+
+ if(hr == S_FALSE)
+ {
+ if(!pwfxClosestMatch)
+ {
+ WASAPI_DEBUG_ERROR("malloc(%d) failed", sizeof(WAVEFORMATEX));
+ WASAPI_SET_ERROR(-7);
+ }
+ wfx.nChannels = pwfxClosestMatch->nChannels;
+ wfx.nSamplesPerSec = pwfxClosestMatch->nSamplesPerSec;
+#if 0
+ wfx.wBitsPerSample = pwfxClosestMatch->wBitsPerSample;
+#endif
+ wfx.nBlockAlign = wfx.nChannels * (wfx.wBitsPerSample / 8);
+ wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
+ // Request resampler
+ TMEDIA_PRODUCER(wasapi)->audio.rate = (uint32_t)wfx.nSamplesPerSec;
+ TMEDIA_PRODUCER(wasapi)->audio.bits_per_sample = (uint8_t)wfx.wBitsPerSample;
+ TMEDIA_PRODUCER(wasapi)->audio.channels = (uint8_t)wfx.nChannels;
+
+ WASAPI_DEBUG_INFO("Audio device format fallback: rate=%d, bps=%d, channels=%d", wfx.nSamplesPerSec, wfx.wBitsPerSample, wfx.nChannels);
+ }
+ if(pwfxClosestMatch)
+ {
+ CoTaskMemFree(pwfxClosestMatch);
+ }
+ }
+
+ m_nSourceFrameSizeInBytes = (wfx.wBitsPerSample >> 3) * wfx.nChannels;
+ m_nBytesPerNotif = ((wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(wasapi)->audio.ptime)/1000);
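+ // m_nSourceFrameSizeInBytes is one sample per channel; m_nBytesPerNotif is one ptime worth of audio.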
+
+ // Initialize
+ hr = m_pDevice->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+ (TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT * WASAPI_MILLIS_TO_100NS(TMEDIA_PRODUCER(wasapi)->audio.ptime)),
+ 0,
+ &wfx,
+ NULL);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("#WASAPI: Capture::SetClientProperties", hr);
+ WASAPI_SET_ERROR(-9);
+ }
+
+ REFERENCE_TIME DefaultDevicePeriod, MinimumDevicePeriod;
+ hr = m_pDevice->GetDevicePeriod(&DefaultDevicePeriod, &MinimumDevicePeriod);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("GetDevicePeriod", hr);
+ WASAPI_SET_ERROR(-10);
+ }
+ WASAPI_DEBUG_INFO("#WASAPI(Capture): DefaultDevicePeriod=%lld ms, MinimumDevicePeriod=%lldms", WASAPI_100NS_TO_MILLIS(DefaultDevicePeriod), WASAPI_100NS_TO_MILLIS(MinimumDevicePeriod));
+
+ if(!m_hCaptureEvent){
+ if(!(m_hCaptureEvent = CreateEventEx(NULL, NULL, 0, EVENT_ALL_ACCESS))){
+ tdav_win32_print_error("CreateEventEx(Capture)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-11);
+ }
+ }
+ if(!m_hShutdownEvent){
+ if(!(m_hShutdownEvent = CreateEventEx(NULL, NULL, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS))){
+ tdav_win32_print_error("CreateEventEx(Shutdown)", HRESULT_FROM_WIN32(GetLastError()));
+ WASAPI_SET_ERROR(-12);
+ }
+ }
+
+ hr = m_pDevice->SetEventHandle(m_hCaptureEvent);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("SetEventHandle", hr);
+ WASAPI_SET_ERROR(-13);
+ }
+
+ hr = m_pDevice->GetService(__uuidof(IAudioCaptureClient), (void**)&m_pClient);
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("GetService", hr);
+ WASAPI_SET_ERROR(-14);
+ }
+
+ int packetperbuffer = (1000 / TMEDIA_PRODUCER(wasapi)->audio.ptime);
+ m_ring.chunck.size = wfx.nSamplesPerSec * (wfx.wBitsPerSample >> 3) / packetperbuffer;
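+ // Note: the chunk size above does not factor in wfx.nChannels, so it appears to assume single-channel capture.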
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer ring chunk size = %u", m_ring.chunck.size);
+ // allocate our chunck buffer
+ if(!(m_ring.chunck.buffer = tsk_realloc(m_ring.chunck.buffer, m_ring.chunck.size))){
+ WASAPI_DEBUG_ERROR("Failed to allocate new buffer");
+ WASAPI_SET_ERROR(-15);
+ }
+ // create ringbuffer
+ m_ring.size = TDAV_WASAPI_PRODUCER_NOTIF_POS_COUNT * m_ring.chunck.size;
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer ring size = %u", m_ring.size);
+ if(!m_ring.buffer){
+ m_ring.buffer = speex_buffer_init(m_ring.size);
+ }
+ else {
+ int sret;
+ if((sret = speex_buffer_resize(m_ring.buffer, m_ring.size)) < 0){
+ WASAPI_DEBUG_ERROR("speex_buffer_resize(%d) failed with error code=%d", m_ring.size, sret);
+ WASAPI_SET_ERROR(-16);
+ }
+ }
+ if(!m_ring.buffer){
+ WASAPI_DEBUG_ERROR("Failed to create a new ring buffer with size = %d", m_ring.size);
+ WASAPI_SET_ERROR(-17);
+ }
+
+ m_callback.fn = TMEDIA_PRODUCER(wasapi)->enc_cb.callback;
+ m_callback.pcData = TMEDIA_PRODUCER(wasapi)->enc_cb.callback_data;
+
+bail:
+ if (pwstrCaptureId){
+ CoTaskMemFree((LPVOID)pwstrCaptureId);
+ }
+ if(ret != 0){
+ UnPrepare();
+ }
+ m_bPrepared = (ret == 0);
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return ret;
+}
+
+int Doubango::VoIP::AudioCapture::UnPrepare()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_hCaptureEvent)
+ {
+ CloseHandle(m_hCaptureEvent), m_hCaptureEvent = nullptr;
+ }
+ if(m_hShutdownEvent)
+ {
+ CloseHandle(m_hShutdownEvent), m_hShutdownEvent = nullptr;
+ }
+ if(m_pDevice)
+ {
+ m_pDevice->Release(), m_pDevice = nullptr;
+ }
+ if(m_pClient)
+ {
+ m_pClient->Release(), m_pClient = nullptr;
+ }
+
+ TSK_FREE(m_ring.chunck.buffer);
+ if(m_ring.buffer){
+ speex_buffer_destroy(m_ring.buffer);
+ m_ring.buffer = nullptr;
+ }
+
+ m_callback.fn = nullptr;
+ m_callback.pcData = nullptr;
+
+ m_bPrepared = false;
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioCapture::Start()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ if(m_bStarted)
+ {
+ WASAPI_DEBUG_INFO("#WASAPI: Audio producer already started");
+ goto bail;
+ }
+ if(!m_bPrepared)
+ {
+ WASAPI_DEBUG_ERROR("Audio producer not prepared");
+ goto bail;
+ }
+
+ m_pAsyncThread = Windows::System::Threading::ThreadPool::RunAsync(ref new Windows::System::Threading::WorkItemHandler(this, &Doubango::VoIP::AudioCapture::AsyncThread),
+ Windows::System::Threading::WorkItemPriority::High,
+ Windows::System::Threading::WorkItemOptions::TimeSliced);
+
+ if((m_bStarted = (m_pAsyncThread != nullptr)))
+ {
+ HRESULT hr = m_pDevice->Start();
+ if(!SUCCEEDED(hr))
+ {
+ tdav_win32_print_error("Device::Start", hr);
+ Stop();
+ }
+ m_bPaused = false;
+ }
+
+bail:
+ tsk_mutex_unlock(m_hMutex);
+
+ return (m_bStarted ? 0 : -2);
+}
+
+int Doubango::VoIP::AudioCapture::Stop()
+{
+ m_bStarted = false;
+
+ tsk_mutex_lock(m_hMutex);
+
+ if (m_hShutdownEvent)
+ {
+ SetEvent(m_hShutdownEvent);
+ }
+
+ if (m_pAsyncThread)
+ {
+ m_pAsyncThread->Cancel();
+ m_pAsyncThread->Close();
+ m_pAsyncThread = nullptr;
+ }
+
+ if(m_pDevice)
+ {
+ m_pDevice->Stop();
+ }
+
+ // will be prepared again before next start()
+ UnPrepare();
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+int Doubango::VoIP::AudioCapture::Pause()
+{
+ tsk_mutex_lock(m_hMutex);
+
+ m_bPaused = true;
+
+ tsk_mutex_unlock(m_hMutex);
+
+ return 0;
+}
+
+void Doubango::VoIP::AudioCapture::AsyncThread(Windows::Foundation::IAsyncAction^ operation)
+{
+ HRESULT hr = S_OK;
+ BYTE* pbData = nullptr;
+ UINT32 nFrames = 0;
+ DWORD dwFlags = 0;
+ UINT32 incomingBufferSize;
+ INT32 avail;
+ UINT32 nNextPacketSize;
+
+ HANDLE eventHandles[] = {
+ m_hCaptureEvent, // WAIT_OBJECT0
+ m_hShutdownEvent // WAIT_OBJECT1
+ };
+
+ WASAPI_DEBUG_INFO("#WASAPI: __record_thread -- START");
+
+ #define BREAK_WHILE tsk_mutex_unlock(m_hMutex); break;
+
+ while(m_bStarted && SUCCEEDED(hr)){
+ DWORD waitResult = WaitForMultipleObjectsEx(SIZEOF_ARRAY(eventHandles), eventHandles, FALSE, INFINITE, FALSE);
+
+ tsk_mutex_lock(m_hMutex);
+
+ if(!m_bStarted){
+ BREAK_WHILE;
+ }
+
+ if(waitResult == WAIT_OBJECT_0 && m_callback.fn) {
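+ // Capture event fired: drain every pending packet from WASAPI, push the PCM into
+ // the speex ring, then deliver it to the encoder callback in fixed ptime-sized chunks.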
+ hr = m_pClient->GetNextPacketSize(&nNextPacketSize);
+ while(SUCCEEDED(hr) && nNextPacketSize >0){
+ hr = m_pClient->GetBuffer(&pbData, &nFrames, &dwFlags, nullptr, nullptr);
+ if(SUCCEEDED(hr) && pbData && nFrames){
+ if((dwFlags & AUDCLNT_BUFFERFLAGS_SILENT) != AUDCLNT_BUFFERFLAGS_SILENT){
+ incomingBufferSize = nFrames * m_nSourceFrameSizeInBytes;
+ speex_buffer_write(m_ring.buffer, pbData, incomingBufferSize);
+ avail = speex_buffer_get_available(m_ring.buffer);
+ while (m_bStarted && avail >= (INT32)m_ring.chunck.size) {
+ avail -= speex_buffer_read(m_ring.buffer, m_ring.chunck.buffer, m_ring.chunck.size);
+ m_callback.fn(m_callback.pcData, m_ring.chunck.buffer, m_ring.chunck.size);
+ }
+ }
+
+ if (SUCCEEDED(hr)){
+ hr = m_pClient->ReleaseBuffer(nFrames);
+ }
+ if (SUCCEEDED(hr)){
+ hr = m_pClient->GetNextPacketSize(&nNextPacketSize);
+ }
+ }
+ }
+ }
+ else if(waitResult != WAIT_OBJECT_0){
+ BREAK_WHILE;
+ }
+
+ tsk_mutex_unlock(m_hMutex);
+ }// end-of-while
+
+ if (!SUCCEEDED(hr)){
+ tdav_win32_print_error("AsyncThread: ", hr);
+ }
+
+
+ WASAPI_DEBUG_INFO("WASAPI: __record_thread(%s) -- STOP", SUCCEEDED(hr) ? "OK": "NOK");
+}
+
+
+
+
+
+
+
+//
+// WASAPI producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_wasapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_wasapi_t *wasapi = (tdav_producer_wasapi_t*)self;
+ if(wasapi){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(wasapi));
+ /* init self */
+
+ wasapi->audioCapture = ref new Doubango::VoIP::AudioCapture();
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_wasapi_dtor(tsk_object_t * self)
+{
+ tdav_producer_wasapi_t *wasapi = (tdav_producer_wasapi_t*)self;
+ if(wasapi){
+ /* stop */
+ tdav_producer_wasapi_stop((tmedia_producer_t*)self);
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(wasapi));
+ /* deinit self */
+ if(wasapi->audioCapture){
+ delete wasapi->audioCapture;
+ wasapi->audioCapture = nullptr;
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_wasapi_def_s =
+{
+ sizeof(tdav_producer_wasapi_t),
+ tdav_producer_wasapi_ctor,
+ tdav_producer_wasapi_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_wasapi_plugin_def_s =
+{
+ &tdav_producer_wasapi_def_s,
+
+ tmedia_audio,
+ "Microsoft Windows Audio Session API (WASAPI) producer",
+
+ tdav_producer_wasapi_set,
+ tdav_producer_wasapi_prepare,
+ tdav_producer_wasapi_start,
+ tdav_producer_wasapi_pause,
+ tdav_producer_wasapi_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_wasapi_plugin_def_t = &tdav_producer_wasapi_plugin_def_s;
+
+
+
+
+#endif /* HAVE_WASAPI */
diff --git a/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
new file mode 100644
index 0000000..1883fa4
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
@@ -0,0 +1,402 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_waveapi.c
+ * @brief Audio Consumer for Win32 and WinCE platforms.
+ *
+ */
+#include "tinydav/audio/waveapi/tdav_consumer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_consumer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT];
+
+ waveOutGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+static int free_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->hWaveHeaders[index]){
+ TSK_FREE(consumer->hWaveHeaders[index]->lpData);
+ TSK_FREE(consumer->hWaveHeaders[index]);
+ }
+
+ return 0;
+}
+
+static int create_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->hWaveHeaders[index]){
+ free_wavehdr(consumer, index);
+ }
+
+ consumer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+ consumer->hWaveHeaders[index]->lpData = tsk_calloc(1, consumer->bytes_per_notif);
+ consumer->hWaveHeaders[index]->dwBufferLength = (DWORD)consumer->bytes_per_notif;
+ consumer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+ consumer->hWaveHeaders[index]->dwLoops = 0x01;
+ consumer->hWaveHeaders[index]->dwUser = index;
+
+ return 0;
+}
+
+static int write_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ MMRESULT result;
+
+ if(!consumer || !consumer->hWaveHeaders[index] || !consumer->hWaveOut){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveOutPrepareHeader(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutPrepareHeader");
+ return -2;
+ }
+
+ result = waveOutWrite(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutWrite");
+ return -3;
+ }
+
+ return 0;
+}
+
+static int play_wavehdr(tdav_consumer_waveapi_t* consumer, LPWAVEHDR lpHdr)
+{
+ MMRESULT result;
+ tsk_size_t out_size;
+
+ if(!consumer || !lpHdr || !consumer->hWaveOut){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveOutUnprepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutUnprepareHeader");
+ return -2;
+ }
+
+ //
+ // Fill lpHdr->lpData with decoded data pulled from the jitter buffer
+ //
+ if((out_size = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer), lpHdr->lpData, lpHdr->dwBufferLength))){
+ //memcpy(lpHdr->lpData, data, lpHdr->dwBufferLength);
+ //TSK_FREE(data);
+ }
+ else{
+ /* Put silence */
+ memset(lpHdr->lpData, 0, lpHdr->dwBufferLength);
+ }
+
+ if(!consumer->started){
+ return 0;
+ }
+
+ result = waveOutPrepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutPrepareHeader");
+ return -3;
+ }
+
+ result = waveOutWrite(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutWrite");
+ return -4;
+ }
+
+ return 0;
+}
+
+static void* TSK_STDCALL __playback_thread(void *param)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)param;
+ DWORD dwEvent;
+ tsk_size_t i;
+
+ TSK_DEBUG_INFO("__playback_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+ for(;;){
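+ // events[0] is the waveOut callback event (set when a buffer completes); events[1] requests shutdown.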
+ dwEvent = WaitForMultipleObjects(2, consumer->events, FALSE, INFINITE);
+
+ if (dwEvent == (WAIT_OBJECT_0 + 1)){
+ break;
+ }
+ else if (dwEvent == WAIT_OBJECT_0){
+ EnterCriticalSection(&consumer->cs);
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ if(consumer->hWaveHeaders[i] && (consumer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+ play_wavehdr(consumer, consumer->hWaveHeaders[i]);
+ }
+ }
+ LeaveCriticalSection(&consumer->cs);
+ }
+ }
+
+ TSK_DEBUG_INFO("__playback_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Consumer Interface ================= */
+int tdav_consumer_waveapi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ tsk_size_t i;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_CONSUMER(consumer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.channels = TMEDIA_CODEC_CHANNELS_AUDIO_DECODING(codec);
+ TMEDIA_CONSUMER(consumer)->audio.in.rate = TMEDIA_CODEC_RATE_DECODING(codec);
+
+ /* codec should have ptime */
+
+
+ /* Format */
+ ZeroMemory(&consumer->wfx, sizeof(WAVEFORMATEX));
+ consumer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+ consumer->wfx.nChannels = TMEDIA_CONSUMER(consumer)->audio.in.channels;
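+ /* prefer the negotiated output rate when set, otherwise fall back to the codec's decoding rate */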
+ consumer->wfx.nSamplesPerSec = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
+ consumer->wfx.wBitsPerSample = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
+ consumer->wfx.nBlockAlign = (consumer->wfx.nChannels * consumer->wfx.wBitsPerSample/8);
+ consumer->wfx.nAvgBytesPerSec = (consumer->wfx.nSamplesPerSec * consumer->wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ consumer->bytes_per_notif = ((consumer->wfx.nAvgBytesPerSec * TMEDIA_CONSUMER(consumer)->audio.ptime)/1000);
+
+ /* create buffers */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(consumer->hWaveHeaders[0]); i++){
+ create_wavehdr(consumer, i);
+ }
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ MMRESULT result;
+ tsk_size_t i;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->started || consumer->hWaveOut){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ /* create events */
+ if(!consumer->events[0]){
+ consumer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+ if(!consumer->events[1]){
+ consumer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+
+ /* open */
+ result = waveOutOpen((HWAVEOUT *)&consumer->hWaveOut, WAVE_MAPPER, &consumer->wfx, (DWORD_PTR)consumer->events[0], (DWORD_PTR)consumer, CALLBACK_EVENT);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutOpen");
+ return -2;
+ }
+
+ /* write */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(consumer->hWaveHeaders[0]); i++){
+ write_wavehdr(consumer, i);
+ }
+
+ /* start thread */
+ consumer->started = tsk_true;
+ tsk_thread_create(&consumer->tid[0], __playback_thread, consumer);
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_consume(tmedia_consumer_t* self, const void* buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+
+ if(!consumer || !buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* buffer is already decoded */
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+}
+
+int tdav_consumer_waveapi_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ MMRESULT result;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!consumer->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+
+ /* stop thread */
+ if(consumer->tid[0]){
+ SetEvent(consumer->events[1]);
+ tsk_thread_join(&(consumer->tid[0]));
+ }
+
+ /* should be done here */
+ consumer->started = tsk_false;
+
+ if(consumer->hWaveOut && ((result = waveOutReset(consumer->hWaveOut)) != MMSYSERR_NOERROR)){
+ print_last_error(result, "waveOutReset");
+ }
+
+ return 0;
+}
+
+
+//
+// WaveAPI consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_waveapi_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+ InitializeCriticalSection(&consumer->cs);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_waveapi_dtor(tsk_object_t * self)
+{
+ tdav_consumer_waveapi_t *consumer = self;
+ if(consumer){
+ tsk_size_t i;
+
+ /* stop */
+ if(consumer->started){
+ tdav_consumer_waveapi_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ /* deinit self */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ free_wavehdr(consumer, i);
+ }
+ if(consumer->hWaveOut){
+ waveOutClose(consumer->hWaveOut);
+ }
+ if(consumer->events[0]){
+ CloseHandle(consumer->events[0]);
+ }
+ if(consumer->events[1]){
+ CloseHandle(consumer->events[1]);
+ }
+ DeleteCriticalSection(&consumer->cs);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_waveapi_def_s =
+{
+ sizeof(tdav_consumer_waveapi_t),
+ tdav_consumer_waveapi_ctor,
+ tdav_consumer_waveapi_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_waveapi_plugin_def_s =
+{
+ &tdav_consumer_waveapi_def_s,
+
+ tmedia_audio,
+ "Microsoft WaveAPI consumer",
+
+ tdav_consumer_waveapi_set,
+ tdav_consumer_waveapi_prepare,
+ tdav_consumer_waveapi_start,
+ tdav_consumer_waveapi_consume,
+ tdav_consumer_waveapi_pause,
+ tdav_consumer_waveapi_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_waveapi_plugin_def_t = &tdav_consumer_waveapi_plugin_def_s;
+
+#endif /* HAVE_WAVE_API */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
new file mode 100644
index 0000000..d077790
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
@@ -0,0 +1,393 @@
+/*
+* Copyright (C) 2010-2015 Mamadou DIOP.
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_waveapi.c
+ * @brief Audio Producer for Win32 and WinCE platforms.
+ */
+#include "tinydav/audio/waveapi/tdav_producer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_producer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT];
+
+ waveInGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+static int free_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->hWaveHeaders[index]){
+ TSK_FREE(producer->hWaveHeaders[index]->lpData);
+ TSK_FREE(producer->hWaveHeaders[index]);
+ }
+
+ return 0;
+}
+
+static int create_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->hWaveHeaders[index]){
+ free_wavehdr(producer, index);
+ }
+
+ producer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+ producer->hWaveHeaders[index]->lpData = tsk_calloc(1, producer->bytes_per_notif);
+ producer->hWaveHeaders[index]->dwBufferLength = (DWORD)producer->bytes_per_notif;
+ producer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+ producer->hWaveHeaders[index]->dwLoops = 0x01;
+ producer->hWaveHeaders[index]->dwUser = index;
+
+ return 0;
+}
+
+static int add_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ MMRESULT result;
+
+ if(!producer || !producer->hWaveHeaders[index] || !producer->hWaveIn){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveInPrepareHeader(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInPrepareHeader");
+ return -2;
+ }
+
+ result = waveInAddBuffer(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInAddBuffer");
+ return -3;
+ }
+
+ return 0;
+}
+
+static int record_wavehdr(tdav_producer_waveapi_t* producer, LPWAVEHDR lpHdr)
+{
+ MMRESULT result;
+
+ if(!producer || !lpHdr || !producer->hWaveIn){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ //
+ // Alert the session that there is new data to send over the network
+ //
+ if(TMEDIA_PRODUCER(producer)->enc_cb.callback){
+#if 0
+ {
+ static FILE* f = NULL;
+ if(!f) f = fopen("./waveapi_producer.raw", "w+");
+ fwrite(lpHdr->lpData, 1, lpHdr->dwBytesRecorded, f);
+ }
+#endif
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data, lpHdr->lpData, lpHdr->dwBytesRecorded);
+ }
+
+ if(!producer->started){
+ return 0;
+ }
+
+ result = waveInUnprepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInUnprepareHeader");
+ return -2;
+ }
+
+ result = waveInPrepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInPrepareHeader");
+ return -3;
+ }
+
+ result = waveInAddBuffer(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInAddBuffer");
+ return -4;
+ }
+
+ return 0;
+}
+
+static void* TSK_STDCALL __record_thread(void *param)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)param;
+ DWORD dwEvent;
+ tsk_size_t i;
+
+ TSK_DEBUG_INFO("__record_thread -- START");
+
+ // SetPriorityClass(GetCurrentThread(), REALTIME_PRIORITY_CLASS);
+
+ for(;;){
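+ // events[0] is the waveIn callback event (set when a capture buffer is filled); events[1] requests shutdown.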
+ dwEvent = WaitForMultipleObjects(2, producer->events, FALSE, INFINITE);
+
+ if (dwEvent == (WAIT_OBJECT_0 + 1)){
+ break;
+ }
+ else if (dwEvent == WAIT_OBJECT_0){
+ EnterCriticalSection(&producer->cs);
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(producer->hWaveHeaders[0]); i++){
+ if(producer->hWaveHeaders[i] && (producer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+ record_wavehdr(producer, producer->hWaveHeaders[i]);
+ }
+ }
+ LeaveCriticalSection(&producer->cs);
+ }
+ }
+
+ TSK_DEBUG_INFO("__record_thread() -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Producer Interface ================= */
+int tdav_producer_waveapi_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ tsk_size_t i;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
+ TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
+ /* codec should have ptime */
+
+
+ /* Format */
+ ZeroMemory(&producer->wfx, sizeof(WAVEFORMATEX));
+ producer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+ producer->wfx.nChannels = TMEDIA_PRODUCER(producer)->audio.channels;
+ producer->wfx.nSamplesPerSec = TMEDIA_PRODUCER(producer)->audio.rate;
+ producer->wfx.wBitsPerSample = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
+ producer->wfx.nBlockAlign = (producer->wfx.nChannels * producer->wfx.wBitsPerSample/8);
+ producer->wfx.nAvgBytesPerSec = (producer->wfx.nSamplesPerSec * producer->wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ producer->bytes_per_notif = ((producer->wfx.nAvgBytesPerSec * TMEDIA_PRODUCER(producer)->audio.ptime)/1000);
+
+ /* create buffers */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(producer->hWaveHeaders[0]); i++){
+ create_wavehdr(producer, i);
+ }
+
+ return 0;
+}
+
+int tdav_producer_waveapi_start(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ MMRESULT result;
+ tsk_size_t i;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->started || producer->hWaveIn){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ /* create events */
+ if(!producer->events[0]){
+ producer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+ if(!producer->events[1]){
+ producer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+
+ /* open */
+ result = waveInOpen((HWAVEIN *)&producer->hWaveIn, /*WAVE_MAPPER*/0, &producer->wfx, (DWORD_PTR)producer->events[0], (DWORD_PTR)producer, CALLBACK_EVENT);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInOpen");
+ return -2;
+ }
+
+ /* start */
+ result = waveInStart(producer->hWaveIn);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInStart");
+ return -2;
+ }
+
+ /* write */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ add_wavehdr(producer, i);
+ }
+
+ /* start thread */
+ producer->started = tsk_true;
+ tsk_thread_create(&producer->tid[0], __record_thread, producer);
+
+ return 0;
+}
+
+int tdav_producer_waveapi_pause(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+int tdav_producer_waveapi_stop(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ MMRESULT result;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!producer->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ /* stop thread */
+ if(producer->tid[0]){
+ SetEvent(producer->events[1]);
+ tsk_thread_join(&(producer->tid[0]));
+ }
+
+ /* should be done here */
+ producer->started = tsk_false;
+
+ if(producer->hWaveIn && (((result = waveInReset(producer->hWaveIn)) != MMSYSERR_NOERROR) || ((result = waveInClose(producer->hWaveIn)) != MMSYSERR_NOERROR))){
+ print_last_error(result, "waveInReset/waveInClose");
+ }
+
+ return 0;
+}
+
+
+//
+// WaveAPI producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_waveapi_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ InitializeCriticalSection(&producer->cs);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_waveapi_dtor(tsk_object_t * self)
+{
+ tdav_producer_waveapi_t *producer = self;
+ if(producer){
+ tsk_size_t i;
+
+ /* stop */
+ if(producer->started){
+ tdav_producer_waveapi_stop(self);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+ /* deinit self */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ free_wavehdr(producer, i);
+ }
+ if(producer->hWaveIn){
+ waveInClose(producer->hWaveIn);
+ }
+ if(producer->events[0]){
+ CloseHandle(producer->events[0]);
+ }
+ if(producer->events[1]){
+ CloseHandle(producer->events[1]);
+ }
+ DeleteCriticalSection(&producer->cs);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_waveapi_def_s =
+{
+ sizeof(tdav_producer_waveapi_t),
+ tdav_producer_waveapi_ctor,
+ tdav_producer_waveapi_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_waveapi_plugin_def_s =
+{
+ &tdav_producer_waveapi_def_s,
+
+ tmedia_audio,
+ "Microsoft WaveAPI producer",
+
+ tdav_producer_waveapi_set,
+ tdav_producer_waveapi_prepare,
+ tdav_producer_waveapi_start,
+ tdav_producer_waveapi_pause,
+ tdav_producer_waveapi_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_waveapi_plugin_def_t = &tdav_producer_waveapi_plugin_def_s;
+
+#endif /* HAVE_WAVE_API */ \ No newline at end of file