path: root/tinyDAV/src/audio
author    bossiel <bossiel@yahoo.fr>    2011-08-10 22:59:15 +0000
committer bossiel <bossiel@yahoo.fr>    2011-08-10 22:59:15 +0000
commit    1ebf5a5fcda0c9154e22ed02404fd46525a7fd9f (patch)
tree      4b6214a7142ab1035cb0e47444e88af38e712421 /tinyDAV/src/audio
download  doubango-1.0.zip
          doubango-1.0.tar.gz
Move deprecated v1.0 from trunk to branches1.0
Diffstat (limited to 'tinyDAV/src/audio')
-rw-r--r--    tinyDAV/src/audio/coreaudio/tdav_consumer_coreaudio.c    269
-rw-r--r--    tinyDAV/src/audio/coreaudio/tdav_producer_coreaudio.c    229
-rw-r--r--    tinyDAV/src/audio/directsound/tdav_consumer_dsound.c     377
-rw-r--r--    tinyDAV/src/audio/directsound/tdav_producer_dsound.c     320
-rw-r--r--    tinyDAV/src/audio/tdav_consumer_audio.c                   311
-rw-r--r--    tinyDAV/src/audio/tdav_jitterbuffer.c                     1034
-rw-r--r--    tinyDAV/src/audio/tdav_producer_audio.c                   95
-rw-r--r--    tinyDAV/src/audio/tdav_session_audio.c                    892
-rw-r--r--    tinyDAV/src/audio/tdav_speex_denoise.c                    221
-rw-r--r--    tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c         406
-rw-r--r--    tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c         393
11 files changed, 4547 insertions, 0 deletions
diff --git a/tinyDAV/src/audio/coreaudio/tdav_consumer_coreaudio.c b/tinyDAV/src/audio/coreaudio/tdav_consumer_coreaudio.c
new file mode 100644
index 0000000..39fe1a9
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_consumer_coreaudio.c
@@ -0,0 +1,269 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_coreaudio.c
+ * @brief Audio Consumer for MacOSX and iOS platforms.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_consumer_coreaudio.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+#if HAVE_COREAUDIO
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+static void __handle_output_buffer(void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer) {
+ OSStatus ret;
+ void *data;
+ tsk_size_t out_size = 0;
+ tdav_consumer_coreaudio_t* consumer = (tdav_consumer_coreaudio_t*)userdata;
+
+ if (!consumer->started) {
+ return;
+ }
+
+ if((data = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer), &out_size))){
+ // If we can get audio to play, then copy in the buffer
+ memcpy(buffer->mAudioData, data, TSK_MIN(consumer->buffer_size, out_size));
+ TSK_FREE(data);
+ } else{
+ // Put silence if there is no audio to play
+ memset(buffer->mAudioData, 0, consumer->buffer_size);
+ }
+
+ // Re-enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(consumer->queue, buffer, 0, NULL);
+}
+
+/* ============ Media Consumer Interface ================= */
+#define tdav_consumer_coreaudio_set tsk_null
+
+int tdav_consumer_coreaudio_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_consumer_coreaudio_t* consumer = (tdav_consumer_coreaudio_t*)self;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TDAV_CONSUMER_AUDIO(consumer)->channels = codec->plugin->audio.channels;
+ TDAV_CONSUMER_AUDIO(consumer)->rate = codec->plugin->rate;
+ /* codec should have ptime */
+
+ // Set audio category
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+
+ // Create the audio stream description
+ AudioStreamBasicDescription *description = &(consumer->description);
+ description->mSampleRate = TDAV_CONSUMER_AUDIO(consumer)->rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TDAV_CONSUMER_AUDIO(consumer)->channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TDAV_CONSUMER_AUDIO(consumer)->bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
+ int packetperbuffer = 1000 / TDAV_CONSUMER_AUDIO(consumer)->ptime;
+ consumer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
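+ // e.g. 8000 Hz, 16-bit mono, ptime = 20ms: packetperbuffer = 1000/20 = 50 and
+ // buffer_size = 8000 * 2 / 50 = 320 bytes, i.e. one 20ms playback buffer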
+
+ // Create the playback audio queue
+ ret = AudioQueueNewOutput(&(consumer->description),
+ __handle_output_buffer,
+ consumer,
+ NULL,
+ NULL,
+ 0,
+ &(consumer->queue));
+
+ for(i = 0; i < CoreAudioPlayBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(consumer->queue, consumer->buffer_size, &(consumer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(consumer->buffers[i]->mAudioData, 0, consumer->buffer_size);
+ consumer->buffers[i]->mAudioDataByteSize = consumer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(consumer->queue, consumer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int tdav_consumer_coreaudio_start(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_coreaudio_t* consumer = (tdav_consumer_coreaudio_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->started){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ consumer->started = tsk_true;
+ ret = AudioQueueStart(consumer->queue, NULL);
+
+ return ret;
+}
+
+int tdav_consumer_coreaudio_consume(tmedia_consumer_t* self, void** buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_coreaudio_t* consumer = (tdav_consumer_coreaudio_t*)self;
+
+ if(!consumer || !buffer || !*buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ // buffer is already decoded
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+}
+
+int tdav_consumer_coreaudio_pause(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_coreaudio_t* consumer = (tdav_consumer_coreaudio_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ret = AudioQueuePause(consumer->queue);
+
+ return ret;
+}
+
+int tdav_consumer_coreaudio_stop(tmedia_consumer_t* self)
+{
+ OSStatus ret;
+ tdav_consumer_coreaudio_t* consumer = (tdav_consumer_coreaudio_t*)self;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!consumer->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+
+ consumer->started = tsk_false;
+ ret = AudioQueueStop(consumer->queue, false);
+
+ return ret;
+}
+
+//
+// coreaudio consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_coreaudio_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_coreaudio_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_coreaudio_dtor(tsk_object_t * self)
+{
+ tdav_consumer_coreaudio_t *consumer = self;
+ if(consumer){
+ // Stop the consumer if not done
+ if(consumer->started){
+ tdav_consumer_coreaudio_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (consumer->queue) {
+ tsk_size_t i;
+
+ for(i=0; i<CoreAudioPlayBuffers; i++){
+ AudioQueueFreeBuffer(consumer->queue, consumer->buffers[i]);
+ }
+
+ AudioQueueDispose(consumer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ }
+
+ return self;
+}
+
+/* object definition */
+static const tsk_object_def_t tdav_consumer_coreaudio_def_s =
+{
+ sizeof(tdav_consumer_coreaudio_t),
+ tdav_consumer_coreaudio_ctor,
+ tdav_consumer_coreaudio_dtor,
+ tdav_consumer_audio_cmp,
+};
+
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_coreaudio_plugin_def_s =
+{
+ &tdav_consumer_coreaudio_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio consumer",
+
+ tdav_consumer_coreaudio_set,
+ tdav_consumer_coreaudio_prepare,
+ tdav_consumer_coreaudio_start,
+ tdav_consumer_coreaudio_consume,
+ tdav_consumer_coreaudio_pause,
+ tdav_consumer_coreaudio_stop
+};
+
+const tmedia_consumer_plugin_def_t *tdav_consumer_coreaudio_plugin_def_t = &tdav_consumer_coreaudio_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO */
diff --git a/tinyDAV/src/audio/coreaudio/tdav_producer_coreaudio.c b/tinyDAV/src/audio/coreaudio/tdav_producer_coreaudio.c
new file mode 100644
index 0000000..e0cc3e8
--- /dev/null
+++ b/tinyDAV/src/audio/coreaudio/tdav_producer_coreaudio.c
@@ -0,0 +1,229 @@
+/**@file tdav_producer_coreaudio.c
+ * @brief Audio Producer for MacOSX and iOS platforms.
+ *
+ * @authors
+ * - Laurent Etiemble <laurent.etiemble(at)gmail.com>
+ * - Mamadou Diop <diopmamadou(at)doubango(dot)org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 letiemble
+ */
+#include "tinydav/audio/coreaudio/tdav_producer_coreaudio.h"
+
+
+// http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioQueueReference/Reference/reference.html
+
+#if HAVE_COREAUDIO
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+void __handle_input_buffer (void *userdata, AudioQueueRef queue, AudioQueueBufferRef buffer, const AudioTimeStamp *start_time, UInt32 number_packet_descriptions, const AudioStreamPacketDescription *packet_descriptions ) {
+ OSStatus ret;
+ tdav_producer_coreaudio_t* producer = (tdav_producer_coreaudio_t*)userdata;
+
+ if (!producer->started) {
+ return;
+ }
+
+ // Alert the session that there is new data to send
+ if(TMEDIA_PRODUCER(producer)->enc_cb.callback) {
+ TMEDIA_PRODUCER(producer)->enc_cb.callback(TMEDIA_PRODUCER(producer)->enc_cb.callback_data, buffer->mAudioData, buffer->mAudioDataByteSize);
+ }
+
+ // Re-enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(producer->queue, buffer, 0, NULL);
+}
+
+/* ============ Media Producer Interface ================= */
+#define tdav_producer_coreaudio_set tsk_null
+
+int tdav_producer_coreaudio_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ OSStatus ret;
+ tsk_size_t i;
+ tdav_producer_coreaudio_t* producer = (tdav_producer_coreaudio_t*)self;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TDAV_PRODUCER_AUDIO(producer)->channels = codec->plugin->audio.channels;
+ TDAV_PRODUCER_AUDIO(producer)->rate = codec->plugin->rate;
+ /* codec should have ptime */
+
+
+ // Set audio category
+ UInt32 category = kAudioSessionCategory_PlayAndRecord;
+ AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
+
+ // Create the audio stream description
+ AudioStreamBasicDescription *description = &(producer->description);
+ description->mSampleRate = TDAV_PRODUCER_AUDIO(producer)->rate;
+ description->mFormatID = kAudioFormatLinearPCM;
+ description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+ description->mChannelsPerFrame = TDAV_PRODUCER_AUDIO(producer)->channels;
+ description->mFramesPerPacket = 1;
+ description->mBitsPerChannel = TDAV_PRODUCER_AUDIO(producer)->bits_per_sample;
+ description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
+ description->mBytesPerFrame = description->mBytesPerPacket;
+ description->mReserved = 0;
+
+ int packetperbuffer = 1000 / TDAV_PRODUCER_AUDIO(producer)->ptime;
+ producer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
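+ // buffer_size is the number of bytes captured per ptime window (e.g. 320 bytes for
+ // 8 kHz, 16-bit mono, 20ms), so each input callback delivers roughly one packet worth of audio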
+
+ // Create the record audio queue
+ ret = AudioQueueNewInput(&(producer->description),
+ __handle_input_buffer,
+ producer,
+ NULL,
+ kCFRunLoopCommonModes,
+ 0,
+ &(producer->queue));
+
+ for(i = 0; i < CoreAudioRecordBuffers; i++) {
+ // Create the buffer for the queue
+ ret = AudioQueueAllocateBuffer(producer->queue, producer->buffer_size, &(producer->buffers[i]));
+ if (ret) {
+ break;
+ }
+
+ // Clear the data
+ memset(producer->buffers[i]->mAudioData, 0, producer->buffer_size);
+ producer->buffers[i]->mAudioDataByteSize = producer->buffer_size;
+
+ // Enqueue the buffer
+ ret = AudioQueueEnqueueBuffer(producer->queue, producer->buffers[i], 0, NULL);
+ if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int tdav_producer_coreaudio_start(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_coreaudio_t* producer = (tdav_producer_coreaudio_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->started){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ producer->started = tsk_true;
+ ret = AudioQueueStart(producer->queue, NULL);
+
+ return ret;
+}
+
+int tdav_producer_coreaudio_pause(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_coreaudio_t* producer = (tdav_producer_coreaudio_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ ret = AudioQueuePause(producer->queue);
+
+ return ret;
+}
+
+int tdav_producer_coreaudio_stop(tmedia_producer_t* self)
+{
+ OSStatus ret;
+ tdav_producer_coreaudio_t* producer = (tdav_producer_coreaudio_t*)self;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!producer->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ producer->started = tsk_false;
+ ret = AudioQueueStop(producer->queue, false);
+
+ return ret;
+}
+
+
+//
+// CoreAudio producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_coreaudio_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_coreaudio_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ // TODO
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_coreaudio_dtor(tsk_object_t * self)
+{
+ tdav_producer_coreaudio_t *producer = self;
+ if(producer){
+ // Stop the producer if not done
+ if(producer->started){
+ tdav_producer_coreaudio_stop(self);
+ }
+
+ // Free all buffers and dispose the queue
+ if (producer->queue) {
+ tsk_size_t i;
+
+ for(i=0; i<CoreAudioRecordBuffers; i++){
+ AudioQueueFreeBuffer(producer->queue, producer->buffers[i]);
+ }
+ AudioQueueDispose(producer->queue, true);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_coreaudio_def_s =
+{
+ sizeof(tdav_producer_coreaudio_t),
+ tdav_producer_coreaudio_ctor,
+ tdav_producer_coreaudio_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_coreaudio_plugin_def_s =
+{
+ &tdav_producer_coreaudio_def_s,
+
+ tmedia_audio,
+ "Apple CoreAudio producer",
+
+ tdav_producer_coreaudio_set,
+ tdav_producer_coreaudio_prepare,
+ tdav_producer_coreaudio_start,
+ tdav_producer_coreaudio_pause,
+ tdav_producer_coreaudio_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_coreaudio_plugin_def_t = &tdav_producer_coreaudio_plugin_def_s;
+
+#endif /* HAVE_COREAUDIO */
diff --git a/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
new file mode 100644
index 0000000..dd8d6ed
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_consumer_dsound.c
@@ -0,0 +1,377 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_dsound.c
+ * @brief Microsoft DirectSound consumer.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/directsound/tdav_consumer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+# pragma comment(lib, "dxguid.lib")
+#endif
+
+#include "tinydav/tdav_win32.h"
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+
+#define tdav_consumer_dsound_set tsk_null
+
+static void *__playback_thread(void *param)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)param;
+
+ HRESULT hr;
+ LPVOID lpvAudio1, lpvAudio2;
+ DWORD dwBytesAudio1, dwBytesAudio2;
+
+ void* data;
+ int index;
+
+ TSK_DEBUG_INFO("__playback_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL); // boost the playback thread priority
+
+ for(;;){
+ DWORD dwEvent = WaitForMultipleObjects(sizeof(dsound->notifEvents)/sizeof(HANDLE), dsound->notifEvents, FALSE, INFINITE);
+
+ if(!dsound->started){
+ break;
+ }
+ else {
+ tsk_size_t out_size = 0;
+ data = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(dsound), &out_size);
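+ // dwEvent is the index of the notification event that fired (WAIT_OBJECT_0 is 0);
+ // refill the chunk just after the one that finished playing, wrapping back to 0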
+ index = (dwEvent == (TDAV_DSOUNS_CONSUMER_NOTIF_POS_COUNT-1)) ? 0 : (dwEvent + 1);
+
+ // lock
+ if((hr = IDirectSoundBuffer_Lock(dsound->secondaryBuffer, (index * dsound->bytes_per_notif), dsound->bytes_per_notif, &lpvAudio1, &dwBytesAudio1, &lpvAudio2, &dwBytesAudio2, 0)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_Lock", hr);
+ goto next;
+ }
+
+ if(data){
+ // copy data to dsound buffers
+ memcpy(lpvAudio1, data, TSK_MIN(dwBytesAudio1, out_size));
+ if(lpvAudio2){
+ memcpy(lpvAudio2, ((LPBYTE)data) + dwBytesAudio1, dwBytesAudio2);
+ }
+ }
+ else{
+ // Put silence
+ memset(lpvAudio1, 0, dwBytesAudio1);
+ if(lpvAudio2){
+ memset(lpvAudio2, 0, dwBytesAudio2);
+ }
+ }
+
+ // unlock
+ if((hr = IDirectSoundBuffer_Unlock(dsound->secondaryBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_UnLock", hr);
+ goto next;
+ }
+next:
+ TSK_FREE(data);
+ }
+ }
+
+ TSK_DEBUG_INFO("__playback_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+/* ============ Media Consumer Interface ================= */
+int tdav_consumer_dsound_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ HRESULT hr;
+ HWND hWnd;
+
+ WAVEFORMATEX wfx = {0};
+ DSBUFFERDESC dsbd = {0};
+
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ if(!dsound){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(dsound->device || dsound->primaryBuffer || dsound->secondaryBuffer){
+ TSK_DEBUG_ERROR("Consumer already prepared");
+ return -2;
+ }
+
+ TDAV_CONSUMER_AUDIO(dsound)->channels = codec->plugin->audio.channels;
+ TDAV_CONSUMER_AUDIO(dsound)->rate = codec->plugin->rate;
+
+ /* Create sound device */
+ if((hr = DirectSoundCreate(NULL, &dsound->device, NULL)) != DS_OK){
+ tdav_win32_print_error("DirectSoundCreate", hr);
+ return -3;
+ }
+
+ /* Set CooperativeLevel */
+ if((hWnd = GetConsoleWindow()) || (hWnd = GetDesktopWindow()) || (hWnd = GetForegroundWindow())){
+ if((hr = IDirectSound_SetCooperativeLevel(dsound->device, hWnd, DSSCL_PRIORITY)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_SetCooperativeLevel", hr);
+ }
+ }
+
+ /* Creates the primary buffer and apply format */
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TDAV_CONSUMER_AUDIO(dsound)->channels;
+ wfx.nSamplesPerSec = TDAV_CONSUMER_AUDIO(dsound)->rate;
+ wfx.wBitsPerSample = TDAV_CONSUMER_AUDIO(dsound)->bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ dsound->bytes_per_notif = ((wfx.nAvgBytesPerSec * TDAV_CONSUMER_AUDIO(dsound)->ptime)/1000);
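+ // e.g. 8000 Hz * 2 bytes/sample * 20ms / 1000 = 320 bytes between two notifications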
+
+ dsbd.dwSize = sizeof(DSBUFFERDESC);
+ dsbd.dwFlags = DSBCAPS_PRIMARYBUFFER;
+ dsbd.dwBufferBytes = 0;
+ dsbd.lpwfxFormat = NULL;
+
+ if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->primaryBuffer, NULL)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+ return -4;
+ }
+ if((hr = IDirectSoundBuffer_SetFormat(dsound->primaryBuffer, &wfx)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_SetFormat", hr);
+ return -5;
+ }
+
+ /* Creates the secondary buffer and apply format */
+ dsbd.dwFlags = (DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GLOBALFOCUS);
+ dsbd.dwBufferBytes = (TDAV_DSOUNS_CONSUMER_NOTIF_POS_COUNT * dsound->bytes_per_notif);
+ dsbd.lpwfxFormat = &wfx;
+
+ if((hr = IDirectSound_CreateSoundBuffer(dsound->device, &dsbd, &dsound->secondaryBuffer, NULL)) != DS_OK){
+ tdav_win32_print_error("IDirectSound_CreateSoundBuffer", hr);
+ return -6;
+ }
+
+
+ return 0;
+}
+
+int tdav_consumer_dsound_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ tsk_size_t i;
+ HRESULT hr;
+ LPDIRECTSOUNDNOTIFY lpDSBNotify;
+ DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUNS_CONSUMER_NOTIF_POS_COUNT] = {0};
+
+ if(!dsound){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->device || !dsound->primaryBuffer || !dsound->secondaryBuffer){
+ TSK_DEBUG_ERROR("Consumer not prepared");
+ return -2;
+ }
+
+ if(dsound->started){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ if((hr = IDirectSoundBuffer_QueryInterface(dsound->secondaryBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -3;
+ }
+
+ /* Events associated to notification points */
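+ // each dwOffset is the last byte of chunk i: DirectSound signals the event once the play cursor reaches it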
+ for(i = 0; i<sizeof(dsound->notifEvents)/sizeof(HANDLE); i++){
+ dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ pPosNotify[i].dwOffset = ((dsound->bytes_per_notif * i) + dsound->bytes_per_notif) - 1;
+ pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+ }
+ if((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUNS_CONSUMER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK){
+ IDirectSoundNotify_Release(lpDSBNotify);
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -4;
+ }
+
+ if((hr = IDirectSoundNotify_Release(lpDSBNotify))){
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ }
+
+ /* start the reader thread */
+ tsk_thread_create(&dsound->tid[0], __playback_thread, dsound);
+
+ /* Start the buffer */
+ if((hr = IDirectSoundBuffer_Play(dsound->secondaryBuffer,0, 0, DSBPLAY_LOOPING)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ return -5;
+ }
+
+ dsound->started = tsk_true;
+
+ return 0;
+}
+
+int tdav_consumer_dsound_consume(tmedia_consumer_t* self, void** buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ if(!dsound || !buffer || !*buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* buffer is already decoded */
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(dsound), buffer, size, proto_hdr);
+}
+
+int tdav_consumer_dsound_pause(tmedia_consumer_t* self)
+{
+ return 0;
+}
+
+int tdav_consumer_dsound_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_dsound_t* dsound = (tdav_consumer_dsound_t*)self;
+
+ HRESULT hr;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+
+ /* should be done here */
+ dsound->started = tsk_false;
+
+ /* stop thread */
+ if(dsound->tid[0]){
+ tsk_thread_join(&(dsound->tid[0]));
+ }
+
+ if((hr = IDirectSoundBuffer_Stop(dsound->secondaryBuffer)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_Stop", hr);
+ }
+ if((hr = IDirectSoundBuffer_SetCurrentPosition(dsound->secondaryBuffer, 0)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundBuffer_SetCurrentPosition", hr);
+ }
+
+ return 0;
+}
+
+
+//
+// DirectSound consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_dsound_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_dsound_dtor(tsk_object_t * self)
+{
+ tdav_consumer_dsound_t *dsound = self;
+ if(dsound){
+ tsk_size_t i;
+
+ /* stop */
+ if(dsound->started){
+ tdav_consumer_dsound_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(dsound));
+ /* deinit self */
+ // Release DirectSound buffers and device
+ if(dsound->primaryBuffer){
+ IDirectSoundBuffer_Release(dsound->primaryBuffer);
+ }
+ if(dsound->secondaryBuffer){
+ IDirectSoundBuffer_Release(dsound->secondaryBuffer);
+ }
+ if(dsound->device){
+ IDirectSound_Release(dsound->device);
+ }
+ for(i = 0; i<sizeof(dsound->notifEvents)/sizeof(HANDLE); i++){
+ if(dsound->notifEvents[i]){
+ CloseHandle(dsound->notifEvents[i]);
+ }
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_dsound_def_s =
+{
+ sizeof(tdav_consumer_dsound_t),
+ tdav_consumer_dsound_ctor,
+ tdav_consumer_dsound_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_dsound_plugin_def_s =
+{
+ &tdav_consumer_dsound_def_s,
+
+ tmedia_audio,
+ "Microsoft DirectSound consumer",
+
+ tdav_consumer_dsound_set,
+ tdav_consumer_dsound_prepare,
+ tdav_consumer_dsound_start,
+ tdav_consumer_dsound_consume,
+ tdav_consumer_dsound_pause,
+ tdav_consumer_dsound_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_dsound_plugin_def_t = &tdav_consumer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */
\ No newline at end of file
diff --git a/tinyDAV/src/audio/directsound/tdav_producer_dsound.c b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
new file mode 100644
index 0000000..8e80ec6
--- /dev/null
+++ b/tinyDAV/src/audio/directsound/tdav_producer_dsound.c
@@ -0,0 +1,320 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_dsound.c
+ * @brief Microsoft DirectSound producer.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/directsound/tdav_producer_dsound.h"
+
+#if HAVE_DSOUND_H
+
+#if defined(_MSC_VER)
+# pragma comment(lib, "dsound.lib")
+# pragma comment(lib, "dxguid.lib")
+#endif
+
+#include "tinydav/tdav_win32.h"
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#include <initguid.h>
+
+#define tdav_producer_dsound_set tsk_null
+
+static void *__record_thread(void *param)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)param;
+
+ HRESULT hr;
+ LPVOID lpvAudio1, lpvAudio2;
+ DWORD dwBytesAudio1, dwBytesAudio2;
+
+ TSK_DEBUG_INFO("__record_thread -- START");
+
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL); // boost the capture thread priority
+
+ for(;;){
+ DWORD dwEvent = WaitForMultipleObjects(sizeof(dsound->notifEvents)/sizeof(HANDLE), dsound->notifEvents, FALSE, INFINITE);
+
+ if(!dsound->started){
+ break;
+ }
+ else {
+ // lock
+ if((hr = IDirectSoundCaptureBuffer_Lock(dsound->captureBuffer, (dwEvent * dsound->bytes_per_notif), dsound->bytes_per_notif, &lpvAudio1, &dwBytesAudio1, &lpvAudio2, &dwBytesAudio2, 0)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Lock", hr);
+ goto next;
+ }
+
+ if(TMEDIA_PRODUCER(dsound)->enc_cb.callback){
+ if(lpvAudio2){
+ TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio1, dwBytesAudio1);
+ TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio2, dwBytesAudio2);
+ }
+ else{
+ TMEDIA_PRODUCER(dsound)->enc_cb.callback(TMEDIA_PRODUCER(dsound)->enc_cb.callback_data, lpvAudio1, dwBytesAudio1);
+ }
+ }
+
+ // unlock
+ if((hr = IDirectSoundCaptureBuffer_Unlock(dsound->captureBuffer, lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Unlock", hr);
+ goto next;
+ }
+next:;
+ }
+ }
+
+ TSK_DEBUG_INFO("__record_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+/* ============ Media Producer Interface ================= */
+int tdav_producer_dsound_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ HRESULT hr;
+
+ WAVEFORMATEX wfx = {0};
+ DSCBUFFERDESC dsbd = {0};
+
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ if(!dsound || !codec){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(dsound->device || dsound->captureBuffer){
+ TSK_DEBUG_ERROR("Producer already prepared");
+ return -2;
+ }
+
+ TDAV_PRODUCER_AUDIO(dsound)->channels = codec->plugin->audio.channels;
+ TDAV_PRODUCER_AUDIO(dsound)->rate = codec->plugin->rate;
+
+ /* Create capture device */
+ if((hr = DirectSoundCaptureCreate(NULL, &dsound->device, NULL)) != DS_OK){
+ tdav_win32_print_error("DirectSoundCaptureCreate", hr);
+ return -3;
+ }
+
+ /* Creates the capture buffer */
+ wfx.wFormatTag = WAVE_FORMAT_PCM;
+ wfx.nChannels = TDAV_PRODUCER_AUDIO(dsound)->channels;
+ wfx.nSamplesPerSec = TDAV_PRODUCER_AUDIO(dsound)->rate;
+ wfx.wBitsPerSample = TDAV_PRODUCER_AUDIO(dsound)->bits_per_sample;
+ wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample/8);
+ wfx.nAvgBytesPerSec = (wfx.nSamplesPerSec * wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ dsound->bytes_per_notif = ((wfx.nAvgBytesPerSec * TDAV_PRODUCER_AUDIO(dsound)->ptime)/1000);
+
+ dsbd.dwSize = sizeof(DSCBUFFERDESC);
+ dsbd.dwBufferBytes = (TDAV_DSOUNS_PRODUCER_NOTIF_POS_COUNT * dsound->bytes_per_notif);
+ dsbd.lpwfxFormat = &wfx;
+
+ if((hr = IDirectSoundCapture_CreateCaptureBuffer(dsound->device, &dsbd, &dsound->captureBuffer, NULL)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundCapture_CreateCaptureBuffer", hr);
+ return -4;
+ }
+
+ return 0;
+}
+
+int tdav_producer_dsound_start(tmedia_producer_t* self)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ tsk_size_t i;
+ HRESULT hr;
+ LPDIRECTSOUNDNOTIFY lpDSBNotify;
+ DSBPOSITIONNOTIFY pPosNotify[TDAV_DSOUNS_PRODUCER_NOTIF_POS_COUNT] = {0};
+
+ if(!dsound){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->device || !dsound->captureBuffer){
+ TSK_DEBUG_ERROR("Producer not prepared");
+ return -2;
+ }
+
+ if(dsound->started){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ if((hr = IDirectSoundCaptureBuffer_QueryInterface(dsound->captureBuffer, &IID_IDirectSoundNotify, (LPVOID*)&lpDSBNotify)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_QueryInterface", hr);
+ return -3;
+ }
+
+ /* Events associated to notification points */
+ for(i = 0; i<sizeof(dsound->notifEvents)/sizeof(HANDLE); i++){
+ dsound->notifEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ pPosNotify[i].dwOffset = ((dsound->bytes_per_notif * i) + dsound->bytes_per_notif) - 1;
+ pPosNotify[i].hEventNotify = dsound->notifEvents[i];
+ }
+ if((hr = IDirectSoundNotify_SetNotificationPositions(lpDSBNotify, TDAV_DSOUNS_PRODUCER_NOTIF_POS_COUNT, pPosNotify)) != DS_OK){
+ IDirectSoundNotify_Release(lpDSBNotify);
+ tdav_win32_print_error("IDirectSoundBuffer_QueryInterface", hr);
+ return -4;
+ }
+
+ if((hr = IDirectSoundNotify_Release(lpDSBNotify))){
+ tdav_win32_print_error("IDirectSoundNotify_Release", hr);
+ }
+
+ /* start the reader thread */
+ tsk_thread_create(&dsound->tid[0], __playback_thread, dsound);
+
+ /* Start the buffer */
+ if((hr = IDirectSoundCaptureBuffer_Start(dsound->captureBuffer, DSCBSTART_LOOPING)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Start", hr);
+ return -5;
+ }
+
+ dsound->started = tsk_true;
+
+ return 0;
+}
+
+int tdav_producer_dsound_pause(tmedia_producer_t* self)
+{
+ return 0;
+}
+
+int tdav_producer_dsound_stop(tmedia_producer_t* self)
+{
+ tdav_producer_dsound_t* dsound = (tdav_producer_dsound_t*)self;
+
+ HRESULT hr;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!dsound->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ /* should be done here */
+ dsound->started = tsk_false;
+
+ /* stop thread */
+ if(dsound->tid[0]){
+ tsk_thread_join(&(dsound->tid[0]));
+ }
+
+ if((hr = IDirectSoundCaptureBuffer_Stop(dsound->captureBuffer)) != DS_OK){
+ tdav_win32_print_error("IDirectSoundCaptureBuffer_Stop", hr);
+ }
+
+ return 0;
+}
+
+
+//
+// DirectSound producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_dsound_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_dsound_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_dsound_dtor(tsk_object_t * self)
+{
+ tdav_producer_dsound_t *dsound = self;
+ if(dsound){
+ tsk_size_t i;
+
+ /* stop */
+ if(dsound->started){
+ tdav_producer_dsound_stop(self);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(dsound));
+ /* deinit self */
+ if(dsound->captureBuffer){
+ IDirectSoundCaptureBuffer_Release(dsound->captureBuffer);
+ }
+ if(dsound->device){
+ IDirectSoundCapture_Release(dsound->device);
+ }
+ for(i = 0; i<sizeof(dsound->notifEvents)/sizeof(HANDLE); i++){
+ if(dsound->notifEvents[i]){
+ CloseHandle(dsound->notifEvents[i]);
+ }
+ }
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_dsound_def_s =
+{
+ sizeof(tdav_producer_dsound_t),
+ tdav_producer_dsound_ctor,
+ tdav_producer_dsound_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_dsound_plugin_def_s =
+{
+ &tdav_producer_dsound_def_s,
+
+ tmedia_audio,
+ "Microsoft DirectSound producer",
+
+ tdav_producer_dsound_set,
+ tdav_producer_dsound_prepare,
+ tdav_producer_dsound_start,
+ tdav_producer_dsound_pause,
+ tdav_producer_dsound_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_dsound_plugin_def_t = &tdav_producer_dsound_plugin_def_s;
+
+
+#endif /* HAVE_DSOUND_H */
\ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_consumer_audio.c b/tinyDAV/src/audio/tdav_consumer_audio.c
new file mode 100644
index 0000000..289e0c4
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_consumer_audio.c
@@ -0,0 +1,311 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_audio.c
+ * @brief Base class for all Audio consumers.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_denoise.h"
+#include "tinyrtp/rtp/trtp_rtp_header.h"
+
+#include "tsk_memory.h"
+#include "tsk_time.h"
+#include "tsk_debug.h"
+
+#if TSK_UNDER_WINDOWS
+# include <Winsock2.h> // timeval
+#elif defined(__SYMBIAN32__)
+# include <_timeval.h>
+#else
+# include <sys/time.h>
+#endif
+
+#define TDAV_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_CHANNELS_DEFAULT 2
+#define TDAV_RATE_DEFAULT 8000
+#define TDAV_PTIME_DEFAULT 20
+
+#define TDAV_10MS 10
+#define TDAV_10MS_FRAME_SIZE(self) (((self)->rate * TDAV_10MS)/1000)
+#define TDAV_PTIME_FRAME_SIZE(self) (((self)->rate * (self)->ptime)/1000)
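+// e.g. rate = 8000: TDAV_10MS_FRAME_SIZE = 80 samples; with ptime = 20, TDAV_PTIME_FRAME_SIZE = 160 samples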
+
+static int size_of_short = sizeof(short);
+
+/** Initialize audio consumer */
+int tdav_consumer_audio_init(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if((ret = tmedia_consumer_init(TMEDIA_CONSUMER(self)))){
+ return ret;
+ }
+
+ /* self (should be updated by prepare() using the codec's info) */
+ self->bits_per_sample = TDAV_BITS_PER_SAMPLE_DEFAULT;
+ self->channels = TDAV_CHANNELS_DEFAULT;
+ self->rate = TDAV_RATE_DEFAULT;
+ self->ptime = TDAV_PTIME_DEFAULT;
+
+ /* self:jitterbuffer */
+ if(!self->jb.jbuffer){
+ self->jb.jbuffer = jb_new();
+ self->jb.jcodec = JB_CODEC_OTHER; // FIXME: e.g. JB_CODEC_G711x
+ }
+
+ tsk_safeobj_init(self);
+
+ return 0;
+}
+
+/**
+* Generic function to compare two consumers.
+* @param consumer1 The first consumer to compare.
+* @param consumer2 The second consumer to compare.
+* @retval Returns an integral value indicating the relationship between the two consumers:
+* <0 : @a consumer1 less than @a consumer2.<br>
+* 0 : @a consumer1 identical to @a consumer2.<br>
+* >0 : @a consumer1 greater than @a consumer2.<br>
+*/
+int tdav_consumer_audio_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2)
+{
+ return (TDAV_CONSUMER_AUDIO(consumer1) - TDAV_CONSUMER_AUDIO(consumer2));
+}
+
+/* put data (bytes not shorts) into the jitter buffer (consumers always have ptime of 20ms) */
+int tdav_consumer_audio_put(tdav_consumer_audio_t* self, void** data, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
+ int i, _10ms_size_shorts, _10ms_size_bytes;
+ long now, ts;
+ short* _10ms_buf; // 10ms frame
+
+ if(!self || !data || !*data || !self->jb.jbuffer || !rtp_hdr){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* synchronize the reference timestamp */
+ if(!self->jb.ref_timestamp){
+ uint64_t epoch = tsk_time_epoch();
+ struct timeval tv;
+ long ts = (rtp_hdr->timestamp/(self->rate/1000));
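+ // RTP timestamp converted to milliseconds (e.g. rate = 8000 => timestamp/8)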
+ //=> Do not use (see clock_gettime() on linux): tsk_gettimeofday(&tv, tsk_null);
+ tv.tv_sec = (long)(epoch)/1000;
+ tv.tv_usec = (long)(epoch - (tv.tv_sec*1000))*1000;
+
+ tv.tv_sec -= (ts / self->rate);
+ tv.tv_usec -= (ts % self->rate) * 125;
+ if((tv.tv_usec -= (tv.tv_usec % (TDAV_10MS * 10000))) <0){
+ tv.tv_usec += 1000000;
+ tv.tv_sec -= 1;
+ }
+ self->jb.ref_timestamp = tsk_time_get_ms(&tv);
+
+ switch(rtp_hdr->payload_type){
+ // FIXME: TMEDIA_CODEC_FORMAT_* are "char*" just define int values to avoid char comparison
+ case 8: /*TMEDIA_CODEC_FORMAT_G711a*/
+ case 0: /* TMEDIA_CODEC_FORMAT_G711u */
+ self->jb.jcodec = JB_CODEC_G711x;
+ break;
+ case 18: /* TMEDIA_CODEC_FORMAT_G729 */
+ self->jb.jcodec = JB_CODEC_G729A;
+ break;
+ case 3: /* TMEDIA_CODEC_FORMAT_GSM */
+ self->jb.jcodec = JB_CODEC_GSM_EFR;
+ break;
+
+ default:
+ self->jb.jcodec = JB_CODEC_OTHER;
+ break;
+ }
+ }
+
+ tsk_safeobj_lock(self);
+ // split as several 10ms frames
+ now = (long) (tsk_time_now()-self->jb.ref_timestamp);
+ ts = (long)(rtp_hdr->timestamp/(self->rate/1000));
+ _10ms_size_shorts = TDAV_10MS_FRAME_SIZE(self);
+ _10ms_size_bytes = _10ms_size_shorts * size_of_short;
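+ // e.g. rate = 8000: 80 shorts = 160 bytes per 10ms frame, so a 20ms/320-byte packet is queued as two frames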
+ for(i=0; i<(int)(size/_10ms_size_bytes);i++){
+ if((_10ms_buf = tsk_calloc(_10ms_size_shorts, size_of_short))){
+ memcpy(_10ms_buf, &((uint8_t*)*data)[i*_10ms_size_bytes], _10ms_size_bytes);
+ jb_put(self->jb.jbuffer, _10ms_buf, JB_TYPE_VOICE, TDAV_10MS, ts, now, self->jb.jcodec);
+ _10ms_buf = tsk_null;
+ }
+ ts += TDAV_10MS;
+ }
+ tsk_safeobj_unlock(self);
+
+ return 0;
+}
+
+/* get data from the jitter buffer (consumers should always have ptime of 20ms) */
+void* tdav_consumer_audio_get(tdav_consumer_audio_t* self, tsk_size_t* out_size)
+{
+ void* data = tsk_null;
+ int jret;
+
+ int i, _10ms_count, _10ms_size_bytes, _10ms_size_shorts;
+ long now;
+ short* _10ms_buf = tsk_null;
+
+ *out_size = 0;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ _10ms_size_shorts = TDAV_10MS_FRAME_SIZE(self);
+ _10ms_size_bytes = (_10ms_size_shorts * size_of_short);
+ _10ms_count = (TDAV_PTIME_FRAME_SIZE(self)/_10ms_size_shorts);
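+ // number of 10ms frames needed to fill one ptime period (2 when ptime = 20ms)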
+ now = (long) (tsk_time_now()-self->jb.ref_timestamp);
+
+ tsk_safeobj_lock(self);
+ for(i=0; i<_10ms_count; i++){
+
+ jret = jb_get(self->jb.jbuffer, (void**)&_10ms_buf, now, TDAV_10MS);
+
+ //if(!_10ms_buf){
+ // TSK_DEBUG_ERROR("NO DATA");
+ //}
+
+ switch(jret){
+ case JB_INTERP:
+ TSK_DEBUG_INFO("JB_INTERP");
+ jb_reset_all(self->jb.jbuffer);
+ if((data = tsk_realloc(data, _10ms_size_bytes * _10ms_count))){
+ *out_size = _10ms_size_bytes * _10ms_count;
+ memset(data, 0, *out_size); // silence
+ }
+ i = _10ms_count; // for exit
+ break;
+ case JB_OK:
+ case JB_EMPTY:
+ case JB_NOFRAME:
+ case JB_NOJB:
+ {
+ if(data){
+ if((data = tsk_realloc(data, (*out_size + _10ms_size_bytes)))){
+ if(_10ms_buf && (jret == JB_OK)){
+ /* copy data */
+ memcpy(&((uint8_t*)data)[*out_size], _10ms_buf, _10ms_size_bytes);
+ }
+ else{
+ /* copy silence */
+ memset(&((uint8_t*)data)[*out_size], 0, _10ms_size_bytes);
+ }
+ *out_size += _10ms_size_bytes;
+ }
+ else{ /* realloc failed */
+ *out_size = 0;
+ }
+ }
+ else{
+ data = _10ms_buf, _10ms_buf = tsk_null;
+ *out_size = _10ms_size_bytes;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ TSK_FREE(_10ms_buf);
+ }
+ tsk_safeobj_unlock(self);
+
+ // Denoise()
+ if(data && *out_size == _10ms_size_bytes*2){
+ if(self->denoise && self->denoise->opened){
+ tmedia_denoise_echo_playback(self->denoise, data);
+ }
+ }
+ //else{
+ // TSK_DEBUG_WARN("Invalid buffer");
+ //}
+
+ return data;
+}
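+
+/* Typical usage from a platform playback callback (sketch only; audio_consumer,
+ * device_buffer and device_buffer_size are placeholders). The CoreAudio and
+ * DirectSound consumers in this commit follow the same pattern:
+ *
+ * tsk_size_t out_size = 0;
+ * void* pcm = tdav_consumer_audio_get(audio_consumer, &out_size);
+ * if(pcm){
+ * memcpy(device_buffer, pcm, TSK_MIN(device_buffer_size, out_size));
+ * TSK_FREE(pcm);
+ * }
+ * else{
+ * memset(device_buffer, 0, device_buffer_size); // silence when the jitterbuffer is empty
+ * }
+ */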
+
+/* set denoiser */
+void tdav_consumer_audio_set_denoise(tdav_consumer_audio_t* self, struct tmedia_denoise_s* denoise)
+{
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+ self->denoise = tsk_object_ref(denoise);
+}
+
+/** Reset jitterbuffer */
+int tdav_consumer_audio_reset(tdav_consumer_audio_t* self){
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ tsk_safeobj_lock(self);
+ if(self->jb.jbuffer){
+ jb_reset_all(self->jb.jbuffer);
+ }
+ tsk_safeobj_unlock(self);
+
+ return 0;
+}
+
+/* tsk_safeobj_lock(self); */
+/* tsk_safeobj_unlock(self); */
+
+/** DeInitialize audio consumer */
+int tdav_consumer_audio_deinit(tdav_consumer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if((ret = tmedia_consumer_deinit(TMEDIA_CONSUMER(self)))){
+ /* return ret; */
+ }
+
+ /* self */
+ if(self->jb.jbuffer){
+ jb_destroy(self->jb.jbuffer);
+ }
+ TSK_OBJECT_SAFE_FREE(self->denoise);
+
+ tsk_safeobj_deinit(self);
+
+ return 0;
+}
+
diff --git a/tinyDAV/src/audio/tdav_jitterbuffer.c b/tinyDAV/src/audio/tdav_jitterbuffer.c
new file mode 100644
index 0000000..4794f80
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_jitterbuffer.c
@@ -0,0 +1,1034 @@
+/* File from: http://cms.speakup.nl/tech/opensource/jitterbuffer/verslag-20051209.pdf/ */
+
+/*******************************************************
+* jitterbuffer:
+* an application-independent jitterbuffer, which tries
+* to achieve the maximum user perception during a call.
+* For more information look at:
+* http://www.speakup.nl/opensource/jitterbuffer/
+*
+* Copyright on this file is held by:
+* - Jesse Kaijen <jesse@speakup.nl>
+* - SpeakUp <info@speakup.nl>
+*
+* Contributors:
+* Jesse Kaijen <jesse@speakup.nl>
+*
+* This program is free software, distributed under the terms of:
+* - the GNU Lesser (Library) General Public License
+* - the Mozilla Public License
+*
+* if you are interested in a different licence type, please contact us.
+*
+* How to use the jitterbuffer, please look at the comments
+* in the headerfile.
+*
+* Further details on specific implementations,
+* please look at the comments in the code file.
+*/
+#include "tinydav/audio/tdav_jitterbuffer.h"
+
+#include "tsk_memory.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#define jb_warn(...) (warnf ? warnf(__VA_ARGS__) : (void)0)
+#define jb_err(...) (errf ? errf(__VA_ARGS__) : (void)0)
+#define jb_dbg(...) (dbgf ? dbgf(__VA_ARGS__) : (void)0)
+
+//public functions
+jitterbuffer *jb_new();
+void jb_reset(jitterbuffer *jb);
+void jb_reset_all(jitterbuffer *jb);
+void jb_destroy(jitterbuffer *jb);
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings);
+
+void jb_get_info(jitterbuffer *jb, jb_info *stats);
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings);
+float jb_guess_mos(float p, long d, int codec);
+int jb_has_frames(jitterbuffer *jb);
+
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec);
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl);
+
+
+
+//private functions
+static void set_default_settings(jitterbuffer *jb);
+static void reset(jitterbuffer *jb);
+static long find_pointer(long *array, long max_index, long value);
+static void frame_free(jb_frame *frame);
+
+static void put_control(jitterbuffer *jb, void *data, int type, long ts);
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec);
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec);
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec);
+
+static int get_control(jitterbuffer *jb, void **data);
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl);
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff);
+
+static int get_next_frametype(jitterbuffer *jb, long ts);
+static long get_next_framets(jitterbuffer *jb);
+static jb_frame *get_frame(jitterbuffer *jb, long ts);
+static jb_frame *get_all_frames(jitterbuffer *jb);
+
+//debug...
+static jb_output_function_t warnf, errf, dbgf;
+void jb_setoutput(jb_output_function_t warn, jb_output_function_t err, jb_output_function_t dbg) {
+ warnf = warn;
+ errf = err;
+ dbgf = dbg;
+}
+
+
+/***********
+ * create a new jitterbuffer
+ * return NULL if malloc doesn't work
+ * else return jb with default_settings.
+ */
+jitterbuffer *jb_new()
+{
+ jitterbuffer *jb;
+
+ jb_dbg("N");
+ jb = tsk_calloc(1, sizeof(jitterbuffer));
+ if (!jb) {
+ jb_err("cannot allocate jitterbuffer\n");
+ return NULL;
+ }
+ set_default_settings(jb);
+ reset(jb);
+ return jb;
+}
+
+
+/***********
+ * empty voice messages
+ * reset statistics
+ * keep the settings
+ */
+void jb_reset(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("R");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset()\n");
+ return;
+ }
+
+ //free voice
+ while(jb->voiceframes) {
+ frame = get_all_frames(jb);
+ frame_free(frame);
+ }
+ //reset stats
+ memset(&(jb->info),0,sizeof(jb_info) );
+ // set default settings
+ reset(jb);
+}
+
+
+/***********
+ * empty nonvoice messages
+ * empty voice messages
+ * reset statistics
+ * reset settings to default
+ */
+void jb_reset_all(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ jb_dbg("r");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_reset_all()\n");
+ return;
+ }
+
+ // free nonvoice
+ while(jb->controlframes) {
+ frame = jb->controlframes;
+ jb->controlframes = frame->next;
+ frame_free(frame);
+ }
+ // free voice and reset statistics is done by jb_reset
+ jb_reset(jb);
+ set_default_settings(jb);
+}
+
+
+/***********
+ * destroy the jitterbuffer
+ * free all the [non]voice frames with reset_all
+ * free the jitterbuffer
+ */
+void jb_destroy(jitterbuffer *jb)
+{
+ jb_dbg("D");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_destroy()\n");
+ return;
+ }
+
+ jb_reset_all(jb);
+ free(jb);
+}
+
+
+/***********
+ * Set settings for the jitterbuffer.
+ * Only if a setting is defined it will be written
+ * in the jb->settings.
+ * This means that no setting can be set to zero
+ */
+void jb_set_settings(jitterbuffer *jb, jb_settings *settings)
+{
+ jb_dbg("S");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_set_settings()\n");
+ return;
+ }
+
+ if (settings->min_jb) {
+ jb->settings.min_jb = settings->min_jb;
+ }
+ if (settings->max_jb) {
+ jb->settings.max_jb = settings->max_jb;
+ }
+ if (settings->max_successive_interp) {
+ jb->settings.max_successive_interp = settings->max_successive_interp;
+ }
+ if (settings->extra_delay) {
+ jb->settings.extra_delay = settings->extra_delay;
+ }
+ if (settings->wait_grow) {
+ jb->settings.wait_grow = settings->wait_grow;
+ }
+ if (settings->wait_shrink) {
+ jb->settings.wait_shrink = settings->wait_shrink;
+ }
+ if (settings->max_diff) {
+ jb->settings.max_diff = settings->max_diff;
+ }
+}
+
+
+/***********
+ * validates the statistics
+ * the losspct due the jitterbuffer will be calculated.
+ * delay and delay_target will be calculated
+ * *stats = info
+ */
+void jb_get_info(jitterbuffer *jb, jb_info *stats)
+{
+ long max_index, pointer;
+
+ jb_dbg("I");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get_info()\n");
+ return;
+ }
+
+ jb->info.delay = jb->current - jb->min;
+ jb->info.delay_target = jb->target - jb->min;
+
+ //calculate the losspct...
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ if (max_index>1) {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index, jb->current);
+ jb->info.losspct = ((max_index - pointer)*100/max_index);
+ if (jb->info.losspct < 0) {
+ jb->info.losspct = 0;
+ }
+ } else {
+ jb->info.losspct = 0;
+ }
+
+ *stats = jb->info;
+}
+
+
+/***********
+ * gives the settings for this jitterbuffer
+ * *settings = settings
+ */
+void jb_get_settings(jitterbuffer *jb, jb_settings *settings)
+{
+ jb_dbg("S");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get_settings()\n");
+ return;
+ }
+
+ *settings = jb->settings;
+}
+
+
+/***********
+ * returns an estimate on the MOS with given loss, delay and codec
+ * if the formula is not present the default will be used
+ * please use the JB_CODEC_OTHER if you want to define your own formula
+ *
+ */
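+// e.g. JB_CODEC_G711x with p = 1 (% loss) and d = 40 (ms): 4.42 - 0.63*1 - 0.0071*40 = 3.506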
+float jb_guess_mos(float p, long d, int codec)
+{
+ float result;
+
+ switch (codec) {
+ case JB_CODEC_GSM_EFR:
+ result = (4.31f - 0.23f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G723_1:
+ result = (3.99f - 0.16f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G729:
+ case JB_CODEC_G729A:
+ result = (4.13f - 0.14f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G711x_PLC:
+ result = (4.42f - 0.087f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_G711x:
+ result = (4.42f - 0.63f*p - 0.0071f*d);
+ break;
+
+ case JB_CODEC_OTHER:
+ default:
+ result = (4.42f - 0.63f*p - 0.0071f*d);
+
+ }
+ return result;
+}
+
+
+/***********
+ * if there are any frames left in JB returns JB_OK, otherwise returns JB_EMPTY
+ */
+int jb_has_frames(jitterbuffer *jb)
+{
+ jb_dbg("H");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_has_frames()\n");
+ return JB_NOJB;
+ }
+
+ if(jb->controlframes || jb->voiceframes) {
+ return JB_OK;
+ } else {
+ return JB_EMPTY;
+ }
+}
+
+
+/***********
+ * Put a packet into the jitterbuffers
+ * Only the timestamps of voicepackets are put in the history
+ * this because the jitterbuffer only works for voicepackets
+ * don't put packets twice in history and queue (e.g. transmitting every frame twice)
+ * keep track of statistics
+ */
+void jb_put(jitterbuffer *jb, void *data, int type, long ms, long ts, long now, int codec)
+{
+ long pointer, max_index;
+
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_put()\n");
+ return;
+ }
+
+ jb->info.frames_received++;
+
+ if (type == JB_TYPE_CONTROL) {
+ //put the packet into the control-queue of the jitterbuffer
+ jb_dbg("pC");
+ put_control(jb,data,type,ts);
+
+ } else if (type == JB_TYPE_VOICE) {
+ // only add voice packets that aren't already in the buffer
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, ts);
+ if (jb->hist_sorted_timestamp[pointer]==ts) { //timestamp already in queue
+ jb_dbg("pT");
+ free(data);
+ jb->info.frames_dropped_twice++;
+ } else { //add
+ jb_dbg("pV");
+ /* add voicepacket to history */
+ put_history(jb,ts,now,ms,codec);
+ /*calculate jitterbuffer size*/
+ calculate_info(jb, ts, now, codec);
+ /*put the packet into the queue of the jitterbuffer*/
+ put_voice(jb,data,type,ms,ts,codec);
+ }
+
+ } else if (type == JB_TYPE_SILENCE){ //silence
+ jb_dbg("pS");
+ put_voice(jb,data,type,ms,ts,codec);
+
+ } else {//should NEVER happen
+ jb_err("jb_put(): type not known\n");
+ free(data);
+ }
+}
+
+
+/***********
+ * control frames have a higher priority than voice frames
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it's no time to play voice and no control available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ */
+int jb_get(jitterbuffer *jb, void **data, long now, long interpl)
+{
+ int result;
+
+ jb_dbg("A");
+ if (jb == NULL) {
+ jb_err("no jitterbuffer in jb_get()\n");
+ return JB_NOJB;
+ }
+
+ result = get_control(jb, data);
+ if (result != JB_OK ) { //no control message available maybe there is voice...
+ result = get_voice(jb, data, now, interpl);
+ }
+ return result;
+}
+
+
+/***********
+ * set all the settings to default
+ */
+static void set_default_settings(jitterbuffer *jb)
+{
+ jb->settings.min_jb = JB_MIN_SIZE;
+ jb->settings.max_jb = JB_MAX_SIZE;
+ jb->settings.max_successive_interp = JB_MAX_SUCCESSIVE_INTERP;
+ jb->settings.extra_delay = JB_ALLOW_EXTRA_DELAY;
+ jb->settings.wait_grow = JB_WAIT_GROW;
+ jb->settings.wait_shrink = JB_WAIT_SHRINK;
+ jb->settings.max_diff = JB_MAX_DIFF;
+}
+
+
+/***********
+ * reset the jitterbuffer so we can start in silence and
+ * we start with a new history
+ */
+static void reset(jitterbuffer *jb)
+{
+ jb->hist_pointer = 0; //start over
+ jb->silence_begin_ts = 0; //no begin_ts defined
+ jb->info.silence =1; //we always start in silence
+}
+
+
+/***********
+ * Search algorithm
+ * @REQUIRE max_index is within array
+ *
+ * Find the position of value in hist_sorted_delay
+ * if value doesn't exist return first pointer where array[low]>value
+ * int low; //the lowest index being examined
+ * int max_index; //the highest index being examined
+ * int mid; //the middle index between low and max_index.
+ * mid ==(low+max_index)/2
+ * at the end low is the position of value or where array[low]>value
+ */
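+// e.g. array = {2,4,4,7}, max_index = 3: value 4 returns 2 (the last equal entry), value 5 returns 3 (first entry greater than value)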
+static long find_pointer(long *array, long max_index, long value)
+{
+ register long low, mid, high;
+ low = 0;
+ high = max_index;
+ while (low<=high) {
+ mid= (low+high)/2;
+ if (array[mid] < value) {
+ low = mid+1;
+ } else {
+ high = mid-1;
+ }
+ }
+ while(low < max_index && (array[low]==array[(low+1)]) ) {
+ low++;
+ }
+ return low;
+}
+
+
+/***********
+ * free the given frame, afterwards the framepointer is undefined
+ */
+static void frame_free(jb_frame *frame)
+{
+ if (frame->data) {
+ free(frame->data);
+ }
+ free(frame);
+}
+
+
+/***********
+ * put a nonvoice frame into the nonvoice queue
+ */
+static void put_control(jitterbuffer *jb, void *data, int type, long ts)
+{
+ jb_frame *frame, *p;
+
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ return;
+ }
+ frame->data = data;
+ frame->ts = ts;
+ frame->type = type;
+ frame->next = NULL;
+ data = NULL;//to avoid stealing memory
+
+ p = jb->controlframes;
+ if (p) { //there are already control messages
+ if (ts < p->ts) {
+ jb->controlframes = frame;
+ frame->next = p;
+ } else {
+ while (p->next && (ts >=p->next->ts)) {//sort on timestamps! so find place to put...
+ p = p->next;
+ }
+ if (p->next) {
+ frame->next = p->next;
+ }
+ p->next = frame;
+ }
+ } else {
+ jb->controlframes = frame;
+ }
+}
+
+
+/***********
+ * put a voice or silence frame into the jitterbuffer
+ */
+static void put_voice(jitterbuffer *jb, void *data, int type, long ms, long ts, int codec)
+{
+ jb_frame *frame, *p;
+ frame = malloc(sizeof(jb_frame));
+ if(!frame) {
+ jb_err("cannot allocate frame\n");
+ return;
+ }
+
+ frame->data = data;
+ frame->ts = ts;
+ frame->ms = ms;
+ frame->type = type;
+ frame->codec = codec;
+
+ data = NULL; //to avoid stealing the memory location
+ /*
+ * frames are a circular list, jb->voiceframes points to the lowest ts,
+ * jb->voiceframes->prev points to the highest ts
+ */
+ if(!jb->voiceframes) { /* queue is empty */
+ jb->voiceframes = frame;
+ frame->next = frame;
+ frame->prev = frame;
+ } else {
+ p = jb->voiceframes;
+ if(ts < p->prev->ts) { //frame is out of order
+ jb->info.frames_ooo++;
+ }
+ if (ts < p->ts) { //frame is lowest, let voiceframes point to it!
+ jb->voiceframes = frame;
+ } else {
+ while(ts < p->prev->ts ) {
+ p = p->prev;
+ }
+ }
+ frame->next = p;
+ frame->prev = p->prev;
+ frame->next->prev = frame;
+ frame->prev->next = frame;
+ }
+}
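+
+/* Illustration: with queued timestamps {100, 140, 160} (jb->voiceframes at 100,
+ * its prev at 160), inserting ts = 120 counts as out-of-order (120 < 160), walks
+ * back from 160 until p points at the 140 frame and links the new frame before it,
+ * giving {100, 120, 140, 160}; inserting ts = 80 would also become the new head
+ * since 80 < 100.
+ */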
+
+
+/***********
+ * puts the timestamps of a received packet in the history of *jb
+ * for later calculations of the size of jitterbuffer *jb.
+ *
+ * summary of function:
+ * - calculate delay difference
+ * - delete old value from hist & sorted_history_delay & sorted_history_timestamp if needed
+ * - add new value to history & sorted_history_delay & sorted_history_timestamp
+ * - we keep sorted_history_delay for calculations
+ * - we keep sorted_history_timestamp for ensuring each timestamp isn't put twice in the buffer.
+ */
+static void put_history(jitterbuffer *jb, long ts, long now, long ms, int codec)
+{
+ jb_hist_element out, in;
+ long max_index, pointer, location;
+
+ // max_index is the highest possible index
+ max_index = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE-1;
+ location = (jb->hist_pointer % JB_HISTORY_SIZE);
+
+ // we only delete a value from the history
+ // once we have gone through it completely (the ring has wrapped).
+ if (jb->hist_pointer > JB_HISTORY_SIZE-1) {
+ /* the value we need to delete from sorted histories */
+ out = jb->hist[location];
+ //delete delay from hist_sorted_delay
+ pointer = find_pointer(&jb->hist_sorted_delay[0], max_index, out.delay);
+ /* shift the array over; pointer is the position of the removed value */
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_delay[pointer]),
+ &(jb->hist_sorted_delay[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+
+ //delete timestamp from hist_sorted_timestamp
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], max_index, out.ts);
+ /* shift the array over; pointer is the position of the removed value */
+ if (pointer<max_index) { //only move if we have something to move
+ memmove( &(jb->hist_sorted_timestamp[pointer]),
+ &(jb->hist_sorted_timestamp[pointer+1]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ }
+ }
+
+ in.delay = now - ts; //delay of current packet
+ in.ts = ts; //timestamp of current packet
+ in.ms = ms; //length of current packet
+ in.codec = codec; //codec of current packet
+
+ /* adding the new delay to the sorted history
+ * first special cases:
+ * - delay is the first history stamp
+ * - delay > highest history stamp
+ */
+ if (max_index==0 || in.delay >= jb->hist_sorted_delay[max_index-1]) {
+ jb->hist_sorted_delay[max_index] = in.delay;
+ } else {
+ pointer = find_pointer(&jb->hist_sorted_delay[0], (max_index-1), in.delay);
+ /* move over and add delay */
+ memmove( &(jb->hist_sorted_delay[pointer+1]),
+ &(jb->hist_sorted_delay[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_delay[pointer] = in.delay;
+ }
+
+ /* adding the new timestamp to the sorted history
+ * first special cases:
+ * - timestamp is the first history stamp
+ * - timestamp > highest history stamp
+ */
+ if (max_index==0 || in.ts >= jb->hist_sorted_timestamp[max_index-1]) {
+ jb->hist_sorted_timestamp[max_index] = in.ts;
+ } else {
+
+ pointer = find_pointer(&jb->hist_sorted_timestamp[0], (max_index-1), in.ts);
+ /* move over and add timestamp */
+ memmove( &(jb->hist_sorted_timestamp[pointer+1]),
+ &(jb->hist_sorted_timestamp[pointer]),
+ ((JB_HISTORY_SIZE-(pointer+1)) * sizeof(long)) );
+ jb->hist_sorted_timestamp[pointer] = in.ts;
+ }
+
+ /* put the jb_hist_element in the history
+ * then increase hist_pointer for next time
+ */
+ jb->hist[location] = in;
+ jb->hist_pointer++;
+}
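+
+/* Note: once hist_pointer exceeds JB_HISTORY_SIZE-1 the history acts as a ring
+ * buffer; each new packet overwrites the oldest slot of jb->hist, and that slot's
+ * old delay/timestamp are first removed from the two sorted arrays (find_pointer +
+ * memmove) so the sorted views always cover exactly the last JB_HISTORY_SIZE packets.
+ */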
+
+
+/***********
+ * this tries to make a jitterbuffer that behaves like
+ * the jitterbuffer proposed in this article:
+ * Adaptive Playout Buffer Algorithm for Enhancing Perceived Quality of Streaming Applications
+ * by: Kouhei Fujimoto & Shingo Ata & Masayuki Murata
+ * http://www.nal.ics.es.osaka-u.ac.jp/achievements/web2002/pdf/journal/k-fujimo02TSJ-AdaptivePlayoutBuffer.pdf
+ *
+ * it calculates jitter and minimum delay and
+ * gets the best delay for the specified codec
+ */
+static void calculate_info(jitterbuffer *jb, long ts, long now, int codec)
+{
+ long diff, size, max_index, d, d1, d2, n;
+ float p, p1, p2, A, B;
+ //size = how many items there are in the history
+ size = (jb->hist_pointer < JB_HISTORY_SIZE) ? jb->hist_pointer : JB_HISTORY_SIZE;
+ max_index = size-1;
+
+ /*
+ * the Inter-Quartile Range can be used for estimating jitter
+ * http://www.slac.stanford.edu/comp/net/wan-mon/tutorial.html#variable
+ * just take the square root of the iqr for jitter
+ */
+ jb->info.iqr = jb->hist_sorted_delay[max_index*3/4] - jb->hist_sorted_delay[max_index/4];
+
+
+ /*
+ * The RTP way of calculating jitter.
+ * This one is used at the moment, although it is not correct.
+ * But in this way the other side understands us.
+ */
+ diff = now - ts - jb->last_delay;
+ if (!jb->last_delay) {
+ diff = 0; //this is to make sure we won't get odd jitter due to the first ts.
+ }
+ jb->last_delay = now - ts;
+ if (diff <0){
+ diff = -diff;
+ }
+ jb->info.jitter = jb->info.jitter + (diff - jb->info.jitter)/16;
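+ /* This is the RFC 3550 inter-arrival jitter estimator in incremental form:
+ * J(i) = J(i-1) + (|D(i-1,i)| - J(i-1)) / 16, where D is the change in relative
+ * transit time (now - ts) between consecutive packets. For example, with a
+ * previous jitter of 16 ms and |D| = 48 ms the new estimate is 16 + (48-16)/16 = 18 ms. */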
+
+ /* jb->min is minimum delay in hist_sorted_delay, we don't look at the lowest 2% */
+ /* because sometimes there are odd delays in there */
+ jb->min = jb->hist_sorted_delay[(max_index*2/100)];
+
+ /*
+ * calculating the preferred size of the jitterbuffer:
+ * instead of calculating the optimum delay using the Pareto equation
+ * we look at the array of sorted delays and choose the optimum from there.
+ * We always walk through a percentage of the history because of tails like the following:
+ * [...., 12, 300, 301, 302]
+ * here we want to discard the last three, which won't happen unless we walk the array.
+ * The number of frames we walk depends on how scattered the sorted delays are.
+ * For that we look at the iqr. The thresholds on the iqr are based on
+ * tests done in the lab and are not optimized.
+ */
+ //init:
+ //the highest delay...
+ d = d1= d2 = jb->hist_sorted_delay[max_index]- jb->min;
+ A=B=LONG_MIN;
+ p = p2 =0;
+ n=0;
+ p1 = 5; //always look at the top 5%
+ if (jb->info.iqr >200) { //with more jitter look at more delays
+ p1=25;
+ } else if (jb->info.iqr >100) {
+ p1=20;
+ } else if (jb->info.iqr >50){
+ p1=11;
+ }
+
+ //find the optimum delay..
+ while(max_index>10 && (B > A ||p2<p1)) { // By MDI: from ">=" to ">"
+ //the packetloss with this delay
+ p2 =(n*100.0f/size);
+ // estimate MOS-value
+ B = jb_guess_mos(p2,d2,codec);
+ if (B > A) {
+ p = p2;
+ d = d2;
+ A = B;
+ }
+ d1 = d2;
+ //find next delay != delay so the same delay isn't calculated twice
+ //don't look further if we have seen half of the history
+ while((d2>=d1) && ((n*2)<max_index) ) {
+ n++;
+ d2 = jb->hist_sorted_delay[(max_index-n)] - jb->min;
+ }
+ }
+ //the targeted size of the jitterbuffer
+ if (jb->settings.min_jb && (jb->settings.min_jb > d) ) {
+ jb->target = jb->min + jb->settings.min_jb;
+ } else if (jb->settings.max_jb && (jb->settings.max_jb > d) ){
+ jb->target = jb->min + jb->settings.max_jb;
+ } else {
+ jb->target = jb->min + d;
+ }
+}
+
+
+/***********
+ * if there is a nonvoice frame it will be returned in *data and the frame
+ * will be freed
+ */
+static int get_control(jitterbuffer *jb, void **data)
+{
+ jb_frame *frame;
+ int result;
+
+ frame = jb->controlframes;
+ if (frame) {
+ jb_dbg("gC");
+ *data = frame->data;
+ frame->data = NULL;
+ jb->controlframes = frame->next;
+ frame_free(frame);
+ result = JB_OK;
+ } else {
+ result = JB_NOFRAME;
+ }
+ return result;
+}
+
+
+/***********
+ * returns JB_OK if a frame is available and *data points to the packet
+ * returns JB_NOFRAME if it's not time to play voice and/or no frame is available
+ * returns JB_INTERP if interpolating is required
+ * returns JB_EMPTY if no voice frame is in the jitterbuffer (only during silence)
+ *
+ * if the next frame is a silence frame we will go in silence-mode
+ * each new instance of the jitterbuffer will start in silence mode
+ * in silence mode we will set the jitterbuffer to the size we want
+ * when we are not in silence mode get_voicecase will handle the rest.
+ */
+static int get_voice(jitterbuffer *jb, void **data, long now, long interpl)
+{
+ jb_frame *frame;
+ long diff;
+ int result;
+
+ diff = jb->target - jb->current;
+
+ //if the next frame is a silence frame, go in silence mode...
+ if((get_next_frametype(jb, now - jb->current) == JB_TYPE_SILENCE) ) {
+ jb_dbg("gs");
+ frame = get_frame(jb, now - jb->current);
+ *data = frame->data;
+ frame->data = NULL;
+ jb->info.silence =1;
+ jb->silence_begin_ts = frame->ts;
+ frame_free(frame);
+ result = JB_OK;
+ } else {
+ if(jb->info.silence) { // we are in silence
+ /*
+ * During silence we can set the jitterbuffer size to the size
+ * we want...
+ */
+ if (diff) {
+ jb->current = jb->target;
+ }
+ frame = get_frame(jb, now - jb->current);
+ if (frame) {
+ if (jb->silence_begin_ts && frame->ts < jb->silence_begin_ts) {
+ jb_dbg("gL");
+ /* voice frame is late, next!*/
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("gP");
+ /* voice frame */
+ jb->info.silence = 0;
+ jb->silence_begin_ts = 0;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->info.last_voice_ms = frame->ms;
+ *data = frame->data;
+ frame->data = NULL;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { //no frame
+ jb_dbg("gS");
+ result = JB_EMPTY;
+ }
+ } else { //voice case
+ result = get_voicecase(jb,data,now,interpl,diff);
+ }
+ }
+ return result;
+}
+
+
+/***********
+ * The voicecase has four 'options'
+ * - difference is way off, reset
+ * - diff > 0, we may need to grow
+ * - diff < 0, we may need to shrink
+ * - everything else
+ */
+static int get_voicecase(jitterbuffer *jb, void **data, long now, long interpl, long diff)
+{
+ jb_frame *frame;
+ int result;
+
+ // * - difference is way off, reset
+ if (diff > jb->settings.max_diff || -diff > jb->settings.max_diff) {
+ jb_err("wakko diff in get_voicecase\n");
+ reset(jb); //reset hist because the timestamps are wakko.
+ result = JB_NOFRAME;
+ //- diff > 0, we may need to grow
+ } else if ((diff > 0) &&
+ (now > (jb->last_adjustment + jb->settings.wait_grow)
+ || (now + jb->current + interpl) < get_next_framets(jb) ) ) { //grow
+ /* first try to grow */
+ if (diff<interpl/2) {
+ jb_dbg("ag");
+ jb->current +=diff;
+ } else {
+ jb_dbg("aG");
+ /* grow by interp frame len */
+ jb->current += interpl;
+ }
+ jb->last_adjustment = now;
+ result = get_voice(jb, data, now, interpl);
+ //- diff < 0, we may need to shrink
+ } else if ( (diff < 0)
+ && (now > (jb->last_adjustment + jb->settings.wait_shrink))
+ && ((-diff) > jb->settings.extra_delay) ) {
+ /* now try to shrink
+ * if there is a frame shrink by frame length
+ * otherwise shrink by interpl
+ */
+ jb->last_adjustment = now;
+
+ frame = get_frame(jb, now - jb->current);
+ if(frame) {
+ jb_dbg("as");
+ /* shrink by frame size we're throwing out */
+ jb->info.frames_dropped++;
+ jb->current -= frame->ms;
+ frame_free(frame);
+ } else {
+ jb_dbg("aS");
+ /* shrink by interpl */
+ jb->current -= interpl;
+ }
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ /* if it is not yet time to play, result = JB_NOFRAME;
+ * otherwise we try to play a frame: if a frame is available
+ * and not late, it is played; if it is late,
+ * it is dropped and the next one is tried;
+ * the last option is interpolating
+ */
+ if (now - jb->current < jb->next_voice_time) {
+ jb_dbg("aN");
+ result = JB_NOFRAME;
+ } else {
+ frame = get_frame(jb, now - jb->current);
+ if (frame) { //there is a frame
+ /* voice frame is late */
+ if(frame->ts < jb->next_voice_time) { //late
+ jb_dbg("aL");
+ jb->info.frames_late++;
+ frame_free(frame);
+ result = get_voice(jb, data, now, interpl);
+ } else {
+ jb_dbg("aP");
+ /* normal case; return the frame, increment stuff */
+ *data = frame->data;
+ frame->data = NULL;
+ jb->next_voice_time = frame->ts + frame->ms;
+ jb->cnt_successive_interp = 0;
+ frame_free(frame);
+ result = JB_OK;
+ }
+ } else { // no frame, thus interpolate
+ jb->cnt_successive_interp++;
+ /* assume silence instead of continuing to interpolate */
+ if (jb->settings.max_successive_interp && jb->cnt_successive_interp >= jb->settings.max_successive_interp) {
+ jb->info.silence = 1;
+ jb->silence_begin_ts = jb->next_voice_time;
+ }
+ jb_dbg("aI");
+ jb->next_voice_time += interpl;
+ result = JB_INTERP;
+ }
+ }
+ }
+ return result;
+
+}
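+
+/* Worked example (illustrative): with current = 40 ms, target = 100 ms and
+ * interpl = 20 ms, diff = 60 > 0, so once wait_grow has elapsed the buffer grows
+ * by interpl per call until it reaches the target (60 < interpl/2 never holds here).
+ * Conversely with current = 100 ms and target = 40 ms, diff = -60; once wait_shrink
+ * has elapsed and -diff exceeds settings.extra_delay, the buffer shrinks by one
+ * dropped frame's length (or by interpl if no frame is available) per call.
+ */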
+
+
+/***********
+ * if there are frames and the next frame->ts is smaller than or equal to ts,
+ * return the type of the next frame,
+ * else return 0
+ */
+static int get_next_frametype(jitterbuffer *jb, long ts)
+{
+ jb_frame *frame;
+ int result;
+
+ result = 0;
+ frame = jb->voiceframes;
+ if (frame && frame->ts <= ts) {
+ result = frame->type;
+ }
+ return result;
+}
+
+
+/***********
+ * returns ts from next frame in jb->voiceframes
+ * or returns LONG_MAX if there is no frame
+ */
+static long get_next_framets(jitterbuffer *jb)
+{
+ if (jb->voiceframes) {
+ return jb->voiceframes->ts;
+ }
+ return LONG_MAX;
+}
+
+
+/***********
+ * if there is a frame in jb->voiceframes with
+ * a timestamp smaller than or equal to ts,
+ * this frame will be returned and
+ * removed from the queue
+ */
+static jb_frame *get_frame(jitterbuffer *jb, long ts)
+{
+ jb_frame *frame;
+
+ frame = jb->voiceframes;
+ if (frame && frame->ts <= ts) {
+ if(frame->next == frame) {
+ jb->voiceframes = NULL;
+ } else {
+ /* remove this frame */
+ frame->prev->next = frame->next;
+ frame->next->prev = frame->prev;
+ jb->voiceframes = frame->next;
+ }
+ return frame;
+ }
+ return NULL;
+}
+
+/***********
+ * if there is a frame in jb->voiceframes
+ * this frame will be unconditionally returned and
+ * removed from the queue
+ */
+static jb_frame *get_all_frames(jitterbuffer *jb)
+{
+ jb_frame *frame;
+
+ frame = jb->voiceframes;
+ if (frame) {
+ if(frame->next == frame) {
+ jb->voiceframes = NULL;
+ } else {
+ /* remove this frame */
+ frame->prev->next = frame->next;
+ frame->next->prev = frame->prev;
+ jb->voiceframes = frame->next;
+ }
+ return frame;
+ }
+ return NULL;
+}
+
+
+//EOF
diff --git a/tinyDAV/src/audio/tdav_producer_audio.c b/tinyDAV/src/audio/tdav_producer_audio.c
new file mode 100644
index 0000000..7efeb79
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_producer_audio.c
@@ -0,0 +1,95 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_audio.c
+ * @brief Base class for all Audio producers.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/tdav_producer_audio.h"
+
+#define TDAV_BITS_PER_SAMPLE_DEFAULT 16
+#define TDAV_CHANNELS_DEFAULT 1
+#define TDAV_RATE_DEFAULT 8000
+#define TDAV_PTIME_DEFAULT 20
+
+#include "tsk_debug.h"
+
+/** Initialize Audio producer
+* @param self The producer to initialize
+*/
+int tdav_producer_audio_init(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* base */
+ if((ret = tmedia_producer_init(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ /* self (should be updated by prepare() using the codec's info) */
+ self->bits_per_sample = TDAV_BITS_PER_SAMPLE_DEFAULT;
+ self->channels = TDAV_CHANNELS_DEFAULT;
+ self->rate = TDAV_RATE_DEFAULT;
+ self->ptime = TDAV_PTIME_DEFAULT;
+
+ return 0;
+}
+
+/**
+* Generic function to compare two producers.
+* @param producer1 The first producer to compare.
+* @param producer2 The second producer to compare.
+* @retval Returns an integral value indicating the relationship between the two producers:
+* <0 : @a producer1 less than @a producer2.<br>
+* 0 : @a producer1 identical to @a producer2.<br>
+* >0 : @a producer1 greater than @a producer2.<br>
+*/
+int tdav_producer_audio_cmp(const tsk_object_t* producer1, const tsk_object_t* producer2)
+{
+ return (TDAV_PRODUCER_AUDIO(producer1) - TDAV_PRODUCER_AUDIO(producer2));
+}
+
+/** Deinitialize a producer
+*/
+int tdav_producer_audio_deinit(tdav_producer_audio_t* self)
+{
+ int ret;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* base */
+ if((ret = tmedia_producer_deinit(TMEDIA_PRODUCER(self)))){
+ return ret;
+ }
+
+ return ret;
+} \ No newline at end of file
diff --git a/tinyDAV/src/audio/tdav_session_audio.c b/tinyDAV/src/audio/tdav_session_audio.c
new file mode 100644
index 0000000..9a94b5c
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_session_audio.c
@@ -0,0 +1,892 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_session_audio.c
+ * @brief Audio Session plugin.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/tdav_session_audio.h"
+
+#include "tinydav/codecs/dtmf/tdav_codec_dtmf.h"
+#include "tinydav/audio/tdav_consumer_audio.h"
+
+#include "tinymedia/tmedia_denoise.h"
+#include "tinymedia/tmedia_consumer.h"
+#include "tinymedia/tmedia_producer.h"
+
+#include "tinyrtp/trtp_manager.h"
+#include "tinyrtp/rtp/trtp_rtp_packet.h"
+
+#include "tsk_timer.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define IS_DTMF_CODEC(codec) (TMEDIA_CODEC((codec))->plugin == tdav_codec_dtmf_plugin_def_t)
+
+static int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id);
+static struct tdav_session_audio_dtmfe_s* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E);
+static const tmedia_codec_t* _tdav_first_best_neg_codec(const tdav_session_audio_t* session);
+
+
+/* DTMF event object */
+typedef struct tdav_session_audio_dtmfe_s
+{
+ TSK_DECLARE_OBJECT;
+
+ tsk_timer_id_t timer_id;
+ trtp_rtp_packet_t* packet;
+
+ const tdav_session_audio_t* session;
+}
+tdav_session_audio_dtmfe_t;
+extern const tsk_object_def_t *tdav_session_audio_dtmfe_def_t;
+
+// RTP/RTCP callback (From the network to the consumer)
+static int tdav_session_audio_rtp_cb(const void* callback_data, const struct trtp_rtp_packet_s* packet)
+{
+ tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
+
+ if(!audio || !packet){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(audio->consumer){
+ tsk_size_t out_size = 0;
+ tmedia_codec_t* codec;
+ tsk_istr_t format;
+
+ // Find the codec to use to decode the RTP payload
+ tsk_itoa(packet->header->payload_type, &format);
+ if(!(codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->neg_codecs, format)) || !codec->plugin || !codec->plugin->decode){
+ TSK_DEBUG_ERROR("%s is not a valid payload for this session", format);
+ TSK_OBJECT_SAFE_FREE(codec);
+ return -2;
+ }
+ // Open codec if not already done
+ if(!TMEDIA_CODEC(codec)->opened){
+ int ret;
+ tsk_safeobj_lock(audio);
+ if((ret = tmedia_codec_open(codec))){
+ tsk_safeobj_unlock(audio);
+ TSK_OBJECT_SAFE_FREE(codec);
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", codec->plugin->desc);
+ return ret;
+ }
+ tsk_safeobj_unlock(audio);
+ }
+ // Decode data
+ out_size = codec->plugin->decode(codec, packet->payload.data, packet->payload.size, &audio->decoder.buffer, &audio->decoder.buffer_size, packet->header);
+ if(out_size){
+ // Denoise (VAD, AGC, Noise suppression, ...)
+ // See tdav_consumer_audio.c::tdav_consumer_audio_get()
+ //if(audio->denoise && TMEDIA_DENOISE(audio->denoise)->opened){
+ // tmedia_denoise_echo_playback(TMEDIA_DENOISE(audio->denoise), audio->decoder.buffer);
+ //}
+ tmedia_consumer_consume(audio->consumer, &audio->decoder.buffer, out_size, packet->header);
+ if(!audio->decoder.buffer){
+ /* taken by the consumer */
+ audio->decoder.buffer_size = 0;
+ }
+ }
+ TSK_OBJECT_SAFE_FREE(codec);
+ }
+ return 0;
+}
+
+// Producer callback (From the producer to the network). Will encode() data before sending
+static int tdav_session_audio_producer_enc_cb(const void* callback_data, const void* buffer, tsk_size_t size)
+{
+ int ret = 0; // keep the return value defined even if no RTP manager is set
+
+ tdav_session_audio_t* audio = (tdav_session_audio_t*)callback_data;
+
+ if(audio->rtp_manager){
+ /* encode */
+ tsk_size_t out_size = 0;
+
+ ret = 0;
+
+ //
+ // Find Encoder (call one time)
+ //
+ if(!audio->encoder.codec){
+ tsk_list_item_t* item;
+ tsk_list_foreach(item, TMEDIA_SESSION(audio)->neg_codecs){
+ if(!tsk_striequals(TMEDIA_CODEC(item->data)->neg_format, TMEDIA_CODEC_FORMAT_DTMF) &&
+ !tsk_striequals(TMEDIA_CODEC(item->data)->format, TMEDIA_CODEC_FORMAT_DTMF)){
+ audio->encoder.codec = tsk_object_ref(item->data);
+ trtp_manager_set_payload_type(audio->rtp_manager, audio->encoder.codec->neg_format ? atoi(audio->encoder.codec->neg_format) : atoi(audio->encoder.codec->format));
+ /* Denoise */
+ if(audio->denoise && !audio->denoise->opened){
+ ret = tmedia_denoise_open(audio->denoise,
+ TMEDIA_CODEC_PCM_FRAME_SIZE(audio->encoder.codec), //160 if 20ms at 8khz
+ TMEDIA_CODEC_RATE(audio->encoder.codec), tsk_true, 8000.0f, tsk_true, tsk_false);
+ }
+ break;
+ }
+ }
+ }
+ if(!audio->encoder.codec){
+ TSK_DEBUG_ERROR("Failed to find a valid codec");
+ return -3;
+ }
+
+ // Open codec if not already done
+ if(!audio->encoder.codec->opened){
+ tsk_safeobj_lock(audio);
+ if((ret = tmedia_codec_open(audio->encoder.codec))){
+ tsk_safeobj_unlock(audio);
+ TSK_DEBUG_ERROR("Failed to open [%s] codec", audio->encoder.codec->plugin->desc);
+ return -4;
+ }
+ tsk_safeobj_unlock(audio);
+ }
+ // Denoise (VAD, AGC, Noise suppression, ...)
+ if(audio->denoise){
+ tsk_bool_t silence_or_noise = tsk_false;
+ ret = tmedia_denoise_process(TMEDIA_DENOISE(audio->denoise), (void*)buffer, &silence_or_noise);
+ if(silence_or_noise && (ret == 0)){
+ //FIXME:
+ TSK_DEBUG_INFO("Silence or Noise buffer");
+ return 0;
+ }
+ }
+
+ // Encode data
+ if((audio->encoder.codec = tsk_object_ref(audio->encoder.codec))){ /* Thread safeness (SIP reINVITE or UPDATE could update the encoder) */
+ out_size = audio->encoder.codec->plugin->encode(audio->encoder.codec, buffer, size, &audio->encoder.buffer, &audio->encoder.buffer_size);
+ if(out_size){
+ ret = trtp_manager_send_rtp(audio->rtp_manager, audio->encoder.buffer, out_size, TMEDIA_CODEC_PCM_FRAME_SIZE(audio->encoder.codec), tsk_false/*Marker*/, tsk_true/*lastPacket*/);
+ }
+ tsk_object_unref(audio->encoder.codec);
+ }
+ else{
+ TSK_DEBUG_WARN("No encoder");
+ }
+ }
+
+ return ret;
+}
+
+
+/* ============ Plugin interface ================= */
+
+int tdav_session_audio_set(tmedia_session_t* self, const tmedia_param_t* param)
+{
+ int ret = 0;
+ tdav_session_audio_t* audio;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ if(param->plugin_type == tmedia_ppt_consumer){
+ TSK_DEBUG_WARN("Not implemented");
+ }
+ else if(param->plugin_type == tmedia_ppt_producer){
+ TSK_DEBUG_WARN("Not implemented");
+ }
+ else{
+ if(param->value_type == tmedia_pvt_pchar){
+ if(tsk_striequals(param->key, "remote-ip")){
+ /* only if no ip associated to the "m=" line */
+ if(param->value && !audio->remote_ip){
+ audio->remote_ip = tsk_strdup(param->value);
+ }
+ }
+ else if(tsk_striequals(param->key, "local-ip")){
+ tsk_strupdate(&audio->local_ip, param->value);
+ }
+ else if(tsk_striequals(param->key, "local-ipver")){
+ audio->useIPv6 = tsk_striequals(param->value, "ipv6");
+ }
+ }
+ else if(param->value_type == tmedia_pvt_pobject){
+ if(tsk_striequals(param->key, "natt-ctx")){
+ TSK_OBJECT_SAFE_FREE(audio->natt_ctx);
+ audio->natt_ctx = tsk_object_ref(param->value);
+ }
+ }
+ }
+
+ return ret;
+}
+
+int tdav_session_audio_prepare(tmedia_session_t* self)
+{
+ tdav_session_audio_t* audio;
+ int ret = 0;
+
+ audio = (tdav_session_audio_t*)self;
+
+ /* set local port */
+ if(!audio->rtp_manager){
+ if((audio->rtp_manager = trtp_manager_create(audio->rtcp_enabled, audio->local_ip, audio->useIPv6))){
+
+ ret = trtp_manager_set_rtp_callback(audio->rtp_manager, tdav_session_audio_rtp_cb, audio);
+ ret = trtp_manager_prepare(audio->rtp_manager);
+ if(audio->natt_ctx){
+ ret = trtp_manager_set_natt_ctx(audio->rtp_manager, audio->natt_ctx);
+ }
+ }
+ }
+
+ /* Consumer will be prepared in tdav_session_audio_start() */
+ /* Producer will be prepared in tdav_session_audio_start() */
+
+ return ret;
+}
+
+int tdav_session_audio_start(tmedia_session_t* self)
+{
+ tdav_session_audio_t* audio;
+ const tmedia_codec_t* codec;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ if(!(codec = _tdav_first_best_neg_codec(audio))){
+ TSK_DEBUG_ERROR("No codec matched");
+ return -2;
+ }
+
+ if(audio->rtp_manager){
+ int ret;
+ /* RTP/RTCP manager: use latest information. */
+ ret = trtp_manager_set_rtp_remote(audio->rtp_manager, audio->remote_ip, audio->remote_port);
+ //trtp_manager_set_payload_type(audio->rtp_manager, codec->neg_format ? atoi(codec->neg_format) : atoi(codec->format));
+ ret = trtp_manager_start(audio->rtp_manager);
+
+ /* Consumer */
+ if(audio->consumer){
+ tmedia_consumer_prepare(audio->consumer, codec);
+ tmedia_consumer_start(audio->consumer);
+ }
+ /* Producer */
+ if(audio->producer){
+ tmedia_producer_prepare(audio->producer, codec);
+ tmedia_producer_start(audio->producer);
+ }
+ /* Denoise (AEC, Noise Suppression, AGC) */
+ if(audio->denoise && audio->encoder.codec){
+ tmedia_denoise_open(audio->denoise, TMEDIA_CODEC_PCM_FRAME_SIZE(audio->encoder.codec), TMEDIA_CODEC_RATE(audio->encoder.codec), tsk_true, 8000.0f, tsk_true, tsk_true);
+ }
+
+ /* for test */
+ //trtp_manager_send_rtp(audio->rtp_manager, "test", 4, tsk_true);
+ return ret;
+ }
+ else{
+ TSK_DEBUG_ERROR("Invalid RTP/RTCP manager");
+ return -3;
+ }
+
+ return 0;
+}
+
+int tdav_session_audio_stop(tmedia_session_t* self)
+{
+ tdav_session_audio_t* audio;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ /* RTP/RTCP manager */
+ if(audio->rtp_manager){
+ trtp_manager_stop(audio->rtp_manager);
+ }
+
+ /* Consumer */
+ if(audio->consumer){
+ tmedia_consumer_stop(audio->consumer);
+ }
+ /* Producer */
+ if(audio->producer){
+ tmedia_producer_stop(audio->producer);
+ }
+
+ return 0;
+}
+
+int tdav_session_audio_send_dtmf(tmedia_session_t* self, uint8_t event)
+{
+ tdav_session_audio_t* audio;
+ tmedia_codec_t* codec;
+ int ret, rate = 8000, ptime = 20;
+ uint16_t duration;
+ tdav_session_audio_dtmfe_t *dtmfe, *copy;
+ static uint32_t timestamp = 0x3200;
+ static uint32_t seq_num = 0;
+ int format = 101;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ // Find the DTMF codec to use for the RTP payload
+ if((codec = tmedia_codec_find_by_format(TMEDIA_SESSION(audio)->codecs, TMEDIA_CODEC_FORMAT_DTMF))){
+ rate = (int)codec->plugin->rate;
+ format = atoi(codec->neg_format ? codec->neg_format : codec->format);
+ TSK_OBJECT_SAFE_FREE(codec);
+ }
+
+ /* do we have an RTP manager? */
+ if(!audio->rtp_manager){
+ TSK_DEBUG_ERROR("No RTP manager associated to this session");
+ return -2;
+ }
+
+ /* Create Events list */
+ if(!audio->dtmf_events){
+ audio->dtmf_events = tsk_list_create();
+ }
+
+ /* Create global reference to the timer manager */
+ if(!audio->timer.created){
+ if((ret = tsk_timer_mgr_global_ref())){
+ TSK_DEBUG_ERROR("Failed to create Global Timer Manager");
+ return ret;
+ }
+ audio->timer.created = tsk_true;
+ }
+
+ /* Start the timer manager */
+ if(!audio->timer.started){
+ if((ret = tsk_timer_mgr_global_start())){
+ TSK_DEBUG_ERROR("Failed to start Global Timer Manager");
+ return ret;
+ }
+ audio->timer.started = tsk_true;
+ }
+
+
+ /* RFC 4733 - 5. Examples
+
+ +-------+-----------+------+--------+------+--------+--------+------+
+ | Time | Event | M | Time- | Seq | Event | Dura- | E |
+ | (ms) | | bit | stamp | No | Code | tion | bit |
+ +-------+-----------+------+--------+------+--------+--------+------+
+ | 0 | "9" | | | | | | |
+ | | starts | | | | | | |
+ | 50 | RTP | "1" | 0 | 1 | 9 | 400 | "0" |
+ | | packet 1 | | | | | | |
+ | | sent | | | | | | |
+ | 100 | RTP | "0" | 0 | 2 | 9 | 800 | "0" |
+ | | packet 2 | | | | | | |
+ | | sent | | | | | | |
+ | 150 | RTP | "0" | 0 | 3 | 9 | 1200 | "0" |
+ | | packet 3 | | | | | | |
+ | | sent | | | | | | |
+ | 200 | RTP | "0" | 0 | 4 | 9 | 1600 | "0" |
+ | | packet 4 | | | | | | |
+ | | sent | | | | | | |
+ | 200 | "9" ends | | | | | | |
+ | 250 | RTP | "0" | 0 | 5 | 9 | 1600 | "1" |
+ | | packet 4 | | | | | | |
+ | | first | | | | | | |
+ | | retrans- | | | | | | |
+ | | mission | | | | | | |
+ | 300 | RTP | "0" | 0 | 6 | 9 | 1600 | "1" |
+ | | packet 4 | | | | | | |
+ | | second | | | | | | |
+ | | retrans- | | | | | | |
+ | | mission | | | | | | |
+ =====================================================================
+ | 880 | First "1" | | | | | | |
+ | | starts | | | | | | |
+ | 930 | RTP | "1" | 7040 | 7 | 1 | 400 | "0" |
+ | | packet 5 | | | | | | |
+ | | sent | | | | | | |
+ */
+
+ // ref()(thread safeness)
+ audio = tsk_object_ref(audio);
+
+ duration = (rate * ptime)/1000;
+ /* Not mandatory but elegant */
+ timestamp += duration;
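+ /* e.g. with the default rate = 8000 Hz and ptime = 20 ms, duration = 160 RTP
+ * timestamp units per packet; the cumulative durations used below (160, 320,
+ * 480, 640) follow the same progression as the 400/800/1200/1600 values in the
+ * RFC 4733 example above, only with 20 ms packets instead of 50 ms ones. */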
+
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration*1, ++seq_num, timestamp, (uint8_t)format, tsk_true, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime*0, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration*2, ++seq_num, timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime*1, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration*3, ++seq_num, timestamp, (uint8_t)format, tsk_false, tsk_false);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime*2, _tdav_session_audio_dtmfe_timercb, copy);
+
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration*4, ++seq_num, timestamp, (uint8_t)format, tsk_false, tsk_true);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime*3, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration*4, seq_num, timestamp, (uint8_t)format, tsk_false, tsk_true);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime*4, _tdav_session_audio_dtmfe_timercb, copy);
+ copy = dtmfe = _tdav_session_audio_dtmfe_create(audio, event, duration*4, seq_num, timestamp, (uint8_t)format, tsk_false, tsk_true);
+ tsk_list_push_back_data(audio->dtmf_events, (void**)&dtmfe);
+ tsk_timer_mgr_global_schedule(ptime*5, _tdav_session_audio_dtmfe_timercb, copy);
+
+ // unref()(thread safeness)
+ audio = tsk_object_unref(audio);
+
+ return 0;
+}
+
+int tdav_session_audio_pause(tmedia_session_t* self)
+{
+ tdav_session_audio_t* audio;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ /* Consumer */
+ if(audio->consumer){
+ tmedia_consumer_pause(audio->consumer);
+ }
+ /* Producer */
+ if(audio->producer){
+ tmedia_producer_pause(audio->producer);
+ }
+
+ return 0;
+}
+
+const tsdp_header_M_t* tdav_session_audio_get_lo(tmedia_session_t* self)
+{
+ tdav_session_audio_t* audio;
+ tsk_bool_t changed = tsk_false;
+
+ if(!self || !self->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return tsk_null;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ if(!audio->rtp_manager || !audio->rtp_manager->transport){
+ TSK_DEBUG_ERROR("RTP/RTCP manager is invalid");
+ return tsk_null;
+ }
+
+ if(self->ro_changed && self->M.lo){
+ /* Codecs */
+ tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "fmtp");
+ tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "rtpmap");
+ tsk_list_clear_items(self->M.lo->FMTs);
+
+ /* QoS */
+ tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "curr");
+ tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "des");
+ tsdp_header_A_removeAll_by_field(self->M.lo->Attributes, "conf");
+ }
+
+ changed = (self->ro_changed || !self->M.lo);
+
+ if(!self->M.lo){
+ if((self->M.lo = tsdp_header_M_create(self->plugin->media, audio->rtp_manager->rtp.public_port, "RTP/AVP"))){
+ /* If NATT is active, do not rely on the global IP address Connection line */
+ if(audio->natt_ctx){
+ tsdp_header_M_add_headers(self->M.lo,
+ TSDP_HEADER_C_VA_ARGS("IN", audio->useIPv6 ? "IP6" : "IP4", audio->rtp_manager->rtp.public_ip),
+ tsk_null);
+ }
+ /* 3GPP TS 24.229 - 6.1.1 General
+ In order to support accurate bandwidth calculations, the UE may include the "a=ptime" attribute for all "audio" media
+ lines as described in RFC 4566 [39]. If a UE receives an "audio" media line with "a=ptime" specified, the UE should
+ transmit at the specified packetization rate. If a UE receives an "audio" media line which does not have "a=ptime"
+ specified or the UE does not support the "a=ptime" attribute, the UE should transmit at the default codec packetization
+ rate as defined in RFC 3551 [55A]. The UE will transmit consistent with the resources available from the network.
+
+ For "video" and "audio" media types that utilize the RTP/RTCP, the UE shall specify the proposed bandwidth for each
+ media stream utilizing the "b=" media descriptor and the "AS" bandwidth modifier in the SDP.
+
+ The UE shall include the MIME subtype "telephone-event" in the "m=" media descriptor in the SDP for audio media
+ flows that support both audio codec and DTMF payloads in RTP packets as described in RFC 4733 [23].
+ */
+ tsdp_header_M_add_headers(self->M.lo,
+ TSDP_HEADER_A_VA_ARGS("ptime", "20"),
+ tsk_null);
+ // the "telephone-event" fmt/rtpmap is added below
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to create lo");
+ return tsk_null;
+ }
+ }
+
+ /* from codecs to sdp */
+ if(changed){
+ tmedia_codecs_L_t* neg_codecs = tsk_null;
+
+ if(self->M.ro){
+ TSK_OBJECT_SAFE_FREE(self->neg_codecs);
+ /* update negotiated codecs */
+ if((neg_codecs = tmedia_session_match_codec(self, self->M.ro))){
+ self->neg_codecs = neg_codecs;
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ }
+ /* from codecs to sdp */
+ if(TSK_LIST_IS_EMPTY(self->neg_codecs) || ((self->neg_codecs->tail == self->neg_codecs->head) && IS_DTMF_CODEC(TSK_LIST_FIRST_DATA(self->neg_codecs)))){
+ self->M.lo->port = 0; /* Keep the RTP transport and reuse it when we receive a reINVITE or UPDATE request */
+ goto DONE;
+ }
+ else{
+ tmedia_codec_to_sdp(self->neg_codecs, self->M.lo);
+ }
+ }
+ else{
+ /* from codecs to sdp */
+ tmedia_codec_to_sdp(self->codecs, self->M.lo);
+ }
+
+ /* Hold/Resume */
+ if(self->M.ro){
+ if(tsdp_header_M_is_held(self->M.ro, tsk_false)){
+ tsdp_header_M_hold(self->M.lo, tsk_false);
+ }
+ else{
+ tsdp_header_M_resume(self->M.lo, tsk_false);
+ }
+ }
+ ///* 3GPP TS 24.229 - 6.1.1 General
+ // The UE shall include the MIME subtype "telephone-event" in the "m=" media descriptor in the SDP for audio media
+ // flows that support both audio codec and DTMF payloads in RTP packets as described in RFC 4733 [23].
+ //*/
+ //tsdp_header_M_add_fmt(self->M.lo, TMEDIA_CODEC_FORMAT_DTMF);
+ //tsdp_header_M_add_headers(self->M.lo,
+ // TSDP_HEADER_A_VA_ARGS("fmtp", TMEDIA_CODEC_FORMAT_DTMF" 0-15"),
+ // tsk_null);
+ //tsdp_header_M_add_headers(self->M.lo,
+ // TSDP_HEADER_A_VA_ARGS("rtpmap", TMEDIA_CODEC_FORMAT_DTMF" telephone-event/8000"),
+ // tsk_null);
+ /* QoS */
+ if(self->qos){
+ tmedia_qos_tline_t* ro_tline;
+ if(self->M.ro && (ro_tline = tmedia_qos_tline_from_sdp(self->M.ro))){
+ tmedia_qos_tline_set_ro(self->qos, ro_tline);
+ TSK_OBJECT_SAFE_FREE(ro_tline);
+ }
+ tmedia_qos_tline_to_sdp(self->qos, self->M.lo);
+ }
+DONE:;
+ }
+
+ return self->M.lo;
+}
+
+int tdav_session_audio_set_ro(tmedia_session_t* self, const tsdp_header_M_t* m)
+{
+ tdav_session_audio_t* audio;
+ tmedia_codecs_L_t* neg_codecs;
+
+ if(!self || !m){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ audio = (tdav_session_audio_t*)self;
+
+ /* update remote offer */
+ TSK_OBJECT_SAFE_FREE(self->M.ro);
+ self->M.ro = tsk_object_ref((void*)m);
+
+ if(self->M.lo){
+ if((neg_codecs = tmedia_session_match_codec(self, m))){
+ /* update negotiated codecs */
+ TSK_OBJECT_SAFE_FREE(self->neg_codecs);
+ self->neg_codecs = neg_codecs;
+ TSK_OBJECT_SAFE_FREE(audio->encoder.codec);
+ }
+ else{
+ TSK_DEBUG_ERROR("None Match");
+ return -1;
+ }
+ /* QoS */
+ if(self->qos){
+ tmedia_qos_tline_t* ro_tline;
+ if(self->M.ro && (ro_tline = tmedia_qos_tline_from_sdp(self->M.ro))){
+ tmedia_qos_tline_set_ro(self->qos, ro_tline);
+ TSK_OBJECT_SAFE_FREE(ro_tline);
+ }
+ }
+ }
+
+ /* get the connection associated with this media line.
+ * If the connection is global, then the manager will call tdav_session_audio_set() */
+ if(m->C && m->C->addr){
+ tsk_strupdate(&audio->remote_ip, m->C->addr);
+ audio->useIPv6 = tsk_striequals(m->C->addrtype, "IP6");
+ }
+ /* set remote port */
+ audio->remote_port = m->port;
+
+
+ return 0;
+}
+
+/* first best negotiated codec (ignore dtmf) */
+const tmedia_codec_t* _tdav_first_best_neg_codec(const tdav_session_audio_t* session)
+{
+ const tsk_list_item_t* item;
+ tsk_list_foreach(item, TMEDIA_SESSION(session)->neg_codecs){
+ if(!IS_DTMF_CODEC(item->data)){
+ return TMEDIA_CODEC(item->data);
+ }
+ }
+ return tsk_null;
+}
+
+
+/* Internal function used to create new DTMF event */
+tdav_session_audio_dtmfe_t* _tdav_session_audio_dtmfe_create(const tdav_session_audio_t* session, uint8_t event, uint16_t duration, uint32_t seq, uint32_t timestamp, uint8_t format, tsk_bool_t M, tsk_bool_t E)
+{
+ tdav_session_audio_dtmfe_t* dtmfe;
+ static uint8_t volume = 10;
+ static uint32_t ssrc = 0x5234A8;
+
+ uint8_t pay[4] = {0};
+
+ /* RFC 4733 - 2.3. Payload Format
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | event |E|R| volume | duration |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ if(!(dtmfe = tsk_object_new(tdav_session_audio_dtmfe_def_t))){
+ TSK_DEBUG_ERROR("Failed to create new DTMF event");
+ return tsk_null;
+ }
+ dtmfe->session = session;
+
+ if(!(dtmfe->packet = trtp_rtp_packet_create((session && session->rtp_manager) ? session->rtp_manager->rtp.ssrc : ssrc, seq, timestamp, format, M))){
+ TSK_DEBUG_ERROR("Failed to create DTMF RTP packet");
+ TSK_OBJECT_SAFE_FREE(dtmfe);
+ return tsk_null;
+ }
+
+ pay[0] = event;
+ pay[1] |= ((E << 7) | (volume & 0x3F));
+ pay[2] = (duration >> 8);
+ pay[3] = (duration & 0xFF);
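+ /* Worked example (illustrative): event = 1, E = 0, volume = 10 and duration = 160
+ * pack to pay = {0x01, 0x0A, 0x00, 0xA0}; the final packets of the event set E = 1,
+ * which flips pay[1] to 0x8A. */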
+
+ /* set data */
+ if((dtmfe->packet->payload.data = tsk_calloc(sizeof(pay), sizeof(uint8_t)))){
+ memcpy(dtmfe->packet->payload.data, pay, sizeof(pay));
+ dtmfe->packet->payload.size = sizeof(pay);
+ }
+
+ return dtmfe;
+}
+
+int _tdav_session_audio_dtmfe_timercb(const void* arg, tsk_timer_id_t timer_id)
+{
+ tdav_session_audio_dtmfe_t* dtmfe = (tdav_session_audio_dtmfe_t*)arg;
+ int ret;
+
+ if(!dtmfe || !dtmfe->session){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ /* Send the data */
+ TSK_DEBUG_INFO("Sending DTMF event");
+ ret = trtp_manager_send_rtp_2(dtmfe->session->rtp_manager, dtmfe->packet);
+
+ /* Remove and delete the event from the queue */
+ tsk_list_remove_item_by_data(dtmfe->session->dtmf_events, dtmfe);
+
+ return ret;
+}
+
+//=================================================================================================
+// Session Audio Plugin object definition
+//
+/* constructor */
+static tsk_object_t* tdav_session_audio_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_audio_t *session = self;
+ if(session){
+ /* init base: called by tmedia_session_create() */
+ /* init self */
+ tsk_safeobj_init(session);
+ if(!(session->consumer = tmedia_consumer_create(tdav_session_audio_plugin_def_t->type, TMEDIA_SESSION(session)->id))){
+ TSK_DEBUG_ERROR("Failed to create Audio consumer");
+ }
+ if((session->producer = tmedia_producer_create(tdav_session_audio_plugin_def_t->type, TMEDIA_SESSION(session)->id))){
+ tmedia_producer_set_enc_callback(session->producer, tdav_session_audio_producer_enc_cb, self);
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to create Audio producer");
+ }
+ if(!(session->denoise = tmedia_denoise_create())){
+ TSK_DEBUG_WARN("No Audio denoiser found");
+ }
+ else if(session->consumer){// IMPORTANT: This means that the consumer must be a child of the "tdav_consumer_audio_t" object.
+ tdav_consumer_audio_set_denoise(TDAV_CONSUMER_AUDIO(session->consumer), session->denoise);
+ }
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_session_audio_dtor(tsk_object_t * self)
+{
+ tdav_session_audio_t *session = self;
+ if(session){
+
+ // Do it in this order (deinit self first)
+
+ /* Timer manager */
+ if(session->timer.started){
+ if(session->dtmf_events){
+ /* Cancel all events */
+ tsk_list_item_t* item;
+ tsk_list_foreach(item, session->dtmf_events){
+ tsk_timer_mgr_global_cancel(((tdav_session_audio_dtmfe_t*)item->data)->timer_id);
+ }
+ }
+ tsk_timer_mgr_global_stop();
+ }
+ if(session->timer.created){
+ tsk_timer_mgr_global_unref();
+ }
+ /* CleanUp the DTMF events */
+ TSK_OBJECT_SAFE_FREE(session->dtmf_events);
+
+ /* deinit self (rtp manager should be destroyed after the producer) */
+ TSK_OBJECT_SAFE_FREE(session->consumer);
+ TSK_OBJECT_SAFE_FREE(session->producer);
+ TSK_OBJECT_SAFE_FREE(session->rtp_manager);
+ TSK_FREE(session->remote_ip);
+ TSK_FREE(session->local_ip);
+ TSK_OBJECT_SAFE_FREE(session->denoise);
+
+ TSK_OBJECT_SAFE_FREE(session->encoder.codec);
+ TSK_FREE(session->encoder.buffer);
+ TSK_FREE(session->decoder.buffer);
+
+ /* NAT Traversal context */
+ TSK_OBJECT_SAFE_FREE(session->natt_ctx);
+
+ tsk_safeobj_deinit(session);
+
+ /* deinit base */
+ tmedia_session_deinit(self);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_session_audio_def_s =
+{
+ sizeof(tdav_session_audio_t),
+ tdav_session_audio_ctor,
+ tdav_session_audio_dtor,
+ tmedia_session_cmp,
+};
+/* plugin definition*/
+static const tmedia_session_plugin_def_t tdav_session_audio_plugin_def_s =
+{
+ &tdav_session_audio_def_s,
+
+ tmedia_audio,
+ "audio",
+
+ tdav_session_audio_set,
+ tdav_session_audio_prepare,
+ tdav_session_audio_start,
+ tdav_session_audio_pause,
+ tdav_session_audio_stop,
+
+ /* Audio part */
+ {
+ tdav_session_audio_send_dtmf
+ },
+
+ tdav_session_audio_get_lo,
+ tdav_session_audio_set_ro
+};
+const tmedia_session_plugin_def_t *tdav_session_audio_plugin_def_t = &tdav_session_audio_plugin_def_s;
+
+
+
+//=================================================================================================
+// DTMF event object definition
+//
+static tsk_object_t* tdav_session_audio_dtmfe_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if(event){
+ event->timer_id = TSK_INVALID_TIMER_ID;
+ }
+ return self;
+}
+
+static tsk_object_t* tdav_session_audio_dtmfe_dtor(tsk_object_t * self)
+{
+ tdav_session_audio_dtmfe_t *event = self;
+ if(event){
+ TSK_OBJECT_SAFE_FREE(event->packet);
+ }
+
+ return self;
+}
+
+static int tdav_session_audio_dtmfe_cmp(const tsk_object_t *_e1, const tsk_object_t *_e2)
+{
+ const tdav_session_audio_dtmfe_t *e1 = _e1;
+ const tdav_session_audio_dtmfe_t *e2 = _e2;
+
+ return (e1 - e2);
+}
+
+static const tsk_object_def_t tdav_session_audio_dtmfe_def_s =
+{
+ sizeof(tdav_session_audio_dtmfe_t),
+ tdav_session_audio_dtmfe_ctor,
+ tdav_session_audio_dtmfe_dtor,
+ tdav_session_audio_dtmfe_cmp,
+};
+const tsk_object_def_t *tdav_session_audio_dtmfe_def_t = &tdav_session_audio_dtmfe_def_s;
diff --git a/tinyDAV/src/audio/tdav_speex_denoise.c b/tinyDAV/src/audio/tdav_speex_denoise.c
new file mode 100644
index 0000000..fd8c5df
--- /dev/null
+++ b/tinyDAV/src/audio/tdav_speex_denoise.c
@@ -0,0 +1,221 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_speex_denoise.c
+ * @brief Speex Denoiser (Noise suppression, AGC, AEC) Plugin
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/tdav_speex_denoise.h"
+
+#if HAVE_SPEEX_DSP
+
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define ECHO_TAIL 10
+
+int tdav_speex_denoise_open(tmedia_denoise_t* self, uint32_t frame_size, uint32_t sampling_rate, tsk_bool_t denoise, float agc_level, tsk_bool_t aec, tsk_bool_t vad)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ float f;
+ int i;
+
+ if(!denoiser->echo_state){
+ if((denoiser->echo_state = speex_echo_state_init(frame_size, ECHO_TAIL*frame_size))){
+ speex_echo_ctl(denoiser->echo_state, SPEEX_ECHO_SET_SAMPLING_RATE, &sampling_rate);
+ }
+ }
+
+ if(!denoiser->preprocess_state){
+ denoiser->vad_on = vad;
+ denoiser->frame_size = frame_size;
+
+ if((denoiser->preprocess_state = speex_preprocess_state_init(frame_size, sampling_rate))){
+
+ if(denoiser->echo_state){
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_ECHO_STATE, denoiser->echo_state);
+
+ i = -40;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS, &i);
+ i = -15;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS_ACTIVE, &i);
+
+ TSK_FREE(denoiser->echo_output_frame);
+ denoiser->echo_output_frame = tsk_calloc(denoiser->frame_size, sizeof(spx_int16_t));
+ }
+
+ if(denoise){
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ i = -30;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &i);
+ }
+ else{
+ i = 0;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_DENOISE, &i);
+ }
+
+ if(agc_level){
+ i = 1;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC_LEVEL, &agc_level);
+ //speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC_TARGET, &agc_level);
+ i = 30;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC_MAX_GAIN, &i);
+ i = 12;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &i);
+ i = -40;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC_DECREMENT, &i);
+ }
+ else{
+ i = 0, f = 8000.0f;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC, &i);
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_AGC_LEVEL, &f);
+ }
+ i = vad ? 1 : 2;
+ speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_VAD, &i);
+ //i=1;
+ //speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_DEREVERB, &i);
+ //i=1;
+ //speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_DEREVERB_DECAY, &i);
+ //i=1;
+ //speex_preprocess_ctl(denoiser->preprocess_state, SPEEX_PREPROCESS_SET_DEREVERB_LEVEL, &i);
+
+ return 0;
+ }
+ else{
+ TSK_DEBUG_ERROR("Failed to create Speex preprocessor state");
+ return -2;
+ }
+ }
+
+ return 0;
+}
+
+int tdav_speex_denoise_echo_playback(tmedia_denoise_t* self, const void* echo_frame)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ if(denoiser->echo_state){
+ speex_echo_playback(denoiser->echo_state, echo_frame);
+ }
+ return 0;
+}
+
+int tdav_speex_denoise_process(tmedia_denoise_t* self, void* audio_frame, tsk_bool_t* silence_or_noise)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+ int vad;
+
+ if(denoiser->preprocess_state){
+ if(denoiser->echo_state && denoiser->echo_output_frame){
+ speex_echo_capture(denoiser->echo_state, audio_frame, denoiser->echo_output_frame);
+ memcpy(audio_frame, denoiser->echo_output_frame, denoiser->frame_size*sizeof(spx_int16_t));
+ }
+ vad = speex_preprocess_run(denoiser->preprocess_state, audio_frame);
+ if(!vad && denoiser->vad_on){
+ *silence_or_noise = tsk_true;
+ }
+ }
+
+ return 0;
+}
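+
+/* Note: the AEC path assumes the playback side feeds its frames to
+ * tdav_speex_denoise_echo_playback() (see tdav_consumer_audio.c) so that
+ * speex_echo_capture() can subtract the far-end signal before noise suppression,
+ * AGC and VAD run in speex_preprocess_run(); when VAD is enabled and detects no
+ * voice, *silence_or_noise is set so the caller may skip encoding the frame.
+ */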
+
+int tdav_speex_denoise_close(tmedia_denoise_t* self)
+{
+ tdav_speex_denoise_t *denoiser = (tdav_speex_denoise_t *)self;
+
+ if(denoiser->preprocess_state){
+ speex_preprocess_state_destroy(denoiser->preprocess_state);
+ denoiser->preprocess_state = tsk_null;
+ }
+ if(denoiser->echo_state){
+ speex_echo_state_destroy(denoiser->echo_state);
+ denoiser->echo_state = tsk_null;
+ }
+ TSK_FREE(denoiser->echo_output_frame);
+
+ return 0;
+}
+
+
+
+//
+// Speex denoiser Plugin definition
+//
+
+/* constructor */
+static tsk_object_t* tdav_speex_denoise_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* init base */
+ tmedia_denoise_init(TMEDIA_DENOISE(denoise));
+ /* init self */
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_speex_denoise_dtor(tsk_object_t * self)
+{
+ tdav_speex_denoise_t *denoise = self;
+ if(denoise){
+ /* deinit base */
+ tmedia_denoise_deinit(TMEDIA_DENOISE(denoise));
+ /* deinit self */
+ if(denoise->preprocess_state){ // already done by close() ...but who knows?
+ speex_preprocess_state_destroy(denoise->preprocess_state);
+ }
+ if(denoise->echo_state){
+ speex_echo_state_destroy(denoise->echo_state);
+ }
+ TSK_FREE(denoise->echo_output_frame);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_speex_denoise_def_s =
+{
+ sizeof(tdav_speex_denoise_t),
+ tdav_speex_denoise_ctor,
+ tdav_speex_denoise_dtor,
+ tsk_null,
+};
+/* plugin definition*/
+static const tmedia_denoise_plugin_def_t tdav_speex_denoise_plugin_def_s =
+{
+ &tdav_speex_denoise_def_s,
+
+ "Audio Denoiser based on Speex",
+
+ tdav_speex_denoise_open,
+ tdav_speex_denoise_echo_playback,
+ tdav_speex_denoise_process,
+ tdav_speex_denoise_close,
+};
+const tmedia_denoise_plugin_def_t *tdav_speex_denoise_plugin_def_t = &tdav_speex_denoise_plugin_def_s;
+
+
+#endif /* HAVE_SPEEX_DSP */ \ No newline at end of file
diff --git a/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
new file mode 100644
index 0000000..21169bc
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_consumer_waveapi.c
@@ -0,0 +1,406 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_consumer_waveapi.c
+ * @brief Audio Consumer for Win32 and WinCE platforms.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/waveapi/tdav_consumer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_consumer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_CONSUMER_ERROR_BUFF_COUNT];
+
+ waveOutGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+static int free_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_FREE(consumer->hWaveHeaders[index]->lpData);
+ TSK_FREE(consumer->hWaveHeaders[index]);
+
+ return 0;
+}
+
+static int create_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ if(!consumer || index >= sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->hWaveHeaders[index]){
+ free_wavehdr(consumer, index);
+ }
+
+ consumer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+ consumer->hWaveHeaders[index]->lpData = tsk_calloc(1, consumer->bytes_per_notif);
+ consumer->hWaveHeaders[index]->dwBufferLength = consumer->bytes_per_notif;
+ consumer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+ consumer->hWaveHeaders[index]->dwLoops = 0x01;
+ consumer->hWaveHeaders[index]->dwUser = index;
+
+ return 0;
+}
+
+static int write_wavehdr(tdav_consumer_waveapi_t* consumer, tsk_size_t index)
+{
+ MMRESULT result;
+
+ if(!consumer || !consumer->hWaveHeaders[index] || !consumer->hWaveOut){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveOutPrepareHeader(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutPrepareHeader");
+ return -2;
+ }
+
+ result = waveOutWrite(consumer->hWaveOut, consumer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutWrite");
+ return -3;
+ }
+
+ return 0;
+}
+
+static int play_wavehdr(tdav_consumer_waveapi_t* consumer, LPWAVEHDR lpHdr)
+{
+ MMRESULT result;
+ void* data;
+
+ if(!consumer || !lpHdr || !consumer->hWaveOut){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveOutUnprepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutUnprepareHeader");
+ return -2;
+ }
+
+ //
+ // Fill lpHdr->lpData with decoded audio pulled from the jitter buffer,
+ // or with silence when no frame is available
+ //
+ if((data = tdav_consumer_audio_get(TDAV_CONSUMER_AUDIO(consumer)))){
+ memcpy(lpHdr->lpData, data, lpHdr->dwBufferLength);
+ TSK_FREE(data);
+ }
+ else{
+ /* Put silence */
+ memset(lpHdr->lpData, 0, lpHdr->dwBufferLength);
+ }
+
+ if(!consumer->started){
+ return 0;
+ }
+
+ result = waveOutPrepareHeader(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutPrepareHeader");
+ return -3;
+ }
+
+ result = waveOutWrite(consumer->hWaveOut, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutWrite");
+ return -4;
+ }
+
+ return 0;
+}
+
+static void *__playback_thread(void *param)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)param;
+ DWORD dwEvent;
+ tsk_size_t i;
+
+ TSK_DEBUG_INFO("__playback_thread -- START");
+
+ /* raise the priority of the playback thread (SetPriorityClass() expects a process handle, so the thread-level call is used) */
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
+
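+ /* events[0] is signaled by the wave device each time a queued buffer finishes playing
+ (CALLBACK_EVENT passed to waveOutOpen()); events[1] is set by _stop() to end this loop */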
+ for(;;){
+ dwEvent = WaitForMultipleObjects(2, consumer->events, FALSE, INFINITE);
+
+ if (dwEvent == WAIT_OBJECT_0 + 1){
+ break;
+ }
+ else if (dwEvent == WAIT_OBJECT_0){
+ EnterCriticalSection(&consumer->cs);
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ if(consumer->hWaveHeaders[i] && (consumer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+ play_wavehdr(consumer, consumer->hWaveHeaders[i]);
+ }
+ }
+ LeaveCriticalSection(&consumer->cs);
+ }
+ }
+
+ TSK_DEBUG_INFO("__playback_thread -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Consumer Interface ================= */
+int tdav_consumer_waveapi_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ tsk_size_t i;
+
+ if(!consumer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TDAV_CONSUMER_AUDIO(consumer)->channels = codec->plugin->audio.channels;
+ TDAV_CONSUMER_AUDIO(consumer)->rate = codec->plugin->rate;
+ /* codec should have ptime */
+
+
+ /* Format */
+ ZeroMemory(&consumer->wfx, sizeof(WAVEFORMATEX));
+ consumer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+ consumer->wfx.nChannels = TDAV_CONSUMER_AUDIO(consumer)->channels;
+ consumer->wfx.nSamplesPerSec = TDAV_CONSUMER_AUDIO(consumer)->rate;
+ consumer->wfx.wBitsPerSample = TDAV_CONSUMER_AUDIO(consumer)->bits_per_sample;
+ consumer->wfx.nBlockAlign = (consumer->wfx.nChannels * consumer->wfx.wBitsPerSample/8);
+ consumer->wfx.nAvgBytesPerSec = (consumer->wfx.nSamplesPerSec * consumer->wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
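+ /* (e.g. 8000 Hz * 1 channel * 2 bytes/sample = 16000 B/s => 320 bytes for a 20 ms ptime) */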
+ consumer->bytes_per_notif = ((consumer->wfx.nAvgBytesPerSec * TDAV_CONSUMER_AUDIO(consumer)->ptime)/1000);
+
+ /* create buffers */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ create_wavehdr(consumer, i);
+ }
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_start(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ MMRESULT result;
+ tsk_size_t i;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(consumer->started || consumer->hWaveOut){
+ TSK_DEBUG_WARN("Consumer already started");
+ return 0;
+ }
+
+ /* create events */
+ if(!consumer->events[0]){
+ consumer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+ if(!consumer->events[1]){
+ consumer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+
+ /* open */
+ result = waveOutOpen((HWAVEOUT *)&consumer->hWaveOut, WAVE_MAPPER, &consumer->wfx, (DWORD_PTR)consumer->events[0], (DWORD_PTR)consumer, CALLBACK_EVENT);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveOutOpen");
+ return -2;
+ }
+
+ /* start thread */
+ tsk_thread_create(&consumer->tid[0], __playback_thread, consumer);
+
+ /* write */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ write_wavehdr(consumer, i);
+ }
+
+ consumer->started = tsk_true;
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_consume(tmedia_consumer_t* self, void** buffer, tsk_size_t size, const tsk_object_t* proto_hdr)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+
+ if(!consumer || !buffer || !*buffer || !size){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+ /* buffer is already decoded */
+ return tdav_consumer_audio_put(TDAV_CONSUMER_AUDIO(consumer), buffer, size, proto_hdr);
+}
+
+int tdav_consumer_waveapi_pause(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+
+ if(!consumer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+int tdav_consumer_waveapi_stop(tmedia_consumer_t* self)
+{
+ tdav_consumer_waveapi_t* consumer = (tdav_consumer_waveapi_t*)self;
+ MMRESULT result;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!consumer->started){
+ TSK_DEBUG_WARN("Consumer not started");
+ return 0;
+ }
+
+ /* stop thread */
+ if(consumer->tid[0]){
+ SetEvent(consumer->events[1]);
+ tsk_thread_join(&(consumer->tid[0]));
+ }
+
+ /* should be done here */
+ consumer->started = tsk_false;
+
+ if(consumer->hWaveOut && ((result = waveOutReset(consumer->hWaveOut)) != MMSYSERR_NOERROR)){
+ print_last_error(result, "waveOutReset");
+ }
+
+ return 0;
+}
+
+
+//
+// WaveAPI consumer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_consumer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_consumer_waveapi_t *consumer = self;
+ if(consumer){
+ /* init base */
+ tdav_consumer_audio_init(TDAV_CONSUMER_AUDIO(consumer));
+ /* init self */
+ InitializeCriticalSection(&consumer->cs);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_consumer_waveapi_dtor(tsk_object_t * self)
+{
+ tdav_consumer_waveapi_t *consumer = self;
+ if(consumer){
+ tsk_size_t i;
+
+ /* stop */
+ if(consumer->started){
+ tdav_consumer_waveapi_stop(self);
+ }
+
+ /* deinit base */
+ tdav_consumer_audio_deinit(TDAV_CONSUMER_AUDIO(consumer));
+ /* deinit self */
+ for(i = 0; i< sizeof(consumer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ free_wavehdr(consumer, i);
+ }
+ if(consumer->hWaveOut){
+ waveOutClose(consumer->hWaveOut);
+ }
+ if(consumer->events[0]){
+ CloseHandle(consumer->events[0]);
+ }
+ if(consumer->events[1]){
+ CloseHandle(consumer->events[1]);
+ }
+ DeleteCriticalSection(&consumer->cs);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_consumer_waveapi_def_s =
+{
+ sizeof(tdav_consumer_waveapi_t),
+ tdav_consumer_waveapi_ctor,
+ tdav_consumer_waveapi_dtor,
+ tdav_consumer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_consumer_plugin_def_t tdav_consumer_waveapi_plugin_def_s =
+{
+ &tdav_consumer_waveapi_def_s,
+
+ tmedia_audio,
+ "Microsoft WaveAPI consumer",
+
+ tdav_consumer_waveapi_set,
+ tdav_consumer_waveapi_prepare,
+ tdav_consumer_waveapi_start,
+ tdav_consumer_waveapi_consume,
+ tdav_consumer_waveapi_pause,
+ tdav_consumer_waveapi_stop
+};
+const tmedia_consumer_plugin_def_t *tdav_consumer_waveapi_plugin_def_t = &tdav_consumer_waveapi_plugin_def_s;
+
+#endif /* HAVE_WAVE_API */
\ No newline at end of file
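
The consumer above is a textbook double-buffered Wave API playback loop: a small pool of WAVEHDRs is primed with waveOutWrite(), the device signals an event each time one completes, and the playback thread refills and requeues whatever carries WHDR_DONE. The standalone sketch below shows that pattern on its own, outside the plugin machinery; the 8 kHz / 16-bit / mono format, the 20 ms buffer size and the buffer count are illustrative assumptions, and silence stands in where the consumer copies decoded audio from its jitter buffer (link against winmm).

/* Standalone sketch of the event-driven waveOut pattern used by the consumer above.
 * Assumed (not taken from the file): 8000 Hz / 16-bit / mono, 20 ms buffers, 4 headers.
 * Build: cl sketch.c winmm.lib  (or gcc sketch.c -lwinmm) */
#include <windows.h>
#include <mmsystem.h>
#include <string.h>

#define BUF_COUNT 4
#define BUF_BYTES 320 /* 16000 B/s * 20 ms */

int main(void)
{
    HWAVEOUT hWaveOut;
    WAVEHDR headers[BUF_COUNT] = {0};
    char data[BUF_COUNT][BUF_BYTES];
    HANDLE hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    WAVEFORMATEX wfx = {0};
    int i, loops;

    wfx.wFormatTag = WAVE_FORMAT_PCM;
    wfx.nChannels = 1;
    wfx.nSamplesPerSec = 8000;
    wfx.wBitsPerSample = 16;
    wfx.nBlockAlign = (wfx.nChannels * wfx.wBitsPerSample) / 8;
    wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;

    /* the event is signaled each time a queued buffer finishes playing (CALLBACK_EVENT) */
    if (waveOutOpen(&hWaveOut, WAVE_MAPPER, &wfx, (DWORD_PTR)hEvent, 0, CALLBACK_EVENT) != MMSYSERR_NOERROR) {
        return 1;
    }

    /* prime the queue: prepare and write every buffer once */
    for (i = 0; i < BUF_COUNT; i++) {
        memset(data[i], 0, BUF_BYTES); /* silence; the real consumer copies decoded audio here */
        headers[i].lpData = data[i];
        headers[i].dwBufferLength = BUF_BYTES;
        waveOutPrepareHeader(hWaveOut, &headers[i], sizeof(WAVEHDR));
        waveOutWrite(hWaveOut, &headers[i], sizeof(WAVEHDR));
    }

    /* playback loop: refill and requeue every completed (WHDR_DONE) buffer */
    for (loops = 0; loops < 100; loops++) {
        WaitForSingleObject(hEvent, INFINITE);
        for (i = 0; i < BUF_COUNT; i++) {
            if (headers[i].dwFlags & WHDR_DONE) {
                waveOutUnprepareHeader(hWaveOut, &headers[i], sizeof(WAVEHDR));
                memset(data[i], 0, BUF_BYTES); /* refill (silence in this sketch) */
                waveOutPrepareHeader(hWaveOut, &headers[i], sizeof(WAVEHDR));
                waveOutWrite(hWaveOut, &headers[i], sizeof(WAVEHDR));
            }
        }
    }

    waveOutReset(hWaveOut);
    for (i = 0; i < BUF_COUNT; i++) {
        waveOutUnprepareHeader(hWaveOut, &headers[i], sizeof(WAVEHDR));
    }
    waveOutClose(hWaveOut);
    CloseHandle(hEvent);
    return 0;
}
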
diff --git a/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
new file mode 100644
index 0000000..88fed78
--- /dev/null
+++ b/tinyDAV/src/audio/waveapi/tdav_producer_waveapi.c
@@ -0,0 +1,393 @@
+/*
+* Copyright (C) 2009-2010 Mamadou Diop.
+*
+* Contact: Mamadou Diop <diopmamadou(at)doubango.org>
+*
+* This file is part of Open Source Doubango Framework.
+*
+* DOUBANGO is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* DOUBANGO is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with DOUBANGO.
+*
+*/
+
+/**@file tdav_producer_waveapi.c
+ * @brief Audio Producer for Win32 and WinCE platforms.
+ *
+ * @author Mamadou Diop <diopmamadou(at)doubango.org>
+ *
+ * @date Created: Sat Nov 8 16:54:58 2009 mdiop
+ */
+#include "tinydav/audio/waveapi/tdav_producer_waveapi.h"
+
+#if HAVE_WAVE_API
+
+#include "tsk_thread.h"
+#include "tsk_memory.h"
+#include "tsk_debug.h"
+
+#define TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT 0xFF
+
+#define tdav_producer_waveapi_set tsk_null
+
+static void print_last_error(MMRESULT mmrError, const char* func)
+{
+ static char buffer_err[TDAV_WAVEAPI_PRODUCER_ERROR_BUFF_COUNT];
+
+ waveInGetErrorTextA(mmrError, buffer_err, sizeof(buffer_err));
+ TSK_DEBUG_ERROR("%s() error: %s", func, buffer_err);
+}
+
+static int free_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TSK_FREE(producer->hWaveHeaders[index]->lpData);
+ TSK_FREE(producer->hWaveHeaders[index]);
+
+ return 0;
+}
+
+static int create_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ if(!producer || index >= sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR)){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->hWaveHeaders[index]){
+ free_wavehdr(producer, index);
+ }
+
+ producer->hWaveHeaders[index] = tsk_calloc(1, sizeof(WAVEHDR));
+ producer->hWaveHeaders[index]->lpData = tsk_calloc(1, producer->bytes_per_notif);
+ producer->hWaveHeaders[index]->dwBufferLength = producer->bytes_per_notif;
+ producer->hWaveHeaders[index]->dwFlags = WHDR_BEGINLOOP | WHDR_ENDLOOP;
+ producer->hWaveHeaders[index]->dwLoops = 0x01;
+ producer->hWaveHeaders[index]->dwUser = index;
+
+ return 0;
+}
+
+static int add_wavehdr(tdav_producer_waveapi_t* producer, tsk_size_t index)
+{
+ MMRESULT result;
+
+ if(!producer || !producer->hWaveHeaders[index] || !producer->hWaveIn){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ result = waveInPrepareHeader(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInPrepareHeader");
+ return -2;
+ }
+
+ result = waveInAddBuffer(producer->hWaveIn, producer->hWaveHeaders[index], sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInAddBuffer");
+ return -3;
+ }
+
+ return 0;
+}
+
+static int record_wavehdr(tdav_producer_waveapi_t* producer, LPWAVEHDR lpHdr)
+{
+ MMRESULT result;
+
+ if(!producer || !lpHdr || !producer->hWaveIn){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ //
+ // Alert the session that there is new data to send over the network
+ //
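+ /* note: the size passed to the callback is lpHdr->dwBytesRecorded/2, i.e. a count of 16-bit samples rather than bytes */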
+ if(TMEDIA_PRODUCER(producer)->callback){
+ TMEDIA_PRODUCER(producer)->callback(TMEDIA_PRODUCER(producer)->callback_data, lpHdr->lpData, (lpHdr->dwBytesRecorded/2));
+ }
+
+
+ if(!producer->started){
+ return 0;
+ }
+
+ result = waveInUnprepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInUnprepareHeader");
+ return -2;
+ }
+
+ result = waveInPrepareHeader(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInPrepareHeader");
+ return -3;
+ }
+
+ result = waveInAddBuffer(producer->hWaveIn, lpHdr, sizeof(WAVEHDR));
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInAddBuffer");
+ return -4;
+ }
+
+ return 0;
+}
+
+static void *__record_thread(void *param)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)param;
+ DWORD dwEvent;
+ tsk_size_t i;
+
+ TSK_DEBUG_INFO("__record_thread -- START");
+
+ /* raise the priority of the record thread (SetPriorityClass() expects a process handle, so the thread-level call is used) */
+ SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
+
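+ /* events[0] is signaled by the wave device each time a capture buffer has been filled
+ (CALLBACK_EVENT passed to waveInOpen()); events[1] is set by _stop() to end this loop */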
+ for(;;){
+ dwEvent = WaitForMultipleObjects(2, producer->events, FALSE, INFINITE);
+
+ if (dwEvent == WAIT_OBJECT_0 + 1){
+ break;
+ }
+ else if (dwEvent == WAIT_OBJECT_0){
+ EnterCriticalSection(&producer->cs);
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ if(producer->hWaveHeaders[i] && (producer->hWaveHeaders[i]->dwFlags & WHDR_DONE)){
+ record_wavehdr(producer, producer->hWaveHeaders[i]);
+ }
+ }
+ LeaveCriticalSection(&producer->cs);
+ }
+ }
+
+ TSK_DEBUG_INFO("__record_thread() -- STOP");
+
+
+ return tsk_null;
+}
+
+
+
+
+
+
+
+
+/* ============ Media Producer Interface ================= */
+int tdav_producer_waveapi_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ tsk_size_t i;
+
+ if(!producer || !codec || !codec->plugin){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ TDAV_PRODUCER_AUDIO(producer)->channels = codec->plugin->audio.channels;
+ TDAV_PRODUCER_AUDIO(producer)->rate = codec->plugin->rate;
+ /* codec should have ptime */
+
+
+ /* Format */
+ ZeroMemory(&producer->wfx, sizeof(WAVEFORMATEX));
+ producer->wfx.wFormatTag = WAVE_FORMAT_PCM;
+ producer->wfx.nChannels = TDAV_PRODUCER_AUDIO(producer)->channels;
+ producer->wfx.nSamplesPerSec = TDAV_PRODUCER_AUDIO(producer)->rate;
+ producer->wfx.wBitsPerSample = TDAV_PRODUCER_AUDIO(producer)->bits_per_sample;
+ producer->wfx.nBlockAlign = (producer->wfx.nChannels * producer->wfx.wBitsPerSample/8);
+ producer->wfx.nAvgBytesPerSec = (producer->wfx.nSamplesPerSec * producer->wfx.nBlockAlign);
+
+ /* Average bytes (count) for each notification */
+ producer->bytes_per_notif = ((producer->wfx.nAvgBytesPerSec * TDAV_PRODUCER_AUDIO(producer)->ptime)/1000);
+
+ /* create buffers */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ create_wavehdr(producer, i);
+ }
+
+ return 0;
+}
+
+int tdav_producer_waveapi_start(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ MMRESULT result;
+ tsk_size_t i;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(producer->started || producer->hWaveIn){
+ TSK_DEBUG_WARN("Producer already started");
+ return 0;
+ }
+
+ /* create events */
+ if(!producer->events[0]){
+ producer->events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+ if(!producer->events[1]){
+ producer->events[1] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ }
+
+ /* open */
+ result = waveInOpen((HWAVEIN *)&producer->hWaveIn, /*WAVE_MAPPER*/0, &producer->wfx, (DWORD_PTR)producer->events[0], (DWORD_PTR)producer, CALLBACK_EVENT);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInOpen");
+ return -2;
+ }
+
+ /* start */
+ result = waveInStart(producer->hWaveIn);
+ if(result != MMSYSERR_NOERROR){
+ print_last_error(result, "waveInStart");
+ return -2;
+ }
+
+ /* start thread */
+ tsk_thread_create(&producer->tid[0], __record_thread, producer);
+
+ /* add the recording buffers to the capture queue */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ add_wavehdr(producer, i);
+ }
+
+ producer->started = tsk_true;
+
+ return 0;
+}
+
+int tdav_producer_waveapi_pause(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+
+ if(!producer){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+int tdav_producer_waveapi_stop(tmedia_producer_t* self)
+{
+ tdav_producer_waveapi_t* producer = (tdav_producer_waveapi_t*)self;
+ MMRESULT result;
+
+ if(!self){
+ TSK_DEBUG_ERROR("Invalid parameter");
+ return -1;
+ }
+
+ if(!producer->started){
+ TSK_DEBUG_WARN("Producer not started");
+ return 0;
+ }
+
+ /* stop thread */
+ if(producer->tid[0]){
+ SetEvent(producer->events[1]);
+ tsk_thread_join(&(producer->tid[0]));
+ }
+
+ /* should be done here */
+ producer->started = tsk_false;
+
+ if(producer->hWaveIn && (((result = waveInReset(producer->hWaveIn)) != MMSYSERR_NOERROR) || ((result = waveInClose(producer->hWaveIn)) != MMSYSERR_NOERROR))){
+ print_last_error(result, "waveInReset/waveInClose");
+ }
+ /* the handle has just been closed: clear it so the destructor does not close it a second time */
+ producer->hWaveIn = tsk_null;
+
+ return 0;
+}
+
+
+//
+// WaveAPI producer object definition
+//
+/* constructor */
+static tsk_object_t* tdav_producer_waveapi_ctor(tsk_object_t * self, va_list * app)
+{
+ tdav_producer_waveapi_t *producer = self;
+ if(producer){
+ /* init base */
+ tdav_producer_audio_init(TDAV_PRODUCER_AUDIO(producer));
+ /* init self */
+ InitializeCriticalSection(&producer->cs);
+ }
+ return self;
+}
+/* destructor */
+static tsk_object_t* tdav_producer_waveapi_dtor(tsk_object_t * self)
+{
+ tdav_producer_waveapi_t *producer = self;
+ if(producer){
+ tsk_size_t i;
+
+ /* stop */
+ if(producer->started){
+ tdav_producer_waveapi_stop(self);
+ }
+
+ /* deinit base */
+ tdav_producer_audio_deinit(TDAV_PRODUCER_AUDIO(producer));
+ /* deinit self */
+ for(i = 0; i< sizeof(producer->hWaveHeaders)/sizeof(LPWAVEHDR); i++){
+ free_wavehdr(producer, i);
+ }
+ if(producer->hWaveIn){
+ waveInClose(producer->hWaveIn);
+ }
+ if(producer->events[0]){
+ CloseHandle(producer->events[0]);
+ }
+ if(producer->events[1]){
+ CloseHandle(producer->events[1]);
+ }
+ DeleteCriticalSection(&producer->cs);
+ }
+
+ return self;
+}
+/* object definition */
+static const tsk_object_def_t tdav_producer_waveapi_def_s =
+{
+ sizeof(tdav_producer_waveapi_t),
+ tdav_producer_waveapi_ctor,
+ tdav_producer_waveapi_dtor,
+ tdav_producer_audio_cmp,
+};
+/* plugin definition*/
+static const tmedia_producer_plugin_def_t tdav_producer_waveapi_plugin_def_s =
+{
+ &tdav_producer_waveapi_def_s,
+
+ tmedia_audio,
+ "Microsoft WaveAPI producer",
+
+ tdav_producer_waveapi_set,
+ tdav_producer_waveapi_prepare,
+ tdav_producer_waveapi_start,
+ tdav_producer_waveapi_pause,
+ tdav_producer_waveapi_stop
+};
+const tmedia_producer_plugin_def_t *tdav_producer_waveapi_plugin_def_t = &tdav_producer_waveapi_plugin_def_s;
+
+#endif /* HAVE_WAVE_API */
\ No newline at end of file
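
Neither file exports a public function beyond its plugin definition pointer (tdav_consumer_waveapi_plugin_def_t, tdav_producer_waveapi_plugin_def_t); the media layer reaches the WaveAPI backend only through these tables. A minimal registration sketch is given below; the tmedia_*_plugin_register() helpers and the include paths are assumed for illustration and may not match the framework's exact entry points.

/* Hypothetical registration sketch: the tmedia_consumer_plugin_register() and
 * tmedia_producer_plugin_register() helpers are assumed here for illustration. */
#include "tinymedia/tmedia_consumer.h"
#include "tinymedia/tmedia_producer.h"
#include "tinydav/audio/waveapi/tdav_consumer_waveapi.h"
#include "tinydav/audio/waveapi/tdav_producer_waveapi.h"

int register_waveapi_plugins(void)
{
#if HAVE_WAVE_API
    /* make the WaveAPI playback backend visible to the media layer */
    if (tmedia_consumer_plugin_register(tdav_consumer_waveapi_plugin_def_t) != 0) {
        return -1;
    }
    /* make the WaveAPI capture backend visible to the media layer */
    if (tmedia_producer_plugin_register(tdav_producer_waveapi_plugin_def_t) != 0) {
        return -2;
    }
#endif
    return 0;
}
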