|
Hi Steven.
Thank you very much for your quick reply, really appreciate it. I am also really sorry about my late response, been swamped with other work lately.
I think I just know too little on this. I did review msdn and various sources in order to study your code and I think I understand it. But, maybe because it is based on a dialogbox, I just can't seem to incorporate it into what I already have.
"All" I need are the VALUES of the microphone level (MaxLeft and MaxRight in your code) and the spectrum (pMags[idx]). I have been tweaking and studying your code (and the relevant API calls) for weeks and I still can't get past the HMIXER bit, not to mention obtaining the values. I really don't want to give this up but I really don't have that much time to spare for just this component. If it is not too much to ask, could you give me some pointers on how I can approach this?
Thanks again.
Regards,
Jeff
-- modified at 6:42 Friday 15th June, 2007
These are the parts of Steven's code I'm using:
// Audio Scope Written By Steven De Toni
#include "stdafx.h"
#include "aud.h"
#include <stdio.h>
#include <stdarg.h>
#include <windows.h>
#include <commctrl.h>
#include <mmsystem.h>
#include "FFTransform.h"
//#include "resource.h"
// --- Global Variables ---
HINSTANCE HInst = NULL;
void CALLBACK waveInProc(HWAVEIN hWaveIn, UINT uMsg, DWORD dwInstance,
DWORD dwParam1, DWORD dwParam2);
// Formats a printf-style message, appends a newline, and sends it to the
// debugger output window via OutputDebugString().
//
// pszFormat: printf-style format string; variadic arguments follow.
//
// FIX: the original used vsprintf()+strcat(), which can overflow buf[] on
// long messages. vsnprintf() bounds the write; two bytes are reserved so the
// trailing "\n" always fits.
void OutputDebugMsg (const char* pszFormat, ...)
{
    char buf[1024];
    va_list arglist;

    va_start(arglist, pszFormat);
    // vsnprintf always NUL-terminates within the given size (C99).
    vsnprintf(buf, sizeof(buf) - 2, pszFormat, arglist);
    va_end(arglist);

    strcat(buf, "\n");
    OutputDebugString(buf);
}
// Formats a printf-style message, appends a newline, and shows it in a
// modal message box (MB_ICONSTOP, no caption).
//
// pszFormat: printf-style format string; variadic arguments follow.
//
// FIX: the original used vsprintf()+strcat(), which can overflow buf[] on
// long messages. vsnprintf() bounds the write; two bytes are reserved so the
// trailing "\n" always fits.
void OutputMsgBox (const char* pszFormat, ...)
{
    char buf[1024];
    va_list arglist;

    va_start(arglist, pszFormat);
    // vsnprintf always NUL-terminates within the given size (C99).
    vsnprintf(buf, sizeof(buf) - 2, pszFormat, arglist);
    va_end(arglist);

    strcat(buf, "\n");
    MessageBox (NULL, buf, "", MB_ICONSTOP);
}
/*********************** PrintWaveErrorMsg() **************************
 * Displays two message boxes for a failed wave-audio call: first the
 * caller's context string together with the raw error code, then the
 * system's textual description of that code as returned by
 * mciGetErrorString() -- or the raw code again when no description is
 * available.
 *************************************************************************/
void PrintWaveErrorMsg(DWORD err, TCHAR * str)
{
    char msgText[128];

    OutputMsgBox ("ERROR 0x%08X: %s\r\n", err, str);

    if (!mciGetErrorString(err, msgText, sizeof(msgText)))
    {
        // No system description for this code; echo the number instead.
        OutputMsgBox ("0x%08X returned!\r\n", err);
    }
    else
    {
        OutputMsgBox ("%s\r\n", msgText);
    }
}
/*
What a long winded way to select recording input!
MIXERLINE_COMPONENTTYPE_SRC_ANALOG
MIXERLINE_COMPONENTTYPE_SRC_AUXILIARY
MIXERLINE_COMPONENTTYPE_SRC_COMPACTDISC
MIXERLINE_COMPONENTTYPE_SRC_DIGITAL
MIXERLINE_COMPONENTTYPE_SRC_LINE
MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE
MIXERLINE_COMPONENTTYPE_SRC_PCSPEAKER
MIXERLINE_COMPONENTTYPE_SRC_SYNTHESIZER
MIXERLINE_COMPONENTTYPE_SRC_TELEPHONE
MIXERLINE_COMPONENTTYPE_SRC_UNDEFINED
MIXERLINE_COMPONENTTYPE_SRC_WAVEOUT
*/
// Selects 'inDev' (a MIXERLINE_COMPONENTTYPE_SRC_* value, e.g.
// MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE) as the active recording source on
// the default mixer device (ordinal id 0). Returns TRUE on success, FALSE on
// any failure.
//
// The mixer-API dance: open the mixer, locate the WAVEIN destination line,
// find its MIXER (multi-select) control -- or, failing that, its MUX
// (single-select) control -- enumerate the control's source list to find the
// index of inDev, then read/modify/write the boolean selection values.
BOOL SetInputDevice (unsigned int inDev)
{
HMIXER hMixer = NULL;
// Index of inDev within the selection control's item list; -1 = not found.
int inDevIdx = -1;
// Open the first (default) mixer device by ordinal id 0.
if ((mixerOpen(&hMixer, 0, 0, NULL, MIXER_OBJECTF_MIXER)) != MMSYSERR_NOERROR)
{
return FALSE;
}
// get dwLineID
// Ask for the line whose *destination* is the wave-input (recording) bus.
MIXERLINE mxl;
mxl.cbStruct = sizeof(MIXERLINE);
mxl.dwComponentType = MIXERLINE_COMPONENTTYPE_DST_WAVEIN;
if (mixerGetLineInfo((HMIXEROBJ)hMixer, &mxl, MIXER_OBJECTF_HMIXER | MIXER_GETLINEINFOF_COMPONENTTYPE) != MMSYSERR_NOERROR)
{
mixerClose (hMixer);
return FALSE;
}
// get dwControlID
// Try a MIXER-type control first (allows several sources active at once).
MIXERCONTROL mxc;
MIXERLINECONTROLS mxlc;
DWORD dwControlType = MIXERCONTROL_CONTROLTYPE_MIXER;
mxlc.cbStruct = sizeof(MIXERLINECONTROLS);
mxlc.dwLineID = mxl.dwLineID;
mxlc.dwControlType = dwControlType;
mxlc.cControls = 0;
mxlc.cbmxctrl = sizeof(MIXERCONTROL);
mxlc.pamxctrl = &mxc;
if (mixerGetLineControls((HMIXEROBJ)hMixer, &mxlc, MIXER_OBJECTF_HMIXER | MIXER_GETLINECONTROLSF_ONEBYTYPE) != MMSYSERR_NOERROR)
{
// no mixer, try MUX
// A MUX control restricts the selection to one source line at a time.
dwControlType = MIXERCONTROL_CONTROLTYPE_MUX;
mxlc.cbStruct = sizeof(MIXERLINECONTROLS);
mxlc.dwLineID = mxl.dwLineID;
mxlc.dwControlType = dwControlType;
mxlc.cControls = 0;
mxlc.cbmxctrl = sizeof(MIXERCONTROL);
mxlc.pamxctrl = &mxc;
if (mixerGetLineControls((HMIXEROBJ)hMixer, &mxlc, MIXER_OBJECTF_HMIXER | MIXER_GETLINECONTROLSF_ONEBYTYPE) != MMSYSERR_NOERROR)
{
mixerClose (hMixer);
return FALSE;
}
}
// cMultipleItems is a DWORD, so this is effectively an == 0 check:
// a selection control with no listed sources is unusable.
if (mxc.cMultipleItems <= 0)
{
mixerClose (hMixer);
return FALSE;
}
// get the index of the inDevice from available controls
MIXERCONTROLDETAILS_LISTTEXT* pmxcdSelectText = new MIXERCONTROLDETAILS_LISTTEXT[mxc.cMultipleItems];
if (pmxcdSelectText != NULL)
{
MIXERCONTROLDETAILS mxcd;
mxcd.cbStruct = sizeof(MIXERCONTROLDETAILS);
mxcd.dwControlID = mxc.dwControlID;
mxcd.cChannels = 1;
mxcd.cMultipleItems = mxc.cMultipleItems;
mxcd.cbDetails = sizeof(MIXERCONTROLDETAILS_LISTTEXT);
mxcd.paDetails = pmxcdSelectText;
// The LISTTEXT query fills dwParam1 of each entry with the source line id.
if (mixerGetControlDetails ((HMIXEROBJ)hMixer, &mxcd, MIXER_OBJECTF_HMIXER | MIXER_GETCONTROLDETAILSF_LISTTEXT) == MMSYSERR_NOERROR)
{
// determine which controls the inputDevice source line
DWORD dwi;
for (dwi = 0; dwi < mxc.cMultipleItems; dwi++)
{
// get the line information
MIXERLINE mxl;
mxl.cbStruct = sizeof(MIXERLINE);
mxl.dwLineID = pmxcdSelectText[dwi].dwParam1;
// Match on the component type (e.g. SRC_MICROPHONE), not the line id.
if (mixerGetLineInfo ((HMIXEROBJ)hMixer, &mxl, MIXER_OBJECTF_HMIXER | MIXER_GETLINEINFOF_LINEID) == MMSYSERR_NOERROR && mxl.dwComponentType == inDev)
{
// found, dwi is the index.
// NOTE(review): with the break commented out, the LAST matching item
// wins if several sources share this component type -- confirm intended.
inDevIdx = dwi;
// break;
}
}
}
delete []pmxcdSelectText;
}
if (inDevIdx < 0)
{
mixerClose (hMixer);
return FALSE;
}
// get all the values first
MIXERCONTROLDETAILS_BOOLEAN* pmxcdSelectValue = new MIXERCONTROLDETAILS_BOOLEAN[mxc.cMultipleItems];
if (pmxcdSelectValue != NULL)
{
MIXERCONTROLDETAILS mxcd;
mxcd.cbStruct = sizeof(MIXERCONTROLDETAILS);
mxcd.dwControlID = mxc.dwControlID;
mxcd.cChannels = 1;
mxcd.cMultipleItems = mxc.cMultipleItems;
mxcd.cbDetails = sizeof(MIXERCONTROLDETAILS_BOOLEAN);
mxcd.paDetails = pmxcdSelectValue;
if (mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_OBJECTF_HMIXER | MIXER_GETCONTROLDETAILSF_VALUE) == MMSYSERR_NOERROR)
{
// ASSERT(m_dwControlType == MIXERCONTROL_CONTROLTYPE_MIXER || m_dwControlType == MIXERCONTROL_CONTROLTYPE_MUX);
// MUX restricts the line selection to one source line at a time.
if (dwControlType == MIXERCONTROL_CONTROLTYPE_MUX)
{
// Exclusive selection: clear every entry before enabling ours.
ZeroMemory(pmxcdSelectValue, mxc.cMultipleItems * sizeof(MIXERCONTROLDETAILS_BOOLEAN));
}
// Turn on this input device
pmxcdSelectValue[inDevIdx].fValue = 0x1;
mxcd.cbStruct = sizeof(MIXERCONTROLDETAILS);
mxcd.dwControlID = mxc.dwControlID;
mxcd.cChannels = 1;
mxcd.cMultipleItems = mxc.cMultipleItems;
mxcd.cbDetails = sizeof(MIXERCONTROLDETAILS_BOOLEAN);
mxcd.paDetails = pmxcdSelectValue;
if (mixerSetControlDetails ((HMIXEROBJ)hMixer, &mxcd, MIXER_OBJECTF_HMIXER | MIXER_SETCONTROLDETAILSF_VALUE) != MMSYSERR_NOERROR)
{
delete []pmxcdSelectValue;
mixerClose (hMixer);
return FALSE;
}
}
delete []pmxcdSelectValue;
}
// NOTE(review): if reading the current values failed above, we still fall
// through and return TRUE -- presumably intentional best-effort; confirm.
mixerClose (hMixer);
return TRUE;
}
// ***********************************************************************************
#define SPECSCOPEWIDTH 10 // spectrum-scope bar width; not referenced in the code visible here
// Module-level recording state shared between StartRec/StopRec and waveInProc.
static BOOL inRecord = FALSE;            // TRUE between StartRec() and StopRec(); gates buffer requeueing
static HWAVEIN waveInHandle = NULL;      // handle to the open wave-input device
static WAVEHDR waveHeader[2];            // double-buffer headers queued to the driver
static WAVEFORMATEX waveFormat;          // capture format requested in StartRec (16-bit, 44.1 kHz, stereo)
static FFTransform* pFFTrans = NULL;     // FFT sized per channel (see StartRec)
static FFTransform* pFFTransStereo = NULL; // FFT sized for the interleaved stereo block
static SampleIter* pSampleIter = NULL;   // sample iterator helper -- not used in the code visible here
static RECT drawArea;                    // scope drawing rectangle -- unused in this excerpt
static HDC HdblDC = NULL;                // double-buffer device context -- unused in this excerpt
static HBITMAP HdblOldBitmap = NULL;     // bitmap previously selected into HdblDC -- unused in this excerpt
static int rps = 1;                      // presumably refreshes-per-second -- TODO confirm; unused here
///////////////////////////////////////////////////////////////////////////////
// Caud: wrapper exposing StartRec()/StopRec(); trivial constructor, no state to set up.
Caud::Caud(){}
// Stops capture, returns all queued buffers, releases the wave-input device,
// frees the shared sample buffer, and tears down the DSP helper objects.
void Caud::StopRec(){
    // FIX: clear the flag *before* resetting so waveInProc stops requeueing
    // buffers while we are tearing the device down (the original cleared it
    // only after waveInClose).
    inRecord = FALSE;

    // Stop recording and tell the driver to unqueue/return all of our WAVEHDRs.
    // The driver will return any partially filled buffer that was currently
    // recording. Because we use waveInReset() instead of waveInStop(),
    // all of the other WAVEHDRs will also be returned via MM_WIM_DONE too
    waveInReset(waveInHandle);
    waveInUnprepareHeader (waveInHandle, &waveHeader[0], sizeof(WAVEHDR));
    waveInUnprepareHeader (waveInHandle, &waveHeader[1], sizeof(WAVEHDR));
    waveInClose(waveInHandle);

    // FIX: with MEM_RELEASE, VirtualFree requires dwSize == 0 (MSDN); passing
    // the buffer length (as the original did) makes the call fail and leaks
    // both capture buffers. waveHeader[0].lpData is the base of the single
    // allocation that backs both buffers.
    VirtualFree(waveHeader[0].lpData, 0, MEM_RELEASE);

    SetInputDevice (MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE);

    // delete on NULL is a no-op, so the guards are unnecessary.
    delete pFFTrans;
    pFFTrans = NULL;
    delete pFFTransStereo;
    pFFTransStereo = NULL;
    delete pSampleIter;
    pSampleIter = NULL;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
void Caud::StartRec() {
//InitCommonControls ();
while (1) {
// Set the recording input ...
//if (SetInputDevice (MIXERLINE_COMPONENTTYPE_SRC_WAVEOUT) == FALSE)
//{
if (SetInputDevice (MIXERLINE_COMPONENTTYPE_SRC_ANALOG) == FALSE)
{
if (SetInputDevice (MIXERLINE_COMPONENTTYPE_SRC_LAST) == FALSE)
{
OutputMsgBox ("Error unable to set recording WAVEOUT device");
break;
}
}
//}
MMRESULT err;
// Clear out both of our WAVEHDRs. At the very least, waveInPrepareHeader() expects the dwFlags field to be cleared
ZeroMemory(&waveHeader[0], sizeof(WAVEHDR) * 2);
// Initialize the WAVEFORMATEX for 16-bit, 44KHz, stereo. That's what I want to record
waveFormat.wFormatTag = WAVE_FORMAT_PCM;
waveFormat.nChannels = 2;
waveFormat.nSamplesPerSec = 44100;
waveFormat.wBitsPerSample = 16;
waveFormat.nBlockAlign = waveFormat.nChannels * (waveFormat.wBitsPerSample/8);
waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
waveFormat.cbSize = 0;
// Open the default WAVE In Device, specifying my callback. Note that if this device doesn't
// support 16-bit, 44KHz, stereo recording, then Windows will attempt to open another device
// that does. So don't make any assumptions about the name of the device that opens. After
// waveInOpen returns the handle, use waveInGetID to fetch its ID, and then waveInGetDevCaps
// to retrieve the actual name
if ((err = waveInOpen(&waveInHandle, WAVE_MAPPER, &waveFormat, (DWORD)waveInProc, 0, CALLBACK_FUNCTION )) != 0)
{
PrintWaveErrorMsg (err, "Can't open WAVE In Device!");
break;
}
// Allocate, prepare, and queue two buffers that the driver can use to record blocks of audio data.
// (ie, We're using double-buffering. You can use more buffers if you'd like, and you should do that
// if you suspect that you may lag the driver when you're writing a buffer to disk and are too slow
// to requeue it with waveInAddBuffer. With more buffers, you can take your time requeueing
// each).
//
// I'll allocate 2 buffers large enough to hold 2 seconds worth of waveform data at 44Khz. NOTE:
// Just to make it easy, I'll use 1 call to VirtualAlloc to allocate both buffers, but you can
// use 2 separate calls since the buffers do NOT need to be contiguous. You should do the latter if
// using many, large buffers
waveHeader[1].dwBufferLength = waveHeader[0].dwBufferLength = 512;
if (!(waveHeader[0].lpData = (char*)VirtualAlloc(0, (waveHeader[0].dwBufferLength << 1), MEM_COMMIT, PAGE_READWRITE)))
{
OutputMsgBox ("ERROR: Can't allocate memory for WAVE buffer!\n");
waveInClose(waveInHandle);
break;
}
// Fill in WAVEHDR fields for buffer starting address. We've already filled in the size fields above */
waveHeader[1].lpData = waveHeader[0].lpData + waveHeader[0].dwBufferLength;
// Leave other WAVEHDR fields at 0
// Prepare the 2 WAVEHDR's
if ((err = waveInPrepareHeader(waveInHandle, &waveHeader[0], sizeof(WAVEHDR))))
{
waveInClose(waveInHandle);
OutputMsgBox ("Error preparing WAVEHDR 1! -- %08X\n", err);
VirtualFree (waveHeader[0].lpData, (waveHeader[0].dwBufferLength << 1), MEM_RELEASE);
break;
}
if ((err = waveInPrepareHeader(waveInHandle, &waveHeader[1], sizeof(WAVEHDR))))
{
waveInClose(waveInHandle);
OutputMsgBox ("Error preparing WAVEHDR 2! -- %08X\n", err);
VirtualFree (waveHeader[0].lpData, (waveHeader[0].dwBufferLength << 1), MEM_RELEASE);
break;
}
// Queue first WAVEHDR (recording hasn't started yet)
if ((err = waveInAddBuffer(waveInHandle, &waveHeader[0], sizeof(WAVEHDR))))
{
waveInClose(waveInHandle);
OutputMsgBox ("Error queueing WAVEHDR 1! -- %08X\n", err);
VirtualFree (waveHeader[0].lpData, (waveHeader[0].dwBufferLength << 1), MEM_RELEASE);
break;
}
// Queue second WAVEHDR
if ((err = waveInAddBuffer(waveInHandle, &waveHeader[1], sizeof(WAVEHDR))))
{
waveInClose(waveInHandle);
OutputMsgBox ("Error queueing WAVEHDR 2! -- %08X\n", err);
VirtualFree (waveHeader[0].lpData, (waveHeader[0].dwBufferLength << 1), MEM_RELEASE);
break;
}
// Start recording
if ((err = waveInStart(waveInHandle)))
{
OutputMsgBox ("Error starting record! -- %08X\n", err);
waveInClose(waveInHandle);
VirtualFree (waveHeader[0].lpData, (waveHeader[0].dwBufferLength << 1), MEM_RELEASE);
break;
}
// prepare the DSP processing objects
pFFTrans = new FFTransform (waveFormat.nSamplesPerSec, waveHeader[0].dwBufferLength/(waveFormat.nChannels * (waveFormat.wBitsPerSample/8)));
pFFTransStereo = new FFTransform (waveFormat.nSamplesPerSec, waveHeader[0].dwBufferLength/(waveFormat.wBitsPerSample/8));
pSampleIter = new SampleIter();
inRecord = TRUE;
break;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Wave-input callback (CALLBACK_FUNCTION form of waveInOpen). On each filled
// buffer (WIM_DATA) it computes decaying peak levels for the left and right
// channels into the static maxLeftSave/maxRightSave, then requeues the buffer
// while recording is active.
void CALLBACK waveInProc(HWAVEIN hWaveIn, UINT uMsg, DWORD dwInstance,
DWORD dwParam1, DWORD dwParam2) {
    // Peak levels carried over from the previous buffer (for the decay effect)
    // and a running count of buffers delivered.
    static int maxLeftSave = 0;
    static int maxRightSave = 0;
    static int refresh = 0;

    if (uMsg != WIM_DATA)
        return;

    WAVEHDR* pFilled = (WAVEHDR*) dwParam1;
    refresh++;

    if (pFilled->dwBytesRecorded > 0)
    {
        short int* pData = (short int*) pFilled->lpData;
        int total = pFilled->dwBytesRecorded / sizeof(short int);
        int peakL = 0;
        int peakR = 0;

        // Interleaved stereo: even indices are left, odd are right.
        // abs()>>6 maps the 16-bit magnitude onto a smaller meter range.
        for (int i = 0; i < total; i += 2)
        {
            int l = abs(pData[i]) >> 6;
            int r = abs(pData[i + 1]) >> 6;
            if (l > peakL)
            {
                peakL = l;
            }
            if (r > peakR)
            {
                peakR = r;
            }
        }

        // When the new peak is lower than the previous one, fall back by 4
        // per buffer (clamped at 0) instead of dropping instantly.
        if (peakL < maxLeftSave)
        {
            peakL = maxLeftSave - 4;
            if (peakL < 0)
                peakL = 0;
        }
        if (peakR < maxRightSave)
        {
            peakR = maxRightSave - 4;
            if (peakR < 0)
                peakR = 0;
        }
        maxLeftSave = peakL;
        maxRightSave = peakR;
    }

    if (inRecord) // Are we still recording?
    {
        // Requeue so the driver can fill this buffer again; it is already
        // prepared, so no second waveInPrepareHeader() is needed.
        waveInAddBuffer(waveInHandle, pFilled, sizeof(WAVEHDR));
    }
}
////////////////////////////////////////////////////////////////
I am only aiming to get the input value (MaxLeft and MaxRight) now. First I call StartRec() at the beginning and StopRec() at the end of my own program. I am probably doing something stupid here. Anyway, any help would be MUCH MUCH appreciated. Thank you lots.
Jeff
|
|
|
|
|
Hi Steven!
Why can not download the source file form http://www.codeproject.com/audio/oscilloscope/oscilloscope_src.zip ?
Could you send me to the e-mail "david_hsieh_173@hotmail.com"
Thanks!
|
|
|
|
|
Hi Steven!
Cool project! And very useful as well. I have looked at your source code and am trying to utilize it for my own project (hope you don't mind) but I am having trouble reorganizing it. I was wondering if you or indeed any expert who knows this code and C++ well could help me?
What I am trying to do is basically to incorporate the FFT and oscilloscope (i.e. the amplitude) results into a C++ project (with a Windows Form) I have already implemented. Unfortunately, I am pretty incompetent and unable to transfer the code. I am a bit stuck on the all-important BOOL CALLBACK Scope call. From what I understand, this deals with the messages sent by the dialog box and then proceeds to perform a specific function (e.g. start recording, stop recording, process audio messages). As I am not using a dialog box for my current project, I am going with the approach of call these functions myself within my main loop.
Where I am really lost is the part where it processes the audio messages ("Process Audio Messages"). I haven't got a clue on what to substitute the "WAVEHDR* pHeader = (WAVEHDR*) lParam" for. From my limited knowledge (and some research), I assume that the lParam is a part of the data that is sent by the dialog box but since I am not using a dialog box and want to continually process the audio messages, I am not sure how to change pHearder and its attributes, such as dwBytesRecorded.
I would really appreciate any help or advice. Thank you very very much.
Regards,
Jeff
|
|
|
|
|
jeffhcliu wrote: Where I am really lost is the part where it processes the audio messages ("Process Audio Messages"). I haven't got a clue on what to substitute the "WAVEHDR* pHeader = (WAVEHDR*) lParam" for. From my limited knowledge (and some research), I assume that the lParam is a part of the data that is sent by the dialog box but since I am not using a dialog box and want to continually process the audio messages, I am not sure how to change pHearder and its attributes, such as dwBytesRecorded.
I think you need to study the code more closely or in conjunction with the WIN32 documentation from the microsoft developer web site. You cannot sample the audio stream constantly as you do not have access to the sounnd hardware, only the sound drivers/API. So to process sound information, we define the sample information we need, 16bit, 22khz, 1k buffer etc. The sound driver will then DELIVER that information back to the program via what is known as a callback. This example program uses the Windows message system as a notification to process the sound buffer. You can also implement a function callback if your program is not associated within Windows messages.
Look at the code here:
http://sourceforge.net/project/showfiles.php?group_id=61001&package_id=226483
for a more complete example. How to implement thread processing, and function callback.
The lParam is really just a 32bit variable we use to pass information around within Win32 API calls. It could be any 32bit value in relation to a program, its commonly used to store a pointer/address value the points to additional program data. In your case the pointer to a WAVEHDR structure, the lParam value is type case from a LPARAM type (32bit long) to a WAVEHDR pointer so the program understands data/structure at that memory location.
|
|
|
|
|
Hi Steven.
Thank you so much for your quick reply. Much appreciated. I guess I do superficially understand what the code does and have studied it several times but the problem (I think) I am having is that I can't see a way of utilizing it in my program (due to my lack of knowledge on programming).
I have my own program which does a bunch of things within an infinite loop. I would like to add the capabilities of your program to mine, i.e. being able to sample from a microphone and doing FFT analysis on it. In other words, as soon as I clicked on a button, I would want start the sound sampling (along with all the other stuff within that infinite loop) and perform FFT, etc. Then when I click on another button, I will break that loop as well as stopping the sampling.
Now, as your program creates a dialog box and is based on Windows Messages, I am not really sure how to translate that with what I know. In the high likelihood of sounding extremely stupid, I have changed the WM_COMMAND case into StartRecord and StopRecord "void" subroutines which will be called when certain buttons (on a Windows Form) are clicked. Now, I am searching for a way of translating the MM_WIM_DATA part into a subroutine (ProcessAudio), or a function callback as you have kindly suggested.
But I really don't know how to do this. I think I am facing two main problems at this point (based on my approach). If I don't use Windows Messages, what am I going to pass as the lParam value to ProcessAudio (as the MM_WIM_DATA case uses it heavily)? The other problem is that I do not want ProcessAudio to be processing all the time (i.e. "in an infinite loop of its own", in my words) as there will be other stuff that will need to be processed within my own main infinite loop, of which ProcessAudio will be only a part of. I have read up on multi-threading as well as callbacks but I still don't know how to proceed.
I hope you understand what I have just said and what I am trying to do and please forgive me if I am doing something very stupid as I really don't have that much programming experience.
A million thanks.
Regards,
Jeff
|
|
|
|
|
hi
do you have any idea how to change the program so that it works for a sound card (like realtek HD) which has different mixers for input and output?
(actually it has 3:
0: Realtek HD Audio Output
1: Realtek HD Digital Input
2: Realtek HD Audio Input
)
so the mixerOpen with the device id "0" doesn't work anymore
i am getting the default wavein handle, then open the mixer using the waveinhandle as a parameter
with the mixerGetLineInfo i am getting 4 sources with the wavein destination
( 0: CD Volume;
1: Mic Volume
2: Line Volume
3: Stereo Mix
after that i get stuck
the next calls to mixerGetLineControls return error code 1025 which is MIXERR_INVALCONTROL
does it mean that i don't have any mix/mux controls. why do i need those?
i am sure i didn't understand well how the mixer api works.
if you have any sugestions it would be very nice
also another thing i noticed on a sound card with a singe mixer is that the spectrum analizer depends on the sound volume. if i change the music volume the spectrum analizer changes with it.
i was wondering what winamp and other applications use, because they seem to not care about the sound volume. the spectrum analizer remains always the same even if the sound is muted.
thanks in advance
|
|
|
|
|
|
hi Steven,
Really good project. Is there any way you could send me flowchart/pseudo code for this project? Thanks in advance.
With Regards,
G.Vidyadhar
|
|
|
|
|
|
Thanks for sharing. Here I would like to ask a question: how do I get the amplitude and frequency from the microphone?
i just want to differentiate the noisy level of input data only,and get the digital value.
thank
|
|
|
|
|
Hello Steve,
with your code I learned a lot about mixer, sound and so on. Thanks too much.
1) What I learned is then used in my application which has to "monitor" a microphone. Unfortunatelly I do not understand why my application stops to receive MM_WIM_DATA message, right after a mouse click has been done on a CListCtrl control or a CButton. This doesn't happens even if I do any other operation inside my application: for example start recording. I do not receive any error messages (each function has the result controlled) and I do not receive any other MM_WIM_XXX like MM_WIM_CLOSE.
When this happens, I need to call again the "waveInAddBuffer" and the "waveInStart": then every thing starts to work again (until a next mouse click).
Do you have any suggestion?
2) I need to put a "parametric equalizer" inside my application. Info collected until now, tell me that I need FFT transformation to have that equalizer? Any help or any suggestion.
Thanks again for your work and in advance for your help.
GianniGP
|
|
|
|
|
Hello Steve, too busy?
Well I found that there is no solution on the MM_WIM_DATA problem. The only thing that works, is to open the waveInOpen() using a CALLBACK_FUNCTION and the relative waveInProc(). In this way never happens that the buffer stops to get back to the application.
Still I'm asking you the second question of my message: the parametric equalizer.
Thanks and regards.
GianniGP
|
|
|
|
|
Hi Gianni.
Could you please help me in converting this code for use in a Windows Form already implemented? Thank you.
As explained in my earlier post, I have already implemented a Visual C++ Form with an infinite loop that does a bunch of things. Now I would like to retrieve microphone input amplitude as well as FFT data utilizing Steven's code.
Do you have any idea on how to do this? I am just too stupid to change this dialog box and message scheme into the functions I want. I have already done quite a bit of research and reasonably understand how Steven's program works, using dialogbox and various messages, but still can't get my head around converting them. How did you convert them into callback FUNCTIONS and what should I use for parameters such as (WAVEHDR*) lParam?
One thing to note is that I do not want ONLY this program/features to be run as there will also be other things that have to be processed as well in the infinite loop.
Any help or direction will be much appreciated. Thank you very much.
Regards,
Jeff
|
|
|
|
|
Hi,
I want to convert your code in C#. But i have not worked with C++ so can you please guide me, what to do and where to start using your code?
Help me out please,
Thanks,
Mahesh Devjibhai Dhola
|
|
|
|
|
Sorry, I have no idea, have not touch C#...
|
|
|
|
|
you will definitely get some good converter.. freeware. over the internet
Regards,
Tauhid Shaikh
|
|
|
|
|
i can help if you are still interested - i see its pretty dated so maybe already found solution?
-O
|
|
|
|
|
Hello,
thanks for this article/source code.
I noticed the following:
When you calculate the spectrum values using FFT, you calculate for left, right and both. There you have a call
float* pMags = pFFTrans->fftGetMagArray();
where in fact it should be
float* pMags = pFFT->fftGetMagArray(); to ensure that you use pFFTTransStereo for the third case.
I hope my assumption is correct.
|
|
|
|
|
I get 2 compiling error's in VC 2003.
d:\Projects\Audio Scope\FFTransform.h(105): error C2668: 'log' : ambiguous call to overloaded function
d:\Projects\Audio Scope\FFTransform.h(134): error C2666: 'fmod' : 3 overloads have similar conversions
c:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\include\math.h(606): could be 'long double fmod(long double,long double)'
c:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\include\math.h(558): or 'float fmod(float,float)'
c:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\include\math.h(191): or 'double fmod(double,double)'
while trying to match the argument list '(float, double)'
[code]
int nu = (int)(log(sampleCount)/log(2));
and scidx = (int)( (fmod(arg, HALFPI) / HALFPI) * (float)sinCosRng);
[/code]
thankx en regards
-- modified at 13:34 Tuesday 7th November, 2006
|
|
|
|
|
im having the same problem, cant compile with VS2003.net
plsae help
|
|
|
|
|
Hola,
las declaraciones de log en VC++ 2003 son en float
He realizado la siguiente modificación y funciona.
double t_SampleCount = sampleCount;
int nu = (int)(log(t_SampleCount)/log(2.000));
Saludos
Disculpen, no escribo muy bien el ingles.
|
|
|
|
|
Im having some problems even compiling it. Im using VS2005. Anyway Im trying to make somthing similar but a bit more simple that your program(audio oscilloscope, with one drop down menu to be able to swap from 16bit to 24, and a start and stop button). To be honest i just cant crack how to get the oscilloscop drawing going.
Gussing i should be reading from one buffer while drawing what i read on the 1st one before and so on.
but trying to write is as a _view class (just the design of the window is like that).
More or less i just wana know how you did the line drawing.
|
|
|
|
|
The line drawing method uses a technique called double buffering to allow for flicker free updates. The program creates a memory device context (which is microsoft speak for a graphics device handle), and creates memory bitmap area (microsoft speak for drawing area). You then need to associate the memory bitmap to the memory device context. Once this is done, then the line drawing algortihm comes in.
Drawing starts from left to right, with a origin of (bitmapHeight/2) and increments in the x direction based upon the sample number, and y direction based upn sample resolution.
So if your graphic output is 320 pixels wide, and you sample is 1024, we divide that by 2 for stereo and then by 2 again for 16 bit values, you end up with 128 data items per channel for the original 1024 byte sample.
so bitmapWidth / sampleChannel number = increment value
or
320 / 128 = 2.5
Each plot of a sample if incremented 2.5 in the xDirection, the yDirection is calculated the same way.
If the height of the bitmap is 200 pixels, and the sample data is signed, from -32768 to +32767, we need to scale the data to fit the bitmap.
for bitmapHeight / sampleResultion = reductionFactor or increaseFactor
or
200 / (0xffff / 2) = 0.0061036087586785687
so given a yPlot Start Position of 100 (bitmapHeight / 2) and a data item of -12000
its calculated the plot position as 100 + (-12000 * 0.0061036087586785687) = 26
and if the next sample value is 12450 (as audio data goes from positive to negative to positive etc ) its calculated as
100 + (12450 * 0.0061036087586785687) = 175
and after each plot we increment in the x direction x = x + 2.5; (given that x is a float value). Thus you can plot your data to any bitmap size.
so some pseudo code would be:
short data[128]; // if given data in an array or some other object
float xPos = 0;
float xInc = 320.0f / 128.0f; // = 2.5 (note: integer division 320/128 would truncate to 2)
float yStart = 200 / 2;
float yReduce = 200.0f / (0xffff / 2.0f); // = 0.0061036087586785687 (integer division here would yield 0)
bool starting = true;
for (idx = 0; idx < 128; idx++)
{
float yPos = yStart + (data[idx] * yReduce);
if (starting != false)
{
moveTo (xPos, yPos);
starting = false;
}
else
{
lineTo (xPos, yPos);
}
xPos = xPos + xInc;
}
// Once drawing is completed, then block imagae transfer the memory bitmap into video memory
BitBlt (screenDC, 0, 0, 320, 200, myDc, 0, 0, SRCCOPY);
Hope the above helps?
|
|
|
|
|
thats of great help thank you
|
|
|
|
|
Hello from France!
Thank you for this code, I did not know how to work with the sound card.
I have a PC on which everything works well and the other one (more recent) I get the error message: "Error unable to set recording WAVEOUT device". have you an idea on this problem? I brought modifications to the program: I transformed it into compressor: It allows to regulate the level when we listen to the TV or when we listen to some music on a PC. Would you be interested in my code and I could put it on sourceforge: With your permission.;)
Irlande78
|
|
|
|
|