I couldn't work out how to write my own sound player, so I chose to use ChiliTomatoNoodle's framework. How can I get this code to play longer wave files?

The problem I'm running into is that I have a 180-second wave file, but only the first second or so of it plays. What do I need to do to get it to play for longer?

Sound.h:

#pragma once 

#include <windows.h> 
#include <mmsystem.h> 
#include <dsound.h> 
#include <stdio.h> 

class DSound; 

class Sound 
{ 
    friend DSound; 
public: 
    Sound(const Sound& base); 
    Sound(); 
    ~Sound(); 
    const Sound& operator=(const Sound& rhs); 
    void Play(int attenuation = DSBVOLUME_MAX); 
private: 
    Sound(IDirectSoundBuffer8* pSecondaryBuffer); 
private: 
    IDirectSoundBuffer8* pBuffer; 
}; 

class DSound 
{ 
private: 
    struct WaveHeaderType 
    { 
     char chunkId[4]; 
     unsigned long chunkSize; 
     char format[4]; 
     char subChunkId[4]; 
     unsigned long subChunkSize; 
     unsigned short audioFormat; 
     unsigned short numChannels; 
     unsigned long sampleRate; 
     unsigned long bytesPerSecond; 
     unsigned short blockAlign; 
     unsigned short bitsPerSample; 
     char dataChunkId[4]; 
     unsigned long dataSize; 
    }; 
public: 
    DSound(HWND hWnd); 
    ~DSound(); 
    Sound CreateSound(char* wavFileName); 
private: 
    DSound(); 
private:  
    IDirectSound8* pDirectSound; 
    IDirectSoundBuffer* pPrimaryBuffer; 
}; 

Sound.cpp:

#include "Sound.h" 
#include <assert.h> 

#pragma comment(lib, "dsound.lib") 
#pragma comment(lib, "dxguid.lib") 
#pragma comment(lib, "winmm.lib") 

DSound::DSound(HWND hWnd) 
: pDirectSound(NULL), 
    pPrimaryBuffer(NULL) 
{ 
    HRESULT result; 
    DSBUFFERDESC bufferDesc; 
    WAVEFORMATEX waveFormat; 

    result = DirectSoundCreate8(NULL,&pDirectSound,NULL); 
    assert(!FAILED(result)); 

    // Set the cooperative level to priority so the format of the primary sound buffer can be modified. 
    result = pDirectSound->SetCooperativeLevel(hWnd,DSSCL_PRIORITY); 
    assert(!FAILED(result)); 

    // Setup the primary buffer description. 
    bufferDesc.dwSize = sizeof(DSBUFFERDESC); 
    bufferDesc.dwFlags = DSBCAPS_PRIMARYBUFFER | DSBCAPS_CTRLVOLUME; 
    bufferDesc.dwBufferBytes = 0; 
    bufferDesc.dwReserved = 0; 
    bufferDesc.lpwfxFormat = NULL; 
    bufferDesc.guid3DAlgorithm = GUID_NULL; 

    // Get control of the primary sound buffer on the default sound device. 
    result = pDirectSound->CreateSoundBuffer(&bufferDesc,&pPrimaryBuffer,NULL); 
    assert(!FAILED(result)); 

    // Setup the format of the primary sound bufffer. 
    // In this case it is a .WAV file recorded at 44,100 samples per second in 16-bit stereo (cd audio format). 
    waveFormat.wFormatTag = WAVE_FORMAT_PCM; 
    waveFormat.nSamplesPerSec = 44100; 
    waveFormat.wBitsPerSample = 16; 
    waveFormat.nChannels = 2; 
    waveFormat.nBlockAlign = (waveFormat.wBitsPerSample/8) * waveFormat.nChannels; 
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; 
    waveFormat.cbSize = 0; 

    // Set the primary buffer to be the wave format specified. 
    result = pPrimaryBuffer->SetFormat(&waveFormat); 
    assert(!FAILED(result)); 
} 

DSound::~DSound() 
{ 
    if(pPrimaryBuffer) 
    { 
     pPrimaryBuffer->Release(); 
     pPrimaryBuffer = NULL; 
    } 
    if(pDirectSound) 
    { 
     pDirectSound->Release(); 
     pDirectSound = NULL; 
    } 
} 

// must be 44.1k 16bit Stereo PCM Wave 
Sound DSound::CreateSound(char* wavFileName) 
{ 
    int error; 
    FILE* filePtr; 
    unsigned int count; 
    WaveHeaderType waveFileHeader; 
    WAVEFORMATEX waveFormat; 
    DSBUFFERDESC bufferDesc; 
    HRESULT result; 
    IDirectSoundBuffer* tempBuffer; 
    IDirectSoundBuffer8* pSecondaryBuffer; 
    unsigned char* waveData; 
    unsigned char* bufferPtr; 
    unsigned long bufferSize; 


    // Open the wave file in binary. 
    error = fopen_s(&filePtr,wavFileName,"rb"); 
    assert(error == 0); 

    // Read in the wave file header. 
    count = fread(&waveFileHeader,sizeof(waveFileHeader),1,filePtr); 
    assert(count == 1); 

    // Check that the chunk ID is the RIFF format. 
    assert((waveFileHeader.chunkId[0] == 'R') && 
      (waveFileHeader.chunkId[1] == 'I') && 
      (waveFileHeader.chunkId[2] == 'F') && 
      (waveFileHeader.chunkId[3] == 'F')); 

    // Check that the file format is the WAVE format. 
    assert((waveFileHeader.format[0] == 'W') && 
      (waveFileHeader.format[1] == 'A') && 
      (waveFileHeader.format[2] == 'V') && 
      (waveFileHeader.format[3] == 'E')); 

    // Check that the sub chunk ID is the fmt format. 
    assert((waveFileHeader.subChunkId[0] == 'f') && 
      (waveFileHeader.subChunkId[1] == 'm') && 
      (waveFileHeader.subChunkId[2] == 't') && 
      (waveFileHeader.subChunkId[3] == ' ')); 

    // Check that the audio format is WAVE_FORMAT_PCM. 
    assert(waveFileHeader.audioFormat == WAVE_FORMAT_PCM); 

    // Check that the wave file was recorded in stereo format. 
    assert(waveFileHeader.numChannels == 2); 

    // Check that the wave file was recorded at a sample rate of 44.1 KHz. 
    assert(waveFileHeader.sampleRate == 44100); 

    // Ensure that the wave file was recorded in 16 bit format. 
    assert(waveFileHeader.bitsPerSample == 16); 

    // Check for the data chunk header. 
    assert((waveFileHeader.dataChunkId[0] == 'd') && 
      (waveFileHeader.dataChunkId[1] == 'a') && 
      (waveFileHeader.dataChunkId[2] == 't') && 
      (waveFileHeader.dataChunkId[3] == 'a')); 

    // Set the wave format of secondary buffer that this wave file will be loaded onto. 
    waveFormat.wFormatTag = WAVE_FORMAT_PCM; 
    waveFormat.nSamplesPerSec = 44100; 
    waveFormat.wBitsPerSample = 16; 
    waveFormat.nChannels = 2; 
    waveFormat.nBlockAlign = (waveFormat.wBitsPerSample/8) * waveFormat.nChannels; 
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; 
    waveFormat.cbSize = 0; 

    // Set the buffer description of the secondary sound buffer that the wave file will be loaded onto. 
    bufferDesc.dwSize = sizeof(DSBUFFERDESC); 
    bufferDesc.dwFlags = DSBCAPS_CTRLVOLUME; 
    bufferDesc.dwBufferBytes = waveFileHeader.dataSize; 
    bufferDesc.dwReserved = 0; 
    bufferDesc.lpwfxFormat = &waveFormat; 
    bufferDesc.guid3DAlgorithm = GUID_NULL; 

    // Create a temporary sound buffer with the specific buffer settings. 
    result = pDirectSound->CreateSoundBuffer(&bufferDesc,&tempBuffer,NULL); 
    assert(!FAILED(result)); 

    // Test the buffer format against the direct sound 8 interface and create the secondary buffer. 
    result = tempBuffer->QueryInterface(IID_IDirectSoundBuffer8,(void**)&pSecondaryBuffer); 
    assert(!FAILED(result)); 

    // Release the temporary buffer. 
    tempBuffer->Release(); 
    tempBuffer = 0; 

    // Move to the beginning of the wave data which starts at the end of the data chunk header. 
    fseek(filePtr,sizeof(WaveHeaderType),SEEK_SET); 

    // Create a temporary buffer to hold the wave file data. 
    waveData = new unsigned char[ waveFileHeader.dataSize ]; 
    assert(waveData); 

    // Read in the wave file data into the newly created buffer. 
    count = fread(waveData,1,waveFileHeader.dataSize,filePtr); 
    assert(count == waveFileHeader.dataSize); 

    // Close the file once done reading. 
    error = fclose(filePtr); 
    assert(error == 0); 

    // Lock the secondary buffer to write wave data into it. 
    result = pSecondaryBuffer->Lock(0,waveFileHeader.dataSize,(void**)&bufferPtr,(DWORD*)&bufferSize,NULL,0,0); 
    assert(!FAILED(result)); 

    // Copy the wave data into the buffer. 
    memcpy(bufferPtr,waveData,waveFileHeader.dataSize); 

    // Unlock the secondary buffer after the data has been written to it. 
    result = pSecondaryBuffer->Unlock((void*)bufferPtr,bufferSize,NULL,0); 
    assert(!FAILED(result)); 

    // Release the wave data since it was copied into the secondary buffer. 
    delete [] waveData; 
    waveData = NULL; 

    return Sound(pSecondaryBuffer); 
} 

Sound::Sound(IDirectSoundBuffer8* pSecondaryBuffer) 
: pBuffer(pSecondaryBuffer) 
{} 

Sound::Sound() 
: pBuffer(NULL) 
{} 

Sound::Sound(const Sound& base) 
: pBuffer(base.pBuffer) 
{ 
    pBuffer->AddRef(); 
} 

Sound::~Sound() 
{ 
    if(pBuffer) 
    { 
     pBuffer->Release(); 
     pBuffer = NULL; 
    } 
} 

const Sound& Sound::operator=(const Sound& rhs) 
{ 
    this->~Sound(); 
    pBuffer = rhs.pBuffer; 
    pBuffer->AddRef(); 
    return rhs; 
} 

// attn is the attenuation value in units of 0.01 dB (larger 
// negative numbers give a quieter sound, 0 for full volume) 
void Sound::Play(int attn) 
{ 
    attn = max(attn,DSBVOLUME_MIN); 
    HRESULT result; 

    // check that we have a valid buffer 
    assert(pBuffer != NULL); 

    // Set position at the beginning of the sound buffer. 
    result = pBuffer->SetCurrentPosition(0); 
    assert(!FAILED(result)); 

    // Set volume of the buffer to attn 
    result = pBuffer->SetVolume(attn); 
    assert(!FAILED(result)); 

    // Play the contents of the secondary sound buffer. 
    result = pBuffer->Play(0,0,0); 
    assert(!FAILED(result)); 
} 

Thanks in advance for your help!


Do you mean playing the sound in a loop? Then set the looping flag in the `Play` call.
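(For reference, looping with DirectSound just means passing DSBPLAY_LOOPING as the flags argument of IDirectSoundBuffer8::Play; in the framework above that would roughly amount to the change sketched here, not the poster's original code:

// Inside Sound::Play: DSBPLAY_LOOPING restarts the buffer from the
// beginning when it reaches the end, instead of stopping.
result = pBuffer->Play(0, 0, DSBPLAY_LOOPING);
)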

Answers


Assuming you have a .wav file, somewhere in your code you load the sound file along these lines:

yourSound = audio.CreateSound("fileName.WAV"); //Capslock on WAV 
yourSound.Play(); 

And along with that, the sound is declared in your header:

Sound yourSound; 

Now, since you have probably already done this and it still doesn't work, the problem likely lies with your file, because playing a sound of 160+ seconds shouldn't be an issue.

Are you using a .WAV file for the sound? If so, did you happen to convert it (since it is probably background music)? If you did, try converting it with this converter:

Converter MP3 -> WAV

Please let me know whether this works!


Your buffer is probably only big enough to hold the first second or so of audio. What you need to do is set up "notifications". See the documentation.

A notification is a way to have the audio hardware notify you when playback reaches a specific point in the buffer.

The idea is to set up one notification in the middle of the buffer and one at the end. When you get the notification from the middle, you fill the first half of the buffer with more data; when you get the notification from the end, you fill the second half with more data. That way you can stream an unlimited amount of data through a single buffer.
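
Here is a minimal sketch of what setting up those notifications could look like. It assumes the secondary buffer was created with the DSBCAPS_CTRLPOSITIONNOTIFY flag and is played with DSBPLAY_LOOPING; the names SetupStreamingNotifications, pStreamBuffer and bufferBytes are only illustrative, they are not part of the framework above:

#include <windows.h> 
#include <dsound.h> 

// Register two notification events on a looping secondary buffer: 
// one at the middle of the buffer and one at the end. 
bool SetupStreamingNotifications(IDirectSoundBuffer8* pStreamBuffer, 
           DWORD bufferBytes, 
           HANDLE events[2]) 
{ 
    IDirectSoundNotify8* pNotify = NULL; 
    if(FAILED(pStreamBuffer->QueryInterface(IID_IDirectSoundNotify8,(void**)&pNotify))) 
    { 
     return false; 
    } 

    // Auto-reset events that the mixer will signal for us. 
    events[0] = CreateEvent(NULL,FALSE,FALSE,NULL); 
    events[1] = CreateEvent(NULL,FALSE,FALSE,NULL); 

    DSBPOSITIONNOTIFY positions[2]; 
    positions[0].dwOffset  = bufferBytes/2 - 1; // middle of the buffer 
    positions[0].hEventNotify = events[0]; 
    positions[1].dwOffset  = bufferBytes - 1; // end of the buffer 
    positions[1].hEventNotify = events[1]; 

    // Must be called while the buffer is not playing. 
    HRESULT result = pNotify->SetNotificationPositions(2,positions); 
    pNotify->Release(); 
    return SUCCEEDED(result); 
} 

Your streaming loop would then WaitForMultipleObjects on the two events: when the middle event fires, Lock/Unlock the first half of the buffer and copy in the next chunk of wave data from the file; when the end event fires, do the same for the second half.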