Sound in a large game is fairly involved: fading sounds in and out, explosion effects, tracking playback progress, and so on. Let's take a look at what goes into a sound-effect management system.
1. Basic sound operations:
#pragma once
//========================================================================
// Audio.h : Defines a simple sound system. Reference:
// http://blog.csdn.net/wanghexu09008126/article/details/39462377
//========================================================================
#include "ResCache.h"
class SoundResourceExtraData;
// SoundType Description
//
// This is an enum that represents the different kinds of sound data
// streams the sound system can handle.
// Supported sound types
enum SoundType
{
SOUND_TYPE_FIRST,
SOUND_TYPE_MP3 = SOUND_TYPE_FIRST,
SOUND_TYPE_WAVE,
SOUND_TYPE_MIDI,
SOUND_TYPE_OGG,
// handy trick: this entry equals the total number of supported sound types
SOUND_TYPE_COUNT,
SOUND_TYPE_UNKNOWN,
};
// File extensions corresponding to the sound types above
extern char *gSoundExtentions[];
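// For example (illustrative, not from the original listing): because the array is kept in
// the same order as the enum, gSoundExtentions[SOUND_TYPE_OGG] yields ".ogg", and
// SOUND_TYPE_COUNT doubles as the number of entries in the array.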
// class IAudioBuffer
//
// The interface class that defines the public API for audio buffers.
// An audio buffer maps to one instance of a sound being played,
// which isn't the sound data. Two different sounds can be played from
// the same source data - such as two explosions in two different places.
// This interface class defines the public API for an audio buffer. An audio buffer maps to
// one instance of a sound being played, not to the sound data itself.
// Two different sounds can play at the same time even though they share the same source
// data - for example, two explosions in two different places can use one source sound.
// The concrete implementation is split between the AudioBuffer base class and the
// DirectSound-specific DirectSoundAudioBuffer shown in part 4.
class IAudioBuffer
{
public:
virtual ~IAudioBuffer() { }
virtual void *VGet()=0;
virtual shared_ptr<ResHandle> VGetResource()=0;
virtual bool VOnRestore()=0;
virtual bool VPlay(int volume, bool looping)=0;
virtual bool VPause()=0;
virtual bool VStop()=0;
virtual bool VResume()=0;
virtual bool VTogglePause()=0;
virtual bool VIsPlaying()=0;
virtual bool VIsLooping() const=0;
virtual void VSetVolume(int volume)=0;
virtual void VSetPosition(unsigned long newPosition)=0;
virtual int VGetVolume() const=0;
virtual float VGetProgress()=0;
};
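// A quick illustration of the "one buffer per playing instance" idea (a sketch, not part
// of the original listing; explosionHandle stands for an already-loaded
// shared_ptr<ResHandle>):
//
//     IAudioBuffer *left  = g_pAudio->VInitAudioBuffer(explosionHandle);
//     IAudioBuffer *right = g_pAudio->VInitAudioBuffer(explosionHandle);
//     left->VPlay(100, false);   // two independent playing instances...
//     right->VPlay(60, false);   // ...sharing the same raw sound data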
// class AudioBuffer
//
// Implements IAudiobuffer interface using a smart pointer to SoundResource.
class AudioBuffer : public IAudioBuffer
{
public:
virtual shared_ptr<ResHandle> VGetResource() { return m_Resource; }
virtual bool VIsLooping() const { return m_isLooping; }
virtual int VGetVolume() const { return m_Volume; }
protected:
AudioBuffer(shared_ptr<ResHandle> resource)
{
m_Resource = resource;
m_isPaused = false;
m_isLooping = false;
m_Volume = 0;
}
// disable public construction; see the note on constructor access levels below
shared_ptr<ResHandle> m_Resource;
bool m_isPaused; // Is the sound paused
bool m_isLooping; // Is the sound looping
int m_Volume; //the volume
};
// class IAudio
//
// This interface class describes the public interface for
// a game's audio system.
class IAudio
{
public:
virtual bool VActive()=0;
virtual IAudioBuffer *VInitAudioBuffer(shared_ptr<ResHandle> handle)=0;
virtual void VReleaseAudioBuffer(IAudioBuffer* audioBuffer)=0;
virtual void VStopAllSounds()=0;
virtual void VPauseAllSounds()=0;
virtual void VResumeAllSounds()=0;
virtual bool VInitialize(HWND hWnd)=0; // HWND is the game window handle
virtual void VShutdown()=0;
};
// class Audio
//
// Implements IAudio interface - but not all the way - this is
// still a base class. See class DirectSoundAudio.
//
class Audio : public IAudio
{
public:
Audio();
~Audio() { VShutdown(); }
virtual void VStopAllSounds();
virtual void VPauseAllSounds();
virtual void VResumeAllSounds();
virtual void VShutdown();
static bool HasSoundCard(void);
bool IsPaused() { return m_AllPaused; }
protected:
typedef std::list<IAudioBuffer *> AudioBufferList;
AudioBufferList m_AllSamples; // List of all currently allocated audio buffers
bool m_AllPaused; // Has the sound system been paused?
bool m_Initialized; // Has the sound system been initialized?
};
extern Audio *g_pAudio;
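To see how these pieces fit together at startup, here is a rough sketch (not from the original post): DirectSoundAudio is the concrete implementation shown in part 4, and hWnd stands for the game's window handle.
g_pAudio = GCC_NEW DirectSoundAudio();
if (!g_pAudio->VInitialize(hWnd))   // creates and configures the DirectSound device
{
    // no usable sound device - the game can still run, just silently
}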
/**
About protected and private constructors
I. Constructors
1. Protected
Declaring a constructor protected means objects of the class can no longer be constructed outside the class; only objects of a derived class can be constructed externally, for example:
class Base
{
protected:
Base() {}
...
};
class Derived : public Base
{
public:
Derived() {}
...
};
Base b; //error
Derived d; //ok
2. Private
Declaring a constructor private means that not only can objects of the class not be constructed externally, objects of derived classes cannot be constructed externally either; the object defined inside the class can only be reached through a static member function. The singleton pattern is the classic use of a private constructor:
class CLog
{
private:
CLog() {};
public:
~CLog() {};
public:
static CLog* GetInstance()
{
if (NULL == m_sopLogInstance)
{
CLock oInstanceLock;
oInstanceLock.Lock();
if (NULL == m_sopLogInstance)
{
m_sopLogInstance = new CLog();
}
oInstanceLock.Unlock();
}
return m_sopLogInstance;
}
...
private:
static CLog *m_sopLogInstance;
...
};
CLog *log = CLog::GetInstance();
3. Copy constructor and assignment operator
Declaring the copy constructor and assignment operator private forbids copying objects of the class from the outside. A typical case is a class that holds a lock as a member: disabling copy construction and assignment prevents a copied object from taking the lock in a way that interferes with the original object's locking, which is rarely what the programmer intends.
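For example, a minimal sketch (the class name is illustrative):
class NonCopyable
{
private:
NonCopyable(const NonCopyable&);            // declared but never implemented
NonCopyable& operator=(const NonCopyable&); // (or "= delete" in C++11)
public:
NonCopyable() {}
};
NonCopyable a;
NonCopyable b(a); //error: the copy constructor is private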
*/
//========================================================================
// Audio.cpp : Defines a simple sound system.
//========================================================================
#include "GameCodeStd.h"
#include <mmsystem.h>
#include <mmreg.h>
#include "Audio.h"
#include "SoundResource.h"
// Reference: http://www.cnitblog.com/ictfly/archive/2011/06/27/74454.html
#pragma comment( lib, "dsound" )
// Globals
Audio *g_pAudio = NULL;
char *gSoundExtentions[] = { ".mp3", ".wav", ".midi", ".ogg" };
// Construction/Destruction
Audio::Audio():
m_Initialized(false),
m_AllPaused(false)
{
}
//
// Audio::VShutdown
//
void Audio::VShutdown()
{
AudioBufferList::iterator i=m_AllSamples.begin();
while (i!=m_AllSamples.end())
{
IAudioBuffer *audioBuffer = (*i);
audioBuffer->VStop(); // implemented by the subclass
m_AllSamples.pop_front();
i = m_AllSamples.begin(); // pop_front invalidated the old iterator, so re-fetch it
}
}
//
// Audio::VPauseAllSounds
// Pause all active sounds, including music
//
void Audio::VPauseAllSounds()
{
AudioBufferList::iterator i;
AudioBufferList::iterator end;
for(i=m_AllSamples.begin(), end=m_AllSamples.end(); i!=end; ++i)
{
IAudioBuffer *audioBuffer = (*i);
audioBuffer->VPause(); // implemented by the subclass
}
m_AllPaused=true;
}
//
// Audio::VResumeAllSounds
//
void Audio::VResumeAllSounds()
{
AudioBufferList::iterator i;
AudioBufferList::iterator end;
for(i=m_AllSamples.begin(), end=m_AllSamples.end(); i!=end; ++i)
{
IAudioBuffer *audioBuffer = (*i);
audioBuffer->VResume(); // implemented by the subclass
}
m_AllPaused=false;
}
//
// Audio::VStopAllSounds
//
void Audio::VStopAllSounds()
{
IAudioBuffer *audioBuffer = NULL;
AudioBufferList::iterator i;
AudioBufferList::iterator end;
for(i=m_AllSamples.begin(), end=m_AllSamples.end(); i!=end; ++i)
{
audioBuffer = (*i);
audioBuffer->VStop();
}
m_AllPaused=false;
}
//
// Audio::HasSoundCard
//
// A bit of an anachronism in name - but it simply returns true if the sound system is active.
//
bool Audio::HasSoundCard(void)
{
return (g_pAudio && g_pAudio->VActive());
}
2. Sound resource types and resource loading
#pragma once
#include <mmsystem.h>
#include "ResCache.h"
// Note: DWORD, WORD, BYTE and WAVEFORMATEX are normally supplied by the Windows
// headers included above; they are reproduced here only for reference.
typedef unsigned int DWORD;
typedef unsigned short WORD;
typedef unsigned char BYTE;
typedef struct {
WORD wFormatTag;
WORD nChannels;
DWORD nSamplesPerSec;
DWORD nAvgBytesPerSec;
WORD nBlockAlign;
WORD wBitsPerSample;
WORD cbSize;
} WAVEFORMATEX;
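// For a concrete feel of how these fields relate (illustrative values, not part of the
// original listing), 16-bit stereo PCM at 44.1 kHz would be described as:
//   wFormatTag      = 1 (WAVE_FORMAT_PCM)
//   nChannels       = 2
//   nSamplesPerSec  = 44100
//   wBitsPerSample  = 16
//   nBlockAlign     = nChannels * wBitsPerSample / 8 = 4 bytes per sample frame
//   nAvgBytesPerSec = nSamplesPerSec * nBlockAlign   = 176400 bytes per second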
//
// class SoundResourceExtraData
//
class IResourceExtraData
{
public:
virtual std::string VToString()=0;
};
class SoundResourceExtraData : public IResourceExtraData
{
friend class WaveResourceLoader;
friend class OggResourceLoader;
public:
SoundResourceExtraData();
virtual ~SoundResourceExtraData() { }
virtual std::string VToString() { return "SoundResourceExtraData"; }
enum SoundType GetSoundType() { return m_SoundType; }
WAVEFORMATEX const *GetFormat() { return &m_WavFormatEx; }
int GetLengthMilli() const { return m_LengthMilli; }
protected:
enum SoundType m_SoundType; // is this an Ogg, WAV, etc.?
bool m_bInitialized; // has the sound been initialized
WAVEFORMATEX m_WavFormatEx; // description of the PCM format
int m_LengthMilli; // how long the sound is in milliseconds
};
//
// class WaveResourceLoader
//
class IResourceLoader
{
public:
virtual std::string VGetPattern()=0;
virtual bool VUseRawFile()=0;
virtual bool VDiscardRawBufferAfterLoad()=0;
virtual bool VAddNullZero() { return false; }
virtual unsigned int VGetLoadedResourceSize(char *rawBuffer, unsigned int rawSize)=0;
virtual bool VLoadResource(char *rawBuffer, unsigned int rawSize, shared_ptr<ResHandle> handle)=0;
};
class WaveResourceLoader : public IResourceLoader
{
public:
virtual bool VUseRawFile() { return false; }
virtual bool VDiscardRawBufferAfterLoad() { return true; }
virtual unsigned int VGetLoadedResourceSize(char *rawBuffer, unsigned int rawSize);
virtual bool VLoadResource(char *rawBuffer, unsigned int rawSize, shared_ptr<ResHandle> handle);
virtual std::string VGetPattern() { return "*.wav"; }
protected:
bool ParseWave(char *wavStream, size_t length, shared_ptr<ResHandle> handle);
};
//
// class OggResourceLoader
//
class OggResourceLoader : public IResourceLoader
{
public:
virtual bool VUseRawFile() { return false; }
virtual bool VDiscardRawBufferAfterLoad() { return true; }
virtual unsigned int VGetLoadedResourceSize(char *rawBuffer, unsigned int rawSize);
virtual bool VLoadResource(char *rawBuffer, unsigned int rawSize, shared_ptr<ResHandle> handle);
virtual std::string VGetPattern() { return "*.ogg"; }
protected:
bool ParseOgg(char *oggStream, size_t length, shared_ptr<ResHandle> handle);
};
#include "GameCodeStd.h"
#include <io.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <vorbis/codec.h> // from the vorbis sdk
#include <vorbis/vorbisfile.h> // also :)
#include "GameCode.h"
#include "SoundResource.h"
#include "Audio.h"
//
// SoundResource::SoundResource
//
SoundResourceExtraData::SoundResourceExtraData()
: m_SoundType(SOUND_TYPE_UNKNOWN),
m_bInitialized(false),
m_LengthMilli(0)
{
// don't do anything yet - timing sound Initialization is important!
}
unsigned int WaveResourceLoader::VGetLoadedResourceSize(char *rawBuffer, unsigned int rawSize)
{
DWORD file = 0;
DWORD fileEnd = 0;
DWORD length = 0;
DWORD type = 0;
DWORD pos = 0;
/**
mmioFOURCC -- converts four chars into a 4 byte integer code.
The first 4 bytes of a valid .wav file is 'R','I','F','F'
*/
#define mmioFOURCC(ch0, ch1, ch2, ch3) MAKEFOURCC(ch0, ch1, ch2, ch3)
#define MAKEFOURCC(ch0, ch1, ch2, ch3) ((DWORD)(BYTE)(ch0) | ((DWORD)(BYTE)(ch1) << 8) | ((DWORD)(BYTE)(ch2) << 16) | ((DWORD)(BYTE)(ch3) << 24 ))
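// Worked example (not in the original listing): on a little-endian machine
// mmioFOURCC('R','I','F','F') packs to 0x46464952 ('R' = 0x52 in the low byte),
// which is exactly the first DWORD of every RIFF/WAV file.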
type = *((DWORD *)(rawBuffer+pos)); pos+=sizeof(DWORD);
bool b = type != mmioFOURCC('R', 'I', 'F', 'F');
if( b )
{
return false;
}
length = *((DWORD *)(rawBuffer+pos)); pos+=sizeof(DWORD);
type = *((DWORD *)(rawBuffer+pos)); pos+=sizeof(DWORD);
// 'W','A','V','E' for a legal .wav file
b = type != mmioFOURCC('W', 'A', 'V', 'E');
if( b )
return false; //not a WAV
// Find the end of the file
fileEnd = length - 4;
bool copiedBuffer = false;
// Load the .wav format and the .wav data
// Note that these blocks can be in either order.
while(file < fileEnd)
{
type = *((DWORD *)(rawBuffer+pos)); pos+=sizeof(DWORD);
file += sizeof(DWORD);
length = *((DWORD *)(rawBuffer+pos)); pos+=sizeof(DWORD);
file += sizeof(DWORD);
switch(type)
{
case mmioFOURCC('f', 'a', 'c', 't'):
{
GCC_ASSERT(false && "This wav file is compressed. We don't handle compressed wav at this time");
break;
}
case mmioFOURCC('f', 'm', 't', ' '):
{
pos+=length;
break;
}
case mmioFOURCC('d', 'a', 't', 'a'):
{
return length;
}
}
file += length;
// Increment the pointer past the block we just read,
// and make sure the pointer is word aligned.
if (length & 1)
{
++pos;
++file;
}
}
// If we get to here, the .wav file didn't contain all the right pieces.
return false;
}
bool WaveResourceLoader::VLoadResource(char *rawBuffer, unsigned int rawSize, shared_ptr<ResHandle> handle)
{
shared_ptr<SoundResourceExtraData> extra = shared_ptr<SoundResourceExtraData>(GCC_NEW SoundResourceExtraData());
extra->m_SoundType = SOUND_TYPE_WAVE;
handle->SetExtra(shared_ptr<SoundResourceExtraData>(extra));
if (!ParseWave(rawBuffer, rawSize, handle))
{
return false;
}
return true;
}
//
// WaveResourceLoader::ParseWave
//
bool WaveResourceLoader::ParseWave(char *wavStream, size_t bufferLength, shared_ptr<ResHandle> handle)
{
shared_ptr<SoundResourceExtraData> extra = static_pointer_cast<SoundResourceExtraData>(handle->GetExtra());
DWORD file = 0;
DWORD fileEnd = 0;
DWORD length = 0;
DWORD type = 0;
DWORD pos = 0;
// mmioFOURCC -- converts four chars into a 4 byte integer code.
// The first 4 bytes of a valid .wav file is 'R','I','F','F'
type = *((DWORD *)(wavStream+pos)); pos+=sizeof(DWORD);
if(type != mmioFOURCC('R', 'I', 'F', 'F'))
return false;
length = *((DWORD *)(wavStream+pos)); pos+=sizeof(DWORD);
type = *((DWORD *)(wavStream+pos)); pos+=sizeof(DWORD);
// 'W','A','V','E' for a legal .wav file
if(type != mmioFOURCC('W', 'A', 'V', 'E'))
return false; //not a WAV
// Find the end of the file
fileEnd = length - 4;
memset(&extra->m_WavFormatEx, 0, sizeof(WAVEFORMATEX));
bool copiedBuffer = false;
// Load the .wav format and the .wav data
// Note that these blocks can be in either order.
while(file < fileEnd)
{
type = *((DWORD *)(wavStream+pos)); pos+=sizeof(DWORD);
file += sizeof(DWORD);
length = *((DWORD *)(wavStream+pos)); pos+=sizeof(DWORD);
file += sizeof(DWORD);
switch(type)
{
case mmioFOURCC('f', 'a', 'c', 't'):
{
GCC_ASSERT(false && "This wav file is compressed. We don't handle compressed wav at this time");
break;
}
case mmioFOURCC('f', 'm', 't', ' '):
{
memcpy(&extra->m_WavFormatEx, wavStream+pos, length); pos+=length;
extra->m_WavFormatEx.cbSize = (WORD)length;
break;
}
case mmioFOURCC('d', 'a', 't', 'a'):
{
copiedBuffer = true;
if (length != handle->Size())
{
GCC_ASSERT(0 && _T("Wav resource size does not equal the buffer size"));
return 0;
}
memcpy(handle->WritableBuffer(), wavStream+pos, length); pos+=length;
break;
}
}
file += length;
// If both blocks have been seen, we can return true.
if( copiedBuffer )
{
extra->m_LengthMilli = ( handle->Size() * 1000 ) / extra->GetFormat()->nAvgBytesPerSec;
return true;
}
// Increment the pointer past the block we just read,
// and make sure the pointer is word aligned.
if (length & 1)
{
++pos;
++file;
}
}
// If we get to here, the .wav file didn't contain all the right pieces.
return false;
}
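//
// Worked example for the m_LengthMilli formula above (illustrative numbers): a 4-second
// clip of 16-bit stereo PCM at 44.1 kHz has nAvgBytesPerSec = 176400 and a data chunk of
// 705600 bytes, so m_LengthMilli = (705600 * 1000) / 176400 = 4000 milliseconds.
//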
//
// struct OggMemoryFile
//
struct OggMemoryFile
{
unsigned char* dataPtr;// Pointer to the data in memory
size_t dataSize; // Size of the data
size_t dataRead; // Bytes read so far
OggMemoryFile(void)
{
dataPtr = NULL;
dataSize = 0;
dataRead = 0;
}
};
//
// VorbisRead -- read callback for the in-memory Ogg Vorbis stream
//
size_t VorbisRead(void* data_ptr, size_t byteSize, size_t sizeToRead, void* data_src)
{
OggMemoryFile *pVorbisData = static_cast<OggMemoryFile *>(data_src);
if (NULL == pVorbisData)
{
return -1;
}
size_t actualSizeToRead, spaceToEOF =
pVorbisData->dataSize - pVorbisData->dataRead;
if ((sizeToRead*byteSize) < spaceToEOF)
{
actualSizeToRead = (sizeToRead*byteSize);
}
else
{
actualSizeToRead = spaceToEOF;
}
if (actualSizeToRead)
{
memcpy(data_ptr,
(char*)pVorbisData->dataPtr + pVorbisData->dataRead, actualSizeToRead);
pVorbisData->dataRead += actualSizeToRead;
}
return actualSizeToRead;
}
//
// VorbisSeek
//
// ogg_int64_t is normally defined by the Ogg headers; shown here for reference.
typedef long long ogg_int64_t;
int VorbisSeek(void* data_src, ogg_int64_t offset, int origin)
{
OggMemoryFile *pVorbisData = static_cast<OggMemoryFile *>(data_src);
if (NULL == pVorbisData)
{
return -1;
}
switch (origin)
{
case SEEK_SET:
{
ogg_int64_t actualOffset;
actualOffset = (pVorbisData->dataSize >= offset) ? offset : pVorbisData->dataSize;
pVorbisData->dataRead = static_cast<size_t>(actualOffset);
break;
}
case SEEK_CUR:
{
size_t spaceToEOF =
pVorbisData->dataSize - pVorbisData->dataRead;
ogg_int64_t actualOffset;
actualOffset = (offset < spaceToEOF) ? offset : spaceToEOF;
pVorbisData->dataRead += static_cast<size_t>(actualOffset);
break;
}
case SEEK_END:
pVorbisData->dataRead = pVorbisData->dataSize+1;
break;
default:
GCC_ASSERT(false && "Bad parameter for 'origin', requires same as fseek.");
break;
};
return 0;
}
//
// VorbisClose
//
int VorbisClose(void *src)
{
// Do nothing - we assume someone else is managing the raw buffer
return 0;
}
//
// VorbisTell
//
long VorbisTell(void *data_src)
{
OggMemoryFile *pVorbisData = static_cast<OggMemoryFile *>(data_src);
if (NULL == pVorbisData)
{
return -1L;
}
return static_cast<long>(pVorbisData->dataRead);
}
shared_ptr<IResourceLoader> CreateWAVResourceLoader()
{
return shared_ptr<IResourceLoader>(GCC_NEW WaveResourceLoader());
}
shared_ptr<IResourceLoader> CreateOGGResourceLoader()
{
return shared_ptr<IResourceLoader>(GCC_NEW OggResourceLoader());
}
// For reference: the following structure definitions are reproduced from the Ogg and
// Vorbis SDK headers (<vorbis/codec.h>, <vorbis/vorbisfile.h>) included above.
typedef struct {
size_t (*read_func) (void *ptr, size_t size, size_t nmemb, void *datasource);
int (*seek_func) (void *datasource, ogg_int64_t offset, int whence);
int (*close_func) (void *datasource);
long (*tell_func) (void *datasource);
} ov_callbacks;
typedef struct {
unsigned char *data;
int storage;
int fill;
int returned;
int unsynced;
int headerbytes;
int bodybytes;
} ogg_sync_state;
typedef struct vorbis_comment{
/* unlimited user comment fields. */
char **user_comments;
int *comment_lengths;
int comments;
char *vendor;
} vorbis_comment;
typedef struct vorbis_info{
int version;
int channels;
long rate;
long bitrate_upper;
long bitrate_nominal;
long bitrate_lower;
long bitrate_window;
void *codec_setup;
} vorbis_info;
typedef struct {
unsigned char *body_data; /* bytes from packet bodies */
long body_storage; /* storage elements allocated */
long body_fill; /* elements stored; fill mark */
long body_returned; /* elements of fill returned */
int *lacing_vals; /* The values that will go to the segment table */
ogg_int64_t *granule_vals; /* granulepos values for headers. Not compact
this way, but it is simple coupled to the
lacing fifo */
long lacing_storage;
long lacing_fill;
long lacing_packet;
long lacing_returned;
unsigned char header[282]; /* working space for header encode */
int header_fill;
int e_o_s; /* set when we have buffered the last packet in the
logical bitstream */
int b_o_s; /* set after we've written the initial page
of a logical bitstream */
long serialno;
int pageno;
ogg_int64_t packetno; /* sequence number for decode; the framing
knows where there's a hole in the data,
but we need coupling so that the codec
(which is in a seperate abstraction
layer) also knows about the gap */
ogg_int64_t granulepos;
} ogg_stream_state;
typedef struct vorbis_dsp_state{
/* private */
} vorbis_dsp_state;
typedef struct vorbis_block{
/* private */
} vorbis_block;
typedef struct {
void *datasource; /* Pointer to a FILE *, etc. */
int seekable;
ogg_int64_t offset;
ogg_int64_t end;
ogg_sync_state oy;
/* If the FILE handle isn't seekable (eg, a pipe), only the current
stream appears */
int links;
ogg_int64_t *offsets;
ogg_int64_t *dataoffsets;
long *serialnos;
ogg_int64_t *pcmlengths;
vorbis_info *vi;
vorbis_comment *vc;
/* Decoding working state local storage */
ogg_int64_t pcm_offset;
int ready_state;
long current_serialno;
int current_link;
ogg_int64_t bittrack;
ogg_int64_t samptrack;
ogg_stream_state os; /* take physical pages, weld into a logical
stream of packets */
vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
vorbis_block vb; /* local working space for packet->PCM decode */
ov_callbacks callbacks;
} OggVorbis_File;
// The real implementations of these two entry points live in libvorbisfile; only the
// prototypes are shown here.
int ov_open_callbacks(void *datasource, OggVorbis_File *vf, char *initial, long ibytes, ov_callbacks callbacks);
int ov_clear(OggVorbis_File *vf);
unsigned int OggResourceLoader::VGetLoadedResourceSize(char *rawBuffer, unsigned int rawSize)
{
OggVorbis_File vf; // for the vorbisfile interface
ov_callbacks oggCallbacks;
OggMemoryFile *vorbisMemoryFile = GCC_NEW OggMemoryFile;
vorbisMemoryFile->dataRead = 0;
vorbisMemoryFile->dataSize = rawSize;
vorbisMemoryFile->dataPtr = (unsigned char *)rawBuffer;
oggCallbacks.read_func = VorbisRead;
oggCallbacks.close_func = VorbisClose;
oggCallbacks.seek_func = VorbisSeek;
oggCallbacks.tell_func = VorbisTell;
/*
This is an alternative function used to open and initialize an OggVorbis_File structure when using a data source other than a file, when it's necessary to modify default file access behavior, or to initialize a Vorbis decode from a FILE * pointer under Windows where ov_open() cannot be used. It allows the application to specify custom file manipulation routines and sets up all the related decoding structures.
*/
int ov_ret = ov_open_callbacks(vorbisMemoryFile, &vf, NULL, 0, oggCallbacks);
GCC_ASSERT(ov_ret>=0);
// ok now the tricky part
// the vorbis_info struct keeps most of the interesting format info
vorbis_info *vi = ov_info(&vf,-1);
DWORD size = 4096 * 16;
DWORD pos = 0;
int sec = 0;
int ret = 1;
DWORD bytes = (DWORD)ov_pcm_total(&vf, -1);
bytes *= 2 * vi->channels;
ov_clear(&vf);
SAFE_DELETE(vorbisMemoryFile);
return bytes;
}
bool OggResourceLoader::VLoadResource(char *rawBuffer, unsigned int rawSize, shared_ptr<ResHandle> handle)
{
shared_ptr<SoundResourceExtraData> extra = shared_ptr<SoundResourceExtraData>(GCC_NEW SoundResourceExtraData());
extra->m_SoundType = SOUND_TYPE_OGG;
handle->SetExtra(shared_ptr<SoundResourceExtraData>(extra));
if (!ParseOgg(rawBuffer, rawSize, handle))
{
return false;
}
return true;
}
//
// OggResourceLoader::ParseOgg
//
bool OggResourceLoader::ParseOgg(char *oggStream, size_t length, shared_ptr<ResHandle> handle)
{
shared_ptr<SoundResourceExtraData> extra = static_pointer_cast<SoundResourceExtraData>(handle->GetExtra());
OggVorbis_File vf; // for the vorbisfile interface
ov_callbacks oggCallbacks;
OggMemoryFile *vorbisMemoryFile = GCC_NEW OggMemoryFile;
vorbisMemoryFile->dataRead = 0;
vorbisMemoryFile->dataSize = length;
vorbisMemoryFile->dataPtr = (unsigned char *)oggStream;
oggCallbacks.read_func = VorbisRead;
oggCallbacks.close_func = VorbisClose;
oggCallbacks.seek_func = VorbisSeek;
oggCallbacks.tell_func = VorbisTell;
int ov_ret = ov_open_callbacks(vorbisMemoryFile, &vf, NULL, 0, oggCallbacks);
GCC_ASSERT(ov_ret>=0);
// ok now the tricky part
// the vorbis_info struct keeps most of the interesting format info
vorbis_info *vi = ov_info(&vf,-1);
memset(&(extra->m_WavFormatEx), 0, sizeof(extra->m_WavFormatEx));
extra->m_WavFormatEx.cbSize = sizeof(extra->m_WavFormatEx);
extra->m_WavFormatEx.nChannels = vi->channels;
extra->m_WavFormatEx.wBitsPerSample = 16; // ogg vorbis is always 16 bit
extra->m_WavFormatEx.nSamplesPerSec = vi->rate;
extra->m_WavFormatEx.nAvgBytesPerSec = extra->m_WavFormatEx.nSamplesPerSec*extra->m_WavFormatEx.nChannels*2;
extra->m_WavFormatEx.nBlockAlign = 2*extra->m_WavFormatEx.nChannels;
extra->m_WavFormatEx.wFormatTag = 1;
DWORD size = 4096 * 16;
DWORD pos = 0;
int sec = 0;
int ret = 1;
DWORD bytes = (DWORD)ov_pcm_total(&vf, -1);
bytes *= 2 * vi->channels;
if (handle->Size() != bytes)
{
GCC_ASSERT(0 && _T("The Ogg size does not match the memory buffer size!"));
ov_clear(&vf);
SAFE_DELETE(vorbisMemoryFile);
return false;
}
// now read in the bits
while(ret && pos<bytes)
{
// ov_read decodes into 16-bit (word size 2), signed, little-endian PCM samples
ret = ov_read(&vf, handle->WritableBuffer()+pos, size, 0, 2, 1, &sec);
pos += ret;
if (bytes - pos < size)
{
size = bytes - pos;
}
}
extra->m_LengthMilli = (int)(1000.f * ov_time_total(&vf, -1));
ov_clear(&vf);
SAFE_DELETE(vorbisMemoryFile);
return true;
}
Some of the functions and types above come from Microsoft's frameworks (the Win32 and DirectX headers).
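Before these loaders can do anything, they have to be registered with the resource cache. A minimal sketch of the wiring (assuming the ResCache exposes a RegisterLoader method, as in the Game Coding Complete framework this post is based on):
// register once at startup so *.wav and *.ogg resources are routed to the parsers above
g_pApp->m_ResCache->RegisterLoader(CreateWAVResourceLoader());
g_pApp->m_ResCache->RegisterLoader(CreateOGGResourceLoader());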
3. Sound effect processes, including explosions and fading in/out
#pragma once
#include "Process.h"
#include "Audio.h"
#include "SoundResource.h"
#include "ResCache.h"
/////////////////////////////////////////////////////////////////////////////
// class SoundProcess
//
// A Sound Process, not to be confused with a Sound Resource (SoundResource)
// manages a sound as it is being played. You can use this class to manage
// timing between sounds & animations.
//
/////////////////////////////////////////////////////////////////////////////
class SoundProcess : public Process
{
protected:
shared_ptr<ResHandle> m_handle; // this is the raw sound data
shared_ptr<IAudioBuffer> m_AudioBuffer; // handle to the implementation dependent audio buffer (DSound, Miles)
int m_Volume; // these hold the initial setting until the sound is actually launched.
bool m_isLooping;
public:
SoundProcess(shared_ptr<ResHandle> soundResource, int volume=100, bool looping=false);
virtual ~SoundProcess();
void Play(const int volume, const bool looping);
void Stop();
void SetVolume(int volume);
int GetVolume();
int GetLengthMilli();
bool IsSoundValid() { return m_handle != NULL; }
bool IsPlaying();
bool IsLooping() { return m_AudioBuffer && m_AudioBuffer->VIsLooping(); }
float GetProgress();
void PauseSound(void);
protected:
virtual void VOnInit();
virtual void VOnUpdate(unsigned long deltaMs);
void InitializeVolume();
protected:
SoundProcess(); // Disable Default Construction
};
/////////////////////////////////////////////////////////////////////////////
// class ExplosionProcess
//
// This is an example of a process that uses a simple state machine
// to control itself.
//
/////////////////////////////////////////////////////////////////////////////
class ExplosionProcess : public Process
{
protected:
int m_Stage;
shared_ptr<SoundProcess> m_Sound;
public:
ExplosionProcess() { m_Stage=0; }
protected:
virtual void VOnInit();
virtual void VOnUpdate(unsigned long deltaMs);
};
/////////////////////////////////////////////////////////////////////////////
// class FadeProcess
//
// Fades sound volume in or out over time and then kills itself.
// This should be useful for groups of sound effects, too - such as when
// an AI barks and it must be heard above the other effects like too much freaking thunder.
//
/////////////////////////////////////////////////////////////////////////////
class FadeProcess : public Process
{
protected:
shared_ptr<SoundProcess> m_Sound;
int m_TotalFadeTime;
int m_ElapsedTime;
int m_StartVolume;
int m_EndVolume;
public:
FadeProcess(shared_ptr<SoundProcess> sound, int fadeTime, int endVolume);
virtual void VOnUpdate(unsigned long deltaMs);
};
//========================================================================
// SoundProcess.cpp : Defines sound processes.
//========================================================================
#include "GameCodeStd.h"
#include "GameCode.h"
#include "Audio.h"
#include "SoundProcess.h"
#include "SoundResource.h"
//////////////////////////////////////////////////////////////////////
// SoundProcess Implementation
//////////////////////////////////////////////////////////////////////
//
// SoundProcess::SoundProcess
//
SoundProcess::SoundProcess(shared_ptr<ResHandle> resource, int volume, bool looping) :
m_handle(resource),
m_Volume(volume),
m_isLooping(looping)
{
InitializeVolume();
}
//
// SoundProcess::~SoundProcess
//
SoundProcess::~SoundProcess()
{
if (IsPlaying())
Stop();
if (m_AudioBuffer)
g_pAudio->VReleaseAudioBuffer(m_AudioBuffer.get());
}
void SoundProcess::InitializeVolume()
{
// FUTURE WORK: Somewhere set an adjusted volume based on game options
// m_volume = g_GraphicalApp->GetVolume(typeOfSound);
}
//
// SoundProcess::GetLengthMilli
//
int SoundProcess::GetLengthMilli()
{
if ( m_handle && m_handle->GetExtra())
{
shared_ptr<SoundResourceExtraData> extra = static_pointer_cast<SoundResourceExtraData>(m_handle->GetExtra());
return extra->GetLengthMilli();
}
else
{
return 0;
}
}
//
// SoundProcess::VOnInit
// note that the book incorrectly names this SoundProcess::OnInitialize
void SoundProcess::VOnInit()
{
Process::VOnInit();
//If the sound has never been... you know... then Play it for the very first time
if ( m_handle == NULL || m_handle->GetExtra() == NULL)
return;
//This sound will manage its own handle in the other thread
IAudioBuffer *buffer = g_pAudio->VInitAudioBuffer(m_handle);
if (!buffer)
{
Fail();
return;
}
m_AudioBuffer.reset(buffer);
Play(m_Volume, m_isLooping);
}
//
// SoundProcess::VOnUpdate
//
void SoundProcess::VOnUpdate(unsigned long deltaMs)
{
if (!IsPlaying())
{
Succeed();
}
}
//
// SoundProcess::IsPlaying
//
bool SoundProcess::IsPlaying()
{
if ( ! m_handle || ! m_AudioBuffer )
return false;
return m_AudioBuffer->VIsPlaying();
}
//
// SoundProcess::SetVolume
//
void SoundProcess::SetVolume(int volume)
{
if(m_AudioBuffer==NULL)
{
return;
}
GCC_ASSERT(volume>=0 && volume<=100 && "Volume must be a number between 0 and 100");
m_Volume = volume;
m_AudioBuffer->VSetVolume(volume);
}
//
// SoundProcess::GetVolume
//
int SoundProcess::GetVolume()
{
if(m_AudioBuffer==NULL)
{
return 0;
}
m_Volume = m_AudioBuffer->VGetVolume();
return m_Volume;
}
//
// SoundProcess::PauseSound
// NOTE: This is called TogglePause in the book
//
void SoundProcess::PauseSound()
{
if (m_AudioBuffer)
m_AudioBuffer->VTogglePause();
}
//
// SoundProcess::Play
//
void SoundProcess::Play(const int volume, const bool looping)
{
GCC_ASSERT(volume>=0 && volume<=100 && "Volume must be a number between 0 and 100");
if(!m_AudioBuffer)
{
return;
}
m_AudioBuffer->VPlay(volume, looping);
}
//
// SoundProcess::Stop
//
void SoundProcess::Stop()
{
if (m_AudioBuffer)
{
m_AudioBuffer->VStop();
}
}
//
// SoundProcess::GetProgress
//
float SoundProcess::GetProgress()
{
if (m_AudioBuffer)
{
return m_AudioBuffer->VGetProgress();
}
return 0.0f;
}
//
// ExplosionProcess::VOnInit
//
void ExplosionProcess::VOnInit()
{
Process::VOnInit();
Resource resource("explosion.wav");
shared_ptr<ResHandle> srh = g_pApp->m_ResCache->GetHandle(&resource);
m_Sound.reset(GCC_NEW SoundProcess(srh));
// Imagine cool explosion graphics setup code here!!!!
//
//
//
}
//
// ExplosionProcess::VOnUpdate
//
void ExplosionProcess::VOnUpdate(unsigned long deltaMs)
{
// Since the sound is the real pacing mechanism - we ignore deltaMilliseconds
float progress = m_Sound->GetProgress();
switch (m_Stage)
{
case 0:
{
if (progress > 0.55f)
{
++m_Stage;
// Imagine secondary explosion effect launch right here!
}
break;
}
case 1:
{
if (progress > 0.75f)
{
++m_Stage;
// Imagine tertiary explosion effect launch right here!
}
break;
}
default:
{
break;
}
}
}
//
// FadeProcess::FadeProcess
//
FadeProcess::FadeProcess(shared_ptr<SoundProcess> sound, int fadeTime, int endVolume)
{
m_Sound = sound;
m_TotalFadeTime = fadeTime;
m_StartVolume = sound->GetVolume();
m_EndVolume = endVolume;
m_ElapsedTime = 0;
VOnUpdate(0);
}
//
// FadeProcess::VOnUpdate
//
void FadeProcess::VOnUpdate(unsigned long deltaMs)
{
m_ElapsedTime += deltaMs;
if (m_Sound->IsDead())
Succeed();
float cooef = (float)m_ElapsedTime / m_TotalFadeTime;
if (cooef>1.0f)
cooef = 1.0f;
if (cooef<0.0f)
cooef = 0.0f;
int newVolume = m_StartVolume + (int)( float(m_EndVolume - m_StartVolume) * cooef);
if (m_ElapsedTime >= m_TotalFadeTime)
{
newVolume = m_EndVolume;
Succeed();
}
m_Sound->SetVolume(newVolume);
}
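As a usage sketch (not from the original post): assuming a process manager named m_pProcessManager and a music file reachable through the resource cache, a track can be started and then faded out over ten seconds like this:
Resource resource("music.ogg");                        // hypothetical file name
shared_ptr<ResHandle> rh = g_pApp->m_ResCache->GetHandle(&resource);
shared_ptr<SoundProcess> music(GCC_NEW SoundProcess(rh, 100, true));  // looping, full volume
m_pProcessManager->AttachProcess(music);
shared_ptr<FadeProcess> fade(GCC_NEW FadeProcess(music, 10000, 0));   // fade to silence over 10 s
m_pProcessManager->AttachProcess(fade);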
4. A DirectSound-based implementation of the sound manager
#pragma once
//========================================================================
// DirectSoundAudio.h : Implements audio interfaces for DirectSound
//========================================================================
#include "Audio.h"
// DirectSound includes
#include <dsound.h>
#include <mmsystem.h>
// DirectSoundAudioBuffer
//
// Implements the rest of the IAudioBuffer interface left out by AudioBuffer.
// If you are interested in implementing a sound system using OpenAL
// you'd create a class OpenALAudioBuffer from AudioBuffer.
//
// For reference only: LPDIRECTSOUNDBUFFER comes from dsound.h, and HRESULT is really a
// 32-bit value defined by the Windows headers; the enum below just lists the few codes
// this file cares about.
typedef struct IDirectSoundBuffer *LPDIRECTSOUNDBUFFER;
enum HRESULT {
E_INVALIDARG,
E_FAIL,
S_OK,
};
class DirectSoundAudioBuffer : public AudioBuffer
{
protected:
LPDIRECTSOUNDBUFFER m_Sample;
public:
DirectSoundAudioBuffer(LPDIRECTSOUNDBUFFER sample, shared_ptr<ResHandle> resource);
virtual void *VGet();
virtual bool VOnRestore();
virtual bool VPlay(int volume, bool looping);
virtual bool VPause();
virtual bool VStop();
virtual bool VResume();
virtual bool VTogglePause();
virtual bool VIsPlaying();
virtual void VSetVolume(int volume);
virtual void VSetPosition(unsigned long newPosition);
virtual float VGetProgress();
private:
HRESULT FillBufferWithSound( );
HRESULT RestoreBuffer( BOOL* pbWasRestored );
};
// class DirectSoundAudio
//
// Implements the rest of the IAudio interface left out by Audio.
// If you are interested in implementing a sound system using OpenAL
// you'd create a class OpenALAudioBuffer from AudioBuffer.
//
class DirectSoundAudio : public Audio
{
public:
DirectSoundAudio() { m_pDS = NULL; }
virtual bool VActive() { return m_pDS!=NULL; }
virtual IAudioBuffer *VInitAudioBuffer(shared_ptr<ResHandle> handle);
virtual void VReleaseAudioBuffer(IAudioBuffer* audioBuffer);
virtual void VShutdown();
virtual bool VInitialize(HWND hWnd);
protected:
// Reference: http://msdn.microsoft.com/en-us/library/windows/desktop/ee418035(v=vs.85).aspx
IDirectSound8* m_pDS;
HRESULT SetPrimaryBufferFormat( DWORD dwPrimaryChannels,
DWORD dwPrimaryFreq,
DWORD dwPrimaryBitRate );
};
//===========================================================================
// DirectSoundAudio.cpp : Defines a simple sound system that uses DirectSound
//===========================================================================
#include "GameCodeStd.h"
#include "GameCode.h"
#include "SoundResource.h"
#include "DirectSoundAudio.h"
#include <cguid.h>
//
// DirectSoundAudio::VInitialize
//
bool DirectSoundAudio::VInitialize(HWND hWnd)
{
if(m_Initialized)
return true;
m_Initialized=false;
m_AllSamples.clear();
SAFE_RELEASE( m_pDS );
HRESULT hr;
// Create IDirectSound using the primary sound device
if( FAILED( hr = DirectSoundCreate8( NULL, &m_pDS, NULL ) ) )
return false;
// Set DirectSound coop level
if( FAILED( hr = m_pDS->SetCooperativeLevel( hWnd, DSSCL_PRIORITY) ) )
return false;
if( FAILED( hr = SetPrimaryBufferFormat( 8, 44100, 16 ) ) )
return false;
m_Initialized = true;
return true;
}
//
// DirectSoundAudio::SetPrimaryBufferFormat
//
HRESULT DirectSoundAudio::SetPrimaryBufferFormat(
DWORD dwPrimaryChannels,
DWORD dwPrimaryFreq,
DWORD dwPrimaryBitRate )
{
// !WARNING! - Setting the primary buffer format and then using it
// for DirectMusic messes up DirectMusic!
//
// If you want your primary buffer format to be 22kHz stereo, 16-bit
// call with these parameters: SetPrimaryBufferFormat(2, 22050, 16);
HRESULT hr;
LPDIRECTSOUNDBUFFER pDSBPrimary = NULL;
if( ! m_pDS )
return CO_E_NOTINITIALIZED;
// Get the primary buffer
DSBUFFERDESC dsbd;
ZeroMemory( &dsbd, sizeof(DSBUFFERDESC) );
dsbd.dwSize = sizeof(DSBUFFERDESC);
dsbd.dwFlags = DSBCAPS_PRIMARYBUFFER;
dsbd.dwBufferBytes = 0;
dsbd.lpwfxFormat = NULL;
if( FAILED( hr = m_pDS->CreateSoundBuffer( &dsbd, &pDSBPrimary, NULL ) ) )
return DXUT_ERR( L"CreateSoundBuffer", hr );
WAVEFORMATEX wfx;
ZeroMemory( &wfx, sizeof(WAVEFORMATEX) );
wfx.wFormatTag = (WORD) WAVE_FORMAT_PCM;
wfx.nChannels = (WORD) dwPrimaryChannels;
wfx.nSamplesPerSec = (DWORD) dwPrimaryFreq;
wfx.wBitsPerSample = (WORD) dwPrimaryBitRate;
wfx.nBlockAlign = (WORD) (wfx.wBitsPerSample / 8 * wfx.nChannels);
wfx.nAvgBytesPerSec = (DWORD) (wfx.nSamplesPerSec * wfx.nBlockAlign);
if( FAILED( hr = pDSBPrimary->SetFormat(&wfx) ) )
return DXUT_ERR( L"SetFormat", hr );
SAFE_RELEASE( pDSBPrimary );
return S_OK;
}
//
// DirectSoundAudio::VShutdown
//
void DirectSoundAudio::VShutdown()
{
if(m_Initialized)
{
Audio::VShutdown();
SAFE_RELEASE(m_pDS);
m_Initialized = false;
}
}
//
// DirectSoundAudio::VInitAudioBuffer
// Allocate a sample handle for the newborn sound (used by SoundResource) and tells you its length
//
IAudioBuffer *DirectSoundAudio::VInitAudioBuffer(shared_ptr<ResHandle> resHandle)//const
{
shared_ptr<SoundResourceExtraData> extra = static_pointer_cast<SoundResourceExtraData>(resHandle->GetExtra());
if( ! m_pDS )
return NULL;
switch(extra->GetSoundType())
{
case SOUND_TYPE_OGG:
case SOUND_TYPE_WAVE:
// We support WAVs and OGGs
break;
case SOUND_TYPE_MP3:
case SOUND_TYPE_MIDI: //If it's a midi file, then do nothing at this time... maybe we will support this in the future
GCC_ASSERT(false && "MP3s and MIDI are not supported");
return NULL;
break;
default:
GCC_ASSERT(false && "Unknown sound type");
return NULL;
}//end switch
LPDIRECTSOUNDBUFFER sampleHandle;
// Create the direct sound buffer, and only request the flags needed
// since each requires some overhead and limits if the buffer can
// be hardware accelerated
DSBUFFERDESC dsbd;
ZeroMemory( &dsbd, sizeof(DSBUFFERDESC) );
dsbd.dwSize = sizeof(DSBUFFERDESC);
dsbd.dwFlags = DSBCAPS_CTRLVOLUME;
dsbd.dwBufferBytes = resHandle->Size();
dsbd.guid3DAlgorithm = GUID_NULL;
dsbd.lpwfxFormat = const_cast<WAVEFORMATEX *>(extra->GetFormat());
HRESULT hr;
if( FAILED( hr = m_pDS->CreateSoundBuffer( &dsbd, &sampleHandle, NULL ) ) )
{
return NULL;
}
// Add handle to the list
IAudioBuffer *audioBuffer = GCC_NEW DirectSoundAudioBuffer(sampleHandle, resHandle);
m_AllSamples.push_front( audioBuffer);
return audioBuffer;
}
//
// DirectSoundAudio::VReleaseAudioBuffer
// Stop a playing sample and remove it from the list of active audio buffers
//
void DirectSoundAudio::VReleaseAudioBuffer(IAudioBuffer *sampleHandle)//const
{
sampleHandle->VStop();
m_AllSamples.remove(sampleHandle);
}
//
// DirectSoundAudioBuffer::DirectSoundAudioBuffer
//
DirectSoundAudioBuffer::DirectSoundAudioBuffer(
LPDIRECTSOUNDBUFFER sample,
shared_ptr<ResHandle> resource)
: AudioBuffer(resource)
{
m_Sample = sample;
FillBufferWithSound();
}
//
// DirectSoundAudioBuffer::VGet
//
void *DirectSoundAudioBuffer::VGet()
{
if (!VOnRestore())
return NULL;
return m_Sample;
}
//
// DirectSoundAudioBuffer::VPlay
// Play a sound
//
bool DirectSoundAudioBuffer::VPlay(int volume, bool looping)
{
if(!g_pAudio->VActive())
return false;
VStop();
m_Volume = volume;
m_isLooping = looping;
LPDIRECTSOUNDBUFFER pDSB = (LPDIRECTSOUNDBUFFER)VGet();
if (!pDSB)
return false;
pDSB->SetVolume( volume );
DWORD dwFlags = looping ? DSBPLAY_LOOPING : 0L;
return (S_OK==pDSB->Play( 0, 0, dwFlags ) );
}//end Play
//
// DirectSoundAudioBuffer::VStop -
// Stop a sound and rewind play position to the beginning.
//
bool DirectSoundAudioBuffer::VStop()
{
if(!g_pAudio->VActive())
return false;
LPDIRECTSOUNDBUFFER pDSB = (LPDIRECTSOUNDBUFFER)VGet();
if( ! pDSB )
return false;
m_isPaused=true;
pDSB->Stop();
return true;
}
//
// DirectSoundAudioBuffer::VPause
// Pause a sound
//
bool DirectSoundAudioBuffer::VPause()
{
LPDIRECTSOUNDBUFFER pDSB = (LPDIRECTSOUNDBUFFER)VGet();
if(!g_pAudio->VActive())
return false;
if( ! pDSB ) // the original listing tested "if( pDSB )", which would dereference a null pointer below
return false;
m_isPaused=true;
pDSB->Stop();
pDSB->SetCurrentPosition(0); // rewinds buffer to beginning.
return true;
}
//
// DirectSoundAudioBuffer::VResume
// Resume a sound
bool DirectSoundAudioBuffer::VResume()
{
m_isPaused=false;
return VPlay(VGetVolume(), VIsLooping());
}
//
// DirectSoundAudioBuffer::VTogglePause
// Pause a sound or Resume a Paused sound
//
bool DirectSoundAudioBuffer::VTogglePause()
{
if(!g_pAudio->VActive())
return false;
if(m_isPaused)
{
VResume();
}
else
{
VPause(); // note that the book code calls VStop().
// It's better to call VPause() instead.
}
return true;
}//end TogglePause
//
// DirectSoundAudioBuffer::VIsPlaying
//
bool DirectSoundAudioBuffer::VIsPlaying()
{
if(!g_pAudio->VActive())
return false;
DWORD dwStatus = 0;
LPDIRECTSOUNDBUFFER pDSB = (LPDIRECTSOUNDBUFFER)VGet();
pDSB->GetStatus( &dwStatus );
bool bIsPlaying = ( ( dwStatus & DSBSTATUS_PLAYING ) != 0 );
return bIsPlaying;
}
//
// DirectSoundAudioBuffer::VSetVolume
//
void DirectSoundAudioBuffer::VSetVolume(int volume)
{
// DSBVOLUME_MIN, defined in dsound.h is set to as -10000, which is just way too silent for a
// lower bound and many programmers find -5000 to be a better minimum bound for the volume
// range to avoid an annoying silence for the lower 50% of a volume slider that uses a logarithmic scale.
// This was contributed by BystanderKain!
int gccDSBVolumeMin = DSBVOLUME_MIN;
if(!g_pAudio->VActive())
return;
LPDIRECTSOUNDBUFFER pDSB = (LPDIRECTSOUNDBUFFER)VGet();
GCC_ASSERT(volume>=0 && volume<=100 && "Volume must be a number between 0 and 100");
// convert volume from 0-100 into range for DirectX - don't forget to use a logarithmic scale!
float coeff = (float)volume / 100.0f;
float logarithmicProportion = coeff >0.1f ? 1+log10(coeff) : 0;
float range = float(DSBVOLUME_MAX - gccDSBVolumeMin);
float fvolume = ( range * logarithmicProportion ) + gccDSBVolumeMin;
GCC_ASSERT(fvolume>=gccDSBVolumeMin && fvolume<=DSBVOLUME_MAX);
HRESULT hr = pDSB->SetVolume( LONG(fvolume) );
GCC_ASSERT(hr==S_OK);
}
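//
// Worked example for the conversion above (illustrative): with volume = 50, coeff = 0.5,
// so logarithmicProportion = 1 + log10(0.5) ~= 0.699. With range = 10000 that gives
// fvolume ~= 0.699 * 10000 - 10000 ~= -3010, i.e. roughly -30 dB of attenuation, since
// DirectSound expresses buffer volume in hundredths of a decibel.
//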
void DirectSoundAudioBuffer::VSetPosition(unsigned long newPosition)
{
m_Sample->SetCurrentPosition(newPosition);
}
//
// DirectSoundAudioBuffer::VOnRestore
// NOTE: Renamed from DirectSoundAudioBuffer::VRestore in the book
bool DirectSoundAudioBuffer::VOnRestore()
{
HRESULT hr;
BOOL bRestored;
// Restore the buffer if it was lost
if( FAILED( hr = RestoreBuffer( &bRestored ) ) )
return false;
if( bRestored )
{
// The buffer was restored, so we need to fill it with new data
if( FAILED( hr = FillBufferWithSound( ) ) )
return false;
}
return true;
}
//
// DirectSoundAudioBuffer::RestoreBuffer
//
// Restores the lost buffer. *pbWasRestored returns TRUE if the buffer was
// restored. It can also be NULL if the information is not needed.
//
HRESULT DirectSoundAudioBuffer::RestoreBuffer( BOOL* pbWasRestored )
{
HRESULT hr;
if( ! m_Sample )
return CO_E_NOTINITIALIZED;
if( pbWasRestored )
*pbWasRestored = FALSE;
DWORD dwStatus;
if( FAILED( hr = m_Sample->GetStatus( &dwStatus ) ) )
return DXUT_ERR( L"GetStatus", hr );
if( dwStatus & DSBSTATUS_BUFFERLOST )
{
// Since the app could have just been activated, DirectSound
// may not be giving us control yet, so restoring the buffer may fail.
// If it does, sleep until DirectSound gives us control, but give up
// if it goes on for more than 1 second.
int count = 0;
do
{
hr = m_Sample->Restore();
if( hr == DSERR_BUFFERLOST )
Sleep( 10 );
}
while( ( hr = m_Sample->Restore() ) == DSERR_BUFFERLOST && ++count < 100 );
if( pbWasRestored != NULL )
*pbWasRestored = TRUE;
return S_OK;
}
else
{
return S_FALSE;
}
}
//
// DirectSoundAudioBuffer::FillBufferWithSound
//
HRESULT DirectSoundAudioBuffer::FillBufferWithSound( void )
{
HRESULT hr;
VOID *pDSLockedBuffer = NULL; // a pointer to the DirectSound buffer
DWORD dwDSLockedBufferSize = 0; // Size of the locked DirectSound buffer
DWORD dwWavDataRead = 0; // Amount of data read from the wav file
if( ! m_Sample )
return CO_E_NOTINITIALIZED;
// Make sure we have focus, and we didn't just switch in from
// an app which had a DirectSound device
if( FAILED( hr = RestoreBuffer( NULL ) ) )
return DXUT_ERR( L"RestoreBuffer", hr );
int pcmBufferSize = m_Resource->Size();
shared_ptr<SoundResourceExtraData> extra = static_pointer_cast<SoundResourceExtraData>(m_Resource->GetExtra());
// Lock the buffer down
if( FAILED( hr = m_Sample->Lock( 0, pcmBufferSize,
&pDSLockedBuffer, &dwDSLockedBufferSize,
NULL, NULL, 0L ) ) )
return DXUT_ERR( L"Lock", hr );
if( pcmBufferSize == 0 )
{
// Wav is blank, so just fill with silence
FillMemory( (BYTE*) pDSLockedBuffer,
dwDSLockedBufferSize,
(BYTE)(extra->GetFormat()->wBitsPerSample == 8 ? 128 : 0 ) );
}
else
{
CopyMemory(pDSLockedBuffer, m_Resource->Buffer(), pcmBufferSize);
if( pcmBufferSize < (int)dwDSLockedBufferSize )
{
// If the buffer sizes are different fill in the rest with silence
FillMemory( (BYTE*) pDSLockedBuffer + pcmBufferSize,
dwDSLockedBufferSize - pcmBufferSize,
(BYTE)(extra->GetFormat()->wBitsPerSample == 8 ? 128 : 0 ) );
}
}
// Unlock the buffer, we don't need it anymore.
m_Sample->Unlock( pDSLockedBuffer, dwDSLockedBufferSize, NULL, 0 );
return S_OK;
}
//
// DirectSoundAudioBuffer::VGetProgress
//
float DirectSoundAudioBuffer::VGetProgress()
{
LPDIRECTSOUNDBUFFER pDSB = (LPDIRECTSOUNDBUFFER)VGet();
DWORD progress = 0;
pDSB->GetCurrentPosition(&progress, NULL);
float length = (float)m_Resource->Size();
return (float)progress / length;
}
The sound management implementation above is all built on top of DirectSound, so it will not work on the Mac; a Mac version would be built on OpenAL instead.
That covers the code for the sound management system, including the various playback controls. The next article will cover 2D graphics in games.
Original article: http://blog.csdn.net/wanghexu09008126/article/details/39829693