Diffstat (limited to 'engines/mohawk')
-rw-r--r--  engines/mohawk/sound.cpp  288
-rw-r--r--  engines/mohawk/sound.h      3
2 files changed, 146 insertions, 145 deletions
diff --git a/engines/mohawk/sound.cpp b/engines/mohawk/sound.cpp
index 38cb0b3608..e52122dce0 100644
--- a/engines/mohawk/sound.cpp
+++ b/engines/mohawk/sound.cpp
@@ -37,6 +37,150 @@
namespace Mohawk {
+Audio::RewindableAudioStream *makeMohawkWaveStream(Common::SeekableReadStream *stream, CueList *cueList) {
+ uint32 tag = 0;
+ ADPCMStatus adpcmStatus;
+ DataChunk dataChunk;
+ uint32 dataSize = 0;
+
+ memset(&dataChunk, 0, sizeof(DataChunk));
+
+ if (stream->readUint32BE() != ID_MHWK) // MHWK tag again
+ error ("Could not find tag 'MHWK'");
+
+ stream->readUint32BE(); // Skip size
+
+ if (stream->readUint32BE() != ID_WAVE)
+ error ("Could not find tag 'WAVE'");
+
+ while (!dataChunk.audioData) {
+ tag = stream->readUint32BE();
+
+ switch (tag) {
+ case ID_ADPC:
+ debug(2, "Found Tag ADPC");
+ // ADPCM Sound Only
+ //
+ // This is useful for seeking in the stream, and is actually quite brilliant
+ // considering some of the other things Broderbund did with the engine.
+ // Only Riven and CSTime are known to use ADPCM audio and only CSTime
+ // actually requires this for seeking. On the other hand, it may be interesting
+ // to look at that one Riven sample that uses the cue points.
+ //
+ // Basically, the sample frame from the cue list is looked up here, and the
+ // starting sample and step index are set to the values stored for that frame.
+ // Quite an elegant/efficient system, really.
+
+ adpcmStatus.size = stream->readUint32BE();
+ adpcmStatus.itemCount = stream->readUint16BE();
+ adpcmStatus.channels = stream->readUint16BE();
+ adpcmStatus.statusItems = new ADPCMStatus::StatusItem[adpcmStatus.itemCount];
+
+ assert(adpcmStatus.channels <= 2);
+
+ for (uint16 i = 0; i < adpcmStatus.itemCount; i++) {
+ adpcmStatus.statusItems[i].sampleFrame = stream->readUint32BE();
+
+ for (uint16 j = 0; j < adpcmStatus.channels; j++) {
+ adpcmStatus.statusItems[i].channelStatus[j].last = stream->readSint16BE();
+ adpcmStatus.statusItems[i].channelStatus[j].stepIndex = stream->readUint16BE();
+ }
+ }
+
+ // TODO: Actually use this chunk. For now, just delete the status items...
+ delete[] adpcmStatus.statusItems;
+ break;
+ case ID_CUE:
+ debug(2, "Found Tag Cue#");
+ // Cues are used for animation sync. There are a couple in Myst and
+ // Riven, but they are not used there at all.
+
+ if (!cueList) {
+ uint32 size = stream->readUint32BE();
+ stream->skip(size);
+ break;
+ }
+
+ cueList->size = stream->readUint32BE();
+ cueList->pointCount = stream->readUint16BE();
+
+ if (cueList->pointCount == 0)
+ debug(2, "Cue# chunk found with no points!");
+ else
+ debug(2, "Cue# chunk found with %d point(s)!", cueList->pointCount);
+
+ cueList->points.resize(cueList->pointCount);
+ for (uint16 i = 0; i < cueList->pointCount; i++) {
+ cueList->points[i].sampleFrame = stream->readUint32BE();
+
+ byte nameLength = stream->readByte();
+ cueList->points[i].name.clear();
+ for (byte j = 0; j < nameLength; j++)
+ cueList->points[i].name += stream->readByte();
+
+ // Realign to an even boundary
+ if (!(nameLength & 1))
+ stream->readByte();
+
+ debug (3, "Cue# chunk point %d (frame %d): %s", i, cueList->points[i].sampleFrame, cueList->points[i].name.c_str());
+ }
+ break;
+ case ID_DATA:
+ debug(2, "Found Tag DATA");
+ // We subtract 20 from the chunk size, since 20 bytes is the total size
+ // of the header fields read below
+ dataSize = stream->readUint32BE() - 20;
+ dataChunk.sampleRate = stream->readUint16BE();
+ dataChunk.sampleCount = stream->readUint32BE();
+ dataChunk.bitsPerSample = stream->readByte();
+ dataChunk.channels = stream->readByte();
+ dataChunk.encoding = stream->readUint16BE();
+ dataChunk.loopCount = stream->readUint16BE();
+ dataChunk.loopStart = stream->readUint32BE();
+ dataChunk.loopEnd = stream->readUint32BE();
+
+ // NOTE: We currently ignore all of the loop parameters here. Myst uses the
+ // loopCount variable, but loopStart is always 0 and loopEnd is always the
+ // size of the sample. Myst ME uses standard WAVE files instead of the Mohawk
+ // Sound format, so none of this metadata is present and whether or not to
+ // loop has to be specified elsewhere.
+
+ dataChunk.audioData = stream->readStream(dataSize);
+ break;
+ default:
+ error ("Unknown tag found in 'tWAV' chunk -- '%s'", tag2str(tag));
+ }
+ }
+
+ // makeMohawkWaveStream always takes control of the original stream
+ delete stream;
+
+ // The sound in Myst uses raw unsigned 8-bit data
+ // The sound in the CD version of Riven is encoded in Intel DVI ADPCM
+ // The sound in the DVD version of Riven is encoded in MPEG-2 Layer II or Intel DVI ADPCM
+ if (dataChunk.encoding == kCodecRaw) {
+ byte flags = Audio::FLAG_UNSIGNED;
+
+ if (dataChunk.channels == 2)
+ flags |= Audio::FLAG_STEREO;
+
+ return Audio::makeRawStream(dataChunk.audioData, dataChunk.sampleRate, flags);
+ } else if (dataChunk.encoding == kCodecADPCM) {
+ uint32 blockAlign = dataChunk.channels * dataChunk.bitsPerSample / 8;
+ return Audio::makeADPCMStream(dataChunk.audioData, DisposeAfterUse::YES, dataSize, Audio::kADPCMDVI, dataChunk.sampleRate, dataChunk.channels, blockAlign);
+ } else if (dataChunk.encoding == kCodecMPEG2) {
+#ifdef USE_MAD
+ return Audio::makeMP3Stream(dataChunk.audioData, DisposeAfterUse::YES);
+#else
+ warning ("MAD library not included - unable to play MP2 audio");
+#endif
+ } else {
+ error ("Unknown Mohawk WAVE encoding %d", dataChunk.encoding);
+ }
+
+ return nullptr;
+}
+
Sound::Sound(MohawkEngine* vm) : _vm(vm) {
_midiDriver = NULL;
_midiParser = NULL;
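
With makeMohawkWaveStream() promoted to a free function in the Mohawk namespace (see the sound.h hunk below), code outside the Sound class can decode a tWAV resource directly. A minimal sketch, assuming MohawkEngine::getResource() and the standard ScummVM mixer API; the helper name and resource id here are illustrative, not part of this commit:

// Hypothetical helper, not part of this change: decode a tWAV resource and
// hand it to the mixer. getResource() and ID_TWAV are assumed from the
// surrounding Mohawk engine code.
static void playTWavExample(MohawkEngine *vm, Audio::Mixer *mixer, uint16 id) {
	Common::SeekableReadStream *res = vm->getResource(ID_TWAV, id);

	CueList cues; // optional out-parameter; omit it if the cues are not needed
	Audio::RewindableAudioStream *audio = makeMohawkWaveStream(res, &cues);

	if (!audio) // e.g. MP2 data when MAD support is not compiled in
		return;

	Audio::SoundHandle handle;
	mixer->playStream(Audio::Mixer::kSFXSoundType, &handle, audio);
}

Note that makeMohawkWaveStream() always takes ownership of the passed stream, so the caller does not delete the resource stream itself.
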
@@ -384,150 +528,6 @@ void Sound::resumeSLST() {
_vm->_mixer->pauseHandle(*_currentSLSTSounds[i].handle, false);
}
-Audio::RewindableAudioStream *Sound::makeMohawkWaveStream(Common::SeekableReadStream *stream, CueList *cueList) {
- uint32 tag = 0;
- ADPCMStatus adpcmStatus;
- DataChunk dataChunk;
- uint32 dataSize = 0;
-
- memset(&dataChunk, 0, sizeof(DataChunk));
-
- if (stream->readUint32BE() != ID_MHWK) // MHWK tag again
- error ("Could not find tag 'MHWK'");
-
- stream->readUint32BE(); // Skip size
-
- if (stream->readUint32BE() != ID_WAVE)
- error ("Could not find tag 'WAVE'");
-
- while (!dataChunk.audioData) {
- tag = stream->readUint32BE();
-
- switch (tag) {
- case ID_ADPC:
- debug(2, "Found Tag ADPC");
- // ADPCM Sound Only
- //
- // This is useful for seeking in the stream, and is actually quite brilliant
- // considering some of the other things Broderbund did with the engine.
- // Only Riven and CSTime are known to use ADPCM audio and only CSTime
- // actually requires this for seeking. On the other hand, it may be interesting
- // to look at that one Riven sample that uses the cue points.
- //
- // Basically, the sample frame from the cue list is looked up here, and the
- // starting sample and step index are set to the values stored for that frame.
- // Quite an elegant/efficient system, really.
-
- adpcmStatus.size = stream->readUint32BE();
- adpcmStatus.itemCount = stream->readUint16BE();
- adpcmStatus.channels = stream->readUint16BE();
- adpcmStatus.statusItems = new ADPCMStatus::StatusItem[adpcmStatus.itemCount];
-
- assert(adpcmStatus.channels <= 2);
-
- for (uint16 i = 0; i < adpcmStatus.itemCount; i++) {
- adpcmStatus.statusItems[i].sampleFrame = stream->readUint32BE();
-
- for (uint16 j = 0; j < adpcmStatus.channels; j++) {
- adpcmStatus.statusItems[i].channelStatus[j].last = stream->readSint16BE();
- adpcmStatus.statusItems[i].channelStatus[j].stepIndex = stream->readUint16BE();
- }
- }
-
- // TODO: Actually use this chunk. For now, just delete the status items...
- delete[] adpcmStatus.statusItems;
- break;
- case ID_CUE:
- debug(2, "Found Tag Cue#");
- // Cues are used for animation sync. There are a couple in Myst and
- // Riven, but they are not used there at all.
-
- if (!cueList) {
- uint32 size = stream->readUint32BE();
- stream->skip(size);
- break;
- }
-
- cueList->size = stream->readUint32BE();
- cueList->pointCount = stream->readUint16BE();
-
- if (cueList->pointCount == 0)
- debug(2, "Cue# chunk found with no points!");
- else
- debug(2, "Cue# chunk found with %d point(s)!", cueList->pointCount);
-
- cueList->points.resize(cueList->pointCount);
- for (uint16 i = 0; i < cueList->pointCount; i++) {
- cueList->points[i].sampleFrame = stream->readUint32BE();
-
- byte nameLength = stream->readByte();
- cueList->points[i].name.clear();
- for (byte j = 0; j < nameLength; j++)
- cueList->points[i].name += stream->readByte();
-
- // Realign to an even boundary
- if (!(nameLength & 1))
- stream->readByte();
-
- debug (3, "Cue# chunk point %d (frame %d): %s", i, cueList->points[i].sampleFrame, cueList->points[i].name.c_str());
- }
- break;
- case ID_DATA:
- debug(2, "Found Tag DATA");
- // We subtract 20 from the chunk size, since 20 bytes is the total size
- // of the header fields read below
- dataSize = stream->readUint32BE() - 20;
- dataChunk.sampleRate = stream->readUint16BE();
- dataChunk.sampleCount = stream->readUint32BE();
- dataChunk.bitsPerSample = stream->readByte();
- dataChunk.channels = stream->readByte();
- dataChunk.encoding = stream->readUint16BE();
- dataChunk.loopCount = stream->readUint16BE();
- dataChunk.loopStart = stream->readUint32BE();
- dataChunk.loopEnd = stream->readUint32BE();
-
- // NOTE: We currently ignore all of the loop parameters here. Myst uses the
- // loopCount variable, but loopStart is always 0 and loopEnd is always the
- // size of the sample. Myst ME uses standard WAVE files instead of the Mohawk
- // Sound format, so none of this metadata is present and whether or not to
- // loop has to be specified elsewhere.
-
- dataChunk.audioData = stream->readStream(dataSize);
- break;
- default:
- error ("Unknown tag found in 'tWAV' chunk -- '%s'", tag2str(tag));
- }
- }
-
- // makeMohawkWaveStream always takes control of the original stream
- delete stream;
-
- // The sound in Myst uses raw unsigned 8-bit data
- // The sound in the CD version of Riven is encoded in Intel DVI ADPCM
- // The sound in the DVD version of Riven is encoded in MPEG-2 Layer II or Intel DVI ADPCM
- if (dataChunk.encoding == kCodecRaw) {
- byte flags = Audio::FLAG_UNSIGNED;
-
- if (dataChunk.channels == 2)
- flags |= Audio::FLAG_STEREO;
-
- return Audio::makeRawStream(dataChunk.audioData, dataChunk.sampleRate, flags);
- } else if (dataChunk.encoding == kCodecADPCM) {
- uint32 blockAlign = dataChunk.channels * dataChunk.bitsPerSample / 8;
- return Audio::makeADPCMStream(dataChunk.audioData, DisposeAfterUse::YES, dataSize, Audio::kADPCMDVI, dataChunk.sampleRate, dataChunk.channels, blockAlign);
- } else if (dataChunk.encoding == kCodecMPEG2) {
-#ifdef USE_MAD
- return Audio::makeMP3Stream(dataChunk.audioData, DisposeAfterUse::YES);
-#else
- warning ("MAD library not included - unable to play MP2 audio");
-#endif
- } else {
- error ("Unknown Mohawk WAVE encoding %d", dataChunk.encoding);
- }
-
- return NULL;
-}
-
Audio::RewindableAudioStream *Sound::makeLivingBooksWaveStream_v1(Common::SeekableReadStream *stream) {
uint16 header = stream->readUint16BE();
uint16 rate = 0;
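
The ADPC chunk above is parsed and then immediately discarded (the TODO in both hunks). If it were ever used for seeking, the lookup described in the comment would amount to something like the sketch below; this is an illustration of the idea only, not engine code, and it assumes the status items are stored in ascending sampleFrame order:

// Sketch only: find the ADPC status entry at or before targetFrame. Its
// channelStatus[ch].last / .stepIndex values would then seed the IMA ADPCM
// decoder for each channel when resuming playback at that position.
static const ADPCMStatus::StatusItem *findResumePoint(const ADPCMStatus &status, uint32 targetFrame) {
	const ADPCMStatus::StatusItem *best = nullptr;

	for (uint16 i = 0; i < status.itemCount; i++)
		if (status.statusItems[i].sampleFrame <= targetFrame)
			best = &status.statusItems[i];

	return best; // nullptr if targetFrame precedes the first entry
}
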
diff --git a/engines/mohawk/sound.h b/engines/mohawk/sound.h
index f09706e155..a02c0d1ccb 100644
--- a/engines/mohawk/sound.h
+++ b/engines/mohawk/sound.h
@@ -116,6 +116,8 @@ struct DataChunk {
Common::SeekableReadStream *audioData;
};
+Audio::RewindableAudioStream *makeMohawkWaveStream(Common::SeekableReadStream *stream, CueList *cueList = nullptr);
+
class MohawkEngine;
class Sound {
@@ -158,7 +160,6 @@ private:
MidiParser *_midiParser;
byte *_midiData;
- static Audio::RewindableAudioStream *makeMohawkWaveStream(Common::SeekableReadStream *stream, CueList *cueList = NULL);
static Audio::RewindableAudioStream *makeLivingBooksWaveStream_v1(Common::SeekableReadStream *stream);
void initMidi();
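
Cue points come back to the caller through the CueList out-parameter, but as the comments note, the games barely use them. If a caller did want them for animation sync, converting a point's sampleFrame into a playback offset is straightforward; the helper below is a hedged illustration (the sample rate would come from the Data chunk of the same tWAV resource), not code from this commit:

// Illustration only: millisecond offset of cue point 'index', given the
// sound's sample rate. The division is split to avoid 32-bit overflow on
// long sounds.
static uint32 cuePointToMillis(const CueList &cues, uint16 index, uint16 sampleRate) {
	assert(index < cues.pointCount && sampleRate != 0);

	uint32 frame = cues.points[index].sampleFrame;
	return (frame / sampleRate) * 1000 + (frame % sampleRate) * 1000 / sampleRate;
}
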