diff options
58 files changed, 4856 insertions, 3959 deletions
@@ -7,6 +7,7 @@ For a more comprehensive changelog of the latest experimental code, see:       supported for resolutions bigger than 640x400. The old chooser is still       available and used for games without thumbnail support. It is possible to       select the old one as default too. +   - Rewrote VideoDecoder subsystem.  1.5.0 (2012-07-27)   New Games: diff --git a/audio/audiostream.cpp b/audio/audiostream.cpp index 1c5c435359..6e185702f0 100644 --- a/audio/audiostream.cpp +++ b/audio/audiostream.cpp @@ -386,4 +386,42 @@ Timestamp convertTimeToStreamPos(const Timestamp &where, int rate, bool isStereo  	return Timestamp(result.secs(), result.numberOfFrames(), result.framerate());  } +/** + * An AudioStream wrapper that cuts off the amount of samples read after a + * given time length is reached. + */ +class LimitingAudioStream : public AudioStream { +public: +	LimitingAudioStream(AudioStream *parentStream, const Audio::Timestamp &length, DisposeAfterUse::Flag disposeAfterUse) : +			_parentStream(parentStream), _samplesRead(0), _disposeAfterUse(disposeAfterUse), +			_totalSamples(length.convertToFramerate(getRate()).totalNumberOfFrames() * getChannels()) {} + +	~LimitingAudioStream() { +		if (_disposeAfterUse == DisposeAfterUse::YES) +			delete _parentStream; +	} + +	int readBuffer(int16 *buffer, const int numSamples) { +		// Cap us off so we don't read past _totalSamples					 +		int samplesRead = _parentStream->readBuffer(buffer, MIN<int>(numSamples, _totalSamples - _samplesRead)); +		_samplesRead += samplesRead; +		return samplesRead; +	} + +	bool endOfData() const { return _parentStream->endOfData() || _samplesRead >= _totalSamples; } +	bool isStereo() const { return _parentStream->isStereo(); } +	int getRate() const { return _parentStream->getRate(); } + +private: +	int getChannels() const { return isStereo() ? 
2 : 1; }  + +	AudioStream *_parentStream; +	DisposeAfterUse::Flag _disposeAfterUse; +	uint32 _totalSamples, _samplesRead; +}; + +AudioStream *makeLimitingAudioStream(AudioStream *parentStream, const Timestamp &length, DisposeAfterUse::Flag disposeAfterUse) { +	return new LimitingAudioStream(parentStream, length, disposeAfterUse); +} +  } // End of namespace Audio diff --git a/audio/audiostream.h b/audio/audiostream.h index 801f13d9d9..d6d4a16280 100644 --- a/audio/audiostream.h +++ b/audio/audiostream.h @@ -356,6 +356,16 @@ QueuingAudioStream *makeQueuingAudioStream(int rate, bool stereo);   */  Timestamp convertTimeToStreamPos(const Timestamp &where, int rate, bool isStereo); +/** + * Factory function for an AudioStream wrapper that cuts off the amount of samples read after a + * given time length is reached. + * + * @param parentStream    The stream to limit + * @param length          The time length to limit the stream to + * @param disposeAfterUse Whether the parent stream object should be destroyed on destruction of the returned stream + */ +AudioStream *makeLimitingAudioStream(AudioStream *parentStream, const Timestamp &length, DisposeAfterUse::Flag disposeAfterUse = DisposeAfterUse::YES); +  } // End of namespace Audio  #endif diff --git a/audio/decoders/quicktime.cpp b/audio/decoders/quicktime.cpp index 8874a61c2e..5276cfc530 100644 --- a/audio/decoders/quicktime.cpp +++ b/audio/decoders/quicktime.cpp @@ -62,41 +62,6 @@ private:  };  /** - * An AudioStream wrapper that cuts off the amount of samples read after a - * given time length is reached. 
- */ -class LimitingAudioStream : public AudioStream { -public: -	LimitingAudioStream(AudioStream *parentStream, const Audio::Timestamp &length, -			DisposeAfterUse::Flag disposeAfterUse = DisposeAfterUse::YES) : -			_parentStream(parentStream), _samplesRead(0), _disposeAfterUse(disposeAfterUse), -			_totalSamples(length.convertToFramerate(getRate()).totalNumberOfFrames() * getChannels()) {} - -	~LimitingAudioStream() { -		if (_disposeAfterUse == DisposeAfterUse::YES) -			delete _parentStream; -	} - -	int readBuffer(int16 *buffer, const int numSamples) { -		// Cap us off so we don't read past _totalSamples					 -		int samplesRead = _parentStream->readBuffer(buffer, MIN<int>(numSamples, _totalSamples - _samplesRead)); -		_samplesRead += samplesRead; -		return samplesRead; -	} - -	bool endOfData() const { return _parentStream->endOfData() || _samplesRead >= _totalSamples; } -	bool isStereo() const { return _parentStream->isStereo(); } -	int getRate() const { return _parentStream->getRate(); } - -private: -	int getChannels() const { return isStereo() ? 2 : 1; }  - -	AudioStream *_parentStream; -	DisposeAfterUse::Flag _disposeAfterUse; -	uint32 _totalSamples, _samplesRead; -}; - -/**   * An AudioStream wrapper that forces audio to be played in mono.   * It currently just ignores the right channel if stereo.   
*/ @@ -263,7 +228,7 @@ void QuickTimeAudioDecoder::QuickTimeAudioTrack::queueAudio(const Timestamp &len  				_skipSamples = Timestamp();  			} -			queueStream(new LimitingAudioStream(new SilentAudioStream(getRate(), isStereo()), editLength), editLength); +			queueStream(makeLimitingAudioStream(new SilentAudioStream(getRate(), isStereo()), editLength), editLength);  			_curEdit++;  			enterNewEdit(nextEditTime);  		} else { @@ -289,7 +254,7 @@ void QuickTimeAudioDecoder::QuickTimeAudioTrack::queueAudio(const Timestamp &len  			// we move on to the next edit  			if (trackPosition >= nextEditTime || _curChunk >= _parentTrack->chunkCount) {  				chunkLength = nextEditTime.convertToFramerate(getRate()) - getCurrentTrackTime(); -				stream = new LimitingAudioStream(stream, chunkLength); +				stream = makeLimitingAudioStream(stream, chunkLength);  				_curEdit++;  				enterNewEdit(nextEditTime); diff --git a/common/endian.h b/common/endian.h index 394437ec67..759513efef 100644 --- a/common/endian.h +++ b/common/endian.h @@ -146,6 +146,12 @@   */  #define MKTAG(a0,a1,a2,a3) ((uint32)((a3) | ((a2) << 8) | ((a1) << 16) | ((a0) << 24))) +/** + * A wrapper macro used around two character constants, like 'wb', to + * ensure portability. Typical usage: MKTAG16('w','b'). + */ +#define MKTAG16(a0,a1) ((uint16)((a1) | ((a0) << 8))) +  // Functions for reading/writing native integers.  // They also transparently handle the need for alignment. 
diff --git a/common/quicktime.h b/common/quicktime.h index 974502d075..08ca35ad51 100644 --- a/common/quicktime.h +++ b/common/quicktime.h @@ -35,6 +35,7 @@  #include "common/scummsys.h"  #include "common/stream.h"  #include "common/rational.h" +#include "common/types.h"  namespace Common {  	class MacResManager; diff --git a/common/winexe_pe.cpp b/common/winexe_pe.cpp index 6c0f9c9962..b3c45ffe73 100644 --- a/common/winexe_pe.cpp +++ b/common/winexe_pe.cpp @@ -64,7 +64,7 @@ bool PEResources::loadFromEXE(SeekableReadStream *stream) {  	if (!stream)  		return false; -	if (stream->readUint16BE() != 'MZ') +	if (stream->readUint16BE() != MKTAG16('M', 'Z'))  		return false;  	stream->skip(58); diff --git a/engines/agos/animation.cpp b/engines/agos/animation.cpp index 10c01741ae..9176412e0e 100644 --- a/engines/agos/animation.cpp +++ b/engines/agos/animation.cpp @@ -260,9 +260,6 @@ bool MoviePlayerDXA::load() {  	debug(0, "Playing video %s", videoName.c_str());  	CursorMan.showMouse(false); - -	_firstFrameOffset = _fileStream->pos(); -  	return true;  } @@ -271,6 +268,10 @@ void MoviePlayerDXA::copyFrameToBuffer(byte *dst, uint x, uint y, uint pitch) {  	uint w = getWidth();  	const Graphics::Surface *surface = decodeNextFrame(); + +	if (!surface) +		return; +  	byte *src = (byte *)surface->pixels;  	dst += y * pitch + x; @@ -281,7 +282,7 @@ void MoviePlayerDXA::copyFrameToBuffer(byte *dst, uint x, uint y, uint pitch) {  	} while (--h);  	if (hasDirtyPalette()) -		setSystemPalette(); +		g_system->getPaletteManager()->setPalette(getPalette(), 0, 256);  }  void MoviePlayerDXA::playVideo() { @@ -302,34 +303,7 @@ void MoviePlayerDXA::stopVideo() {  }  void MoviePlayerDXA::startSound() { -	uint32 offset, size; - -	if (getSoundTag() == MKTAG('W','A','V','E')) { -		size = _fileStream->readUint32BE(); - -		if (_sequenceNum) { -			Common::File in; - -			_fileStream->seek(size, SEEK_CUR); - -			in.open("audio.wav"); -			if (!in.isOpen()) { -				error("Can't read offset file 
'audio.wav'"); -			} - -			in.seek(_sequenceNum * 8, SEEK_SET); -			offset = in.readUint32LE(); -			size = in.readUint32LE(); - -			in.seek(offset, SEEK_SET); -			_bgSoundStream = Audio::makeWAVStream(in.readStream(size), DisposeAfterUse::YES); -			in.close(); -		} else { -			_bgSoundStream = Audio::makeWAVStream(_fileStream->readStream(size), DisposeAfterUse::YES); -		} -	} else { -		_bgSoundStream = Audio::SeekableAudioStream::openStreamFile(baseName); -	} +	start();  	if (_bgSoundStream != NULL) {  		_vm->_mixer->stopHandle(_bgSound); @@ -344,8 +318,7 @@ void MoviePlayerDXA::nextFrame() {  	}  	if (_vm->_interactiveVideo == TYPE_LOOPING && endOfVideo()) { -		_fileStream->seek(_firstFrameOffset); -		_curFrame = -1; +		rewind();  		startSound();  	} @@ -374,13 +347,15 @@ bool MoviePlayerDXA::processFrame() {  	copyFrameToBuffer((byte *)screen->pixels, (_vm->_screenWidth - getWidth()) / 2, (_vm->_screenHeight - getHeight()) / 2, screen->pitch);  	_vm->_system->unlockScreen(); -	Common::Rational soundTime(_mixer->getSoundElapsedTime(_bgSound), 1000); -	if ((_bgSoundStream == NULL) || ((soundTime * getFrameRate()).toInt() / 1000 < getCurFrame() + 1)) { +	uint32 soundTime = _mixer->getSoundElapsedTime(_bgSound); +	uint32 nextFrameStartTime = ((Video::VideoDecoder::VideoTrack *)getTrack(0))->getNextFrameStartTime(); + +	if ((_bgSoundStream == NULL) || soundTime < nextFrameStartTime) {  		if (_bgSoundStream && _mixer->isSoundHandleActive(_bgSound)) { -			while (_mixer->isSoundHandleActive(_bgSound) && (soundTime * getFrameRate()).toInt() < getCurFrame()) { +			while (_mixer->isSoundHandleActive(_bgSound) && soundTime < nextFrameStartTime) {  				_vm->_system->delayMillis(10); -				soundTime = Common::Rational(_mixer->getSoundElapsedTime(_bgSound), 1000); +				soundTime = _mixer->getSoundElapsedTime(_bgSound);  			}  			// In case the background sound ends prematurely, update  			// _ticks so that we can still fall back on the no-sound @@ -399,14 +374,35 @@ bool 
MoviePlayerDXA::processFrame() {  	return false;  } -void MoviePlayerDXA::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(_bgSound)) -		g_system->getMixer()->setChannelVolume(_bgSound, getVolume()); -} +void MoviePlayerDXA::readSoundData(Common::SeekableReadStream *stream) { +	uint32 tag = stream->readUint32BE(); + +	if (tag == MKTAG('W','A','V','E')) { +		uint32 size = stream->readUint32BE(); + +		if (_sequenceNum) { +			Common::File in; + +			stream->skip(size); + +			in.open("audio.wav"); +			if (!in.isOpen()) { +				error("Can't read offset file 'audio.wav'"); +			} + +			in.seek(_sequenceNum * 8, SEEK_SET); +			uint32 offset = in.readUint32LE(); +			size = in.readUint32LE(); -void MoviePlayerDXA::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(_bgSound)) -		g_system->getMixer()->setChannelBalance(_bgSound, getBalance()); +			in.seek(offset, SEEK_SET); +			_bgSoundStream = Audio::makeWAVStream(in.readStream(size), DisposeAfterUse::YES); +			in.close(); +		} else { +			_bgSoundStream = Audio::makeWAVStream(stream->readStream(size), DisposeAfterUse::YES); +		} +	} else { +		_bgSoundStream = Audio::SeekableAudioStream::openStreamFile(baseName); +	}  }  /////////////////////////////////////////////////////////////////////////////// @@ -415,7 +411,7 @@ void MoviePlayerDXA::updateBalance() {  MoviePlayerSMK::MoviePlayerSMK(AGOSEngine_Feeble *vm, const char *name) -	: MoviePlayer(vm), SmackerDecoder(vm->_mixer) { +	: MoviePlayer(vm), SmackerDecoder() {  	debug(0, "Creating SMK cutscene player");  	memset(baseName, 0, sizeof(baseName)); @@ -435,8 +431,6 @@ bool MoviePlayerSMK::load() {  	CursorMan.showMouse(false); -	_firstFrameOffset = _fileStream->pos(); -  	return true;  } @@ -445,6 +439,10 @@ void MoviePlayerSMK::copyFrameToBuffer(byte *dst, uint x, uint y, uint pitch) {  	uint w = getWidth();  	const Graphics::Surface *surface = decodeNextFrame(); + +	if (!surface) +		return; +  	byte *src = (byte *)surface->pixels;  	dst += y * 
pitch + x; @@ -455,7 +453,7 @@ void MoviePlayerSMK::copyFrameToBuffer(byte *dst, uint x, uint y, uint pitch) {  	} while (--h);  	if (hasDirtyPalette()) -		setSystemPalette(); +		g_system->getPaletteManager()->setPalette(getPalette(), 0, 256);  }  void MoviePlayerSMK::playVideo() { @@ -468,6 +466,7 @@ void MoviePlayerSMK::stopVideo() {  }  void MoviePlayerSMK::startSound() { +	start();  }  void MoviePlayerSMK::handleNextFrame() { @@ -477,10 +476,8 @@ void MoviePlayerSMK::handleNextFrame() {  }  void MoviePlayerSMK::nextFrame() { -	if (_vm->_interactiveVideo == TYPE_LOOPING && endOfVideo()) { -		_fileStream->seek(_firstFrameOffset); -		_curFrame = -1; -	} +	if (_vm->_interactiveVideo == TYPE_LOOPING && endOfVideo()) +		rewind();  	if (!endOfVideo()) {  		decodeNextFrame(); @@ -503,7 +500,7 @@ bool MoviePlayerSMK::processFrame() {  	uint32 waitTime = getTimeToNextFrame(); -	if (!waitTime) { +	if (!waitTime && !endOfVideoTracks()) {  		warning("dropped frame %i", getCurFrame());  		return false;  	} diff --git a/engines/agos/animation.h b/engines/agos/animation.h index d1ff074b03..9e31fced6d 100644 --- a/engines/agos/animation.h +++ b/engines/agos/animation.h @@ -67,9 +67,6 @@ protected:  	virtual void handleNextFrame();  	virtual bool processFrame() = 0;  	virtual void startSound() {} - -protected: -	uint32 _firstFrameOffset;  };  class MoviePlayerDXA : public MoviePlayer, Video::DXADecoder { @@ -84,9 +81,7 @@ public:  	virtual void stopVideo();  protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); +	void readSoundData(Common::SeekableReadStream *stream);  private:  	void handleNextFrame(); diff --git a/engines/mohawk/myst_stacks/dni.cpp b/engines/mohawk/myst_stacks/dni.cpp index cae165ccf0..d103105c2d 100644 --- a/engines/mohawk/myst_stacks/dni.cpp +++ b/engines/mohawk/myst_stacks/dni.cpp @@ -109,7 +109,7 @@ void Dni::o_handPage(uint16 op, uint16 var, uint16 argc, uint16 *argv) {  		_vm->setMainCursor(kDefaultMystCursor);  		// Play movie 
end (atrus leaving) -		_vm->_video->setVideoBounds(atrus, Audio::Timestamp(0, 14813, 600), Audio::Timestamp(0xFFFFFFFF)); +		_vm->_video->setVideoBounds(atrus, Audio::Timestamp(0, 14813, 600), _vm->_video->getDuration(atrus));  		_vm->_video->setVideoLooping(atrus, false);  		_atrusLeft = true; diff --git a/engines/mohawk/video.cpp b/engines/mohawk/video.cpp index 18d609c513..0ed4f38b53 100644 --- a/engines/mohawk/video.cpp +++ b/engines/mohawk/video.cpp @@ -29,6 +29,7 @@  #include "common/textconsole.h"  #include "common/system.h" +#include "graphics/palette.h"  #include "graphics/surface.h"  #include "video/qt_decoder.h" @@ -43,13 +44,12 @@ void VideoEntry::clear() {  	loop = false;  	enabled = false;  	start = Audio::Timestamp(0, 1); -	end = Audio::Timestamp(0xFFFFFFFF, 1); // Largest possible, there is an endOfVideo() check anyway  	filename.clear();  	id = -1;  }  bool VideoEntry::endOfVideo() { -	return !video || video->endOfVideo() || video->getTime() >= (uint)end.msecs(); +	return !video || video->endOfVideo();  }  VideoManager::VideoManager(MohawkEngine* vm) : _vm(vm) { @@ -207,7 +207,7 @@ bool VideoManager::updateMovies() {  		// Remove any videos that are over  		if (_videoStreams[i].endOfVideo()) {  			if (_videoStreams[i].loop) { -				_videoStreams[i]->seekToTime(_videoStreams[i].start); +				_videoStreams[i]->seek(_videoStreams[i].start);  			} else {  				// Check the video time one last time before deleting it  				_vm->doVideoTimer(i, true); @@ -239,7 +239,7 @@ bool VideoManager::updateMovies() {  					frame = convertedFrame;  				} else if (pixelFormat.bytesPerPixel == 1 && _videoStreams[i]->hasDirtyPalette()) {  					// Set the palette when running in 8bpp mode only -					_videoStreams[i]->setSystemPalette(); +					_vm->_system->getPaletteManager()->setPalette(_videoStreams[i]->getPalette(), 0, 256);  				}  				// Clip the width/height to make sure we stay on the screen (Myst does this a few times) @@ -394,6 +394,8 @@ VideoHandle 
VideoManager::createVideoHandle(uint16 id, uint16 x, uint16 y, bool  	entry.loop = loop;  	entry.enabled = true; +	entry->start(); +  	// Search for any deleted videos so we can take a formerly used slot  	for (uint32 i = 0; i < _videoStreams.size(); i++)  		if (!_videoStreams[i].video) { @@ -430,6 +432,7 @@ VideoHandle VideoManager::createVideoHandle(const Common::String &filename, uint  	entry->loadStream(file);  	entry->setVolume(volume); +	entry->start();  	// Search for any deleted videos so we can take a formerly used slot  	for (uint32 i = 0; i < _videoStreams.size(); i++) @@ -492,7 +495,7 @@ uint32 VideoManager::getTime(VideoHandle handle) {  uint32 VideoManager::getDuration(VideoHandle handle) {  	assert(handle != NULL_VID_HANDLE); -	return _videoStreams[handle]->getDuration(); +	return _videoStreams[handle]->getDuration().msecs();  }  bool VideoManager::endOfVideo(VideoHandle handle) { @@ -511,14 +514,13 @@ bool VideoManager::isVideoPlaying() {  void VideoManager::setVideoBounds(VideoHandle handle, Audio::Timestamp start, Audio::Timestamp end) {  	assert(handle != NULL_VID_HANDLE);  	_videoStreams[handle].start = start; -	_videoStreams[handle].end = end; -	_videoStreams[handle]->seekToTime(start); +	_videoStreams[handle]->setEndTime(end); +	_videoStreams[handle]->seek(start);  }  void VideoManager::drawVideoFrame(VideoHandle handle, Audio::Timestamp time) {  	assert(handle != NULL_VID_HANDLE); -	_videoStreams[handle].end = Audio::Timestamp(0xffffffff, 1); -	_videoStreams[handle]->seekToTime(time); +	_videoStreams[handle]->seek(time);  	updateMovies();  	delete _videoStreams[handle].video;  	_videoStreams[handle].clear(); @@ -526,7 +528,7 @@ void VideoManager::drawVideoFrame(VideoHandle handle, Audio::Timestamp time) {  void VideoManager::seekToTime(VideoHandle handle, Audio::Timestamp time) {  	assert(handle != NULL_VID_HANDLE); -	_videoStreams[handle]->seekToTime(time); +	_videoStreams[handle]->seek(time);  }  void 
VideoManager::setVideoLooping(VideoHandle handle, bool loop) { diff --git a/engines/mohawk/video.h b/engines/mohawk/video.h index 98bcadfb53..9dddcde09b 100644 --- a/engines/mohawk/video.h +++ b/engines/mohawk/video.h @@ -45,19 +45,19 @@ struct MLSTRecord {  struct VideoEntry {  	// Playback variables -	Video::SeekableVideoDecoder *video; +	Video::VideoDecoder *video;  	uint16 x;  	uint16 y;  	bool loop;  	bool enabled; -	Audio::Timestamp start, end; +	Audio::Timestamp start;  	// Identification  	Common::String filename; // External video files  	int id;                  // Internal Mohawk files  	// Helper functions -	Video::SeekableVideoDecoder *operator->() const { assert(video); return video; } // TODO: Remove this eventually +	Video::VideoDecoder *operator->() const { assert(video); return video; } // TODO: Remove this eventually  	void clear();  	bool endOfVideo();  }; diff --git a/engines/saga/introproc_saga2.cpp b/engines/saga/introproc_saga2.cpp index b6470370af..260eca98e6 100644 --- a/engines/saga/introproc_saga2.cpp +++ b/engines/saga/introproc_saga2.cpp @@ -32,6 +32,7 @@  #include "common/keyboard.h"  #include "common/system.h"  #include "common/textconsole.h" +#include "graphics/palette.h"  #include "graphics/surface.h"  #include "video/smk_decoder.h" @@ -92,7 +93,7 @@ int Scene::FTA2EndProc(FTA2Endings whichEnding) {  }  void Scene::playMovie(const char *filename) { -	Video::SmackerDecoder *smkDecoder = new Video::SmackerDecoder(_vm->_mixer); +	Video::SmackerDecoder *smkDecoder = new Video::SmackerDecoder();  	if (!smkDecoder->loadFile(filename))  		return; @@ -101,6 +102,8 @@ void Scene::playMovie(const char *filename) {  	uint16 y = (g_system->getHeight() - smkDecoder->getHeight()) / 2;  	bool skipVideo = false; +	smkDecoder->start(); +  	while (!_vm->shouldQuit() && !smkDecoder->endOfVideo() && !skipVideo) {  		if (smkDecoder->needsUpdate()) {  			const Graphics::Surface *frame = smkDecoder->decodeNextFrame(); @@ -108,7 +111,7 @@ void 
Scene::playMovie(const char *filename) {  				_vm->_system->copyRectToScreen(frame->pixels, frame->pitch, x, y, frame->w, frame->h);  				if (smkDecoder->hasDirtyPalette()) -					smkDecoder->setSystemPalette(); +					_vm->_system->getPaletteManager()->setPalette(smkDecoder->getPalette(), 0, 256);  				_vm->_system->updateScreen();  			} diff --git a/engines/sci/console.cpp b/engines/sci/console.cpp index 564bbbbd79..1889d53480 100644 --- a/engines/sci/console.cpp +++ b/engines/sci/console.cpp @@ -250,20 +250,18 @@ void Console::postEnter() {  #endif  		if (_videoFile.hasSuffix(".seq")) { -			SeqDecoder *seqDecoder = new SeqDecoder(); -			seqDecoder->setFrameDelay(_videoFrameDelay); -			videoDecoder = seqDecoder; +			videoDecoder = new SEQDecoder(_videoFrameDelay);  #ifdef ENABLE_SCI32  		} else if (_videoFile.hasSuffix(".vmd")) { -			videoDecoder = new Video::VMDDecoder(g_system->getMixer()); +			videoDecoder = new Video::AdvancedVMDDecoder();  		} else if (_videoFile.hasSuffix(".rbt")) { -			videoDecoder = new RobotDecoder(g_system->getMixer(), _engine->getPlatform() == Common::kPlatformMacintosh); +			videoDecoder = new RobotDecoder(_engine->getPlatform() == Common::kPlatformMacintosh);  		} else if (_videoFile.hasSuffix(".duk")) {  			duckMode = true; -			videoDecoder = new Video::AviDecoder(g_system->getMixer()); +			videoDecoder = new Video::AVIDecoder();  #endif  		} else if (_videoFile.hasSuffix(".avi")) { -			videoDecoder = new Video::AviDecoder(g_system->getMixer()); +			videoDecoder = new Video::AVIDecoder();  		} else {  			warning("Unrecognized video type");  		} diff --git a/engines/sci/engine/kvideo.cpp b/engines/sci/engine/kvideo.cpp index cb2a763da9..6bf9aff2fe 100644 --- a/engines/sci/engine/kvideo.cpp +++ b/engines/sci/engine/kvideo.cpp @@ -50,6 +50,8 @@ void playVideo(Video::VideoDecoder *videoDecoder, VideoState videoState) {  	if (!videoDecoder)  		return; +	videoDecoder->start(); +  	byte *scaleBuffer = 0;  	byte bytesPerPixel = 
videoDecoder->getPixelFormat().bytesPerPixel;  	uint16 width = videoDecoder->getWidth(); @@ -162,9 +164,8 @@ reg_t kShowMovie(EngineState *s, int argc, reg_t *argv) {  		} else {  			// DOS SEQ  			// SEQ's are called with no subops, just the string and delay -			SeqDecoder *seqDecoder = new SeqDecoder(); -			seqDecoder->setFrameDelay(argv[1].toUint16()); // Time between frames in ticks -			videoDecoder = seqDecoder; +			// Time is specified as ticks +			videoDecoder = new SEQDecoder(argv[1].toUint16());  			if (!videoDecoder->loadFile(filename)) {  				warning("Failed to open movie file %s", filename.c_str()); @@ -190,7 +191,7 @@ reg_t kShowMovie(EngineState *s, int argc, reg_t *argv) {  		switch (argv[0].toUint16()) {  		case 0: {  			Common::String filename = s->_segMan->getString(argv[1]); -			videoDecoder = new Video::AviDecoder(g_system->getMixer()); +			videoDecoder = new Video::AVIDecoder();  			if (filename.equalsIgnoreCase("gk2a.avi")) {  				// HACK: Switch to 16bpp graphics for Indeo3. 
@@ -252,6 +253,7 @@ reg_t kRobot(EngineState *s, int argc, reg_t *argv) {  		int16 y = argv[5].toUint16();  		warning("kRobot(init), id %d, obj %04x:%04x, flag %d, x=%d, y=%d", id, PRINT_REG(obj), flag, x, y);  		g_sci->_robotDecoder->load(id); +		g_sci->_robotDecoder->start();  		g_sci->_robotDecoder->setPos(x, y);  		}  		break; @@ -267,13 +269,13 @@ reg_t kRobot(EngineState *s, int argc, reg_t *argv) {  		warning("kRobot(%d)", subop);  		break;  	case 8: // sync -		//if (false) {	// debug: automatically skip all robot videos -		if ((uint32)g_sci->_robotDecoder->getCurFrame() !=  g_sci->_robotDecoder->getFrameCount() - 1) { -			writeSelector(s->_segMan, argv[1], SELECTOR(signal), NULL_REG); -		} else { +		//if (true) {	// debug: automatically skip all robot videos +		if (g_sci->_robotDecoder->endOfVideo()) {  			g_sci->_robotDecoder->close();  			// Signal the engine scripts that the video is done  			writeSelector(s->_segMan, argv[1], SELECTOR(signal), SIGNAL_REG); +		} else { +			writeSelector(s->_segMan, argv[1], SELECTOR(signal), NULL_REG);	  		}  		break;  	default: @@ -348,7 +350,7 @@ reg_t kPlayVMD(EngineState *s, int argc, reg_t *argv) {  		break;  	}  	case 6:	// Play -		videoDecoder = new Video::VMDDecoder(g_system->getMixer()); +		videoDecoder = new Video::AdvancedVMDDecoder();  		if (s->_videoState.fileName.empty()) {  			// Happens in Lighthouse @@ -406,7 +408,7 @@ reg_t kPlayDuck(EngineState *s, int argc, reg_t *argv) {  		s->_videoState.reset();  		s->_videoState.fileName = Common::String::format("%d.duk", argv[1].toUint16()); -		videoDecoder = new Video::AviDecoder(g_system->getMixer()); +		videoDecoder = new Video::AVIDecoder();  		if (!videoDecoder->loadFile(s->_videoState.fileName)) {  			warning("Could not open Duck %s", s->_videoState.fileName.c_str()); diff --git a/engines/sci/graphics/frameout.cpp b/engines/sci/graphics/frameout.cpp index 6628247127..968014c032 100644 --- a/engines/sci/graphics/frameout.cpp +++ 
b/engines/sci/graphics/frameout.cpp @@ -28,6 +28,7 @@  #include "common/system.h"  #include "common/textconsole.h"  #include "engines/engine.h" +#include "graphics/palette.h"  #include "graphics/surface.h"  #include "sci/sci.h" @@ -488,7 +489,7 @@ void GfxFrameout::showVideo() {  	uint16 y = videoDecoder->getPos().y;  	if (videoDecoder->hasDirtyPalette()) -		videoDecoder->setSystemPalette(); +		g_system->getPaletteManager()->setPalette(videoDecoder->getPalette(), 0, 256);  	while (!g_engine->shouldQuit() && !videoDecoder->endOfVideo() && !skipVideo) {  		if (videoDecoder->needsUpdate()) { @@ -497,7 +498,7 @@ void GfxFrameout::showVideo() {  				g_system->copyRectToScreen(frame->pixels, frame->pitch, x, y, frame->w, frame->h);  				if (videoDecoder->hasDirtyPalette()) -					videoDecoder->setSystemPalette(); +					g_system->getPaletteManager()->setPalette(videoDecoder->getPalette(), 0, 256);  				g_system->updateScreen();  			} diff --git a/engines/sci/sci.cpp b/engines/sci/sci.cpp index d43a9d06fc..42ae00b525 100644 --- a/engines/sci/sci.cpp +++ b/engines/sci/sci.cpp @@ -632,7 +632,7 @@ void SciEngine::initGraphics() {  		_gfxPaint = _gfxPaint32;  		_gfxText32 = new GfxText32(_gamestate->_segMan, _gfxCache, _gfxScreen);  		_gfxControls32 = new GfxControls32(_gamestate->_segMan, _gfxCache, _gfxScreen, _gfxText32); -		_robotDecoder = new RobotDecoder(g_system->getMixer(), getPlatform() == Common::kPlatformMacintosh); +		_robotDecoder = new RobotDecoder(getPlatform() == Common::kPlatformMacintosh);  		_gfxFrameout = new GfxFrameout(_gamestate->_segMan, _resMan, _gfxCoordAdjuster, _gfxCache, _gfxScreen, _gfxPalette, _gfxPaint32);  	} else {  #endif diff --git a/engines/sci/video/robot_decoder.cpp b/engines/sci/video/robot_decoder.cpp index ebcfac6054..608c77136f 100644 --- a/engines/sci/video/robot_decoder.cpp +++ b/engines/sci/video/robot_decoder.cpp @@ -22,11 +22,13 @@  #include "common/archive.h"  #include "common/stream.h" +#include "common/substream.h"  #include 
"common/system.h"  #include "common/textconsole.h"  #include "common/util.h"  #include "graphics/surface.h" +#include "audio/audiostream.h"  #include "audio/decoders/raw.h"  #include "sci/resource.h" @@ -63,57 +65,26 @@ namespace Sci {  // our graphics engine, it looks just like a part of the room. A RBT can move  // around the screen and go behind other objects. (...) -#ifdef ENABLE_SCI32 - -enum robotPalTypes { +enum RobotPalTypes {  	kRobotPalVariable = 0,  	kRobotPalConstant = 1  }; -RobotDecoder::RobotDecoder(Audio::Mixer *mixer, bool isBigEndian) { -	_surface = 0; -	_width = 0; -	_height = 0; +RobotDecoder::RobotDecoder(bool isBigEndian) {  	_fileStream = 0; -	_audioStream = 0; -	_dirtyPalette = false;  	_pos = Common::Point(0, 0); -	_mixer = mixer;  	_isBigEndian = isBigEndian; +	_frameTotalSize = 0;  }  RobotDecoder::~RobotDecoder() {  	close();  } -bool RobotDecoder::load(GuiResourceId id) { -	// TODO: RAMA's robot 1003 cannot be played (shown at the menu screen) -  -	// its drawn at odd coordinates. SV can't play it either (along with some -	// others), so it must be some new functionality added in RAMA's robot -	// videos. Skip it for now. 
-	if (g_sci->getGameId() == GID_RAMA && id == 1003) -		return false; -	 -	// TODO: The robot video in the Lighthouse demo gets stuck -	if (g_sci->getGameId() == GID_LIGHTHOUSE && id == 16) -		return false; - -	Common::String fileName = Common::String::format("%d.rbt", id); -	Common::SeekableReadStream *stream = SearchMan.createReadStreamForMember(fileName); - -	if (!stream) { -		warning("Unable to open robot file %s", fileName.c_str()); -		return false; -	} - -	return loadStream(stream); -} -  bool RobotDecoder::loadStream(Common::SeekableReadStream *stream) {  	close();  	_fileStream = new Common::SeekableSubReadStreamEndian(stream, 0, stream->size(), _isBigEndian, DisposeAfterUse::YES); -	_surface = new Graphics::Surface();  	readHeaderChunk(); @@ -125,131 +96,60 @@ bool RobotDecoder::loadStream(Common::SeekableReadStream *stream) {  	if (_header.version < 4 || _header.version > 6)  		error("Unknown robot version: %d", _header.version); -	if (_header.hasSound) { -		_audioStream = Audio::makeQueuingAudioStream(11025, false); -		_mixer->playStream(Audio::Mixer::kMusicSoundType, &_audioHandle, _audioStream, -1, getVolume(), getBalance()); -	} +	RobotVideoTrack *videoTrack = new RobotVideoTrack(_header.frameCount); +	addTrack(videoTrack); -	readPaletteChunk(_header.paletteDataSize); -	readFrameSizesChunk(); -	calculateVideoDimensions(); -	_surface->create(_width, _height, Graphics::PixelFormat::createFormatCLUT8()); +	if (_header.hasSound) +		addTrack(new RobotAudioTrack()); +	videoTrack->readPaletteChunk(_fileStream, _header.paletteDataSize); +	readFrameSizesChunk(); +	videoTrack->calculateVideoDimensions(_fileStream, _frameTotalSize);  	return true;  } -void RobotDecoder::readHeaderChunk() { -	// Header (60 bytes) -	_fileStream->skip(6); -	_header.version = _fileStream->readUint16(); -	_header.audioChunkSize = _fileStream->readUint16(); -	_header.audioSilenceSize = _fileStream->readUint16(); -	_fileStream->skip(2); -	_header.frameCount = _fileStream->readUint16(); 
-	_header.paletteDataSize = _fileStream->readUint16(); -	_header.unkChunkDataSize = _fileStream->readUint16(); -	_fileStream->skip(5); -	_header.hasSound = _fileStream->readByte(); -	_fileStream->skip(34); - -	// Some videos (e.g. robot 1305 in Phantasmagoria and -	// robot 184 in Lighthouse) have an unknown chunk before -	// the palette chunk (probably used for sound preloading). -	// Skip it here. -	if (_header.unkChunkDataSize) -		_fileStream->skip(_header.unkChunkDataSize); -} - -void RobotDecoder::readPaletteChunk(uint16 chunkSize) { -	byte *paletteData = new byte[chunkSize]; -	_fileStream->read(paletteData, chunkSize); - -	// SCI1.1 palette -	byte palFormat = paletteData[32]; -	uint16 palColorStart = paletteData[25]; -	uint16 palColorCount = READ_SCI11ENDIAN_UINT16(paletteData + 29); +bool RobotDecoder::load(GuiResourceId id) { +	// TODO: RAMA's robot 1003 cannot be played (shown at the menu screen) -  +	// its drawn at odd coordinates. SV can't play it either (along with some +	// others), so it must be some new functionality added in RAMA's robot +	// videos. Skip it for now. 
+	if (g_sci->getGameId() == GID_RAMA && id == 1003) +		return false; +	 +	// TODO: The robot video in the Lighthouse demo gets stuck +	if (g_sci->getGameId() == GID_LIGHTHOUSE && id == 16) +		return false; -	int palOffset = 37; -	memset(_palette, 0, 256 * 3); +	Common::String fileName = Common::String::format("%d.rbt", id); +	Common::SeekableReadStream *stream = SearchMan.createReadStreamForMember(fileName); -	for (uint16 colorNo = palColorStart; colorNo < palColorStart + palColorCount; colorNo++) { -		if (palFormat == kRobotPalVariable) -			palOffset++; -		_palette[colorNo * 3 + 0] = paletteData[palOffset++]; -		_palette[colorNo * 3 + 1] = paletteData[palOffset++]; -		_palette[colorNo * 3 + 2] = paletteData[palOffset++]; +	if (!stream) { +		warning("Unable to open robot file %s", fileName.c_str()); +		return false;  	} -	_dirtyPalette = true; -	delete[] paletteData; +	return loadStream(stream);  } +void RobotDecoder::close() { +	VideoDecoder::close(); -void RobotDecoder::readFrameSizesChunk() { -	// The robot video file contains 2 tables, with one entry for each frame: -	// - A table containing the size of the image in each video frame -	// - A table containing the total size of each video frame. -	// In v5 robots, the tables contain 16-bit integers, whereas in v6 robots, -	// they contain 32-bit integers. - -	_frameTotalSize = new uint32[_header.frameCount]; - -	// TODO: The table reading code can probably be removed once the -	// audio chunk size is figured out (check the TODO inside processNextFrame()) -#if 0 -	// We don't need any of the two tables to play the video, so we ignore -	// both of them. -	uint16 wordSize = _header.version == 6 ? 
4 : 2; -	_fileStream->skip(_header.frameCount * wordSize * 2); -#else -	switch (_header.version) { -	case 4: -	case 5:		// sizes are 16-bit integers -		// Skip table with frame image sizes, as we don't need it -		_fileStream->skip(_header.frameCount * 2); -		for (int i = 0; i < _header.frameCount; ++i) -			_frameTotalSize[i] = _fileStream->readUint16(); -		break; -	case 6:		// sizes are 32-bit integers -		// Skip table with frame image sizes, as we don't need it -		_fileStream->skip(_header.frameCount * 4); -		for (int i = 0; i < _header.frameCount; ++i) -			_frameTotalSize[i] = _fileStream->readUint32(); -		break; -	default: -		error("Can't yet handle index table for robot version %d", _header.version); -	} -#endif - -	// 2 more unknown tables -	_fileStream->skip(1024 + 512); +	delete _fileStream; +	_fileStream = 0; -	// Pad to nearest 2 kilobytes -	uint32 curPos = _fileStream->pos(); -	if (curPos & 0x7ff) -		_fileStream->seek((curPos & ~0x7ff) + 2048); +	delete[] _frameTotalSize; +	_frameTotalSize = 0;  } -void RobotDecoder::calculateVideoDimensions() { -	// This is an O(n) operation, as each frame has a different size. -	// We need to know the actual frame size to have a constant video size. 
-	uint32 pos = _fileStream->pos(); - -	for (uint32 curFrame = 0; curFrame < _header.frameCount; curFrame++) { -		_fileStream->skip(4); -		uint16 frameWidth = _fileStream->readUint16(); -		uint16 frameHeight = _fileStream->readUint16(); -		if (frameWidth > _width) -			_width = frameWidth; -		if (frameHeight > _height) -			_height = frameHeight; -		_fileStream->skip(_frameTotalSize[curFrame] - 8); -	} +void RobotDecoder::readNextPacket() { +	// Get our track +	RobotVideoTrack *videoTrack = (RobotVideoTrack *)getTrack(0); +	videoTrack->increaseCurFrame(); +	Graphics::Surface *surface = videoTrack->getSurface(); -	_fileStream->seek(pos); -} +	if (videoTrack->endOfTrack()) +		return; -const Graphics::Surface *RobotDecoder::decodeNextFrame() {  	// Read frame image header (24 bytes)  	_fileStream->skip(3);  	byte frameScale = _fileStream->readByte(); @@ -258,23 +158,28 @@ const Graphics::Surface *RobotDecoder::decodeNextFrame() {  	_fileStream->skip(4); // unknown, almost always 0  	uint16 frameX = _fileStream->readUint16();  	uint16 frameY = _fileStream->readUint16(); +  	// TODO: In v4 robot files, frameX and frameY have a different meaning.  	// Set them both to 0 for v4 for now, so that robots in PQ:SWAT show up  	// correctly.  	if (_header.version == 4)  		frameX = frameY = 0; +  	uint16 compressedSize = _fileStream->readUint16();  	uint16 frameFragments = _fileStream->readUint16();  	_fileStream->skip(4); // unknown  	uint32 decompressedSize = frameWidth * frameHeight * frameScale / 100; +  	// FIXME: A frame's height + position can go off limits... why? With the  	// following, we cut the contents to fit the frame -	uint16 scaledHeight = CLIP<uint16>(decompressedSize / frameWidth, 0, _height - frameY); +	uint16 scaledHeight = CLIP<uint16>(decompressedSize / frameWidth, 0, surface->h - frameY); +  	// FIXME: Same goes for the frame's width + position. In this case, we  	// modify the position to fit the contents on screen. 
-	if (frameWidth + frameX > _width) -		frameX = _width - frameWidth; -	assert (frameWidth + frameX <= _width && scaledHeight + frameY <= _height); +	if (frameWidth + frameX > surface->w) +		frameX = surface->w - frameWidth; + +	assert(frameWidth + frameX <= surface->w && scaledHeight + frameY <= surface->h);  	DecompressorLZS lzs;  	byte *decompressedFrame = new byte[decompressedSize]; @@ -305,24 +210,23 @@ const Graphics::Surface *RobotDecoder::decodeNextFrame() {  	// Copy over the decompressed frame  	byte *inFrame = decompressedFrame; -	byte *outFrame = (byte *)_surface->pixels; +	byte *outFrame = (byte *)surface->pixels;  	// Black out the surface -	memset(outFrame, 0, _width * _height); +	memset(outFrame, 0, surface->w * surface->h);  	// Move to the correct y coordinate -	outFrame += _width * frameY; +	outFrame += surface->w * frameY;  	for (uint16 y = 0; y < scaledHeight; y++) {  		memcpy(outFrame + frameX, inFrame, frameWidth);  		inFrame += frameWidth; -		outFrame += _width; +		outFrame += surface->w;  	}  	delete[] decompressedFrame; -	// +1 because we start with frame number -1 -	uint32 audioChunkSize = _frameTotalSize[_curFrame + 1] - (24 + compressedSize); +	uint32 audioChunkSize = _frameTotalSize[videoTrack->getCurFrame()] - (24 + compressedSize);  // TODO: The audio chunk size below is usually correct, but there are some  // exceptions (e.g. 
robot 4902 in Phantasmagoria, towards its end) @@ -337,51 +241,166 @@ const Graphics::Surface *RobotDecoder::decodeNextFrame() {  	// Queue the next audio frame  	// FIXME: For some reason, there are audio hiccups/gaps  	if (_header.hasSound) { -		_fileStream->skip(8);	// header -		_audioStream->queueBuffer(g_sci->_audio->getDecodedRobotAudioFrame(_fileStream, audioChunkSize - 8), -									(audioChunkSize - 8) * 2, DisposeAfterUse::NO, -									Audio::FLAG_16BITS | Audio::FLAG_LITTLE_ENDIAN); +		RobotAudioTrack *audioTrack = (RobotAudioTrack *)getTrack(1); +		_fileStream->skip(8); // header +		audioChunkSize -= 8; +		audioTrack->queueBuffer(g_sci->_audio->getDecodedRobotAudioFrame(_fileStream, audioChunkSize), audioChunkSize * 2);  	} else {  		_fileStream->skip(audioChunkSize); -	} - -	if (_curFrame == -1) -		_startTime = g_system->getMillis(); +	}	 +} -	_curFrame++; +void RobotDecoder::readHeaderChunk() { +	// Header (60 bytes) +	_fileStream->skip(6); +	_header.version = _fileStream->readUint16(); +	_header.audioChunkSize = _fileStream->readUint16(); +	_header.audioSilenceSize = _fileStream->readUint16(); +	_fileStream->skip(2); +	_header.frameCount = _fileStream->readUint16(); +	_header.paletteDataSize = _fileStream->readUint16(); +	_header.unkChunkDataSize = _fileStream->readUint16(); +	_fileStream->skip(5); +	_header.hasSound = _fileStream->readByte(); +	_fileStream->skip(34); -	return _surface; +	// Some videos (e.g. robot 1305 in Phantasmagoria and +	// robot 184 in Lighthouse) have an unknown chunk before +	// the palette chunk (probably used for sound preloading). +	// Skip it here. 
+	if (_header.unkChunkDataSize) +		_fileStream->skip(_header.unkChunkDataSize);  } -void RobotDecoder::close() { -	if (!_fileStream) -		return; +void RobotDecoder::readFrameSizesChunk() { +	// The robot video file contains 2 tables, with one entry for each frame: +	// - A table containing the size of the image in each video frame +	// - A table containing the total size of each video frame. +	// In v5 robots, the tables contain 16-bit integers, whereas in v6 robots, +	// they contain 32-bit integers. -	delete _fileStream; -	_fileStream = 0; +	_frameTotalSize = new uint32[_header.frameCount]; +	// TODO: The table reading code can probably be removed once the +	// audio chunk size is figured out (check the TODO inside processNextFrame()) +#if 0 +	// We don't need any of the two tables to play the video, so we ignore +	// both of them. +	uint16 wordSize = _header.version == 6 ? 4 : 2; +	_fileStream->skip(_header.frameCount * wordSize * 2); +#else +	switch (_header.version) { +	case 4: +	case 5:		// sizes are 16-bit integers +		// Skip table with frame image sizes, as we don't need it +		_fileStream->skip(_header.frameCount * 2); +		for (int i = 0; i < _header.frameCount; ++i) +			_frameTotalSize[i] = _fileStream->readUint16(); +		break; +	case 6:		// sizes are 32-bit integers +		// Skip table with frame image sizes, as we don't need it +		_fileStream->skip(_header.frameCount * 4); +		for (int i = 0; i < _header.frameCount; ++i) +			_frameTotalSize[i] = _fileStream->readUint32(); +		break; +	default: +		error("Can't yet handle index table for robot version %d", _header.version); +	} +#endif + +	// 2 more unknown tables +	_fileStream->skip(1024 + 512); + +	// Pad to nearest 2 kilobytes +	uint32 curPos = _fileStream->pos(); +	if (curPos & 0x7ff) +		_fileStream->seek((curPos & ~0x7ff) + 2048); +} + +RobotDecoder::RobotVideoTrack::RobotVideoTrack(int frameCount) : _frameCount(frameCount) { +	_surface = new Graphics::Surface(); +	_curFrame = -1; +	_dirtyPalette = false; +} 
+ +RobotDecoder::RobotVideoTrack::~RobotVideoTrack() {  	_surface->free();  	delete _surface; -	_surface = 0; +} -	if (_header.hasSound) { -		_mixer->stopHandle(_audioHandle); -		//delete _audioStream; _audioStream = 0; +uint16 RobotDecoder::RobotVideoTrack::getWidth() const { +	return _surface->w; +} + +uint16 RobotDecoder::RobotVideoTrack::getHeight() const { +	return _surface->h; +} + +Graphics::PixelFormat RobotDecoder::RobotVideoTrack::getPixelFormat() const { +	return _surface->format; +} + +void RobotDecoder::RobotVideoTrack::readPaletteChunk(Common::SeekableSubReadStreamEndian *stream, uint16 chunkSize) { +	byte *paletteData = new byte[chunkSize]; +	stream->read(paletteData, chunkSize); + +	// SCI1.1 palette +	byte palFormat = paletteData[32]; +	uint16 palColorStart = paletteData[25]; +	uint16 palColorCount = READ_SCI11ENDIAN_UINT16(paletteData + 29); + +	int palOffset = 37; +	memset(_palette, 0, 256 * 3); + +	for (uint16 colorNo = palColorStart; colorNo < palColorStart + palColorCount; colorNo++) { +		if (palFormat == kRobotPalVariable) +			palOffset++; +		_palette[colorNo * 3 + 0] = paletteData[palOffset++]; +		_palette[colorNo * 3 + 1] = paletteData[palOffset++]; +		_palette[colorNo * 3 + 2] = paletteData[palOffset++];  	} -	reset(); +	_dirtyPalette = true; +	delete[] paletteData;  } -void RobotDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelVolume(_audioHandle, getVolume()); +void RobotDecoder::RobotVideoTrack::calculateVideoDimensions(Common::SeekableSubReadStreamEndian *stream, uint32 *frameSizes) { +	// This is an O(n) operation, as each frame has a different size. +	// We need to know the actual frame size to have a constant video size. 
+	uint32 pos = stream->pos(); + +	uint16 width = 0, height = 0; + +	for (int curFrame = 0; curFrame < _frameCount; curFrame++) { +		stream->skip(4); +		uint16 frameWidth = stream->readUint16(); +		uint16 frameHeight = stream->readUint16(); +		if (frameWidth > width) +			width = frameWidth; +		if (frameHeight > height) +			height = frameHeight; +		stream->skip(frameSizes[curFrame] - 8); +	} + +	stream->seek(pos); + +	_surface->create(width, height, Graphics::PixelFormat::createFormatCLUT8());  } -void RobotDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelBalance(_audioHandle, getBalance()); +RobotDecoder::RobotAudioTrack::RobotAudioTrack() { +	_audioStream = Audio::makeQueuingAudioStream(11025, false);  } -#endif +RobotDecoder::RobotAudioTrack::~RobotAudioTrack() { +	delete _audioStream; +} + +void RobotDecoder::RobotAudioTrack::queueBuffer(byte *buffer, int size) { +	_audioStream->queueBuffer(buffer, size, DisposeAfterUse::YES, Audio::FLAG_16BITS | Audio::FLAG_LITTLE_ENDIAN); +} + +Audio::AudioStream *RobotDecoder::RobotAudioTrack::getAudioStream() const { +	return _audioStream; +}  } // End of namespace Sci diff --git a/engines/sci/video/robot_decoder.h b/engines/sci/video/robot_decoder.h index e9cefe7d91..ebc3262939 100644 --- a/engines/sci/video/robot_decoder.h +++ b/engines/sci/video/robot_decoder.h @@ -25,84 +25,103 @@  #include "common/rational.h"  #include "common/rect.h" -#include "common/stream.h" -#include "common/substream.h" -#include "audio/audiostream.h" -#include "audio/mixer.h" -#include "graphics/pixelformat.h"  #include "video/video_decoder.h" -namespace Sci { +namespace Audio { +class QueuingAudioStream; +} -#ifdef ENABLE_SCI32 - -struct RobotHeader { -	// 6 bytes, identifier bytes -	uint16 version; -	uint16 audioChunkSize; -	uint16 audioSilenceSize; -	// 2 bytes, unknown -	uint16 frameCount; -	uint16 paletteDataSize; -	uint16 unkChunkDataSize; -	// 5 bytes, unknown -	
byte hasSound; -	// 34 bytes, unknown -}; +namespace Common { +class SeekableSubReadStreamEndian; +} + +namespace Sci { -class RobotDecoder : public Video::FixedRateVideoDecoder { +class RobotDecoder : public Video::VideoDecoder {  public: -	RobotDecoder(Audio::Mixer *mixer, bool isBigEndian); +	RobotDecoder(bool isBigEndian);  	virtual ~RobotDecoder();  	bool loadStream(Common::SeekableReadStream *stream);  	bool load(GuiResourceId id);  	void close(); - -	bool isVideoLoaded() const { return _fileStream != 0; } -	uint16 getWidth() const { return _width; } -	uint16 getHeight() const { return _height; } -	uint32 getFrameCount() const { return _header.frameCount; } -	const Graphics::Surface *decodeNextFrame(); -	Graphics::PixelFormat getPixelFormat() const { return Graphics::PixelFormat::createFormatCLUT8(); } -	const byte *getPalette() { _dirtyPalette = false; return _palette; } -	bool hasDirtyPalette() const { return _dirtyPalette; } +	  	void setPos(uint16 x, uint16 y) { _pos = Common::Point(x, y); }  	Common::Point getPos() const { return _pos; }  protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); - -	// FixedRateVideoDecoder API -	Common::Rational getFrameRate() const { return Common::Rational(60, 10); } - +	void readNextPacket(); +	  private: +	class RobotVideoTrack : public FixedRateVideoTrack { +	public: +		RobotVideoTrack(int frameCount); +		~RobotVideoTrack(); + +		uint16 getWidth() const; +		uint16 getHeight() const; +		Graphics::PixelFormat getPixelFormat() const; +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		const Graphics::Surface *decodeNextFrame() { return _surface; } +		const byte *getPalette() const { _dirtyPalette = false; return _palette; } +		bool hasDirtyPalette() const { return _dirtyPalette; } + +		void readPaletteChunk(Common::SeekableSubReadStreamEndian *stream, uint16 chunkSize); +		void calculateVideoDimensions(Common::SeekableSubReadStreamEndian 
*stream, uint32 *frameSizes); +		Graphics::Surface *getSurface() { return _surface; } +		void increaseCurFrame() { _curFrame++; } + +	protected: +		Common::Rational getFrameRate() const { return Common::Rational(60, 10); } + +	private: +		int _frameCount; +		int _curFrame; +		byte _palette[256 * 3]; +		mutable bool _dirtyPalette; +		Graphics::Surface *_surface; +	}; + +	class RobotAudioTrack : public AudioTrack { +	public: +		RobotAudioTrack(); +		~RobotAudioTrack(); + +		Audio::Mixer::SoundType getSoundType() const { return Audio::Mixer::kMusicSoundType; } + +		void queueBuffer(byte *buffer, int size); + +	protected: +		Audio::AudioStream *getAudioStream() const; + +	private: +		Audio::QueuingAudioStream *_audioStream; +	}; + +	struct RobotHeader { +		// 6 bytes, identifier bytes +		uint16 version; +		uint16 audioChunkSize; +		uint16 audioSilenceSize; +		// 2 bytes, unknown +		uint16 frameCount; +		uint16 paletteDataSize; +		uint16 unkChunkDataSize; +		// 5 bytes, unknown +		byte hasSound; +		// 34 bytes, unknown +	} _header; +  	void readHeaderChunk(); -	void readPaletteChunk(uint16 chunkSize);  	void readFrameSizesChunk(); -	void calculateVideoDimensions(); - -	void freeData(); -	RobotHeader _header;  	Common::Point _pos;  	bool _isBigEndian; +	uint32 *_frameTotalSize;  	Common::SeekableSubReadStreamEndian *_fileStream; - -	uint16 _width; -	uint16 _height; -	uint32 *_frameTotalSize; -	byte _palette[256 * 3]; -	bool _dirtyPalette; -	Graphics::Surface *_surface; -	Audio::QueuingAudioStream *_audioStream; -	Audio::SoundHandle _audioHandle; -	Audio::Mixer *_mixer;  }; -#endif  } // End of namespace Sci diff --git a/engines/sci/video/seq_decoder.cpp b/engines/sci/video/seq_decoder.cpp index abd64911a7..a7b6346eca 100644 --- a/engines/sci/video/seq_decoder.cpp +++ b/engines/sci/video/seq_decoder.cpp @@ -41,33 +41,44 @@ enum seqFrameTypes {  	kSeqFrameDiff = 1  }; -SeqDecoder::SeqDecoder() { -	_fileStream = 0; -	_surface = 0; -	_dirtyPalette = false; 
+SEQDecoder::SEQDecoder(uint frameDelay) : _frameDelay(frameDelay) {  } -SeqDecoder::~SeqDecoder() { +SEQDecoder::~SEQDecoder() {  	close();  } -bool SeqDecoder::loadStream(Common::SeekableReadStream *stream) { +bool SEQDecoder::loadStream(Common::SeekableReadStream *stream) {  	close(); +	addTrack(new SEQVideoTrack(stream, _frameDelay)); + +	return true; +} + +SEQDecoder::SEQVideoTrack::SEQVideoTrack(Common::SeekableReadStream *stream, uint frameDelay) { +	assert(stream); +	assert(frameDelay != 0);  	_fileStream = stream; +	_frameDelay = frameDelay; +	_curFrame = -1; +  	_surface = new Graphics::Surface();  	_surface->create(SEQ_SCREEN_WIDTH, SEQ_SCREEN_HEIGHT, Graphics::PixelFormat::createFormatCLUT8());  	_frameCount = _fileStream->readUint16LE(); -	// Set palette -	int paletteChunkSize = _fileStream->readUint32LE(); -	readPaletteChunk(paletteChunkSize); +	// Set initial palette +	readPaletteChunk(_fileStream->readUint32LE()); +} -	return true; +SEQDecoder::SEQVideoTrack::~SEQVideoTrack() { +	delete _fileStream; +	_surface->free(); +	delete _surface;  } -void SeqDecoder::readPaletteChunk(uint16 chunkSize) { +void SEQDecoder::SEQVideoTrack::readPaletteChunk(uint16 chunkSize) {  	byte *paletteData = new byte[chunkSize];  	_fileStream->read(paletteData, chunkSize); @@ -91,23 +102,7 @@ void SeqDecoder::readPaletteChunk(uint16 chunkSize) {  	delete[] paletteData;  } -void SeqDecoder::close() { -	if (!_fileStream) -		return; - -	_frameDelay = 0; - -	delete _fileStream; -	_fileStream = 0; - -	_surface->free(); -	delete _surface; -	_surface = 0; - -	reset(); -} - -const Graphics::Surface *SeqDecoder::decodeNextFrame() { +const Graphics::Surface *SEQDecoder::SEQVideoTrack::decodeNextFrame() {  	int16 frameWidth = _fileStream->readUint16LE();  	int16 frameHeight = _fileStream->readUint16LE();  	int16 frameLeft = _fileStream->readUint16LE(); @@ -142,9 +137,6 @@ const Graphics::Surface *SeqDecoder::decodeNextFrame() {  		delete[] buf;  	} -	if (_curFrame == -1) -		
_startTime = g_system->getMillis(); -  	_curFrame++;  	return _surface;  } @@ -159,7 +151,7 @@ const Graphics::Surface *SeqDecoder::decodeNextFrame() {  	} \  	memcpy(dest + writeRow * SEQ_SCREEN_WIDTH + writeCol, litData + litPos, n); -bool SeqDecoder::decodeFrame(byte *rleData, int rleSize, byte *litData, int litSize, byte *dest, int left, int width, int height, int colorKey) { +bool SEQDecoder::SEQVideoTrack::decodeFrame(byte *rleData, int rleSize, byte *litData, int litSize, byte *dest, int left, int width, int height, int colorKey) {  	int writeRow = 0;  	int writeCol = left;  	int litPos = 0; @@ -237,4 +229,9 @@ bool SeqDecoder::decodeFrame(byte *rleData, int rleSize, byte *litData, int litS  	return true;  } +const byte *SEQDecoder::SEQVideoTrack::getPalette() const { +	_dirtyPalette = false; +	return _palette; +} +  } // End of namespace Sci diff --git a/engines/sci/video/seq_decoder.h b/engines/sci/video/seq_decoder.h index 800a3c9024..890f349feb 100644 --- a/engines/sci/video/seq_decoder.h +++ b/engines/sci/video/seq_decoder.h @@ -40,44 +40,49 @@ namespace Sci {  /**   * Implementation of the Sierra SEQ decoder, used in KQ6 DOS floppy/CD and GK1 DOS   */ -class SeqDecoder : public Video::FixedRateVideoDecoder { +class SEQDecoder : public Video::VideoDecoder {  public: -	SeqDecoder(); -	virtual ~SeqDecoder(); +	SEQDecoder(uint frameDelay); +	virtual ~SEQDecoder();  	bool loadStream(Common::SeekableReadStream *stream); -	void close(); - -	void setFrameDelay(int frameDelay) { _frameDelay = frameDelay; } - -	bool isVideoLoaded() const { return _fileStream != 0; } -	uint16 getWidth() const { return SEQ_SCREEN_WIDTH; } -	uint16 getHeight() const { return SEQ_SCREEN_HEIGHT; } -	uint32 getFrameCount() const { return _frameCount; } -	const Graphics::Surface *decodeNextFrame(); -	Graphics::PixelFormat getPixelFormat() const { return Graphics::PixelFormat::createFormatCLUT8(); } -	const byte *getPalette() { _dirtyPalette = false; return _palette; } -	bool 
hasDirtyPalette() const { return _dirtyPalette; } - -protected: -	Common::Rational getFrameRate() const { assert(_frameDelay); return Common::Rational(60, _frameDelay); }  private: -	enum { -		SEQ_SCREEN_WIDTH = 320, -		SEQ_SCREEN_HEIGHT = 200 +	class SEQVideoTrack : public FixedRateVideoTrack { +	public: +		SEQVideoTrack(Common::SeekableReadStream *stream, uint frameDelay); +		~SEQVideoTrack(); + +		uint16 getWidth() const { return SEQ_SCREEN_WIDTH; } +		uint16 getHeight() const { return SEQ_SCREEN_HEIGHT; } +		Graphics::PixelFormat getPixelFormat() const { return Graphics::PixelFormat::createFormatCLUT8(); } +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		const Graphics::Surface *decodeNextFrame(); +		const byte *getPalette() const; +		bool hasDirtyPalette() const { return _dirtyPalette; } + +	protected: +		Common::Rational getFrameRate() const { return Common::Rational(60, _frameDelay); } + +	private: +		enum { +			SEQ_SCREEN_WIDTH = 320, +			SEQ_SCREEN_HEIGHT = 200 +		}; + +		void readPaletteChunk(uint16 chunkSize); +		bool decodeFrame(byte *rleData, int rleSize, byte *litData, int litSize, byte *dest, int left, int width, int height, int colorKey); + +		Common::SeekableReadStream *_fileStream; +		int _curFrame, _frameCount; +		byte _palette[256 * 3]; +		mutable bool _dirtyPalette; +		Graphics::Surface *_surface; +		uint _frameDelay;  	}; -	void readPaletteChunk(uint16 chunkSize); -	bool decodeFrame(byte *rleData, int rleSize, byte *litData, int litSize, byte *dest, int left, int width, int height, int colorKey); - -	uint16 _width, _height; -	uint16 _frameDelay; -	Common::SeekableReadStream *_fileStream; -	byte _palette[256 * 3]; -	bool _dirtyPalette; -	uint32 _frameCount; -	Graphics::Surface *_surface; +	uint _frameDelay;  };  } // End of namespace Sci diff --git a/engines/scumm/he/animation_he.cpp b/engines/scumm/he/animation_he.cpp index 40e99c26a8..be17a3b305 100644 --- 
a/engines/scumm/he/animation_he.cpp +++ b/engines/scumm/he/animation_he.cpp @@ -40,7 +40,7 @@ MoviePlayer::MoviePlayer(ScummEngine_v90he *vm, Audio::Mixer *mixer) : _vm(vm) {  		_video = new Video::BinkDecoder();  	else  #endif -		_video = new Video::SmackerDecoder(mixer); +		_video = new Video::SmackerDecoder();  	_flags = 0;  	_wizResNum = 0; @@ -61,11 +61,16 @@ int MoviePlayer::load(const char *filename, int flags, int image) {  	if (_video->isVideoLoaded())  		_video->close(); +	// Ensure that Bink will use our PixelFormat +	_video->setDefaultHighColorFormat(g_system->getScreenFormat()); +  	if (!_video->loadFile(filename)) {  		warning("Failed to load video file %s", filename);  		return -1;  	} +	_video->start(); +  	debug(1, "Playing video %s", filename);  	if (flags & 2) diff --git a/engines/sword1/animation.cpp b/engines/sword1/animation.cpp index ddafd964eb..f7add4eed2 100644 --- a/engines/sword1/animation.cpp +++ b/engines/sword1/animation.cpp @@ -37,6 +37,7 @@  #include "gui/message.h" +#include "video/dxa_decoder.h"  #include "video/psx_decoder.h"  #include "video/smk_decoder.h" @@ -96,9 +97,8 @@ static const char *const sequenceListPSX[20] = {  // Basic movie player  /////////////////////////////////////////////////////////////////////////////// -MoviePlayer::MoviePlayer(SwordEngine *vm, Text *textMan, ResMan *resMan, Audio::Mixer *snd, OSystem *system, Audio::SoundHandle *bgSoundHandle, Video::VideoDecoder *decoder, DecoderType decoderType) -	: _vm(vm), _textMan(textMan), _resMan(resMan), _snd(snd), _bgSoundHandle(bgSoundHandle), _system(system) { -	_bgSoundStream = NULL; +MoviePlayer::MoviePlayer(SwordEngine *vm, Text *textMan, ResMan *resMan, OSystem *system, Video::VideoDecoder *decoder, DecoderType decoderType) +	: _vm(vm), _textMan(textMan), _resMan(resMan), _system(system) {  	_decoderType = decoderType;  	_decoder = decoder; @@ -107,7 +107,6 @@ MoviePlayer::MoviePlayer(SwordEngine *vm, Text *textMan, ResMan *resMan, Audio::  }  
MoviePlayer::~MoviePlayer() { -	delete _bgSoundHandle;  	delete _decoder;  } @@ -116,16 +115,12 @@ MoviePlayer::~MoviePlayer() {   * @param id the id of the file   */  bool MoviePlayer::load(uint32 id) { -	Common::File f;  	Common::String filename; -	if (_decoderType == kVideoDecoderDXA) -		_bgSoundStream = Audio::SeekableAudioStream::openStreamFile(sequenceList[id]); -	else -		_bgSoundStream = NULL; -  	if (SwordEngine::_systemVars.showText) { +		Common::File f;  		filename = Common::String::format("%s.txt", sequenceList[id]); +  		if (f.open(filename)) {  			Common::String line;  			int lineNo = 0; @@ -169,7 +164,6 @@ bool MoviePlayer::load(uint32 id) {  				_movieTexts.push_back(MovieText(startFrame, endFrame, ptr, color));  				lastEnd = endFrame;  			} -			f.close();  		}  	} @@ -189,6 +183,7 @@ bool MoviePlayer::load(uint32 id) {  		// Need to load here in case it fails in which case we'd need  		// to go back to paletted mode  		if (_decoder->loadFile(filename)) { +			_decoder->start();  			return true;  		} else {  			initGraphics(g_system->getWidth(), g_system->getHeight(), true); @@ -197,30 +192,27 @@ bool MoviePlayer::load(uint32 id) {  		break;  	} -	return _decoder->loadFile(filename.c_str()); -} +	if (!_decoder->loadFile(filename)) +		return false; -void MoviePlayer::play() { -	if (_bgSoundStream) -		_snd->playStream(Audio::Mixer::kSFXSoundType, _bgSoundHandle, _bgSoundStream); +	// For DXA, also add the external sound file +	if (_decoderType == kVideoDecoderDXA) +		_decoder->addStreamFileTrack(sequenceList[id]); -	bool terminated = false; +	_decoder->start(); +	return true; +} +void MoviePlayer::play() {  	_textX = 0;  	_textY = 0; -	terminated = !playVideo(); - -	if (terminated) -		_snd->stopHandle(*_bgSoundHandle); +	playVideo();  	_textMan->releaseText(2, false);  	_movieTexts.clear(); -	while (_snd->isSoundHandleActive(*_bgSoundHandle)) -		_system->delayMillis(100); -  	// It's tempting to call _screen->fullRefresh() here to restore the old  	// 
palette. However, that causes glitches with DXA movies, where the  	// previous location would be momentarily drawn, before switching to @@ -320,7 +312,7 @@ bool MoviePlayer::playVideo() {  			}  			if (_decoder->hasDirtyPalette()) { -				_decoder->setSystemPalette(); +				_vm->_system->getPaletteManager()->setPalette(_decoder->getPalette(), 0, 256);  				if (!_movieTexts.empty()) {  					// Look for the best color indexes to use to display the subtitles @@ -506,24 +498,12 @@ void MoviePlayer::drawFramePSX(const Graphics::Surface *frame) {  	scaledFrame.free();  } -DXADecoderWithSound::DXADecoderWithSound(Audio::Mixer *mixer, Audio::SoundHandle *bgSoundHandle) -	: _mixer(mixer), _bgSoundHandle(bgSoundHandle)  { -} - -uint32 DXADecoderWithSound::getTime() const { -	if (_mixer->isSoundHandleActive(*_bgSoundHandle)) -		return _mixer->getSoundElapsedTime(*_bgSoundHandle); - -	return DXADecoder::getTime(); -} -  ///////////////////////////////////////////////////////////////////////////////  // Factory function for creating the appropriate cutscene player  /////////////////////////////////////////////////////////////////////////////// -MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text *textMan, ResMan *resMan, Audio::Mixer *snd, OSystem *system) { +MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text *textMan, ResMan *resMan, OSystem *system) {  	Common::String filename; -	Audio::SoundHandle *bgSoundHandle = new Audio::SoundHandle;  	// For the PSX version, we'll try the PlayStation stream files  	if (vm->isPsx()) { @@ -534,7 +514,7 @@ MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text *textMan, ResMan *  #ifdef USE_RGB_COLOR  			// All BS1 PSX videos run the videos at 2x speed  			Video::VideoDecoder *psxDecoder = new Video::PSXStreamDecoder(Video::PSXStreamDecoder::kCD2x); -			return new MoviePlayer(vm, textMan, resMan, snd, system, bgSoundHandle, psxDecoder, kVideoDecoderPSX); +			return new MoviePlayer(vm, textMan, resMan, system, 
psxDecoder, kVideoDecoderPSX);  #else  			GUI::MessageDialog dialog(Common::String::format(_("PSX stream cutscene '%s' cannot be played in paletted mode"), filename.c_str()), _("OK"));  			dialog.runModal(); @@ -546,20 +526,20 @@ MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text *textMan, ResMan *  	filename = Common::String::format("%s.smk", sequenceList[id]);  	if (Common::File::exists(filename)) { -		Video::SmackerDecoder *smkDecoder = new Video::SmackerDecoder(snd); -		return new MoviePlayer(vm, textMan, resMan, snd, system, bgSoundHandle, smkDecoder, kVideoDecoderSMK); +		Video::SmackerDecoder *smkDecoder = new Video::SmackerDecoder(); +		return new MoviePlayer(vm, textMan, resMan, system, smkDecoder, kVideoDecoderSMK);  	}  	filename = Common::String::format("%s.dxa", sequenceList[id]);  	if (Common::File::exists(filename)) {  #ifdef USE_ZLIB -		DXADecoderWithSound *dxaDecoder = new DXADecoderWithSound(snd, bgSoundHandle); -		return new MoviePlayer(vm, textMan, resMan, snd, system, bgSoundHandle, dxaDecoder, kVideoDecoderDXA); +		Video::VideoDecoder *dxaDecoder = new Video::DXADecoder(); +		return new MoviePlayer(vm, textMan, resMan, system, dxaDecoder, kVideoDecoderDXA);  #else  		GUI::MessageDialog dialog(_("DXA cutscenes found but ScummVM has been built without zlib support"), _("OK"));  		dialog.runModal(); -		return NULL; +		return 0;  #endif  	} @@ -569,7 +549,7 @@ MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text *textMan, ResMan *  	if (Common::File::exists(filename)) {  		GUI::MessageDialog dialog(_("MPEG2 cutscenes are no longer supported"), _("OK"));  		dialog.runModal(); -		return NULL; +		return 0;  	}  	if (!vm->isPsx() || scumm_stricmp(sequenceList[id], "enddemo") != 0) { @@ -578,7 +558,7 @@ MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text *textMan, ResMan *  		dialog.runModal();  	} -	return NULL; +	return 0;  }  } // End of namespace Sword1 diff --git a/engines/sword1/animation.h 
b/engines/sword1/animation.h index c2ed86a1a3..d0c61f5eb3 100644 --- a/engines/sword1/animation.h +++ b/engines/sword1/animation.h @@ -23,16 +23,19 @@  #ifndef SWORD1_ANIMATION_H  #define SWORD1_ANIMATION_H -#include "video/dxa_decoder.h" -#include "video/video_decoder.h" -  #include "common/list.h" -#include "audio/audiostream.h" -  #include "sword1/screen.h"  #include "sword1/sound.h" +namespace Graphics { +struct Surface; +} + +namespace Video { +class VideoDecoder; +} +  namespace Sword1 {  enum DecoderType { @@ -55,21 +58,9 @@ public:  	}  }; -class DXADecoderWithSound : public Video::DXADecoder { -public: -	DXADecoderWithSound(Audio::Mixer *mixer, Audio::SoundHandle *bgSoundHandle); -	~DXADecoderWithSound() {} - -	uint32 getTime() const; - -private: -	Audio::Mixer *_mixer; -	Audio::SoundHandle *_bgSoundHandle; -}; -  class MoviePlayer {  public: -	MoviePlayer(SwordEngine *vm, Text *textMan, ResMan *resMan, Audio::Mixer *snd, OSystem *system, Audio::SoundHandle *bgSoundHandle, Video::VideoDecoder *decoder, DecoderType decoderType); +	MoviePlayer(SwordEngine *vm, Text *textMan, ResMan *resMan, OSystem *system, Video::VideoDecoder *decoder, DecoderType decoderType);  	virtual ~MoviePlayer();  	bool load(uint32 id);  	void play(); @@ -78,7 +69,6 @@ protected:  	SwordEngine *_vm;  	Text *_textMan;  	ResMan *_resMan; -	Audio::Mixer *_snd;  	OSystem *_system;  	Common::List<MovieText> _movieTexts;  	int _textX, _textY, _textWidth, _textHeight; @@ -88,8 +78,6 @@ protected:  	DecoderType _decoderType;  	Video::VideoDecoder *_decoder; -	Audio::SoundHandle *_bgSoundHandle; -	Audio::AudioStream *_bgSoundStream;  	bool playVideo();  	void performPostProcessing(byte *screen); @@ -100,7 +88,7 @@ protected:  	void convertColor(byte r, byte g, byte b, float &h, float &s, float &v);  }; -MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text *textMan, ResMan *resMan, Audio::Mixer *snd, OSystem *system); +MoviePlayer *makeMoviePlayer(uint32 id, SwordEngine *vm, Text 
*textMan, ResMan *resMan, OSystem *system);  } // End of namespace Sword1 diff --git a/engines/sword1/logic.cpp b/engines/sword1/logic.cpp index 8e04861edf..757d768780 100644 --- a/engines/sword1/logic.cpp +++ b/engines/sword1/logic.cpp @@ -959,7 +959,7 @@ int Logic::fnPlaySequence(Object *cpt, int32 id, int32 sequenceId, int32 d, int3  	// meantime, we don't want any looping sound effects still playing.  	_sound->quitScreen(); -	MoviePlayer *player = makeMoviePlayer(sequenceId, _vm, _textMan, _resMan, _mixer, _system); +	MoviePlayer *player = makeMoviePlayer(sequenceId, _vm, _textMan, _resMan, _system);  	if (player) {  		_screen->clearScreen();  		if (player->load(sequenceId)) diff --git a/engines/sword2/animation.cpp b/engines/sword2/animation.cpp index 5e3f8929e9..00260f789a 100644 --- a/engines/sword2/animation.cpp +++ b/engines/sword2/animation.cpp @@ -38,8 +38,11 @@  #include "sword2/screen.h"  #include "sword2/animation.h" +#include "graphics/palette.h" +  #include "gui/message.h" +#include "video/dxa_decoder.h"  #include "video/smk_decoder.h"  #include "video/psx_decoder.h" @@ -51,9 +54,8 @@ namespace Sword2 {  // Basic movie player  /////////////////////////////////////////////////////////////////////////////// -MoviePlayer::MoviePlayer(Sword2Engine *vm, Audio::Mixer *snd, OSystem *system, Audio::SoundHandle *bgSoundHandle, Video::VideoDecoder *decoder, DecoderType decoderType) -	: _vm(vm), _snd(snd), _bgSoundHandle(bgSoundHandle), _system(system) { -	_bgSoundStream = NULL; +MoviePlayer::MoviePlayer(Sword2Engine *vm, OSystem *system, Video::VideoDecoder *decoder, DecoderType decoderType) +	: _vm(vm), _system(system) {  	_decoderType = decoderType;  	_decoder = decoder; @@ -62,7 +64,6 @@ MoviePlayer::MoviePlayer(Sword2Engine *vm, Audio::Mixer *snd, OSystem *system, A  }  MoviePlayer::~MoviePlayer() { -	delete _bgSoundHandle;  	delete _decoder;  } @@ -75,11 +76,6 @@ bool MoviePlayer::load(const char *name) {  	if (_vm->shouldQuit())  		return false; -	if 
(_decoderType == kVideoDecoderDXA) -		_bgSoundStream = Audio::SeekableAudioStream::openStreamFile(name); -	else -		_bgSoundStream = NULL; -  	_textSurface = NULL;  	Common::String filename; @@ -99,6 +95,7 @@ bool MoviePlayer::load(const char *name) {  		// Need to load here in case it fails in which case we'd need  		// to go back to paletted mode  		if (_decoder->loadFile(filename)) { +			_decoder->start();  			return true;  		} else {  			initGraphics(640, 480, true); @@ -106,7 +103,15 @@ bool MoviePlayer::load(const char *name) {  		}  	} -	return _decoder->loadFile(filename.c_str()); +	if (!_decoder->loadFile(filename)) +		return false; + +	// For DXA, also add the external sound file +	if (_decoderType == kVideoDecoderDXA) +		_decoder->addStreamFileTrack(name); + +	_decoder->start(); +	return true;  }  void MoviePlayer::play(MovieText *movieTexts, uint32 numMovieTexts, uint32 leadIn, uint32 leadOut) { @@ -122,24 +127,15 @@ void MoviePlayer::play(MovieText *movieTexts, uint32 numMovieTexts, uint32 leadI  	if (leadIn)  		_vm->_sound->playMovieSound(leadIn, kLeadInSound); -	if (_bgSoundStream) -		_snd->playStream(Audio::Mixer::kSFXSoundType, _bgSoundHandle, _bgSoundStream); - -	bool terminated = false; - -	terminated = !playVideo(); +	bool terminated = !playVideo();  	closeTextObject(_currentMovieText, NULL, 0);  	if (terminated) { -		_snd->stopHandle(*_bgSoundHandle);  		_vm->_sound->stopMovieSounds();  		_vm->_sound->stopSpeech();  	} -	while (_snd->isSoundHandleActive(*_bgSoundHandle)) -		_system->delayMillis(100); -  	if (_decoderType == kVideoDecoderPSX) {  		// Need to jump back to paletted color  		initGraphics(640, 480, true); @@ -336,7 +332,7 @@ bool MoviePlayer::playVideo() {  			}  			if (_decoder->hasDirtyPalette()) { -				_decoder->setSystemPalette(); +				_vm->_system->getPaletteManager()->setPalette(_decoder->getPalette(), 0, 256);  				uint32 maxWeight = 0;  				uint32 minWeight = 0xFFFFFFFF; @@ -406,31 +402,19 @@ void 
MoviePlayer::drawFramePSX(const Graphics::Surface *frame) {  	scaledFrame.free();  } -DXADecoderWithSound::DXADecoderWithSound(Audio::Mixer *mixer, Audio::SoundHandle *bgSoundHandle) -	: _mixer(mixer), _bgSoundHandle(bgSoundHandle)  { -} - -uint32 DXADecoderWithSound::getTime() const { -	if (_mixer->isSoundHandleActive(*_bgSoundHandle)) -		return _mixer->getSoundElapsedTime(*_bgSoundHandle); - -	return DXADecoder::getTime(); -} -  ///////////////////////////////////////////////////////////////////////////////  // Factory function for creating the appropriate cutscene player  /////////////////////////////////////////////////////////////////////////////// -MoviePlayer *makeMoviePlayer(const char *name, Sword2Engine *vm, Audio::Mixer *snd, OSystem *system, uint32 frameCount) { +MoviePlayer *makeMoviePlayer(const char *name, Sword2Engine *vm, OSystem *system, uint32 frameCount) {  	Common::String filename; -	Audio::SoundHandle *bgSoundHandle = new Audio::SoundHandle;  	filename = Common::String::format("%s.str", name);  	if (Common::File::exists(filename)) {  #ifdef USE_RGB_COLOR  		Video::VideoDecoder *psxDecoder = new Video::PSXStreamDecoder(Video::PSXStreamDecoder::kCD2x, frameCount); -		return new MoviePlayer(vm, snd, system, bgSoundHandle, psxDecoder, kVideoDecoderPSX); +		return new MoviePlayer(vm, system, psxDecoder, kVideoDecoderPSX);  #else  		GUI::MessageDialog dialog(_("PSX cutscenes found but ScummVM has been built without RGB color support"), _("OK"));  		dialog.runModal(); @@ -441,16 +425,16 @@ MoviePlayer *makeMoviePlayer(const char *name, Sword2Engine *vm, Audio::Mixer *s  	filename = Common::String::format("%s.smk", name);  	if (Common::File::exists(filename)) { -		Video::SmackerDecoder *smkDecoder = new Video::SmackerDecoder(snd); -		return new MoviePlayer(vm, snd, system, bgSoundHandle, smkDecoder, kVideoDecoderSMK); +		Video::SmackerDecoder *smkDecoder = new Video::SmackerDecoder(); +		return new MoviePlayer(vm, system, smkDecoder, 
kVideoDecoderSMK);  	}  	filename = Common::String::format("%s.dxa", name);  	if (Common::File::exists(filename)) {  #ifdef USE_ZLIB -		DXADecoderWithSound *dxaDecoder = new DXADecoderWithSound(snd, bgSoundHandle); -		return new MoviePlayer(vm, snd, system, bgSoundHandle, dxaDecoder, kVideoDecoderDXA); +		Video::DXADecoder *dxaDecoder = new Video::DXADecoder(); +		return new MoviePlayer(vm, system, dxaDecoder, kVideoDecoderDXA);  #else  		GUI::MessageDialog dialog(_("DXA cutscenes found but ScummVM has been built without zlib support"), _("OK"));  		dialog.runModal(); diff --git a/engines/sword2/animation.h b/engines/sword2/animation.h index 3d5c42b7f7..b2a243b2ca 100644 --- a/engines/sword2/animation.h +++ b/engines/sword2/animation.h @@ -25,12 +25,16 @@  #ifndef SWORD2_ANIMATION_H  #define SWORD2_ANIMATION_H -#include "video/dxa_decoder.h" -#include "video/video_decoder.h" -#include "audio/mixer.h" -  #include "sword2/screen.h" +namespace Graphics { +struct Surface; +} + +namespace Video { +class VideoDecoder; +} +  namespace Sword2 {  enum DecoderType { @@ -55,20 +59,9 @@ struct MovieText {  	}  }; -class DXADecoderWithSound : public Video::DXADecoder { -public: -	DXADecoderWithSound(Audio::Mixer *mixer, Audio::SoundHandle *bgSoundHandle); -	~DXADecoderWithSound() {} - -	uint32 getTime() const; -private: -	Audio::Mixer *_mixer; -	Audio::SoundHandle *_bgSoundHandle; -}; -  class MoviePlayer {  public: -	MoviePlayer(Sword2Engine *vm, Audio::Mixer *snd, OSystem *system, Audio::SoundHandle *bgSoundHandle, Video::VideoDecoder *decoder, DecoderType decoderType); +	MoviePlayer(Sword2Engine *vm, OSystem *system, Video::VideoDecoder *decoder, DecoderType decoderType);  	virtual ~MoviePlayer();  	bool load(const char *name); @@ -76,7 +69,6 @@ public:  protected:  	Sword2Engine *_vm; -	Audio::Mixer *_snd;  	OSystem *_system;  	MovieText *_movieTexts;  	uint32 _numMovieTexts; @@ -87,8 +79,6 @@ protected:  	DecoderType _decoderType;  	Video::VideoDecoder *_decoder; -	
Audio::SoundHandle *_bgSoundHandle; -	Audio::AudioStream *_bgSoundStream;  	uint32 _leadOut;  	int _leadOutFrame; @@ -105,7 +95,7 @@ protected:  	uint32 getWhiteColor();  }; -MoviePlayer *makeMoviePlayer(const char *name, Sword2Engine *vm, Audio::Mixer *snd, OSystem *system, uint32 frameCount); +MoviePlayer *makeMoviePlayer(const char *name, Sword2Engine *vm, OSystem *system, uint32 frameCount);  } // End of namespace Sword2 diff --git a/engines/sword2/function.cpp b/engines/sword2/function.cpp index 836b252d6c..07fcaa094b 100644 --- a/engines/sword2/function.cpp +++ b/engines/sword2/function.cpp @@ -2139,7 +2139,7 @@ int32 Logic::fnPlaySequence(int32 *params) {  	uint32 frameCount = Sword2Engine::isPsx() ? params[1] : 0; -	_moviePlayer = makeMoviePlayer(filename, _vm, _vm->_mixer, _vm->_system, frameCount); +	_moviePlayer = makeMoviePlayer(filename, _vm, _vm->_system, frameCount);  	if (_moviePlayer && _moviePlayer->load(filename)) {  		_moviePlayer->play(_sequenceTextList, _sequenceTextLines, _smackerLeadIn, _smackerLeadOut); diff --git a/engines/sword25/fmv/movieplayer.cpp b/engines/sword25/fmv/movieplayer.cpp index 9ee13b4b6d..a95532ec65 100644 --- a/engines/sword25/fmv/movieplayer.cpp +++ b/engines/sword25/fmv/movieplayer.cpp @@ -61,6 +61,7 @@ bool MoviePlayer::loadMovie(const Common::String &filename, uint z) {  	// Get the file and load it into the decoder  	Common::SeekableReadStream *in = Kernel::getInstance()->getPackage()->getStream(filename);  	_decoder.loadStream(in); +	_decoder.start();  	GraphicEngine *pGfx = Kernel::getInstance()->getGfx(); diff --git a/engines/sword25/fmv/movieplayer.h b/engines/sword25/fmv/movieplayer.h index 1d256e56ba..2f5614b505 100644 --- a/engines/sword25/fmv/movieplayer.h +++ b/engines/sword25/fmv/movieplayer.h @@ -39,7 +39,7 @@  #include "sword25/gfx/bitmap.h"  #ifdef USE_THEORADEC -#include "sword25/fmv/theora_decoder.h" +#include "video/theora_decoder.h"  #endif  #define THEORA_INDIRECT_RENDERING @@ -141,7 +141,7 @@ 
private:  #ifdef USE_THEORADEC -	TheoraDecoder _decoder; +	Video::TheoraDecoder _decoder;  	Graphics::Surface *_backSurface;  	int _outX, _outY; diff --git a/engines/sword25/fmv/theora_decoder.cpp b/engines/sword25/fmv/theora_decoder.cpp deleted file mode 100644 index d38f5a26cf..0000000000 --- a/engines/sword25/fmv/theora_decoder.cpp +++ /dev/null @@ -1,565 +0,0 @@ -/* ScummVM - Graphic Adventure Engine - * - * ScummVM is the legal property of its developers, whose names - * are too numerous to list here. Please refer to the COPYRIGHT - * file distributed with this source distribution. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - */ - -/* - * Source is based on the player example from libvorbis package, - * available at: http://svn.xiph.org/trunk/theora/examples/player_example.c - * - * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. - * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS - * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE - * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. 
- * - * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009 - * by the Xiph.Org Foundation and contributors http://www.xiph.org/ - * - */ - -#include "sword25/fmv/theora_decoder.h" - -#ifdef USE_THEORADEC -#include "common/system.h" -#include "common/textconsole.h" -#include "common/util.h" -#include "graphics/yuv_to_rgb.h" -#include "audio/decoders/raw.h" -#include "sword25/kernel/common.h" - -namespace Sword25 { - -#define AUDIOFD_FRAGSIZE 10240 - -static double rint(double v) { -	return floor(v + 0.5); -} - -TheoraDecoder::TheoraDecoder(Audio::Mixer::SoundType soundType) { -	_fileStream = 0; - -	_theoraPacket = 0; -	_vorbisPacket = 0; -	_theoraDecode = 0; -	_theoraSetup = 0; -	_nextFrameStartTime = 0.0; - -	_soundType = soundType; -	_audStream = 0; -	_audHandle = new Audio::SoundHandle(); - -	ogg_sync_init(&_oggSync); - -	_curFrame = -1; -	_audiobuf = (ogg_int16_t *)malloc(AUDIOFD_FRAGSIZE * sizeof(ogg_int16_t)); - -	reset(); -} - -TheoraDecoder::~TheoraDecoder() { -	close(); -	delete _fileStream; -	delete _audHandle; -	free(_audiobuf); -} - -void TheoraDecoder::queuePage(ogg_page *page) { -	if (_theoraPacket) -		ogg_stream_pagein(&_theoraOut, page); - -	if (_vorbisPacket) -		ogg_stream_pagein(&_vorbisOut, page); -} - -int TheoraDecoder::bufferData() { -	char *buffer = ogg_sync_buffer(&_oggSync, 4096); -	int bytes = _fileStream->read(buffer, 4096); - -	ogg_sync_wrote(&_oggSync, bytes); - -	return bytes; -} - -bool TheoraDecoder::loadStream(Common::SeekableReadStream *stream) { -	close(); - -	_endOfAudio = false; -	_endOfVideo = false; -	_fileStream = stream; - -	// start up Ogg stream synchronization layer -	ogg_sync_init(&_oggSync); - -	// init supporting Vorbis structures needed in header parsing -	vorbis_info_init(&_vorbisInfo); -	vorbis_comment_init(&_vorbisComment); - -	// init supporting Theora structures needed in header parsing -	th_comment_init(&_theoraComment); -	th_info_init(&_theoraInfo); - -	// Ogg file open; parse the headers -	// Only interested in 
Vorbis/Theora streams -	bool foundHeader = false; -	while (!foundHeader) { -		int ret = bufferData(); - -		if (ret == 0) -			break; - -		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) { -			ogg_stream_state test; - -			// is this a mandated initial header? If not, stop parsing -			if (!ogg_page_bos(&_oggPage)) { -				// don't leak the page; get it into the appropriate stream -				queuePage(&_oggPage); -				foundHeader = true; -				break; -			} - -			ogg_stream_init(&test, ogg_page_serialno(&_oggPage)); -			ogg_stream_pagein(&test, &_oggPage); -			ogg_stream_packetout(&test, &_oggPacket); - -			// identify the codec: try theora -			if (!_theoraPacket && th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket) >= 0) { -				// it is theora -				memcpy(&_theoraOut, &test, sizeof(test)); -				_theoraPacket = 1; -			} else if (!_vorbisPacket && vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket) >= 0) { -				// it is vorbis -				memcpy(&_vorbisOut, &test, sizeof(test)); -				_vorbisPacket = 1; -			} else { -				// whatever it is, we don't care about it -				ogg_stream_clear(&test); -			} -		} -		// fall through to non-bos page parsing -	} - -	// we're expecting more header packets. 
-	while ((_theoraPacket && _theoraPacket < 3) || (_vorbisPacket && _vorbisPacket < 3)) { -		int ret; - -		// look for further theora headers -		while (_theoraPacket && (_theoraPacket < 3) && (ret = ogg_stream_packetout(&_theoraOut, &_oggPacket))) { -			if (ret < 0) -				error("Error parsing Theora stream headers; corrupt stream?"); - -			if (!th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket)) -				error("Error parsing Theora stream headers; corrupt stream?"); - -			_theoraPacket++; -		} - -		// look for more vorbis header packets -		while (_vorbisPacket && (_vorbisPacket < 3) && (ret = ogg_stream_packetout(&_vorbisOut, &_oggPacket))) { -			if (ret < 0) -				error("Error parsing Vorbis stream headers; corrupt stream?"); - -			if (vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket)) -				error("Error parsing Vorbis stream headers; corrupt stream?"); - -			_vorbisPacket++; - -			if (_vorbisPacket == 3) -				break; -		} - -		// The header pages/packets will arrive before anything else we -		// care about, or the stream is not obeying spec - -		if (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) { -			queuePage(&_oggPage); // demux into the appropriate stream -		} else { -			ret = bufferData(); // someone needs more data - -			if (ret == 0) -				error("End of file while searching for codec headers."); -		} -	} - -	// and now we have it all.  
initialize decoders -	if (_theoraPacket) { -		_theoraDecode = th_decode_alloc(&_theoraInfo, _theoraSetup); -		debugN(1, "Ogg logical stream %lx is Theora %dx%d %.02f fps", -		       _theoraOut.serialno, _theoraInfo.pic_width, _theoraInfo.pic_height, -		       (double)_theoraInfo.fps_numerator / _theoraInfo.fps_denominator); - -		switch (_theoraInfo.pixel_fmt) { -		case TH_PF_420: -			debug(1, " 4:2:0 video"); -			break; -		case TH_PF_422: -			debug(1, " 4:2:2 video"); -			break; -		case TH_PF_444: -			debug(1, " 4:4:4 video"); -			break; -		case TH_PF_RSVD: -		default: -			debug(1, " video\n  (UNKNOWN Chroma sampling!)"); -			break; -		} - -		if (_theoraInfo.pic_width != _theoraInfo.frame_width || _theoraInfo.pic_height != _theoraInfo.frame_height) -			debug(1, "  Frame content is %dx%d with offset (%d,%d).", -			      _theoraInfo.frame_width, _theoraInfo.frame_height, _theoraInfo.pic_x, _theoraInfo.pic_y); - -		switch (_theoraInfo.colorspace){ -		case TH_CS_UNSPECIFIED: -			/* nothing to report */ -			break; -		case TH_CS_ITU_REC_470M: -			debug(1, "  encoder specified ITU Rec 470M (NTSC) color."); -			break; -		case TH_CS_ITU_REC_470BG: -			debug(1, "  encoder specified ITU Rec 470BG (PAL) color."); -			break; -		default: -			debug(1, "warning: encoder specified unknown colorspace (%d).", _theoraInfo.colorspace); -			break; -		} - -		debug(1, "Encoded by %s", _theoraComment.vendor); -		if (_theoraComment.comments) { -			debug(1, "theora comment header:"); -			for (int i = 0; i < _theoraComment.comments; i++) { -				if (_theoraComment.user_comments[i]) { -					int len = _theoraComment.comment_lengths[i]; -					char *value = (char *)malloc(len + 1); -					if (value) { -						memcpy(value, _theoraComment.user_comments[i], len); -						value[len] = '\0'; -						debug(1, "\t%s", value); -						free(value); -					} -				} -			} -		} - -		th_decode_ctl(_theoraDecode, TH_DECCTL_GET_PPLEVEL_MAX, &_ppLevelMax, sizeof(_ppLevelMax)); -		_ppLevel = _ppLevelMax; -		
th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel)); -		_ppInc = 0; -	} else { -		// tear down the partial theora setup -		th_info_clear(&_theoraInfo); -		th_comment_clear(&_theoraComment); -	} - -	th_setup_free(_theoraSetup); -	_theoraSetup = 0; - -	if (_vorbisPacket) { -		vorbis_synthesis_init(&_vorbisDSP, &_vorbisInfo); -		vorbis_block_init(&_vorbisDSP, &_vorbisBlock); -		debug(3, "Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.", -		      _vorbisOut.serialno, _vorbisInfo.channels, _vorbisInfo.rate); - -		_audStream = Audio::makeQueuingAudioStream(_vorbisInfo.rate, _vorbisInfo.channels); - -		// Get enough audio data to start us off -		while (_audStream->numQueuedStreams() == 0) { -			// Queue more data -			bufferData(); -			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) -				queuePage(&_oggPage); - -			queueAudio(); -		} - -		if (_audStream) -			g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, _audHandle, _audStream, -1, getVolume(), getBalance()); -	} else { -		// tear down the partial vorbis setup -		vorbis_info_clear(&_vorbisInfo); -		vorbis_comment_clear(&_vorbisComment); -		_endOfAudio = true; -	} - -	_surface.create(_theoraInfo.frame_width, _theoraInfo.frame_height, g_system->getScreenFormat()); - -	// Set up a display surface -	_displaySurface.pixels = _surface.getBasePtr(_theoraInfo.pic_x, _theoraInfo.pic_y); -	_displaySurface.w = _theoraInfo.pic_width; -	_displaySurface.h = _theoraInfo.pic_height; -	_displaySurface.format = _surface.format; -	_displaySurface.pitch = _surface.pitch; - -	// Set the frame rate -	_frameRate = Common::Rational(_theoraInfo.fps_numerator, _theoraInfo.fps_denominator); - -	return true; -} - -void TheoraDecoder::close() { -	if (_vorbisPacket) { -		ogg_stream_clear(&_vorbisOut); -		vorbis_block_clear(&_vorbisBlock); -		vorbis_dsp_clear(&_vorbisDSP); -		vorbis_comment_clear(&_vorbisComment); -		vorbis_info_clear(&_vorbisInfo); - -		
g_system->getMixer()->stopHandle(*_audHandle); - -		_audStream = 0; -		_vorbisPacket = false; -	} -	if (_theoraPacket) { -		ogg_stream_clear(&_theoraOut); -		th_decode_free(_theoraDecode); -		th_comment_clear(&_theoraComment); -		th_info_clear(&_theoraInfo); -		_theoraDecode = 0; -		_theoraPacket = false; -	} - -	if (!_fileStream) -		return; - -	ogg_sync_clear(&_oggSync); - -	delete _fileStream; -	_fileStream = 0; - -	_surface.free(); -	_displaySurface.pixels = 0; -	_displaySurface.free(); - -	reset(); -} - -const Graphics::Surface *TheoraDecoder::decodeNextFrame() { -	// First, let's get our frame -	while (_theoraPacket) { -		// theora is one in, one out... -		if (ogg_stream_packetout(&_theoraOut, &_oggPacket) > 0) { - -			if (_ppInc) { -				_ppLevel += _ppInc; -				th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel)); -				_ppInc = 0; -			} - -			if (th_decode_packetin(_theoraDecode, &_oggPacket, NULL) == 0) { -				_curFrame++; - -				// Convert YUV data to RGB data -				th_ycbcr_buffer yuv; -				th_decode_ycbcr_out(_theoraDecode, yuv); -				translateYUVtoRGBA(yuv); - -				if (_curFrame == 0) -					_startTime = g_system->getMillis(); - -				double time = th_granule_time(_theoraDecode, _oggPacket.granulepos); - -				// We need to calculate when the next frame should be shown -				// This is all in floating point because that's what the Ogg code gives us -				// Ogg is a lossy container format, so it doesn't always list the time to the -				// next frame. In such cases, we need to calculate it ourselves. -				if (time == -1.0) -					_nextFrameStartTime += _frameRate.getInverse().toDouble(); -				else -					_nextFrameStartTime = time; - -				// break out -				break; -			} -		} else { -			// If we can't get any more frames, we're done. 
-			if (_theoraOut.e_o_s || _fileStream->eos()) { -				_endOfVideo = true; -				break; -			} - -			// Queue more data -			bufferData(); -			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) -				queuePage(&_oggPage); -		} - -		// Update audio if we can -		queueAudio(); -	} - -	// Force at least some audio to be buffered -	// TODO: 5 is very arbitrary. We probably should do something like QuickTime does. -	while (!_endOfAudio && _audStream->numQueuedStreams() < 5) { -		bufferData(); -		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) -			queuePage(&_oggPage); - -		bool queuedAudio = queueAudio(); -		if ((_vorbisOut.e_o_s  || _fileStream->eos()) && !queuedAudio) { -			_endOfAudio = true; -			break; -		} -	} - -	return &_displaySurface; -} - -bool TheoraDecoder::queueAudio() { -	if (!_audStream) -		return false; - -	// An audio buffer should have been allocated (either in the constructor or after queuing the current buffer) -	if (!_audiobuf) { -		warning("[TheoraDecoder::queueAudio] Invalid audio buffer"); -		return false; -	} - -	bool queuedAudio = false; - -	for (;;) { -		float **pcm; - -		// if there's pending, decoded audio, grab it -		int ret = vorbis_synthesis_pcmout(&_vorbisDSP, &pcm); -		if (ret > 0) { -			int count = _audiobufFill / 2; -			int maxsamples = ((AUDIOFD_FRAGSIZE - _audiobufFill) / _vorbisInfo.channels) >> 1; -			int i; -			for (i = 0; i < ret && i < maxsamples; i++) -				for (int j = 0; j < _vorbisInfo.channels; j++) { -					int val = CLIP((int)rint(pcm[j][i] * 32767.f), -32768, 32767); -					_audiobuf[count++] = val; -				} - -			vorbis_synthesis_read(&_vorbisDSP, i); -			_audiobufFill += (i * _vorbisInfo.channels) << 1; - -			if (_audiobufFill == AUDIOFD_FRAGSIZE) { -				byte flags = Audio::FLAG_16BITS | Audio::FLAG_STEREO; -#ifdef SCUMM_LITTLE_ENDIAN -				flags |= Audio::FLAG_LITTLE_ENDIAN; -#endif -				_audStream->queueBuffer((byte *)_audiobuf, AUDIOFD_FRAGSIZE, DisposeAfterUse::NO, flags); - -				// The audio mixer is now responsible for 
the old audio buffer. -				// We need to create a new one. -				_audiobuf = (ogg_int16_t *)malloc(AUDIOFD_FRAGSIZE * sizeof(ogg_int16_t)); -				if (!_audiobuf) { -					warning("[TheoraDecoder::queueAudio] Cannot allocate memory for audio buffer"); -					return false; -				} - -				_audiobufFill = 0; -				queuedAudio = true; -			} -		} else { -			// no pending audio; is there a pending packet to decode? -			if (ogg_stream_packetout(&_vorbisOut, &_oggPacket) > 0) { -				if (vorbis_synthesis(&_vorbisBlock, &_oggPacket) == 0) // test for success! -					vorbis_synthesis_blockin(&_vorbisDSP, &_vorbisBlock); -			} else   // we've buffered all we have, break out for now -				return queuedAudio; -		} -	} - -	// Unreachable -	return false; -} - -void TheoraDecoder::reset() { -	VideoDecoder::reset(); - -	// FIXME: This does a rewind() instead of a reset()! - -	if (_fileStream) -		_fileStream->seek(0); - -	_audiobufFill = 0; -	_audiobufReady = false; - -	_curFrame = -1; - -	_theoraPacket = 0; -	_vorbisPacket = 0; -} - -bool TheoraDecoder::endOfVideo() const { -	return !isVideoLoaded() || (_endOfVideo && (!_audStream || (_audStream->endOfData() && _endOfAudio))); -} - -uint32 TheoraDecoder::getTimeToNextFrame() const { -	if (endOfVideo() || _curFrame < 0) -		return 0; - -	uint32 elapsedTime = getTime(); -	uint32 nextFrameStartTime = (uint32)(_nextFrameStartTime * 1000); - -	if (nextFrameStartTime <= elapsedTime) -		return 0; - -	return nextFrameStartTime - elapsedTime; -} - -uint32 TheoraDecoder::getTime() const { -	if (_audStream) -		return g_system->getMixer()->getSoundElapsedTime(*_audHandle); - -	return VideoDecoder::getTime(); -} - -void TheoraDecoder::pauseVideoIntern(bool pause) { -	if (_audStream) -		g_system->getMixer()->pauseHandle(*_audHandle, pause); -} - -enum TheoraYUVBuffers { -	kBufferY = 0, -	kBufferU = 1, -	kBufferV = 2 -}; - -void TheoraDecoder::translateYUVtoRGBA(th_ycbcr_buffer &YUVBuffer) { -	// Width and height of all buffers have to be divisible by 2. 
-	assert((YUVBuffer[kBufferY].width & 1)   == 0); -	assert((YUVBuffer[kBufferY].height & 1)  == 0); -	assert((YUVBuffer[kBufferU].width & 1)   == 0); -	assert((YUVBuffer[kBufferV].width & 1)   == 0); - -	// UV images have to have a quarter of the Y image resolution -	assert(YUVBuffer[kBufferU].width  == YUVBuffer[kBufferY].width  >> 1); -	assert(YUVBuffer[kBufferV].width  == YUVBuffer[kBufferY].width  >> 1); -	assert(YUVBuffer[kBufferU].height == YUVBuffer[kBufferY].height >> 1); -	assert(YUVBuffer[kBufferV].height == YUVBuffer[kBufferY].height >> 1); - -	Graphics::convertYUV420ToRGB(&_surface, YUVBuffer[kBufferY].data, YUVBuffer[kBufferU].data, YUVBuffer[kBufferV].data, YUVBuffer[kBufferY].width, YUVBuffer[kBufferY].height, YUVBuffer[kBufferY].stride, YUVBuffer[kBufferU].stride); -} - -void TheoraDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(*_audHandle)) -		g_system->getMixer()->setChannelVolume(*_audHandle, getVolume()); -} - -void TheoraDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(*_audHandle)) -		g_system->getMixer()->setChannelBalance(*_audHandle, getBalance()); -} - -} // End of namespace Sword25 - -#endif diff --git a/engines/sword25/fmv/theora_decoder.h b/engines/sword25/fmv/theora_decoder.h deleted file mode 100644 index 739040024f..0000000000 --- a/engines/sword25/fmv/theora_decoder.h +++ /dev/null @@ -1,144 +0,0 @@ -/* ScummVM - Graphic Adventure Engine - * - * ScummVM is the legal property of its developers, whose names - * are too numerous to list here. Please refer to the COPYRIGHT - * file distributed with this source distribution. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - */ - -#ifndef SWORD25_THEORADECODER_H -#define SWORD25_THEORADECODER_H - -#include "common/scummsys.h"	// for USE_THEORADEC - -#ifdef USE_THEORADEC - -#include "common/rational.h" -#include "video/video_decoder.h" -#include "audio/audiostream.h" -#include "audio/mixer.h" -#include "graphics/pixelformat.h" -#include "graphics/surface.h" - -#include <theora/theoradec.h> -#include <vorbis/codec.h> - -namespace Common { -class SeekableReadStream; -} - -namespace Sword25 { - -/** - * - * Decoder for Theora videos. - * Video decoder used in engines: - *  - sword25 - */ -class TheoraDecoder : public Video::VideoDecoder { -public: -	TheoraDecoder(Audio::Mixer::SoundType soundType = Audio::Mixer::kMusicSoundType); -	virtual ~TheoraDecoder(); - -	/** -	 * Load a video file -	 * @param stream  the stream to load -	 */ -	bool loadStream(Common::SeekableReadStream *stream); -	void close(); -	void reset(); - -	/** -	 * Decode the next frame and return the frame's surface -	 * @note the return surface should *not* be freed -	 * @note this may return 0, in which case the last frame should be kept on screen -	 */ -	const Graphics::Surface *decodeNextFrame(); - -	bool isVideoLoaded() const { return _fileStream != 0; } -	uint16 getWidth() const { return _displaySurface.w; } -	uint16 getHeight() const { return _displaySurface.h; } - -	uint32 getFrameCount() const { -		// It is not possible to get frame count easily -		// I.e. 
seeking is required -		assert(0); -		return 0; -	} - -	Graphics::PixelFormat getPixelFormat() const { return _displaySurface.format; } -	uint32 getTime() const; -	uint32 getTimeToNextFrame() const; - -	bool endOfVideo() const; - -protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); -	void pauseVideoIntern(bool pause); - -private: -	void queuePage(ogg_page *page); -	bool queueAudio(); -	int bufferData(); -	void translateYUVtoRGBA(th_ycbcr_buffer &YUVBuffer); - -	Common::SeekableReadStream *_fileStream; -	Graphics::Surface _surface; -	Graphics::Surface _displaySurface; -	Common::Rational _frameRate; -	double _nextFrameStartTime; -	bool _endOfVideo; -	bool _endOfAudio; - -	Audio::Mixer::SoundType _soundType; -	Audio::SoundHandle *_audHandle; -	Audio::QueuingAudioStream *_audStream; - -	ogg_sync_state _oggSync; -	ogg_page _oggPage; -	ogg_packet _oggPacket; -	ogg_stream_state _vorbisOut; -	ogg_stream_state _theoraOut; -	th_info _theoraInfo; -	th_comment _theoraComment; -	th_dec_ctx *_theoraDecode; -	th_setup_info *_theoraSetup; -	vorbis_info _vorbisInfo; -	vorbis_dsp_state _vorbisDSP; -	vorbis_block _vorbisBlock; -	vorbis_comment _vorbisComment; - -	int _theoraPacket; -	int _vorbisPacket; - -	int _ppLevelMax; -	int _ppLevel; -	int _ppInc; - -	// single audio fragment audio buffering -	int _audiobufFill; -	bool _audiobufReady; -	ogg_int16_t *_audiobuf; -}; - -} // End of namespace Sword25 - -#endif - -#endif diff --git a/engines/sword25/module.mk b/engines/sword25/module.mk index 302120c500..e24a221244 100644 --- a/engines/sword25/module.mk +++ b/engines/sword25/module.mk @@ -85,11 +85,6 @@ MODULE_OBJS := \  	util/pluto/pluto.o \  	util/pluto/plzio.o -ifdef USE_THEORADEC -MODULE_OBJS += \ -	fmv/theora_decoder.o -endif -  # This module can be built as a plugin  ifeq ($(ENABLE_SWORD25), DYNAMIC_PLUGIN)  PLUGIN := 1 diff --git a/engines/toon/movie.cpp b/engines/toon/movie.cpp index 93e41adf57..8c85e20f7c 100644 --- a/engines/toon/movie.cpp +++ 
b/engines/toon/movie.cpp @@ -25,6 +25,7 @@  #include "common/keyboard.h"  #include "common/stream.h"  #include "common/system.h" +#include "graphics/palette.h"  #include "graphics/surface.h"  #include "toon/audio.h" @@ -33,6 +34,10 @@  namespace Toon { +ToonstruckSmackerDecoder::ToonstruckSmackerDecoder() : Video::SmackerDecoder() { +	_lowRes = false; +} +  void ToonstruckSmackerDecoder::handleAudioTrack(byte track, uint32 chunkSize, uint32 unpackedSize) {  	debugC(6, kDebugMovie, "handleAudioTrack(%d, %d, %d)", track, chunkSize, unpackedSize); @@ -40,33 +45,21 @@ void ToonstruckSmackerDecoder::handleAudioTrack(byte track, uint32 chunkSize, ui  		/* uint16 width = */ _fileStream->readUint16LE();  		uint16 height = _fileStream->readUint16LE();  		_lowRes = (height == getHeight() / 2); -	} else +	} else {  		Video::SmackerDecoder::handleAudioTrack(track, chunkSize, unpackedSize); +	}  } -bool ToonstruckSmackerDecoder::loadFile(const Common::String &filename) { -	debugC(1, kDebugMovie, "loadFile(%s)", filename.c_str()); +bool ToonstruckSmackerDecoder::loadStream(Common::SeekableReadStream *stream) { +	if (!Video::SmackerDecoder::loadStream(stream)) +		return false;  	_lowRes = false; - -	if (Video::SmackerDecoder::loadFile(filename)) { -		if (_surface->h == 200) { -			if (_surface) { -				_surface->free(); -				delete _surface; -			} -			_surface = new Graphics::Surface(); -			_surface->create(640, 400, Graphics::PixelFormat::createFormatCLUT8()); -			_header.flags = 4; -		} - -		return true; -	} -	return false; +	return true;  } -ToonstruckSmackerDecoder::ToonstruckSmackerDecoder(Audio::Mixer *mixer, Audio::Mixer::SoundType soundType) : Video::SmackerDecoder(mixer, soundType) { -	_lowRes = false; +Video::SmackerDecoder::SmackerVideoTrack *ToonstruckSmackerDecoder::createVideoTrack(uint32 width, uint32 height, uint32 frameCount, const Common::Rational &frameRate, uint32 flags, uint32 signature) const { +	return Video::SmackerDecoder::createVideoTrack(width, height, 
frameCount, frameRate, (height == 200) ? 4 : flags, signature);  }  // decoder is deallocated with Movie destruction i.e. new ToonstruckSmackerDecoder is needed @@ -103,6 +96,9 @@ void Movie::play(const Common::String &video, int32 flags) {  bool Movie::playVideo(bool isFirstIntroVideo) {  	debugC(1, kDebugMovie, "playVideo(isFirstIntroVideo: %d)", isFirstIntroVideo); + +	_decoder->start(); +  	while (!_vm->shouldQuit() && !_decoder->endOfVideo()) {  		if (_decoder->needsUpdate()) {  			const Graphics::Surface *frame = _decoder->decodeNextFrame(); @@ -131,7 +127,7 @@ bool Movie::playVideo(bool isFirstIntroVideo) {  					}  				}  			} -			_decoder->setSystemPalette(); +			_vm->_system->getPaletteManager()->setPalette(_decoder->getPalette(), 0, 256);  			_vm->_system->updateScreen();  		} diff --git a/engines/toon/movie.h b/engines/toon/movie.h index 2cd33302f2..e795182cba 100644 --- a/engines/toon/movie.h +++ b/engines/toon/movie.h @@ -30,13 +30,17 @@ namespace Toon {  class ToonstruckSmackerDecoder : public Video::SmackerDecoder {  public: -	ToonstruckSmackerDecoder(Audio::Mixer *mixer, Audio::Mixer::SoundType soundType = Audio::Mixer::kSFXSoundType); -	virtual ~ToonstruckSmackerDecoder() {} -	void handleAudioTrack(byte track, uint32 chunkSize, uint32 unpackedSize); -	bool loadFile(const Common::String &filename); +	ToonstruckSmackerDecoder(); + +	bool loadStream(Common::SeekableReadStream *stream);  	bool isLowRes() { return _lowRes; } +  protected: -	bool _lowRes; +	void handleAudioTrack(byte track, uint32 chunkSize, uint32 unpackedSize); +	SmackerVideoTrack *createVideoTrack(uint32 width, uint32 height, uint32 frameCount, const Common::Rational &frameRate, uint32 flags, uint32 signature) const; + +private: +	bool _lowRes;	  };  class Movie { diff --git a/engines/toon/toon.cpp b/engines/toon/toon.cpp index ee427652d8..9fd8415676 100644 --- a/engines/toon/toon.cpp +++ b/engines/toon/toon.cpp @@ -51,7 +51,7 @@ void ToonEngine::init() {  	_currentScriptRegion = 0;  
	_resources = new Resources(this);  	_animationManager = new AnimationManager(this); -	_moviePlayer = new Movie(this, new ToonstruckSmackerDecoder(_mixer)); +	_moviePlayer = new Movie(this, new ToonstruckSmackerDecoder());  	_hotspots = new Hotspots(this);  	_mainSurface = new Graphics::Surface(); diff --git a/engines/tucker/sequences.cpp b/engines/tucker/sequences.cpp index 775fd6f1a0..16c4f4f6f0 100644 --- a/engines/tucker/sequences.cpp +++ b/engines/tucker/sequences.cpp @@ -28,6 +28,7 @@  #include "audio/decoders/wave.h"  #include "graphics/palette.h" +#include "graphics/surface.h"  #include "tucker/tucker.h"  #include "tucker/graphics.h" @@ -749,6 +750,7 @@ void AnimationSequencePlayer::openAnimation(int index, const char *fileName) {  		_seqNum = 1;  		return;  	} +	_flicPlayer[index].start();  	_flicPlayer[index].decodeNextFrame();  	if (index == 0) {  		getRGBPalette(index); @@ -801,7 +803,7 @@ void AnimationSequencePlayer::playIntroSeq19_20() {  	if (_flicPlayer[0].getCurFrame() >= 115) {  		surface = _flicPlayer[1].decodeNextFrame();  		if (_flicPlayer[1].endOfVideo()) -			_flicPlayer[1].reset(); +			_flicPlayer[1].rewind();  	}  	bool framesLeft = decodeNextAnimationFrame(0, false); diff --git a/video/avi_decoder.cpp b/video/avi_decoder.cpp index 2ea7e8d90e..09b95d38ad 100644 --- a/video/avi_decoder.cpp +++ b/video/avi_decoder.cpp @@ -42,106 +42,128 @@  namespace Video { -/* +#define UNKNOWN_HEADER(a) error("Unknown header found -- \'%s\'", tag2str(a)) + +// IDs used throughout the AVI files +// that will be handled by this player +#define ID_RIFF MKTAG('R','I','F','F') +#define ID_AVI  MKTAG('A','V','I',' ') +#define ID_LIST MKTAG('L','I','S','T') +#define ID_HDRL MKTAG('h','d','r','l') +#define ID_AVIH MKTAG('a','v','i','h') +#define ID_STRL MKTAG('s','t','r','l') +#define ID_STRH MKTAG('s','t','r','h') +#define ID_VIDS MKTAG('v','i','d','s') +#define ID_AUDS MKTAG('a','u','d','s') +#define ID_MIDS MKTAG('m','i','d','s') +#define ID_TXTS 
MKTAG('t','x','t','s') +#define ID_JUNK MKTAG('J','U','N','K') +#define ID_STRF MKTAG('s','t','r','f') +#define ID_MOVI MKTAG('m','o','v','i') +#define ID_REC  MKTAG('r','e','c',' ') +#define ID_VEDT MKTAG('v','e','d','t') +#define ID_IDX1 MKTAG('i','d','x','1') +#define ID_STRD MKTAG('s','t','r','d') +#define ID_00AM MKTAG('0','0','A','M') +//#define ID_INFO MKTAG('I','N','F','O') + +// Codec tags +#define ID_RLE  MKTAG('R','L','E',' ') +#define ID_CRAM MKTAG('C','R','A','M') +#define ID_MSVC MKTAG('m','s','v','c') +#define ID_WHAM MKTAG('W','H','A','M') +#define ID_CVID MKTAG('c','v','i','d') +#define ID_IV32 MKTAG('i','v','3','2') +#define ID_DUCK MKTAG('D','U','C','K') +  static byte char2num(char c) { -	return (c >= 48 && c <= 57) ? c - 48 : 0; +	c = tolower((byte)c); +	return (c >= 'a' && c <= 'f') ? c - 'a' + 10 : c - '0';  } -static byte getStreamNum(uint32 tag) { -	return char2num((char)(tag >> 24)) * 16 + char2num((char)(tag >> 16)); +static byte getStreamIndex(uint32 tag) { +	return char2num((tag >> 24) & 0xFF) << 4 | char2num((tag >> 16) & 0xFF);  } -*/  static uint16 getStreamType(uint32 tag) {  	return tag & 0xffff;  } -AviDecoder::AviDecoder(Audio::Mixer *mixer, Audio::Mixer::SoundType soundType) : _mixer(mixer) { -	_soundType = soundType; - -	_videoCodec = NULL; +AVIDecoder::AVIDecoder(Audio::Mixer::SoundType soundType) : _soundType(soundType) {  	_decodedHeader = false; -	_audStream = NULL; -	_fileStream = NULL; -	_audHandle = new Audio::SoundHandle(); -	_dirtyPalette = false; -	memset(_palette, 0, sizeof(_palette)); -	memset(&_wvInfo, 0, sizeof(PCMWAVEFORMAT)); -	memset(&_bmInfo, 0, sizeof(BITMAPINFOHEADER)); -	memset(&_vidsHeader, 0, sizeof(AVIStreamHeader)); -	memset(&_audsHeader, 0, sizeof(AVIStreamHeader)); -	memset(&_ixInfo, 0, sizeof(AVIOLDINDEX)); +	_fileStream = 0; +	memset(&_ixInfo, 0, sizeof(_ixInfo)); +	memset(&_header, 0, sizeof(_header));  } -AviDecoder::~AviDecoder() { +AVIDecoder::~AVIDecoder() {  	close(); -	delete _audHandle;  } 
-void AviDecoder::runHandle(uint32 tag) { -	assert (_fileStream); +void AVIDecoder::runHandle(uint32 tag) { +	assert(_fileStream);  	if (_fileStream->eos())  		return; -	debug (3, "Decoding tag %s", tag2str(tag)); +	debug(3, "Decoding tag %s", tag2str(tag));  	switch (tag) { -		case ID_RIFF: -			/*_filesize = */_fileStream->readUint32LE(); -			if (_fileStream->readUint32BE() != ID_AVI) -				error("RIFF file is not an AVI video"); -			break; -		case ID_LIST: -			handleList(); -			break; -		case ID_AVIH: -			_header.size = _fileStream->readUint32LE(); -			_header.microSecondsPerFrame = _fileStream->readUint32LE(); -			_header.maxBytesPerSecond = _fileStream->readUint32LE(); -			_header.padding = _fileStream->readUint32LE(); -			_header.flags = _fileStream->readUint32LE(); -			_header.totalFrames = _fileStream->readUint32LE(); -			_header.initialFrames = _fileStream->readUint32LE(); -			_header.streams = _fileStream->readUint32LE(); -			_header.bufferSize = _fileStream->readUint32LE(); -			_header.width = _fileStream->readUint32LE(); -			_header.height = _fileStream->readUint32LE(); -			//Ignore 16 bytes of reserved data -			_fileStream->skip(16); -			break; -		case ID_STRH: -			handleStreamHeader(); -			break; -		case ID_STRD: // Extra stream info, safe to ignore -		case ID_VEDT: // Unknown, safe to ignore -		case ID_JUNK: // Alignment bytes, should be ignored -			{ -			uint32 junkSize = _fileStream->readUint32LE(); -			_fileStream->skip(junkSize + (junkSize & 1)); // Alignment -			} break; -		case ID_IDX1: -			_ixInfo.size = _fileStream->readUint32LE(); -			_ixInfo.indices = new AVIOLDINDEX::Index[_ixInfo.size / 16]; -			debug (0, "%d Indices", (_ixInfo.size / 16)); -			for (uint32 i = 0; i < (_ixInfo.size / 16); i++) { -				_ixInfo.indices[i].id = _fileStream->readUint32BE(); -				_ixInfo.indices[i].flags = _fileStream->readUint32LE(); -				_ixInfo.indices[i].offset = _fileStream->readUint32LE(); -				_ixInfo.indices[i].size = _fileStream->readUint32LE(); -				
debug (0, "Index %d == Tag \'%s\', Offset = %d, Size = %d", i, tag2str(_ixInfo.indices[i].id), _ixInfo.indices[i].offset, _ixInfo.indices[i].size); -			} -			break; -		default: -			error ("Unknown tag \'%s\' found", tag2str(tag)); +	case ID_RIFF: +		/*_filesize = */_fileStream->readUint32LE(); +		if (_fileStream->readUint32BE() != ID_AVI) +			error("RIFF file is not an AVI video"); +		break; +	case ID_LIST: +		handleList(); +		break; +	case ID_AVIH: +		_header.size = _fileStream->readUint32LE(); +		_header.microSecondsPerFrame = _fileStream->readUint32LE(); +		_header.maxBytesPerSecond = _fileStream->readUint32LE(); +		_header.padding = _fileStream->readUint32LE(); +		_header.flags = _fileStream->readUint32LE(); +		_header.totalFrames = _fileStream->readUint32LE(); +		_header.initialFrames = _fileStream->readUint32LE(); +		_header.streams = _fileStream->readUint32LE(); +		_header.bufferSize = _fileStream->readUint32LE(); +		_header.width = _fileStream->readUint32LE(); +		_header.height = _fileStream->readUint32LE(); +		// Ignore 16 bytes of reserved data +		_fileStream->skip(16); +		break; +	case ID_STRH: +		handleStreamHeader(); +		break; +	case ID_STRD: // Extra stream info, safe to ignore +	case ID_VEDT: // Unknown, safe to ignore +	case ID_JUNK: // Alignment bytes, should be ignored +		{ +		uint32 junkSize = _fileStream->readUint32LE(); +		_fileStream->skip(junkSize + (junkSize & 1)); // Alignment +		} break; +	case ID_IDX1: +		_ixInfo.size = _fileStream->readUint32LE(); +		_ixInfo.indices = new OldIndex::Index[_ixInfo.size / 16]; +		debug(0, "%d Indices", (_ixInfo.size / 16)); +		for (uint32 i = 0; i < (_ixInfo.size / 16); i++) { +			_ixInfo.indices[i].id = _fileStream->readUint32BE(); +			_ixInfo.indices[i].flags = _fileStream->readUint32LE(); +			_ixInfo.indices[i].offset = _fileStream->readUint32LE(); +			_ixInfo.indices[i].size = _fileStream->readUint32LE(); +			debug(0, "Index %d == Tag \'%s\', Offset = %d, Size = %d", i, tag2str(_ixInfo.indices[i].id), 
_ixInfo.indices[i].offset, _ixInfo.indices[i].size); +		} +		break; +	default: +		error("Unknown tag \'%s\' found", tag2str(tag));  	}  } -void AviDecoder::handleList() { +void AVIDecoder::handleList() {  	uint32 listSize = _fileStream->readUint32LE() - 4; // Subtract away listType's 4 bytes  	uint32 listType = _fileStream->readUint32BE();  	uint32 curPos = _fileStream->pos(); -	debug (0, "Found LIST of type %s", tag2str(listType)); +	debug(0, "Found LIST of type %s", tag2str(listType));  	while ((_fileStream->pos() - curPos) < listSize)  		runHandle(_fileStream->readUint32BE()); @@ -151,12 +173,14 @@ void AviDecoder::handleList() {  		_decodedHeader = true;  } -void AviDecoder::handleStreamHeader() { +void AVIDecoder::handleStreamHeader() {  	AVIStreamHeader sHeader;  	sHeader.size = _fileStream->readUint32LE();  	sHeader.streamType = _fileStream->readUint32BE(); +  	if (sHeader.streamType == ID_MIDS || sHeader.streamType == ID_TXTS) -		error ("Unhandled MIDI/Text stream"); +		error("Unhandled MIDI/Text stream"); +  	sHeader.streamHandler = _fileStream->readUint32BE();  	sHeader.flags = _fileStream->readUint32LE();  	sHeader.priority = _fileStream->readUint16LE(); @@ -174,63 +198,67 @@ void AviDecoder::handleStreamHeader() {  	if (_fileStream->readUint32BE() != ID_STRF)  		error("Could not find STRF tag"); +  	uint32 strfSize = _fileStream->readUint32LE();  	uint32 startPos = _fileStream->pos();  	if (sHeader.streamType == ID_VIDS) { -		_vidsHeader = sHeader; - -		_bmInfo.size = _fileStream->readUint32LE(); -		_bmInfo.width = _fileStream->readUint32LE(); -		assert (_header.width == _bmInfo.width); -		_bmInfo.height = _fileStream->readUint32LE(); -		assert (_header.height == _bmInfo.height); -		_bmInfo.planes = _fileStream->readUint16LE(); -		_bmInfo.bitCount = _fileStream->readUint16LE(); -		_bmInfo.compression = _fileStream->readUint32BE(); -		_bmInfo.sizeImage = _fileStream->readUint32LE(); -		_bmInfo.xPelsPerMeter = _fileStream->readUint32LE(); -		
_bmInfo.yPelsPerMeter = _fileStream->readUint32LE(); -		_bmInfo.clrUsed = _fileStream->readUint32LE(); -		_bmInfo.clrImportant = _fileStream->readUint32LE(); - -		if (_bmInfo.bitCount == 8) { -			if (_bmInfo.clrUsed == 0) -				_bmInfo.clrUsed = 256; - -			for (uint32 i = 0; i < _bmInfo.clrUsed; i++) { -				_palette[i * 3 + 2] = _fileStream->readByte(); -				_palette[i * 3 + 1] = _fileStream->readByte(); -				_palette[i * 3] = _fileStream->readByte(); +		BitmapInfoHeader bmInfo; +		bmInfo.size = _fileStream->readUint32LE(); +		bmInfo.width = _fileStream->readUint32LE(); +		bmInfo.height = _fileStream->readUint32LE(); +		bmInfo.planes = _fileStream->readUint16LE(); +		bmInfo.bitCount = _fileStream->readUint16LE(); +		bmInfo.compression = _fileStream->readUint32BE(); +		bmInfo.sizeImage = _fileStream->readUint32LE(); +		bmInfo.xPelsPerMeter = _fileStream->readUint32LE(); +		bmInfo.yPelsPerMeter = _fileStream->readUint32LE(); +		bmInfo.clrUsed = _fileStream->readUint32LE(); +		bmInfo.clrImportant = _fileStream->readUint32LE(); + +		if (bmInfo.clrUsed == 0) +			bmInfo.clrUsed = 256; + +		if (sHeader.streamHandler == 0) +			sHeader.streamHandler = bmInfo.compression; + +		AVIVideoTrack *track = new AVIVideoTrack(_header.totalFrames, sHeader, bmInfo); + +		if (bmInfo.bitCount == 8) { +			byte *palette = const_cast<byte *>(track->getPalette()); +			for (uint32 i = 0; i < bmInfo.clrUsed; i++) { +				palette[i * 3 + 2] = _fileStream->readByte(); +				palette[i * 3 + 1] = _fileStream->readByte(); +				palette[i * 3] = _fileStream->readByte();  				_fileStream->readByte();  			} -			_dirtyPalette = true; +			track->markPaletteDirty();  		} -		if (!_vidsHeader.streamHandler) -			_vidsHeader.streamHandler = _bmInfo.compression; +		addTrack(track);  	} else if (sHeader.streamType == ID_AUDS) { -		_audsHeader = sHeader; - -		_wvInfo.tag = _fileStream->readUint16LE(); -		_wvInfo.channels = _fileStream->readUint16LE(); -		_wvInfo.samplesPerSec = _fileStream->readUint32LE(); -		
_wvInfo.avgBytesPerSec = _fileStream->readUint32LE(); -		_wvInfo.blockAlign = _fileStream->readUint16LE(); -		_wvInfo.size = _fileStream->readUint16LE(); +		PCMWaveFormat wvInfo; +		wvInfo.tag = _fileStream->readUint16LE(); +		wvInfo.channels = _fileStream->readUint16LE(); +		wvInfo.samplesPerSec = _fileStream->readUint32LE(); +		wvInfo.avgBytesPerSec = _fileStream->readUint32LE(); +		wvInfo.blockAlign = _fileStream->readUint16LE(); +		wvInfo.size = _fileStream->readUint16LE();  		// AVI seems to treat the sampleSize as including the second  		// channel as well, so divide for our sake. -		if (_wvInfo.channels == 2) -			_audsHeader.sampleSize /= 2; +		if (wvInfo.channels == 2) +			sHeader.sampleSize /= 2; + +		addTrack(new AVIAudioTrack(sHeader, wvInfo, _soundType));  	}  	// Ensure that we're at the end of the chunk  	_fileStream->seek(startPos + strfSize);  } -bool AviDecoder::loadStream(Common::SeekableReadStream *stream) { +bool AVIDecoder::loadStream(Common::SeekableReadStream *stream) {  	close();  	_fileStream = stream; @@ -252,74 +280,31 @@ bool AviDecoder::loadStream(Common::SeekableReadStream *stream) {  	if (nextTag == ID_LIST) {  		_fileStream->readUint32BE(); // Skip size  		if (_fileStream->readUint32BE() != ID_MOVI) -			error ("Expected 'movi' LIST"); -	} else -		error ("Expected 'movi' LIST"); - -	// Now, create the codec -	_videoCodec = createCodec(); - -	// Initialize the video stuff too -	_audStream = createAudioStream(); -	if (_audStream) -		_mixer->playStream(_soundType, _audHandle, _audStream, -1, getVolume(), getBalance()); - -	debug (0, "Frames = %d, Dimensions = %d x %d", _header.totalFrames, _header.width, _header.height); -	debug (0, "Frame Rate = %d", _vidsHeader.rate / _vidsHeader.scale); -	if (_wvInfo.samplesPerSec != 0) -		debug (0, "Sound Rate = %d", _wvInfo.samplesPerSec); -	debug (0, "Video Codec = \'%s\'", tag2str(_vidsHeader.streamHandler)); - -	if (!_videoCodec) -		return false; +			error("Expected 'movi' LIST"); +	} else { +		
error("Expected 'movi' LIST"); +	}  	return true;  } -void AviDecoder::close() { -	if (!_fileStream) -		return; +void AVIDecoder::close() { +	VideoDecoder::close();  	delete _fileStream;  	_fileStream = 0; - -	// Deinitialize sound -	_mixer->stopHandle(*_audHandle); -	_audStream = 0; -  	_decodedHeader = false; -	delete _videoCodec; -	_videoCodec = 0; -  	delete[] _ixInfo.indices; -	_ixInfo.indices = 0; - -	memset(_palette, 0, sizeof(_palette)); -	memset(&_wvInfo, 0, sizeof(PCMWAVEFORMAT)); -	memset(&_bmInfo, 0, sizeof(BITMAPINFOHEADER)); -	memset(&_vidsHeader, 0, sizeof(AVIStreamHeader)); -	memset(&_audsHeader, 0, sizeof(AVIStreamHeader)); -	memset(&_ixInfo, 0, sizeof(AVIOLDINDEX)); - -	reset(); -} - -uint32 AviDecoder::getTime() const { -	if (_audStream) -		return _mixer->getSoundElapsedTime(*_audHandle); - -	return FixedRateVideoDecoder::getTime(); +	memset(&_ixInfo, 0, sizeof(_ixInfo)); +	memset(&_header, 0, sizeof(_header));  } -const Graphics::Surface *AviDecoder::decodeNextFrame() { +void AVIDecoder::readNextPacket() {  	uint32 nextTag = _fileStream->readUint32BE();  	if (_fileStream->eos()) -		return NULL; - -	if (_curFrame == -1) -		_startTime = g_system->getMillis(); +		return;  	if (nextTag == ID_LIST) {  		// A list of audio/video chunks @@ -327,138 +312,159 @@ const Graphics::Surface *AviDecoder::decodeNextFrame() {  		int32 startPos = _fileStream->pos();  		if (_fileStream->readUint32BE() != ID_REC) -			error ("Expected 'rec ' LIST"); - -		// Decode chunks in the list and see if we get a frame -		const Graphics::Surface *frame = NULL; -		while (_fileStream->pos() < startPos + (int32)listSize) { -			const Graphics::Surface *temp = decodeNextFrame(); -			if (temp) -				frame = temp; -		} +			error("Expected 'rec ' LIST"); -		return frame; -	} else if (getStreamType(nextTag) == 'wb') { -		// Audio Chunk -		uint32 chunkSize = _fileStream->readUint32LE(); -		queueAudioBuffer(chunkSize); -		_fileStream->skip(chunkSize & 1); // Alignment -	} else if 
(getStreamType(nextTag) == 'dc' || getStreamType(nextTag) == 'id' || -	           getStreamType(nextTag) == 'AM' || getStreamType(nextTag) == '32' || -			   getStreamType(nextTag) == 'iv') { -		// Compressed Frame -		_curFrame++; -		uint32 chunkSize = _fileStream->readUint32LE(); - -		if (chunkSize == 0) // Keep last frame on screen -			return NULL; - -		Common::SeekableReadStream *frameData = _fileStream->readStream(chunkSize); -		const Graphics::Surface *surface = _videoCodec->decodeImage(frameData); -		delete frameData; -		_fileStream->skip(chunkSize & 1); // Alignment -		return surface; -	} else if (getStreamType(nextTag) == 'pc') { -		// Palette Change -		_fileStream->readUint32LE(); // Chunk size, not needed here -		byte firstEntry = _fileStream->readByte(); -		uint16 numEntries = _fileStream->readByte(); -		_fileStream->readUint16LE(); // Reserved - -		// 0 entries means all colors are going to be changed -		if (numEntries == 0) -			numEntries = 256; - -		for (uint16 i = firstEntry; i < numEntries + firstEntry; i++) { -			_palette[i * 3] = _fileStream->readByte(); -			_palette[i * 3 + 1] = _fileStream->readByte(); -			_palette[i * 3 + 2] = _fileStream->readByte(); -			_fileStream->readByte(); // Flags that don't serve us any purpose -		} +		// Decode chunks in the list +		while (_fileStream->pos() < startPos + (int32)listSize) +			readNextPacket(); -		_dirtyPalette = true; +		return; +	} else if (nextTag == ID_JUNK || nextTag == ID_IDX1) { +		runHandle(nextTag); +		return; +	} -		// No alignment necessary. It's always even. 
-	} else if (nextTag == ID_JUNK) { -		runHandle(ID_JUNK); -	} else if (nextTag == ID_IDX1) { -		runHandle(ID_IDX1); -	} else -		error("Tag = \'%s\', %d", tag2str(nextTag), _fileStream->pos()); +	Track *track = getTrack(getStreamIndex(nextTag)); -	return NULL; -} +	if (!track) +		error("Cannot get track from tag '%s'", tag2str(nextTag)); -Codec *AviDecoder::createCodec() { -	switch (_vidsHeader.streamHandler) { -		case ID_CRAM: -		case ID_MSVC: -		case ID_WHAM: -			return new MSVideo1Decoder(_bmInfo.width, _bmInfo.height, _bmInfo.bitCount); -		case ID_RLE: -			return new MSRLEDecoder(_bmInfo.width, _bmInfo.height, _bmInfo.bitCount); -		case ID_CVID: -			return new CinepakDecoder(_bmInfo.bitCount); -		case ID_IV32: -			return new Indeo3Decoder(_bmInfo.width, _bmInfo.height); -#ifdef VIDEO_CODECS_TRUEMOTION1_H -		case ID_DUCK: -			return new TrueMotion1Decoder(_bmInfo.width, _bmInfo.height); -#endif -		default: -			warning ("Unknown/Unhandled compression format \'%s\'", tag2str(_vidsHeader.streamHandler)); +	uint32 chunkSize = _fileStream->readUint32LE(); +	Common::SeekableReadStream *chunk = _fileStream->readStream(chunkSize); +	_fileStream->skip(chunkSize & 1); + +	if (track->getTrackType() == Track::kTrackTypeAudio) { +		if (getStreamType(nextTag) != MKTAG16('w', 'b')) +			error("Invalid audio track tag '%s'", tag2str(nextTag)); + +		((AVIAudioTrack *)track)->queueSound(chunk); +	} else { +		AVIVideoTrack *videoTrack = (AVIVideoTrack *)track; + +		if (getStreamType(nextTag) == MKTAG16('p', 'c')) { +			// Palette Change +			byte firstEntry = chunk->readByte(); +			uint16 numEntries = chunk->readByte(); +			chunk->readUint16LE(); // Reserved + +			// 0 entries means all colors are going to be changed +			if (numEntries == 0) +				numEntries = 256; + +			byte *palette = const_cast<byte *>(videoTrack->getPalette()); + +			for (uint16 i = firstEntry; i < numEntries + firstEntry; i++) { +				palette[i * 3] = chunk->readByte(); +				palette[i * 3 + 1] = chunk->readByte(); 
+				palette[i * 3 + 2] = chunk->readByte(); +				chunk->readByte(); // Flags that don't serve us any purpose +			} + +			delete chunk; +			videoTrack->markPaletteDirty(); +		} else if (getStreamType(nextTag) == MKTAG16('d', 'b')) { +			// TODO: Check if this really is uncompressed. Many videos +			// falsely put compressed data in here. +			error("Uncompressed AVI frame found"); +		} else { +			// Otherwise, assume it's a compressed frame +			videoTrack->decodeFrame(chunk); +		}  	} +} -	return NULL; +AVIDecoder::AVIVideoTrack::AVIVideoTrack(int frameCount, const AVIStreamHeader &streamHeader, const BitmapInfoHeader &bitmapInfoHeader)  +		: _frameCount(frameCount), _vidsHeader(streamHeader), _bmInfo(bitmapInfoHeader) { +	memset(_palette, 0, sizeof(_palette)); +	_videoCodec = createCodec(); +	_dirtyPalette = false; +	_lastFrame = 0; +	_curFrame = -1;  } -Graphics::PixelFormat AviDecoder::getPixelFormat() const { -	assert(_videoCodec); -	return _videoCodec->getPixelFormat(); +AVIDecoder::AVIVideoTrack::~AVIVideoTrack() { +	delete _videoCodec;  } -Audio::QueuingAudioStream *AviDecoder::createAudioStream() { -	if (_wvInfo.tag == kWaveFormatPCM || _wvInfo.tag == kWaveFormatDK3) -		return Audio::makeQueuingAudioStream(_wvInfo.samplesPerSec, _wvInfo.channels == 2); -	else if (_wvInfo.tag != kWaveFormatNone) // No sound -		warning("Unsupported AVI audio format %d", _wvInfo.tag); +void AVIDecoder::AVIVideoTrack::decodeFrame(Common::SeekableReadStream *stream) { +	if (_videoCodec) +		_lastFrame = _videoCodec->decodeImage(stream); -	return NULL; +	delete stream; +	_curFrame++;  } -void AviDecoder::queueAudioBuffer(uint32 chunkSize) { -	// Return if we haven't created the queue (unsupported audio format) -	if (!_audStream) { -		_fileStream->skip(chunkSize); -		return; +Graphics::PixelFormat AVIDecoder::AVIVideoTrack::getPixelFormat() const { +	if (_videoCodec) +		return _videoCodec->getPixelFormat(); + +	return Graphics::PixelFormat(); +} + +Codec 
*AVIDecoder::AVIVideoTrack::createCodec() { +	switch (_vidsHeader.streamHandler) { +	case ID_CRAM: +	case ID_MSVC: +	case ID_WHAM: +		return new MSVideo1Decoder(_bmInfo.width, _bmInfo.height, _bmInfo.bitCount); +	case ID_RLE: +		return new MSRLEDecoder(_bmInfo.width, _bmInfo.height, _bmInfo.bitCount); +	case ID_CVID: +		return new CinepakDecoder(_bmInfo.bitCount); +	case ID_IV32: +		return new Indeo3Decoder(_bmInfo.width, _bmInfo.height); +#ifdef VIDEO_CODECS_TRUEMOTION1_H +	case ID_DUCK: +		return new TrueMotion1Decoder(_bmInfo.width, _bmInfo.height); +#endif +	default: +		warning("Unknown/Unhandled compression format \'%s\'", tag2str(_vidsHeader.streamHandler));  	} -	Common::SeekableReadStream *stream = _fileStream->readStream(chunkSize); +	return 0; +} -	if (_wvInfo.tag == kWaveFormatPCM) { -		byte flags = 0; -		if (_audsHeader.sampleSize == 2) -			flags |= Audio::FLAG_16BITS | Audio::FLAG_LITTLE_ENDIAN; -		else -			flags |= Audio::FLAG_UNSIGNED; +AVIDecoder::AVIAudioTrack::AVIAudioTrack(const AVIStreamHeader &streamHeader, const PCMWaveFormat &waveFormat, Audio::Mixer::SoundType soundType) +		: _audsHeader(streamHeader), _wvInfo(waveFormat), _soundType(soundType) { +	_audStream = createAudioStream(); +} -		if (_wvInfo.channels == 2) -			flags |= Audio::FLAG_STEREO; +AVIDecoder::AVIAudioTrack::~AVIAudioTrack() { +	delete _audStream; +} -		_audStream->queueAudioStream(Audio::makeRawStream(stream, _wvInfo.samplesPerSec, flags, DisposeAfterUse::YES), DisposeAfterUse::YES); -	} else if (_wvInfo.tag == kWaveFormatDK3) { -		_audStream->queueAudioStream(Audio::makeADPCMStream(stream, DisposeAfterUse::YES, chunkSize, Audio::kADPCMDK3, _wvInfo.samplesPerSec, _wvInfo.channels, _wvInfo.blockAlign), DisposeAfterUse::YES); +void AVIDecoder::AVIAudioTrack::queueSound(Common::SeekableReadStream *stream) { +	if (_audStream) { +		if (_wvInfo.tag == kWaveFormatPCM) { +			byte flags = 0; +			if (_audsHeader.sampleSize == 2) +				flags |= Audio::FLAG_16BITS | 
Audio::FLAG_LITTLE_ENDIAN; +			else +				flags |= Audio::FLAG_UNSIGNED; + +			if (_wvInfo.channels == 2) +				flags |= Audio::FLAG_STEREO; + +			_audStream->queueAudioStream(Audio::makeRawStream(stream, _wvInfo.samplesPerSec, flags, DisposeAfterUse::YES), DisposeAfterUse::YES); +		} else if (_wvInfo.tag == kWaveFormatDK3) { +			_audStream->queueAudioStream(Audio::makeADPCMStream(stream, DisposeAfterUse::YES, stream->size(), Audio::kADPCMDK3, _wvInfo.samplesPerSec, _wvInfo.channels, _wvInfo.blockAlign), DisposeAfterUse::YES); +		} +	} else { +		delete stream;  	}  } -void AviDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(*_audHandle)) -		g_system->getMixer()->setChannelVolume(*_audHandle, getVolume()); +Audio::AudioStream *AVIDecoder::AVIAudioTrack::getAudioStream() const { +	return _audStream;  } -void AviDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(*_audHandle)) -		g_system->getMixer()->setChannelBalance(*_audHandle, getBalance()); +Audio::QueuingAudioStream *AVIDecoder::AVIAudioTrack::createAudioStream() { +	if (_wvInfo.tag == kWaveFormatPCM || _wvInfo.tag == kWaveFormatDK3) +		return Audio::makeQueuingAudioStream(_wvInfo.samplesPerSec, _wvInfo.channels == 2); +	else if (_wvInfo.tag != kWaveFormatNone) // No sound +		warning("Unsupported AVI audio format %d", _wvInfo.tag); + +	return 0;  }  } // End of namespace Video diff --git a/video/avi_decoder.h b/video/avi_decoder.h index fb4dae6711..a3a262db36 100644 --- a/video/avi_decoder.h +++ b/video/avi_decoder.h @@ -47,196 +47,183 @@ namespace Video {  class Codec; -#define UNKNOWN_HEADER(a) error("Unknown header found -- \'%s\'", tag2str(a)) - -// IDs used throughout the AVI files -// that will be handled by this player -#define ID_RIFF MKTAG('R','I','F','F') -#define ID_AVI  MKTAG('A','V','I',' ') -#define ID_LIST MKTAG('L','I','S','T') -#define ID_HDRL MKTAG('h','d','r','l') -#define ID_AVIH MKTAG('a','v','i','h') -#define ID_STRL MKTAG('s','t','r','l') 
-#define ID_STRH MKTAG('s','t','r','h') -#define ID_VIDS MKTAG('v','i','d','s') -#define ID_AUDS MKTAG('a','u','d','s') -#define ID_MIDS MKTAG('m','i','d','s') -#define ID_TXTS MKTAG('t','x','t','s') -#define ID_JUNK MKTAG('J','U','N','K') -#define ID_STRF MKTAG('s','t','r','f') -#define ID_MOVI MKTAG('m','o','v','i') -#define ID_REC  MKTAG('r','e','c',' ') -#define ID_VEDT MKTAG('v','e','d','t') -#define ID_IDX1 MKTAG('i','d','x','1') -#define ID_STRD MKTAG('s','t','r','d') -#define ID_00AM MKTAG('0','0','A','M') -//#define ID_INFO MKTAG('I','N','F','O') - -// Codec tags -#define ID_RLE  MKTAG('R','L','E',' ') -#define ID_CRAM MKTAG('C','R','A','M') -#define ID_MSVC MKTAG('m','s','v','c') -#define ID_WHAM MKTAG('W','H','A','M') -#define ID_CVID MKTAG('c','v','i','d') -#define ID_IV32 MKTAG('i','v','3','2') -#define ID_DUCK MKTAG('D','U','C','K') - -struct BITMAPINFOHEADER { -	uint32 size; -	uint32 width; -	uint32 height; -	uint16 planes; -	uint16 bitCount; -	uint32 compression; -	uint32 sizeImage; -	uint32 xPelsPerMeter; -	uint32 yPelsPerMeter; -	uint32 clrUsed; -	uint32 clrImportant; -}; - -struct WAVEFORMAT { -	uint16 tag; -	uint16 channels; -	uint32 samplesPerSec; -	uint32 avgBytesPerSec; -	uint16 blockAlign; -}; - -struct PCMWAVEFORMAT : public WAVEFORMAT { -	uint16 size; -}; - -struct WAVEFORMATEX : public WAVEFORMAT { -	uint16 bitsPerSample; -	uint16 size; -}; - -struct AVIOLDINDEX { -	uint32 size; -	struct Index { -		uint32 id; -		uint32 flags; -		uint32 offset; -		uint32 size; -	} *indices; -}; - -// Index Flags -enum IndexFlags { -	AVIIF_INDEX = 0x10 -}; - -// Audio Codecs -enum { -	kWaveFormatNone = 0, -	kWaveFormatPCM = 1, -	kWaveFormatDK3 = 98 -}; - -struct AVIHeader { -	uint32 size; -	uint32 microSecondsPerFrame; -	uint32 maxBytesPerSecond; -	uint32 padding; -	uint32 flags; -	uint32 totalFrames; -	uint32 initialFrames; -	uint32 streams; -	uint32 bufferSize; -	uint32 width; -	uint32 height; -}; - -// Flags from the AVIHeader -enum AviFlags { -	
AVIF_HASINDEX = 0x00000010, -	AVIF_MUSTUSEINDEX = 0x00000020, -	AVIF_ISINTERLEAVED = 0x00000100, -	AVIF_TRUSTCKTYPE = 0x00000800, -	AVIF_WASCAPTUREFILE = 0x00010000, -	AVIF_WASCOPYRIGHTED = 0x00020000 -}; - -struct AVIStreamHeader { -	uint32 size; -	uint32 streamType; -	uint32 streamHandler; -	uint32 flags; -	uint16 priority; -	uint16 language; -	uint32 initialFrames; -	uint32 scale; -	uint32 rate; -	uint32 start; -	uint32 length; -	uint32 bufferSize; -	uint32 quality; -	uint32 sampleSize; -	Common::Rect frame; -}; -  /**   * Decoder for AVI videos.   *   * Video decoder used in engines:   *  - sci   */ -class AviDecoder : public FixedRateVideoDecoder { +class AVIDecoder : public VideoDecoder {  public: -	AviDecoder(Audio::Mixer *mixer, -			Audio::Mixer::SoundType soundType = Audio::Mixer::kPlainSoundType); -	virtual ~AviDecoder(); +	AVIDecoder(Audio::Mixer::SoundType soundType = Audio::Mixer::kPlainSoundType); +	virtual ~AVIDecoder();  	bool loadStream(Common::SeekableReadStream *stream);  	void close(); - -	bool isVideoLoaded() const { return _fileStream != 0; }  	uint16 getWidth() const { return _header.width; }  	uint16 getHeight() const { return _header.height; } -	uint32 getFrameCount() const { return _header.totalFrames; } -	uint32 getTime() const; -	const Graphics::Surface *decodeNextFrame(); -	Graphics::PixelFormat getPixelFormat() const; -	const byte *getPalette() { _dirtyPalette = false; return _palette; } -	bool hasDirtyPalette() const { return _dirtyPalette; }  protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); - -	// FixedRateVideoDecoder API -	Common::Rational getFrameRate() const { return Common::Rational(_vidsHeader.rate, _vidsHeader.scale); } +	 void readNextPacket();  private: -	Audio::Mixer *_mixer; -	BITMAPINFOHEADER _bmInfo; -	PCMWAVEFORMAT _wvInfo; -	AVIOLDINDEX _ixInfo; -	AVIHeader _header; -	AVIStreamHeader _vidsHeader; -	AVIStreamHeader _audsHeader; -	byte _palette[3 * 256]; -	bool _dirtyPalette; +	struct 
BitmapInfoHeader { +		uint32 size; +		uint32 width; +		uint32 height; +		uint16 planes; +		uint16 bitCount; +		uint32 compression; +		uint32 sizeImage; +		uint32 xPelsPerMeter; +		uint32 yPelsPerMeter; +		uint32 clrUsed; +		uint32 clrImportant; +	}; + +	struct WaveFormat { +		uint16 tag; +		uint16 channels; +		uint32 samplesPerSec; +		uint32 avgBytesPerSec; +		uint16 blockAlign; +	}; + +	struct PCMWaveFormat : public WaveFormat { +		uint16 size; +	}; + +	struct WaveFormatEX : public WaveFormat { +		uint16 bitsPerSample; +		uint16 size; +	}; + +	struct OldIndex { +		uint32 size; +		struct Index { +			uint32 id; +			uint32 flags; +			uint32 offset; +			uint32 size; +		} *indices; +	}; + +	// Index Flags +	enum IndexFlags { +		AVIIF_INDEX = 0x10 +	}; + +	struct AVIHeader { +		uint32 size; +		uint32 microSecondsPerFrame; +		uint32 maxBytesPerSecond; +		uint32 padding; +		uint32 flags; +		uint32 totalFrames; +		uint32 initialFrames; +		uint32 streams; +		uint32 bufferSize; +		uint32 width; +		uint32 height; +	}; + +	// Flags from the AVIHeader +	enum AVIFlags { +		AVIF_HASINDEX = 0x00000010, +		AVIF_MUSTUSEINDEX = 0x00000020, +		AVIF_ISINTERLEAVED = 0x00000100, +		AVIF_TRUSTCKTYPE = 0x00000800, +		AVIF_WASCAPTUREFILE = 0x00010000, +		AVIF_WASCOPYRIGHTED = 0x00020000 +	}; + +	struct AVIStreamHeader { +		uint32 size; +		uint32 streamType; +		uint32 streamHandler; +		uint32 flags; +		uint16 priority; +		uint16 language; +		uint32 initialFrames; +		uint32 scale; +		uint32 rate; +		uint32 start; +		uint32 length; +		uint32 bufferSize; +		uint32 quality; +		uint32 sampleSize; +		Common::Rect frame; +	}; + +	class AVIVideoTrack : public FixedRateVideoTrack { +	public: +		AVIVideoTrack(int frameCount, const AVIStreamHeader &streamHeader, const BitmapInfoHeader &bitmapInfoHeader); +		~AVIVideoTrack(); + +		void decodeFrame(Common::SeekableReadStream *stream); + +		uint16 getWidth() const { return _bmInfo.width; } +		uint16 getHeight() const { return _bmInfo.height; } +		
Graphics::PixelFormat getPixelFormat() const; +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		const Graphics::Surface *decodeNextFrame() { return _lastFrame; } +		const byte *getPalette() const { _dirtyPalette = false; return _palette; } +		bool hasDirtyPalette() const { return _dirtyPalette; } +		void markPaletteDirty() { _dirtyPalette = true; } + +	protected: +		Common::Rational getFrameRate() const { return Common::Rational(_vidsHeader.rate, _vidsHeader.scale); } + +	private: +		AVIStreamHeader _vidsHeader; +		BitmapInfoHeader _bmInfo; +		byte _palette[3 * 256]; +		mutable bool _dirtyPalette; +		int _frameCount, _curFrame; + +		Codec *_videoCodec; +		const Graphics::Surface *_lastFrame; +		Codec *createCodec(); +	}; + +	class AVIAudioTrack : public AudioTrack { +	public: +		AVIAudioTrack(const AVIStreamHeader &streamHeader, const PCMWaveFormat &waveFormat, Audio::Mixer::SoundType soundType); +		~AVIAudioTrack(); + +		void queueSound(Common::SeekableReadStream *stream); +		Audio::Mixer::SoundType getSoundType() const { return _soundType; } + +	protected: +		Audio::AudioStream *getAudioStream() const; + +	private: +		// Audio Codecs +		enum { +			kWaveFormatNone = 0, +			kWaveFormatPCM = 1, +			kWaveFormatDK3 = 98 +		}; + +		AVIStreamHeader _audsHeader; +		PCMWaveFormat _wvInfo; +		Audio::Mixer::SoundType _soundType; +		Audio::QueuingAudioStream *_audStream; +		Audio::QueuingAudioStream *createAudioStream(); +	}; + +	OldIndex _ixInfo; +	AVIHeader _header;	  	Common::SeekableReadStream *_fileStream;  	bool _decodedHeader; -	Codec *_videoCodec; -	Codec *createCodec(); -  	Audio::Mixer::SoundType _soundType;  	void runHandle(uint32 tag);  	void handleList();  	void handleStreamHeader(); -	void handlePalChange(); - -	Audio::SoundHandle *_audHandle; -	Audio::QueuingAudioStream *_audStream; -	Audio::QueuingAudioStream *createAudioStream(); -	void queueAudioBuffer(uint32 chunkSize);  };  } // End of namespace Video 
diff --git a/video/bink_decoder.cpp b/video/bink_decoder.cpp index 538487f067..620316806f 100644 --- a/video/bink_decoder.cpp +++ b/video/bink_decoder.cpp @@ -24,6 +24,7 @@  // based quite heavily on the Bink decoder found in FFmpeg.  // Many thanks to Kostya Shishkov for doing the hard work. +#include "audio/audiostream.h"  #include "audio/decoders/raw.h"  #include "common/util.h" @@ -60,139 +61,108 @@ static const uint32 kDCStartBits = 11;  namespace Video { -BinkDecoder::VideoFrame::VideoFrame() : bits(0) { -} - -BinkDecoder::VideoFrame::~VideoFrame() { -	delete bits; +BinkDecoder::BinkDecoder() { +	_bink = 0;  } - -BinkDecoder::AudioTrack::AudioTrack() : bits(0), bands(0), rdft(0), dct(0) { +BinkDecoder::~BinkDecoder() { +	close();  } -BinkDecoder::AudioTrack::~AudioTrack() { -	delete bits; - -	delete[] bands; - -	delete rdft; -	delete dct; -} +bool BinkDecoder::loadStream(Common::SeekableReadStream *stream) { +	close(); +	uint32 id = stream->readUint32BE(); +	if ((id != kBIKfID) && (id != kBIKgID) && (id != kBIKhID) && (id != kBIKiID)) +		return false; -BinkDecoder::BinkDecoder() { -	_bink = 0; -	_audioTrack = 0; +	uint32 fileSize         = stream->readUint32LE() + 8; +	uint32 frameCount       = stream->readUint32LE(); +	uint32 largestFrameSize = stream->readUint32LE(); -	for (int i = 0; i < 16; i++) -		_huffman[i] = 0; +	if (largestFrameSize > fileSize) { +		warning("Largest frame size greater than file size"); +		return false; +	} -	for (int i = 0; i < kSourceMAX; i++) { -		_bundles[i].countLength = 0; +	stream->skip(4); -		_bundles[i].huffman.index = 0; -		for (int j = 0; j < 16; j++) -			_bundles[i].huffman.symbols[j] = j; +	uint32 width  = stream->readUint32LE(); +	uint32 height = stream->readUint32LE(); -		_bundles[i].data     = 0; -		_bundles[i].dataEnd  = 0; -		_bundles[i].curDec   = 0; -		_bundles[i].curPtr   = 0; +	uint32 frameRateNum = stream->readUint32LE(); +	uint32 frameRateDen = stream->readUint32LE(); +	if (frameRateNum == 0 || frameRateDen == 
0) { +		warning("Invalid frame rate (%d/%d)", frameRateNum, frameRateDen); +		return false;  	} -	for (int i = 0; i < 16; i++) { -		_colHighHuffman[i].index = 0; -		for (int j = 0; j < 16; j++) -			_colHighHuffman[i].symbols[j] = j; -	} +	_bink = stream; -	for (int i = 0; i < 4; i++) { -		_curPlanes[i] = 0; -		_oldPlanes[i] = 0; -	} +	uint32 videoFlags = _bink->readUint32LE(); -	_audioStream = 0; -} +	// BIKh and BIKi swap the chroma planes +	addTrack(new BinkVideoTrack(width, height, getDefaultHighColorFormat(), frameCount, +			Common::Rational(frameRateNum, frameRateDen), (id == kBIKhID || id == kBIKiID), videoFlags & kVideoFlagAlpha, id)); -void BinkDecoder::startAudio() { -	if (_audioTrack < _audioTracks.size()) { -		const AudioTrack &audio = _audioTracks[_audioTrack]; +	uint32 audioTrackCount = _bink->readUint32LE(); -		_audioStream = Audio::makeQueuingAudioStream(audio.outSampleRate, audio.outChannels == 2); -		g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, &_audioHandle, _audioStream, -1, getVolume(), getBalance()); -	} // else no audio -} +	if (audioTrackCount > 0) { +		_audioTracks.reserve(audioTrackCount); -void BinkDecoder::stopAudio() { -	if (_audioStream) { -		g_system->getMixer()->stopHandle(_audioHandle); -		_audioStream = 0; -	} -} +		_bink->skip(4 * audioTrackCount); -BinkDecoder::~BinkDecoder() { -	close(); -} +		// Reading audio track properties +		for (uint32 i = 0; i < audioTrackCount; i++) { +			AudioInfo track; -void BinkDecoder::close() { -	reset(); +			track.sampleRate = _bink->readUint16LE(); +			track.flags      = _bink->readUint16LE(); -	// Stop audio -	stopAudio(); +			_audioTracks.push_back(track); -	for (int i = 0; i < 4; i++) { -		delete[] _curPlanes[i]; _curPlanes[i] = 0; -		delete[] _oldPlanes[i]; _oldPlanes[i] = 0; +			initAudioTrack(_audioTracks[i]); +		} + +		_bink->skip(4 * audioTrackCount);  	} -	deinitBundles(); +	// Reading video frame properties +	_frames.resize(frameCount); +	for (uint32 i = 0; i < 
frameCount; i++) { +		_frames[i].offset   = _bink->readUint32LE(); +		_frames[i].keyFrame = _frames[i].offset & 1; -	for (int i = 0; i < 16; i++) { -		delete _huffman[i]; -		_huffman[i] = 0; -	} +		_frames[i].offset &= ~1; -	delete _bink; _bink = 0; -	_surface.free(); +		if (i != 0) +			_frames[i - 1].size = _frames[i].offset - _frames[i - 1].offset; -	_audioTrack = 0; +		_frames[i].bits = 0; +	} -	for (int i = 0; i < kSourceMAX; i++) { -		_bundles[i].countLength = 0; +	_frames[frameCount - 1].size = _bink->size() - _frames[frameCount - 1].offset; -		_bundles[i].huffman.index = 0; -		for (int j = 0; j < 16; j++) -			_bundles[i].huffman.symbols[j] = j; +	return true; +} -		_bundles[i].data     = 0; -		_bundles[i].dataEnd  = 0; -		_bundles[i].curDec   = 0; -		_bundles[i].curPtr   = 0; -	} +void BinkDecoder::close() { +	VideoDecoder::close(); -	for (int i = 0; i < 16; i++) { -		_colHighHuffman[i].index = 0; -		for (int j = 0; j < 16; j++) -			_colHighHuffman[i].symbols[j] = j; -	} +	delete _bink; +	_bink = 0;  	_audioTracks.clear();  	_frames.clear();  } -uint32 BinkDecoder::getTime() const { -	if (_audioStream && g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		return g_system->getMixer()->getSoundElapsedTime(_audioHandle) + _audioStartOffset; +void BinkDecoder::readNextPacket() { +	BinkVideoTrack *videoTrack = (BinkVideoTrack *)getTrack(0); -	return g_system->getMillis() - _startTime; -} - -const Graphics::Surface *BinkDecoder::decodeNextFrame() { -	if (endOfVideo()) -		return 0; +	if (videoTrack->endOfTrack()) +		return; -	VideoFrame &frame = _frames[_curFrame + 1]; +	VideoFrame &frame = _frames[videoTrack->getCurFrame() + 1];  	if (!_bink->seek(frame.offset))  		error("Bad bink seek"); @@ -200,7 +170,7 @@ const Graphics::Surface *BinkDecoder::decodeNextFrame() {  	uint32 frameSize = frame.size;  	for (uint32 i = 0; i < _audioTracks.size(); i++) { -		AudioTrack &audio = _audioTracks[i]; +		AudioInfo &audio = _audioTracks[i];  		uint32 audioPacketLength = 
_bink->readUint32LE(); @@ -210,24 +180,21 @@ const Graphics::Surface *BinkDecoder::decodeNextFrame() {  			error("Audio packet too big for the frame");  		if (audioPacketLength >= 4) { +			// Get our track - audio index plus one as the first track is video +			BinkAudioTrack *audioTrack = (BinkAudioTrack *)getTrack(i + 1);  			uint32 audioPacketStart = _bink->pos();  			uint32 audioPacketEnd   = _bink->pos() + audioPacketLength; -			if (i == _audioTrack) { -				// Only play one audio track - -				//                  Number of samples in bytes -				audio.sampleCount = _bink->readUint32LE() / (2 * audio.channels); +			//                  Number of samples in bytes +			audio.sampleCount = _bink->readUint32LE() / (2 * audio.channels); -				audio.bits = -					new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink, -					    audioPacketStart + 4, audioPacketEnd), true); +			audio.bits = new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink, +					audioPacketStart + 4, audioPacketEnd), true); -				audioPacket(audio); +			audioTrack->decodePacket(); -				delete audio.bits; -				audio.bits = 0; -			} +			delete audio.bits; +			audio.bits = 0;  			_bink->seek(audioPacketEnd); @@ -238,67 +205,125 @@ const Graphics::Surface *BinkDecoder::decodeNextFrame() {  	uint32 videoPacketStart = _bink->pos();  	uint32 videoPacketEnd   = _bink->pos() + frameSize; -	frame.bits = -		new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink, -		    videoPacketStart, videoPacketEnd), true); +	frame.bits = new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink, +			videoPacketStart, videoPacketEnd), true); -	videoPacket(frame); +	videoTrack->decodePacket(frame);  	delete frame.bits;  	frame.bits = 0; +} -	_curFrame++; -	if (_curFrame == 0) -		_startTime = g_system->getMillis(); +BinkDecoder::VideoFrame::VideoFrame() : bits(0) { +} -	return &_surface; +BinkDecoder::VideoFrame::~VideoFrame() { +	delete bits;  } -void 
BinkDecoder::audioPacket(AudioTrack &audio) { -	if (!_audioStream) -		return; -	int outSize = audio.frameLen * audio.channels; -	while (audio.bits->pos() < audio.bits->size()) { -		int16 *out = (int16 *)malloc(outSize * 2); -		memset(out, 0, outSize * 2); +BinkDecoder::AudioInfo::AudioInfo() : bits(0), bands(0), rdft(0), dct(0) { +} -		audioBlock(audio, out); +BinkDecoder::AudioInfo::~AudioInfo() { +	delete bits; -		byte flags = Audio::FLAG_16BITS; -		if (audio.outChannels == 2) -			flags |= Audio::FLAG_STEREO; +	delete[] bands; -#ifdef SCUMM_LITTLE_ENDIAN -		flags |= Audio::FLAG_LITTLE_ENDIAN; -#endif +	delete rdft; +	delete dct; +} + +BinkDecoder::BinkVideoTrack::BinkVideoTrack(uint32 width, uint32 height, const Graphics::PixelFormat &format, uint32 frameCount, const Common::Rational &frameRate, bool swapPlanes, bool hasAlpha, uint32 id) : +		_frameCount(frameCount), _frameRate(frameRate), _swapPlanes(swapPlanes), _hasAlpha(hasAlpha), _id(id) { +	_curFrame = -1;	 + +	for (int i = 0; i < 16; i++) +		_huffman[i] = 0; + +	for (int i = 0; i < kSourceMAX; i++) { +		_bundles[i].countLength = 0; + +		_bundles[i].huffman.index = 0; +		for (int j = 0; j < 16; j++) +			_bundles[i].huffman.symbols[j] = j; + +		_bundles[i].data     = 0; +		_bundles[i].dataEnd  = 0; +		_bundles[i].curDec   = 0; +		_bundles[i].curPtr   = 0; +	} + +	for (int i = 0; i < 16; i++) { +		_colHighHuffman[i].index = 0; +		for (int j = 0; j < 16; j++) +			_colHighHuffman[i].symbols[j] = j; +	} + +	_surface.create(width, height, format); + +	// Give the planes a bit extra space +	width  = _surface.w + 32; +	height = _surface.h + 32; + +	_curPlanes[0] = new byte[ width       *  height      ]; // Y +	_curPlanes[1] = new byte[(width >> 1) * (height >> 1)]; // U, 1/4 resolution +	_curPlanes[2] = new byte[(width >> 1) * (height >> 1)]; // V, 1/4 resolution +	_curPlanes[3] = new byte[ width       *  height      ]; // A +	_oldPlanes[0] = new byte[ width       *  height      ]; // Y +	_oldPlanes[1] = new 
byte[(width >> 1) * (height >> 1)]; // U, 1/4 resolution +	_oldPlanes[2] = new byte[(width >> 1) * (height >> 1)]; // V, 1/4 resolution +	_oldPlanes[3] = new byte[ width       *  height      ]; // A + +	// Initialize the video with solid black +	memset(_curPlanes[0],   0,  width       *  height      ); +	memset(_curPlanes[1],   0, (width >> 1) * (height >> 1)); +	memset(_curPlanes[2],   0, (width >> 1) * (height >> 1)); +	memset(_curPlanes[3], 255,  width       *  height      ); +	memset(_oldPlanes[0],   0,  width       *  height      ); +	memset(_oldPlanes[1],   0, (width >> 1) * (height >> 1)); +	memset(_oldPlanes[2],   0, (width >> 1) * (height >> 1)); +	memset(_oldPlanes[3], 255,  width       *  height      ); -		_audioStream->queueBuffer((byte *)out, audio.blockSize * 2, DisposeAfterUse::YES, flags); +	initBundles(); +	initHuffman(); +} -		if (audio.bits->pos() & 0x1F) // next data block starts at a 32-byte boundary -			audio.bits->skip(32 - (audio.bits->pos() & 0x1F)); +BinkDecoder::BinkVideoTrack::~BinkVideoTrack() { +	for (int i = 0; i < 4; i++) { +		delete[] _curPlanes[i]; _curPlanes[i] = 0; +		delete[] _oldPlanes[i]; _oldPlanes[i] = 0;  	} + +	deinitBundles(); + +	for (int i = 0; i < 16; i++) { +		delete _huffman[i]; +		_huffman[i] = 0; +	} + +	_surface.free();  } -void BinkDecoder::videoPacket(VideoFrame &video) { -	assert(video.bits); +void BinkDecoder::BinkVideoTrack::decodePacket(VideoFrame &frame) { +	assert(frame.bits);  	if (_hasAlpha) {  		if (_id == kBIKiID) -			video.bits->skip(32); +			frame.bits->skip(32); -		decodePlane(video, 3, false); +		decodePlane(frame, 3, false);  	}  	if (_id == kBIKiID) -		video.bits->skip(32); +		frame.bits->skip(32);  	for (int i = 0; i < 3; i++) {  		int planeIdx = ((i == 0) || !_swapPlanes) ? 
i : (i ^ 3); -		decodePlane(video, planeIdx, i != 0); +		decodePlane(frame, planeIdx, i != 0); -		if (video.bits->pos() >= video.bits->size()) +		if (frame.bits->pos() >= frame.bits->size())  			break;  	} @@ -311,10 +336,11 @@ void BinkDecoder::videoPacket(VideoFrame &video) {  	// And swap the planes with the reference planes  	for (int i = 0; i < 4; i++)  		SWAP(_curPlanes[i], _oldPlanes[i]); -} -void BinkDecoder::decodePlane(VideoFrame &video, int planeIdx, bool isChroma) { +	_curFrame++; +} +void BinkDecoder::BinkVideoTrack::decodePlane(VideoFrame &video, int planeIdx, bool isChroma) {  	uint32 blockWidth  = isChroma ? ((_surface.w  + 15) >> 4) : ((_surface.w  + 7) >> 3);  	uint32 blockHeight = isChroma ? ((_surface.h + 15) >> 4) : ((_surface.h + 7) >> 3);  	uint32 width       = isChroma ?  (_surface.w        >> 1) :   _surface.w; @@ -371,48 +397,38 @@ void BinkDecoder::decodePlane(VideoFrame &video, int planeIdx, bool isChroma) {  			}  			switch (blockType) { -				case kBlockSkip: -					blockSkip(ctx); -					break; - -				case kBlockScaled: -					blockScaled(ctx); -					break; - -				case kBlockMotion: -					blockMotion(ctx); -					break; - -				case kBlockRun: -					blockRun(ctx); -					break; - -				case kBlockResidue: -					blockResidue(ctx); -					break; - -				case kBlockIntra: -					blockIntra(ctx); -					break; - -				case kBlockFill: -					blockFill(ctx); -					break; - -				case kBlockInter: -					blockInter(ctx); -					break; - -				case kBlockPattern: -					blockPattern(ctx); -					break; - -				case kBlockRaw: -					blockRaw(ctx); -					break; - -				default: -					error("Unknown block type: %d", blockType); +			case kBlockSkip: +				blockSkip(ctx); +				break; +			case kBlockScaled: +				blockScaled(ctx); +				break; +			case kBlockMotion: +				blockMotion(ctx); +				break; +			case kBlockRun: +				blockRun(ctx); +				break; +			case kBlockResidue: +				blockResidue(ctx); +				break; +			case kBlockIntra: +				blockIntra(ctx); +				break; +			
case kBlockFill: +				blockFill(ctx); +				break; +			case kBlockInter: +				blockInter(ctx); +				break; +			case kBlockPattern: +				blockPattern(ctx); +				break; +			case kBlockRaw: +				blockRaw(ctx); +				break; +			default: +				error("Unknown block type: %d", blockType);  			}  		} @@ -424,7 +440,7 @@ void BinkDecoder::decodePlane(VideoFrame &video, int planeIdx, bool isChroma) {  } -void BinkDecoder::readBundle(VideoFrame &video, Source source) { +void BinkDecoder::BinkVideoTrack::readBundle(VideoFrame &video, Source source) {  	if (source == kSourceColors) {  		for (int i = 0; i < 16; i++)  			readHuffman(video, _colHighHuffman[i]); @@ -439,12 +455,11 @@ void BinkDecoder::readBundle(VideoFrame &video, Source source) {  	_bundles[source].curPtr = _bundles[source].data;  } -void BinkDecoder::readHuffman(VideoFrame &video, Huffman &huffman) { +void BinkDecoder::BinkVideoTrack::readHuffman(VideoFrame &video, Huffman &huffman) {  	huffman.index = video.bits->getBits(4);  	if (huffman.index == 0) {  		// The first tree always gives raw nibbles -  		for (int i = 0; i < 16; i++)  			huffman.symbols[i] = i; @@ -455,7 +470,6 @@ void BinkDecoder::readHuffman(VideoFrame &video, Huffman &huffman) {  	if (video.bits->getBit()) {  		// Symbol selection -  		memset(hasSymbol, 0, 16);  		uint8 length = video.bits->getBits(3); @@ -493,9 +507,9 @@ void BinkDecoder::readHuffman(VideoFrame &video, Huffman &huffman) {  	memcpy(huffman.symbols, in, 16);  } -void BinkDecoder::mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size) { +void BinkDecoder::BinkVideoTrack::mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size) {  	const byte *src2  = src + size; -	int         size2 = size; +	int size2 = size;  	do {  		if (!video.bits->getBit()) { @@ -510,197 +524,12 @@ void BinkDecoder::mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *  	while (size--)  		*dst++ = *src++; +  	while (size2--)  		*dst++ = *src2++;  } -bool 
BinkDecoder::loadStream(Common::SeekableReadStream *stream) { -	Graphics::PixelFormat format = g_system->getScreenFormat(); -	return loadStream(stream, format); -} - -bool BinkDecoder::loadStream(Common::SeekableReadStream *stream, const Graphics::PixelFormat &format) { -	close(); - -	_id = stream->readUint32BE(); -	if ((_id != kBIKfID) && (_id != kBIKgID) && (_id != kBIKhID) && (_id != kBIKiID)) -		return false; - -	uint32 fileSize         = stream->readUint32LE() + 8; -	uint32 frameCount       = stream->readUint32LE(); -	uint32 largestFrameSize = stream->readUint32LE(); - -	if (largestFrameSize > fileSize) { -		warning("Largest frame size greater than file size"); -		return false; -	} - -	stream->skip(4); - -	uint32 width  = stream->readUint32LE(); -	uint32 height = stream->readUint32LE(); - -	uint32 frameRateNum = stream->readUint32LE(); -	uint32 frameRateDen = stream->readUint32LE(); -	if (frameRateNum == 0 || frameRateDen == 0) { -		warning("Invalid frame rate (%d/%d)", frameRateNum, frameRateDen); -		return false; -	} - -	_frameRate = Common::Rational(frameRateNum, frameRateDen); -	_bink = stream; - -	_videoFlags = _bink->readUint32LE(); - -	uint32 audioTrackCount = _bink->readUint32LE(); - -	if (audioTrackCount > 1) { -		warning("More than one audio track found. 
Using the first one"); - -		_audioTrack = 0; -	} - -	if (audioTrackCount > 0) { -		_audioTracks.reserve(audioTrackCount); - -		_bink->skip(4 * audioTrackCount); - -		// Reading audio track properties -		for (uint32 i = 0; i < audioTrackCount; i++) { -			AudioTrack track; - -			track.sampleRate = _bink->readUint16LE(); -			track.flags      = _bink->readUint16LE(); - -			_audioTracks.push_back(track); - -			initAudioTrack(_audioTracks[i]); -		} - -		_bink->skip(4 * audioTrackCount); -	} - -	// Reading video frame properties -	_frames.resize(frameCount); -	for (uint32 i = 0; i < frameCount; i++) { -		_frames[i].offset   = _bink->readUint32LE(); -		_frames[i].keyFrame = _frames[i].offset & 1; - -		_frames[i].offset &= ~1; - -		if (i != 0) -			_frames[i - 1].size = _frames[i].offset - _frames[i - 1].offset; - -		_frames[i].bits = 0; -	} - -	_frames[frameCount - 1].size = _bink->size() - _frames[frameCount - 1].offset; - -	_hasAlpha   = _videoFlags & kVideoFlagAlpha; -	_swapPlanes = (_id == kBIKhID) || (_id == kBIKiID); // BIKh and BIKi swap the chroma planes - -	_surface.create(width, height, format); - -	// Give the planes a bit extra space -	width  = _surface.w  + 32; -	height = _surface.h + 32; - -	_curPlanes[0] = new byte[ width       *  height      ]; // Y -	_curPlanes[1] = new byte[(width >> 1) * (height >> 1)]; // U, 1/4 resolution -	_curPlanes[2] = new byte[(width >> 1) * (height >> 1)]; // V, 1/4 resolution -	_curPlanes[3] = new byte[ width       *  height      ]; // A -	_oldPlanes[0] = new byte[ width       *  height      ]; // Y -	_oldPlanes[1] = new byte[(width >> 1) * (height >> 1)]; // U, 1/4 resolution -	_oldPlanes[2] = new byte[(width >> 1) * (height >> 1)]; // V, 1/4 resolution -	_oldPlanes[3] = new byte[ width       *  height      ]; // A - -	// Initialize the video with solid black -	memset(_curPlanes[0],   0,  width       *  height      ); -	memset(_curPlanes[1],   0, (width >> 1) * (height >> 1)); -	memset(_curPlanes[2],   0, (width >> 1) * (height 
>> 1)); -	memset(_curPlanes[3], 255,  width       *  height      ); -	memset(_oldPlanes[0],   0,  width       *  height      ); -	memset(_oldPlanes[1],   0, (width >> 1) * (height >> 1)); -	memset(_oldPlanes[2],   0, (width >> 1) * (height >> 1)); -	memset(_oldPlanes[3], 255,  width       *  height      ); - -	initBundles(); -	initHuffman(); - -	startAudio(); -	_audioStartOffset = 0; - -	return true; -} - -void BinkDecoder::initAudioTrack(AudioTrack &audio) { -	audio.sampleCount = 0; -	audio.bits        = 0; - -	audio.channels = ((audio.flags & kAudioFlagStereo) != 0) ? 2 : 1; -	audio.codec    = ((audio.flags & kAudioFlagDCT   ) != 0) ? kAudioCodecDCT : kAudioCodecRDFT; - -	if (audio.channels > kAudioChannelsMax) -		error("Too many audio channels: %d", audio.channels); - -	uint32 frameLenBits; -	// Calculate frame length -	if     (audio.sampleRate < 22050) -		frameLenBits =  9; -	else if(audio.sampleRate < 44100) -		frameLenBits = 10; -	else -		frameLenBits = 11; - -	audio.frameLen = 1 << frameLenBits; - -	audio.outSampleRate = audio.sampleRate; -	audio.outChannels   = audio.channels; - -	if (audio.codec  == kAudioCodecRDFT) { -		// RDFT audio already interleaves the samples correctly - -		if (audio.channels == 2) -			frameLenBits++; - -		audio.sampleRate *= audio.channels; -		audio.frameLen   *= audio.channels; -		audio.channels    = 1; -	} - -	audio.overlapLen = audio.frameLen / 16; -	audio.blockSize  = (audio.frameLen - audio.overlapLen) * audio.channels; -	audio.root       = 2.0 / sqrt((double)audio.frameLen); - -	uint32 sampleRateHalf = (audio.sampleRate + 1) / 2; - -	// Calculate number of bands -	for (audio.bandCount = 1; audio.bandCount < 25; audio.bandCount++) -		if (sampleRateHalf <= binkCriticalFreqs[audio.bandCount - 1]) -			break; - -	audio.bands = new uint32[audio.bandCount + 1]; - -	// Populate bands -	audio.bands[0] = 1; -	for (uint32 i = 1; i < audio.bandCount; i++) -		audio.bands[i] = binkCriticalFreqs[i - 1] * (audio.frameLen / 2) / 
sampleRateHalf; -	audio.bands[audio.bandCount] = audio.frameLen / 2; - -	audio.first = true; - -	for (uint8 i = 0; i < audio.channels; i++) -		audio.coeffsPtr[i] = audio.coeffs + i * audio.frameLen; - -	audio.codec = ((audio.flags & kAudioFlagDCT) != 0) ? kAudioCodecDCT : kAudioCodecRDFT; - -	if      (audio.codec == kAudioCodecRDFT) -		audio.rdft = new Common::RDFT(frameLenBits, Common::RDFT::DFT_C2R); -	else if (audio.codec == kAudioCodecDCT) -		audio.dct  = new Common::DCT(frameLenBits, Common::DCT::DCT_III); -} - -void BinkDecoder::initBundles() { +void BinkDecoder::BinkVideoTrack::initBundles() {  	uint32 bw     = (_surface.w  + 7) >> 3;  	uint32 bh     = (_surface.h + 7) >> 3;  	uint32 blocks = bw * bh; @@ -729,21 +558,21 @@ void BinkDecoder::initBundles() {  	}  } -void BinkDecoder::deinitBundles() { +void BinkDecoder::BinkVideoTrack::deinitBundles() {  	for (int i = 0; i < kSourceMAX; i++)  		delete[] _bundles[i].data;  } -void BinkDecoder::initHuffman() { +void BinkDecoder::BinkVideoTrack::initHuffman() {  	for (int i = 0; i < 16; i++)  		_huffman[i] = new Common::Huffman(binkHuffmanLengths[i][15], 16, binkHuffmanCodes[i], binkHuffmanLengths[i]);  } -byte BinkDecoder::getHuffmanSymbol(VideoFrame &video, Huffman &huffman) { +byte BinkDecoder::BinkVideoTrack::getHuffmanSymbol(VideoFrame &video, Huffman &huffman) {  	return huffman.symbols[_huffman[huffman.index]->getSymbol(*video.bits)];  } -int32 BinkDecoder::getBundleValue(Source source) { +int32 BinkDecoder::BinkVideoTrack::getBundleValue(Source source) {  	if ((source < kSourceXOff) || (source == kSourceRun))  		return *_bundles[source].curPtr++; @@ -757,7 +586,7 @@ int32 BinkDecoder::getBundleValue(Source source) {  	return ret;  } -uint32 BinkDecoder::readBundleCount(VideoFrame &video, Bundle &bundle) { +uint32 BinkDecoder::BinkVideoTrack::readBundleCount(VideoFrame &video, Bundle &bundle) {  	if (!bundle.curDec || (bundle.curDec > bundle.curPtr))  		return 0; @@ -768,7 +597,7 @@ uint32 
BinkDecoder::readBundleCount(VideoFrame &video, Bundle &bundle) {  	return n;  } -void BinkDecoder::blockSkip(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockSkip(DecodeContext &ctx) {  	byte *dest = ctx.dest;  	byte *prev = ctx.prev; @@ -776,7 +605,7 @@ void BinkDecoder::blockSkip(DecodeContext &ctx) {  		memcpy(dest, prev, 8);  } -void BinkDecoder::blockScaledSkip(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockScaledSkip(DecodeContext &ctx) {  	byte *dest = ctx.dest;  	byte *prev = ctx.prev; @@ -784,7 +613,7 @@ void BinkDecoder::blockScaledSkip(DecodeContext &ctx) {  		memcpy(dest, prev, 16);  } -void BinkDecoder::blockScaledRun(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockScaledRun(DecodeContext &ctx) {  	const uint8 *scan = binkPatterns[ctx.video->bits->getBits(4)];  	int i = 0; @@ -820,7 +649,7 @@ void BinkDecoder::blockScaledRun(DecodeContext &ctx) {  		ctx.dest[ctx.coordScaledMap4[*scan]] = getBundleValue(kSourceColors);  } -void BinkDecoder::blockScaledIntra(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockScaledIntra(DecodeContext &ctx) {  	int16 block[64];  	memset(block, 0, 64 * sizeof(int16)); @@ -841,7 +670,7 @@ void BinkDecoder::blockScaledIntra(DecodeContext &ctx) {  	}  } -void BinkDecoder::blockScaledFill(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockScaledFill(DecodeContext &ctx) {  	byte v = getBundleValue(kSourceColors);  	byte *dest = ctx.dest; @@ -849,7 +678,7 @@ void BinkDecoder::blockScaledFill(DecodeContext &ctx) {  		memset(dest, v, 16);  } -void BinkDecoder::blockScaledPattern(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockScaledPattern(DecodeContext &ctx) {  	byte col[2];  	for (int i = 0; i < 2; i++) @@ -865,7 +694,7 @@ void BinkDecoder::blockScaledPattern(DecodeContext &ctx) {  	}  } -void BinkDecoder::blockScaledRaw(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockScaledRaw(DecodeContext &ctx) {  	byte row[8];  	byte *dest1 = 
ctx.dest; @@ -880,32 +709,27 @@ void BinkDecoder::blockScaledRaw(DecodeContext &ctx) {  	}  } -void BinkDecoder::blockScaled(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockScaled(DecodeContext &ctx) {  	BlockType blockType = (BlockType) getBundleValue(kSourceSubBlockTypes);  	switch (blockType) { -		case kBlockRun: -			blockScaledRun(ctx); -			break; - -		case kBlockIntra: -			blockScaledIntra(ctx); -			break; - -		case kBlockFill: -			blockScaledFill(ctx); -			break; - -		case kBlockPattern: -			blockScaledPattern(ctx); -			break; - -		case kBlockRaw: -			blockScaledRaw(ctx); -			break; - -		default: -			error("Invalid 16x16 block type: %d", blockType); +	case kBlockRun: +		blockScaledRun(ctx); +		break; +	case kBlockIntra: +		blockScaledIntra(ctx); +		break; +	case kBlockFill: +		blockScaledFill(ctx); +		break; +	case kBlockPattern: +		blockScaledPattern(ctx); +		break; +	case kBlockRaw: +		blockScaledRaw(ctx); +		break; +	default: +		error("Invalid 16x16 block type: %d", blockType);  	}  	ctx.blockX += 1; @@ -913,7 +737,7 @@ void BinkDecoder::blockScaled(DecodeContext &ctx) {  	ctx.prev   += 8;  } -void BinkDecoder::blockMotion(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockMotion(DecodeContext &ctx) {  	int8 xOff = getBundleValue(kSourceXOff);  	int8 yOff = getBundleValue(kSourceYOff); @@ -926,7 +750,7 @@ void BinkDecoder::blockMotion(DecodeContext &ctx) {  		memcpy(dest, prev, 8);  } -void BinkDecoder::blockRun(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockRun(DecodeContext &ctx) {  	const uint8 *scan = binkPatterns[ctx.video->bits->getBits(4)];  	int i = 0; @@ -953,7 +777,7 @@ void BinkDecoder::blockRun(DecodeContext &ctx) {  		ctx.dest[ctx.coordMap[*scan++]] = getBundleValue(kSourceColors);  } -void BinkDecoder::blockResidue(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockResidue(DecodeContext &ctx) {  	blockMotion(ctx);  	byte v = ctx.video->bits->getBits(7); @@ -970,7 +794,7 @@ void 
BinkDecoder::blockResidue(DecodeContext &ctx) {  			dst[j] += src[j];  } -void BinkDecoder::blockIntra(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockIntra(DecodeContext &ctx) {  	int16 block[64];  	memset(block, 0, 64 * sizeof(int16)); @@ -981,7 +805,7 @@ void BinkDecoder::blockIntra(DecodeContext &ctx) {  	IDCTPut(ctx, block);  } -void BinkDecoder::blockFill(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockFill(DecodeContext &ctx) {  	byte v = getBundleValue(kSourceColors);  	byte *dest = ctx.dest; @@ -989,7 +813,7 @@ void BinkDecoder::blockFill(DecodeContext &ctx) {  		memset(dest, v, 8);  } -void BinkDecoder::blockInter(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockInter(DecodeContext &ctx) {  	blockMotion(ctx);  	int16 block[64]; @@ -1002,7 +826,7 @@ void BinkDecoder::blockInter(DecodeContext &ctx) {  	IDCTAdd(ctx, block);  } -void BinkDecoder::blockPattern(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockPattern(DecodeContext &ctx) {  	byte col[2];  	for (int i = 0; i < 2; i++) @@ -1017,7 +841,7 @@ void BinkDecoder::blockPattern(DecodeContext &ctx) {  	}  } -void BinkDecoder::blockRaw(DecodeContext &ctx) { +void BinkDecoder::BinkVideoTrack::blockRaw(DecodeContext &ctx) {  	byte *dest = ctx.dest;  	byte *data = _bundles[kSourceColors].curPtr;  	for (int i = 0; i < 8; i++, dest += ctx.pitch, data += 8) @@ -1026,7 +850,7 @@ void BinkDecoder::blockRaw(DecodeContext &ctx) {  	_bundles[kSourceColors].curPtr += 64;  } -void BinkDecoder::readRuns(VideoFrame &video, Bundle &bundle) { +void BinkDecoder::BinkVideoTrack::readRuns(VideoFrame &video, Bundle &bundle) {  	uint32 n = readBundleCount(video, bundle);  	if (n == 0)  		return; @@ -1046,7 +870,7 @@ void BinkDecoder::readRuns(VideoFrame &video, Bundle &bundle) {  			*bundle.curDec++ = getHuffmanSymbol(video, bundle.huffman);  } -void BinkDecoder::readMotionValues(VideoFrame &video, Bundle &bundle) { +void 
BinkDecoder::BinkVideoTrack::readMotionValues(VideoFrame &video, Bundle &bundle) {  	uint32 n = readBundleCount(video, bundle);  	if (n == 0)  		return; @@ -1083,7 +907,7 @@ void BinkDecoder::readMotionValues(VideoFrame &video, Bundle &bundle) {  }  const uint8 rleLens[4] = { 4, 8, 12, 32 }; -void BinkDecoder::readBlockTypes(VideoFrame &video, Bundle &bundle) { +void BinkDecoder::BinkVideoTrack::readBlockTypes(VideoFrame &video, Bundle &bundle) {  	uint32 n = readBundleCount(video, bundle);  	if (n == 0)  		return; @@ -1120,7 +944,7 @@ void BinkDecoder::readBlockTypes(VideoFrame &video, Bundle &bundle) {  	} while (bundle.curDec < decEnd);  } -void BinkDecoder::readPatterns(VideoFrame &video, Bundle &bundle) { +void BinkDecoder::BinkVideoTrack::readPatterns(VideoFrame &video, Bundle &bundle) {  	uint32 n = readBundleCount(video, bundle);  	if (n == 0)  		return; @@ -1138,7 +962,7 @@ void BinkDecoder::readPatterns(VideoFrame &video, Bundle &bundle) {  } -void BinkDecoder::readColors(VideoFrame &video, Bundle &bundle) { +void BinkDecoder::BinkVideoTrack::readColors(VideoFrame &video, Bundle &bundle) {  	uint32 n = readBundleCount(video, bundle);  	if (n == 0)  		return; @@ -1182,7 +1006,7 @@ void BinkDecoder::readColors(VideoFrame &video, Bundle &bundle) {  	}  } -void BinkDecoder::readDCS(VideoFrame &video, Bundle &bundle, int startBits, bool hasSign) { +void BinkDecoder::BinkVideoTrack::readDCS(VideoFrame &video, Bundle &bundle, int startBits, bool hasSign) {  	uint32 length = readBundleCount(video, bundle);  	if (length == 0)  		return; @@ -1228,7 +1052,7 @@ void BinkDecoder::readDCS(VideoFrame &video, Bundle &bundle, int startBits, bool  }  /** Reads 8x8 block of DCT coefficients. 
*/ -void BinkDecoder::readDCTCoeffs(VideoFrame &video, int16 *block, bool isIntra) { +void BinkDecoder::BinkVideoTrack::readDCTCoeffs(VideoFrame &video, int16 *block, bool isIntra) {  	int coefCount = 0;  	int coefIdx[64]; @@ -1326,7 +1150,7 @@ void BinkDecoder::readDCTCoeffs(VideoFrame &video, int16 *block, bool isIntra) {  }  /** Reads 8x8 block with residue after motion compensation. */ -void BinkDecoder::readResidue(VideoFrame &video, int16 *block, int masksCount) { +void BinkDecoder::BinkVideoTrack::readResidue(VideoFrame &video, int16 *block, int masksCount) {  	int nzCoeff[64];  	int nzCoeffCount = 0; @@ -1417,63 +1241,170 @@ void BinkDecoder::readResidue(VideoFrame &video, int16 *block, int masksCount) {  	}  } -float BinkDecoder::getFloat(AudioTrack &audio) { -	int power = audio.bits->getBits(5); +#define A1  2896 /* (1/sqrt(2))<<12 */ +#define A2  2217 +#define A3  3784 +#define A4 -5352 -	float f = ldexp((float)audio.bits->getBits(23), power - 23); +#define IDCT_TRANSFORM(dest,s0,s1,s2,s3,s4,s5,s6,s7,d0,d1,d2,d3,d4,d5,d6,d7,munge,src) {\ +    const int a0 = (src)[s0] + (src)[s4]; \ +    const int a1 = (src)[s0] - (src)[s4]; \ +    const int a2 = (src)[s2] + (src)[s6]; \ +    const int a3 = (A1*((src)[s2] - (src)[s6])) >> 11; \ +    const int a4 = (src)[s5] + (src)[s3]; \ +    const int a5 = (src)[s5] - (src)[s3]; \ +    const int a6 = (src)[s1] + (src)[s7]; \ +    const int a7 = (src)[s1] - (src)[s7]; \ +    const int b0 = a4 + a6; \ +    const int b1 = (A3*(a5 + a7)) >> 11; \ +    const int b2 = ((A4*a5) >> 11) - b0 + b1; \ +    const int b3 = (A1*(a6 - a4) >> 11) - b2; \ +    const int b4 = ((A2*a7) >> 11) + b3 - b1; \ +    (dest)[d0] = munge(a0+a2   +b0); \ +    (dest)[d1] = munge(a1+a3-a2+b2); \ +    (dest)[d2] = munge(a1-a3+a2+b3); \ +    (dest)[d3] = munge(a0-a2   -b4); \ +    (dest)[d4] = munge(a0-a2   +b4); \ +    (dest)[d5] = munge(a1-a3+a2-b3); \ +    (dest)[d6] = munge(a1+a3-a2-b2); \ +    (dest)[d7] = munge(a0+a2   -b0); \ +} +/* end 
IDCT_TRANSFORM macro */ -	if (audio.bits->getBit()) -		f = -f; +#define MUNGE_NONE(x) (x) +#define IDCT_COL(dest,src) IDCT_TRANSFORM(dest,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,MUNGE_NONE,src) -	return f; +#define MUNGE_ROW(x) (((x) + 0x7F)>>8) +#define IDCT_ROW(dest,src) IDCT_TRANSFORM(dest,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,MUNGE_ROW,src) + +static inline void IDCTCol(int16 *dest, const int16 *src) { +	if ((src[8] | src[16] | src[24] | src[32] | src[40] | src[48] | src[56]) == 0) { +		dest[ 0] = +		dest[ 8] = +		dest[16] = +		dest[24] = +		dest[32] = +		dest[40] = +		dest[48] = +		dest[56] = src[0]; +	} else { +		IDCT_COL(dest, src); +	} +} + +void BinkDecoder::BinkVideoTrack::IDCT(int16 *block) { +	int i; +	int16 temp[64]; + +	for (i = 0; i < 8; i++) +		IDCTCol(&temp[i], &block[i]); +	for (i = 0; i < 8; i++) { +		IDCT_ROW( (&block[8*i]), (&temp[8*i]) ); +	} +} + +void BinkDecoder::BinkVideoTrack::IDCTAdd(DecodeContext &ctx, int16 *block) { +	int i, j; + +	IDCT(block); +	byte *dest = ctx.dest; +	for (i = 0; i < 8; i++, dest += ctx.pitch, block += 8) +		for (j = 0; j < 8; j++) +			 dest[j] += block[j]; +} + +void BinkDecoder::BinkVideoTrack::IDCTPut(DecodeContext &ctx, int16 *block) { +	int i; +	int16 temp[64]; +	for (i = 0; i < 8; i++) +		IDCTCol(&temp[i], &block[i]); +	for (i = 0; i < 8; i++) { +		IDCT_ROW( (&ctx.dest[i*ctx.pitch]), (&temp[8*i]) ); +	} +} + +BinkDecoder::BinkAudioTrack::BinkAudioTrack(BinkDecoder::AudioInfo &audio) : _audioInfo(&audio) { +	_audioStream = Audio::makeQueuingAudioStream(_audioInfo->outSampleRate, _audioInfo->outChannels == 2); +} + +BinkDecoder::BinkAudioTrack::~BinkAudioTrack() { +	delete _audioStream; +} + +Audio::AudioStream *BinkDecoder::BinkAudioTrack::getAudioStream() const { +	return _audioStream; +} + +void BinkDecoder::BinkAudioTrack::decodePacket() { +	int outSize = _audioInfo->frameLen * _audioInfo->channels; + +	while (_audioInfo->bits->pos() < _audioInfo->bits->size()) { +		int16 *out = (int16 *)malloc(outSize * 2); 
+		memset(out, 0, outSize * 2); + +		audioBlock(out); + +		byte flags = Audio::FLAG_16BITS; +		if (_audioInfo->outChannels == 2) +			flags |= Audio::FLAG_STEREO; + +#ifdef SCUMM_LITTLE_ENDIAN +		flags |= Audio::FLAG_LITTLE_ENDIAN; +#endif + +		_audioStream->queueBuffer((byte *)out, _audioInfo->blockSize * 2, DisposeAfterUse::YES, flags); + +		if (_audioInfo->bits->pos() & 0x1F) // next data block starts at a 32-byte boundary +			_audioInfo->bits->skip(32 - (_audioInfo->bits->pos() & 0x1F)); +	}  } -void BinkDecoder::audioBlock(AudioTrack &audio, int16 *out) { -	if      (audio.codec == kAudioCodecDCT) -		audioBlockDCT (audio); -	else if (audio.codec == kAudioCodecRDFT) -		audioBlockRDFT(audio); +void BinkDecoder::BinkAudioTrack::audioBlock(int16 *out) { +	if      (_audioInfo->codec == kAudioCodecDCT) +		audioBlockDCT (); +	else if (_audioInfo->codec == kAudioCodecRDFT) +		audioBlockRDFT(); -	floatToInt16Interleave(out, const_cast<const float **>(audio.coeffsPtr), audio.frameLen, audio.channels); +	floatToInt16Interleave(out, const_cast<const float **>(_audioInfo->coeffsPtr), _audioInfo->frameLen, _audioInfo->channels); -	if (!audio.first) { -		int count = audio.overlapLen * audio.channels; +	if (!_audioInfo->first) { +		int count = _audioInfo->overlapLen * _audioInfo->channels;  		int shift = Common::intLog2(count);  		for (int i = 0; i < count; i++) { -			out[i] = (audio.prevCoeffs[i] * (count - i) + out[i] * i) >> shift; +			out[i] = (_audioInfo->prevCoeffs[i] * (count - i) + out[i] * i) >> shift;  		}  	} -	memcpy(audio.prevCoeffs, out + audio.blockSize, audio.overlapLen * audio.channels * sizeof(*out)); +	memcpy(_audioInfo->prevCoeffs, out + _audioInfo->blockSize, _audioInfo->overlapLen * _audioInfo->channels * sizeof(*out)); -	audio.first = false; +	_audioInfo->first = false;  } -void BinkDecoder::audioBlockDCT(AudioTrack &audio) { -	audio.bits->skip(2); +void BinkDecoder::BinkAudioTrack::audioBlockDCT() { +	_audioInfo->bits->skip(2); -	for (uint8 i = 0; i < 
audio.channels; i++) { -		float *coeffs = audio.coeffsPtr[i]; +	for (uint8 i = 0; i < _audioInfo->channels; i++) { +		float *coeffs = _audioInfo->coeffsPtr[i]; -		readAudioCoeffs(audio, coeffs); +		readAudioCoeffs(coeffs);  		coeffs[0] /= 0.5; -		audio.dct->calc(coeffs); +		_audioInfo->dct->calc(coeffs); -		for (uint32 j = 0; j < audio.frameLen; j++) -			coeffs[j] *= (audio.frameLen / 2.0); +		for (uint32 j = 0; j < _audioInfo->frameLen; j++) +			coeffs[j] *= (_audioInfo->frameLen / 2.0);  	}  } -void BinkDecoder::audioBlockRDFT(AudioTrack &audio) { -	for (uint8 i = 0; i < audio.channels; i++) { -		float *coeffs = audio.coeffsPtr[i]; +void BinkDecoder::BinkAudioTrack::audioBlockRDFT() { +	for (uint8 i = 0; i < _audioInfo->channels; i++) { +		float *coeffs = _audioInfo->coeffsPtr[i]; -		readAudioCoeffs(audio, coeffs); +		readAudioCoeffs(coeffs); -		audio.rdft->calc(coeffs); +		_audioInfo->rdft->calc(coeffs);  	}  } @@ -1481,56 +1412,56 @@ static const uint8 rleLengthTab[16] = {  	2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 64  }; -void BinkDecoder::readAudioCoeffs(AudioTrack &audio, float *coeffs) { -	coeffs[0] = getFloat(audio) * audio.root; -	coeffs[1] = getFloat(audio) * audio.root; +void BinkDecoder::BinkAudioTrack::readAudioCoeffs(float *coeffs) { +	coeffs[0] = getFloat() * _audioInfo->root; +	coeffs[1] = getFloat() * _audioInfo->root;  	float quant[25]; -	for (uint32 i = 0; i < audio.bandCount; i++) { -		int value = audio.bits->getBits(8); +	for (uint32 i = 0; i < _audioInfo->bandCount; i++) { +		int value = _audioInfo->bits->getBits(8);  		//                              0.066399999 / log10(M_E) -		quant[i] = exp(MIN(value, 95) * 0.15289164787221953823f) * audio.root; +		quant[i] = exp(MIN(value, 95) * 0.15289164787221953823f) * _audioInfo->root;  	}  	float q = 0.0;  	// Find band (k)  	int k; -	for (k = 0; audio.bands[k] < 1; k++) +	for (k = 0; _audioInfo->bands[k] < 1; k++)  		q = quant[k];  	// Parse coefficients  	uint32 i = 2; -	while (i < 
audio.frameLen) { +	while (i < _audioInfo->frameLen) {  		uint32 j = 0; -		if (audio.bits->getBit()) -			j = i + rleLengthTab[audio.bits->getBits(4)] * 8; +		if (_audioInfo->bits->getBit()) +			j = i + rleLengthTab[_audioInfo->bits->getBits(4)] * 8;  		else  			j = i + 8; -		j = MIN(j, audio.frameLen); +		j = MIN(j, _audioInfo->frameLen); -		int width = audio.bits->getBits(4); +		int width = _audioInfo->bits->getBits(4);  		if (width == 0) {  			memset(coeffs + i, 0, (j - i) * sizeof(*coeffs));  			i = j; -			while (audio.bands[k] * 2 < i) +			while (_audioInfo->bands[k] * 2 < i)  				q = quant[k++];  		} else {  			while (i < j) { -				if (audio.bands[k] * 2 == i) +				if (_audioInfo->bands[k] * 2 == i)  					q = quant[k++]; -				int coeff = audio.bits->getBits(width); +				int coeff = _audioInfo->bits->getBits(width);  				if (coeff) { -					if (audio.bits->getBit()) +					if (_audioInfo->bits->getBit())  						coeffs[i] = -q * coeff;  					else  						coeffs[i] =  q * coeff; @@ -1548,10 +1479,10 @@ void BinkDecoder::readAudioCoeffs(AudioTrack &audio, float *coeffs) {  }  static inline int floatToInt16One(float src) { -	return (int16) CLIP<int>((int) floor(src + 0.5), -32768, 32767); +	return (int16)CLIP<int>((int)floor(src + 0.5), -32768, 32767);  } -void BinkDecoder::floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels) { +void BinkDecoder::BinkAudioTrack::floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels) {  	if (channels == 2) {  		for (uint32 i = 0; i < length; i++) {  			dst[2 * i    ] = floatToInt16One(src[0][i]); @@ -1564,97 +1495,84 @@ void BinkDecoder::floatToInt16Interleave(int16 *dst, const float **src, uint32 l  	}  } -#define A1  2896 /* (1/sqrt(2))<<12 */ -#define A2  2217 -#define A3  3784 -#define A4 -5352 +float BinkDecoder::BinkAudioTrack::getFloat() { +	int power = _audioInfo->bits->getBits(5); -#define 
IDCT_TRANSFORM(dest,s0,s1,s2,s3,s4,s5,s6,s7,d0,d1,d2,d3,d4,d5,d6,d7,munge,src) {\ -    const int a0 = (src)[s0] + (src)[s4]; \ -    const int a1 = (src)[s0] - (src)[s4]; \ -    const int a2 = (src)[s2] + (src)[s6]; \ -    const int a3 = (A1*((src)[s2] - (src)[s6])) >> 11; \ -    const int a4 = (src)[s5] + (src)[s3]; \ -    const int a5 = (src)[s5] - (src)[s3]; \ -    const int a6 = (src)[s1] + (src)[s7]; \ -    const int a7 = (src)[s1] - (src)[s7]; \ -    const int b0 = a4 + a6; \ -    const int b1 = (A3*(a5 + a7)) >> 11; \ -    const int b2 = ((A4*a5) >> 11) - b0 + b1; \ -    const int b3 = (A1*(a6 - a4) >> 11) - b2; \ -    const int b4 = ((A2*a7) >> 11) + b3 - b1; \ -    (dest)[d0] = munge(a0+a2   +b0); \ -    (dest)[d1] = munge(a1+a3-a2+b2); \ -    (dest)[d2] = munge(a1-a3+a2+b3); \ -    (dest)[d3] = munge(a0-a2   -b4); \ -    (dest)[d4] = munge(a0-a2   +b4); \ -    (dest)[d5] = munge(a1-a3+a2-b3); \ -    (dest)[d6] = munge(a1+a3-a2-b2); \ -    (dest)[d7] = munge(a0+a2   -b0); \ +	float f = ldexp((float)_audioInfo->bits->getBits(23), power - 23); + +	if (_audioInfo->bits->getBit()) +		f = -f; + +	return f;  } -/* end IDCT_TRANSFORM macro */ -#define MUNGE_NONE(x) (x) -#define IDCT_COL(dest,src) IDCT_TRANSFORM(dest,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,MUNGE_NONE,src) +void BinkDecoder::initAudioTrack(AudioInfo &audio) { +	audio.sampleCount = 0; +	audio.bits        = 0; -#define MUNGE_ROW(x) (((x) + 0x7F)>>8) -#define IDCT_ROW(dest,src) IDCT_TRANSFORM(dest,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,MUNGE_ROW,src) +	audio.channels = ((audio.flags & kAudioFlagStereo) != 0) ? 2 : 1; +	audio.codec    = ((audio.flags & kAudioFlagDCT   ) != 0) ? 
kAudioCodecDCT : kAudioCodecRDFT; -static inline void IDCTCol(int16 *dest, const int16 *src) -{ -	if ((src[8] | src[16] | src[24] | src[32] | src[40] | src[48] | src[56]) == 0) { -		dest[ 0] = -		dest[ 8] = -		dest[16] = -		dest[24] = -		dest[32] = -		dest[40] = -		dest[48] = -		dest[56] = src[0]; -	} else { -		IDCT_COL(dest, src); -	} -} +	if (audio.channels > kAudioChannelsMax) +		error("Too many audio channels: %d", audio.channels); -void BinkDecoder::IDCT(int16 *block) { -	int i; -	int16 temp[64]; +	uint32 frameLenBits; +	// Calculate frame length +	if     (audio.sampleRate < 22050) +		frameLenBits =  9; +	else if(audio.sampleRate < 44100) +		frameLenBits = 10; +	else +		frameLenBits = 11; -	for (i = 0; i < 8; i++) -		IDCTCol(&temp[i], &block[i]); -	for (i = 0; i < 8; i++) { -		IDCT_ROW( (&block[8*i]), (&temp[8*i]) ); -	} -} +	audio.frameLen = 1 << frameLenBits; -void BinkDecoder::IDCTAdd(DecodeContext &ctx, int16 *block) { -	int i, j; +	audio.outSampleRate = audio.sampleRate; +	audio.outChannels   = audio.channels; -	IDCT(block); -	byte *dest = ctx.dest; -	for (i = 0; i < 8; i++, dest += ctx.pitch, block += 8) -		for (j = 0; j < 8; j++) -			 dest[j] += block[j]; -} +	if (audio.codec  == kAudioCodecRDFT) { +		// RDFT audio already interleaves the samples correctly -void BinkDecoder::IDCTPut(DecodeContext &ctx, int16 *block) { -	int i; -	int16 temp[64]; -	for (i = 0; i < 8; i++) -		IDCTCol(&temp[i], &block[i]); -	for (i = 0; i < 8; i++) { -		IDCT_ROW( (&ctx.dest[i*ctx.pitch]), (&temp[8*i]) ); +		if (audio.channels == 2) +			frameLenBits++; + +		audio.sampleRate *= audio.channels; +		audio.frameLen   *= audio.channels; +		audio.channels    = 1;  	} -} -void BinkDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelVolume(_audioHandle, getVolume()); -} +	audio.overlapLen = audio.frameLen / 16; +	audio.blockSize  = (audio.frameLen - audio.overlapLen) * audio.channels; +	audio.root       = 2.0 / 
sqrt((double)audio.frameLen); + +	uint32 sampleRateHalf = (audio.sampleRate + 1) / 2; + +	// Calculate number of bands +	for (audio.bandCount = 1; audio.bandCount < 25; audio.bandCount++) +		if (sampleRateHalf <= binkCriticalFreqs[audio.bandCount - 1]) +			break; + +	audio.bands = new uint32[audio.bandCount + 1]; + +	// Populate bands +	audio.bands[0] = 1; +	for (uint32 i = 1; i < audio.bandCount; i++) +		audio.bands[i] = binkCriticalFreqs[i - 1] * (audio.frameLen / 2) / sampleRateHalf; +	audio.bands[audio.bandCount] = audio.frameLen / 2; + +	audio.first = true; + +	for (uint8 i = 0; i < audio.channels; i++) +		audio.coeffsPtr[i] = audio.coeffs + i * audio.frameLen; + +	audio.codec = ((audio.flags & kAudioFlagDCT) != 0) ? kAudioCodecDCT : kAudioCodecRDFT; + +	if      (audio.codec == kAudioCodecRDFT) +		audio.rdft = new Common::RDFT(frameLenBits, Common::RDFT::DFT_C2R); +	else if (audio.codec == kAudioCodecDCT) +		audio.dct  = new Common::DCT(frameLenBits, Common::DCT::DCT_III); -void BinkDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelBalance(_audioHandle, getBalance()); +	addTrack(new BinkAudioTrack(audio));  }  } // End of namespace Video diff --git a/video/bink_decoder.h b/video/bink_decoder.h index a5e1b10270..150e91aab7 100644 --- a/video/bink_decoder.h +++ b/video/bink_decoder.h @@ -31,22 +31,27 @@  #ifndef VIDEO_BINK_DECODER_H  #define VIDEO_BINK_DECODER_H -#include "audio/audiostream.h" -#include "audio/mixer.h"  #include "common/array.h"  #include "common/rational.h" -#include "graphics/surface.h" -  #include "video/video_decoder.h" +namespace Audio { +class AudioStream; +class QueuingAudioStream; +} +  namespace Common { -	class SeekableReadStream; -	class BitStream; -	class Huffman; +class SeekableReadStream; +class BitStream; +class Huffman; -	class RDFT; -	class DCT; +class RDFT; +class DCT; +} + +namespace Graphics { +struct Surface;  }  namespace Video { @@ -57,92 +62,28 @@ 
namespace Video {   * Video decoder used in engines:   *  - scumm (he)   */ -class BinkDecoder : public FixedRateVideoDecoder { +class BinkDecoder : public VideoDecoder {  public:  	BinkDecoder();  	~BinkDecoder(); -	// VideoDecoder API  	bool loadStream(Common::SeekableReadStream *stream);  	void close(); -	bool isVideoLoaded() const { return _bink != 0; } -	uint16 getWidth() const { return _surface.w; } -	uint16 getHeight() const { return _surface.h; } -	Graphics::PixelFormat getPixelFormat() const { return _surface.format; } -	uint32 getFrameCount() const { return _frames.size(); } -	uint32 getTime() const; -	const Graphics::Surface *decodeNextFrame(); - -	// FixedRateVideoDecoder -	Common::Rational getFrameRate() const { return _frameRate; } - -	// Bink specific -	bool loadStream(Common::SeekableReadStream *stream, const Graphics::PixelFormat &format);  protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); +	void readNextPacket(); +private:  	static const int kAudioChannelsMax  = 2;  	static const int kAudioBlockSizeMax = (kAudioChannelsMax << 11); -	/** IDs for different data types used in Bink video codec. */ -	enum Source { -		kSourceBlockTypes    = 0, ///< 8x8 block types. -		kSourceSubBlockTypes    , ///< 16x16 block types (a subset of 8x8 block types). -		kSourceColors           , ///< Pixel values used for different block types. -		kSourcePattern          , ///< 8-bit values for 2-color pattern fill. -		kSourceXOff             , ///< X components of motion value. -		kSourceYOff             , ///< Y components of motion value. -		kSourceIntraDC          , ///< DC values for intrablocks with DCT. -		kSourceInterDC          , ///< DC values for interblocks with DCT. -		kSourceRun              , ///< Run lengths for special fill block. - -		kSourceMAX -	}; - -	/** Bink video block types. */ -	enum BlockType { -		kBlockSkip    = 0,  ///< Skipped block. -		kBlockScaled     ,  ///< Block has size 16x16. 
-		kBlockMotion     ,  ///< Block is copied from previous frame with some offset. -		kBlockRun        ,  ///< Block is composed from runs of colors with custom scan order. -		kBlockResidue    ,  ///< Motion block with some difference added. -		kBlockIntra      ,  ///< Intra DCT block. -		kBlockFill       ,  ///< Block is filled with single color. -		kBlockInter      ,  ///< Motion block with DCT applied to the difference. -		kBlockPattern    ,  ///< Block is filled with two colors following custom pattern. -		kBlockRaw           ///< Uncoded 8x8 block. -	}; - -	/** Data structure for decoding and tranlating Huffman'd data. */ -	struct Huffman { -		int  index;       ///< Index of the Huffman codebook to use. -		byte symbols[16]; ///< Huffman symbol => Bink symbol tranlation list. -	}; - -	/** Data structure used for decoding a single Bink data type. */ -	struct Bundle { -		int countLengths[2]; ///< Lengths of number of entries to decode (in bits). -		int countLength;     ///< Length of number of entries to decode (in bits) for the current plane. - -		Huffman huffman; ///< Huffman codebook. - -		byte *data;    ///< Buffer for decoded symbols. -		byte *dataEnd; ///< Buffer end. - -		byte *curDec; ///< Pointer to the data that wasn't yet decoded. -		byte *curPtr; ///< Pointer to the data that wasn't yet read. -	}; -  	enum AudioCodec {  		kAudioCodecDCT,  		kAudioCodecRDFT  	};  	/** An audio track. */ -	struct AudioTrack { +	struct AudioInfo {  		uint16 flags;  		uint32 sampleRate; @@ -177,8 +118,8 @@ protected:  		Common::RDFT *rdft;  		Common::DCT  *dct; -		AudioTrack(); -		~AudioTrack(); +		AudioInfo(); +		~AudioInfo();  	};  	/** A video frame. */ @@ -194,149 +135,218 @@ protected:  		~VideoFrame();  	}; -	/** A decoder state. 
*/ -	struct DecodeContext { -		VideoFrame *video; +	class BinkVideoTrack : public FixedRateVideoTrack { +	public: +		BinkVideoTrack(uint32 width, uint32 height, const Graphics::PixelFormat &format, uint32 frameCount, const Common::Rational &frameRate, bool swapPlanes, bool hasAlpha, uint32 id); +		~BinkVideoTrack(); + +		uint16 getWidth() const { return _surface.w; } +		uint16 getHeight() const { return _surface.h; } +		Graphics::PixelFormat getPixelFormat() const { return _surface.format; } +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		const Graphics::Surface *decodeNextFrame() { return &_surface; } + +		/** Decode a video packet. */ +		void decodePacket(VideoFrame &frame); + +	protected: +		Common::Rational getFrameRate() const { return _frameRate; } -		uint32 planeIdx; +	private: +		/** A decoder state. */ +		struct DecodeContext { +			VideoFrame *video; + +			uint32 planeIdx; -		uint32 blockX; -		uint32 blockY; +			uint32 blockX; +			uint32 blockY; -		byte *dest; -		byte *prev; +			byte *dest; +			byte *prev; -		byte *destStart, *destEnd; -		byte *prevStart, *prevEnd; +			byte *destStart, *destEnd; +			byte *prevStart, *prevEnd; -		uint32 pitch; +			uint32 pitch; -		int coordMap[64]; -		int coordScaledMap1[64]; -		int coordScaledMap2[64]; -		int coordScaledMap3[64]; -		int coordScaledMap4[64]; +			int coordMap[64]; +			int coordScaledMap1[64]; +			int coordScaledMap2[64]; +			int coordScaledMap3[64]; +			int coordScaledMap4[64]; +		}; + +		/** IDs for different data types used in Bink video codec. */ +		enum Source { +			kSourceBlockTypes    = 0, ///< 8x8 block types. +			kSourceSubBlockTypes    , ///< 16x16 block types (a subset of 8x8 block types). +			kSourceColors           , ///< Pixel values used for different block types. +			kSourcePattern          , ///< 8-bit values for 2-color pattern fill. +			kSourceXOff             , ///< X components of motion value. 
+			kSourceYOff             , ///< Y components of motion value.
+			kSourceIntraDC          , ///< DC values for intrablocks with DCT.
+			kSourceInterDC          , ///< DC values for interblocks with DCT.
+			kSourceRun              , ///< Run lengths for special fill block.
+
+			kSourceMAX
+		};
+
+		/** Bink video block types. */
+		enum BlockType {
+			kBlockSkip    = 0,  ///< Skipped block.
+			kBlockScaled     ,  ///< Block has size 16x16.
+			kBlockMotion     ,  ///< Block is copied from previous frame with some offset.
+			kBlockRun        ,  ///< Block is composed from runs of colors with custom scan order.
+			kBlockResidue    ,  ///< Motion block with some difference added.
+			kBlockIntra      ,  ///< Intra DCT block.
+			kBlockFill       ,  ///< Block is filled with single color.
+			kBlockInter      ,  ///< Motion block with DCT applied to the difference.
+			kBlockPattern    ,  ///< Block is filled with two colors following custom pattern.
+			kBlockRaw           ///< Uncoded 8x8 block.
+		};
+
+		/** Data structure for decoding and translating Huffman'd data. */
+		struct Huffman {
+			int  index;       ///< Index of the Huffman codebook to use.
+			byte symbols[16]; ///< Huffman symbol => Bink symbol translation list.
+		};
+
+		/** Data structure used for decoding a single Bink data type. */
+		struct Bundle {
+			int countLengths[2]; ///< Lengths of number of entries to decode (in bits).
+			int countLength;     ///< Length of number of entries to decode (in bits) for the current plane.
+
+			Huffman huffman; ///< Huffman codebook.
+
+			byte *data;    ///< Buffer for decoded symbols.
+			byte *dataEnd; ///< Buffer end.
+
+			byte *curDec; ///< Pointer to the data that wasn't yet decoded.
+			byte *curPtr; ///< Pointer to the data that wasn't yet read.
+		};
+
+		int _curFrame;
+		int _frameCount;
+
+		Graphics::Surface _surface;
+
+		uint32 _id; ///< The BIK FourCC.
+
+		bool _hasAlpha;   ///< Do video frames have alpha?
+		bool _swapPlanes; ///< Are the planes ordered (A)YVU instead of (A)YUV? + +		Common::Rational _frameRate; + +		Bundle _bundles[kSourceMAX]; ///< Bundles for decoding all data types. + +		Common::Huffman *_huffman[16]; ///< The 16 Huffman codebooks used in Bink decoding. + +		/** Huffman codebooks to use for decoding high nibbles in color data types. */ +		Huffman _colHighHuffman[16]; +		/** Value of the last decoded high nibble in color data types. */ +		int _colLastVal; + +		byte *_curPlanes[4]; ///< The 4 color planes, YUVA, current frame. +		byte *_oldPlanes[4]; ///< The 4 color planes, YUVA, last frame. + +		/** Initialize the bundles. */ +		void initBundles(); +		/** Deinitialize the bundles. */ +		void deinitBundles(); + +		/** Initialize the Huffman decoders. */ +		void initHuffman(); + +		/** Decode a plane. */ +		void decodePlane(VideoFrame &video, int planeIdx, bool isChroma); + +		/** Read/Initialize a bundle for decoding a plane. */ +		void readBundle(VideoFrame &video, Source source); + +		/** Read the symbols for a Huffman code. */ +		void readHuffman(VideoFrame &video, Huffman &huffman); +		/** Merge two Huffman symbol lists. */ +		void mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size); + +		/** Read and translate a symbol out of a Huffman code. */ +		byte getHuffmanSymbol(VideoFrame &video, Huffman &huffman); + +		/** Get a direct value out of a bundle. */ +		int32 getBundleValue(Source source); +		/** Read a count value out of a bundle. 
*/ +		uint32 readBundleCount(VideoFrame &video, Bundle &bundle); + +		// Handle the block types +		void blockSkip         (DecodeContext &ctx); +		void blockScaledSkip   (DecodeContext &ctx); +		void blockScaledRun    (DecodeContext &ctx); +		void blockScaledIntra  (DecodeContext &ctx); +		void blockScaledFill   (DecodeContext &ctx); +		void blockScaledPattern(DecodeContext &ctx); +		void blockScaledRaw    (DecodeContext &ctx); +		void blockScaled       (DecodeContext &ctx); +		void blockMotion       (DecodeContext &ctx); +		void blockRun          (DecodeContext &ctx); +		void blockResidue      (DecodeContext &ctx); +		void blockIntra        (DecodeContext &ctx); +		void blockFill         (DecodeContext &ctx); +		void blockInter        (DecodeContext &ctx); +		void blockPattern      (DecodeContext &ctx); +		void blockRaw          (DecodeContext &ctx); + +		// Read the bundles +		void readRuns        (VideoFrame &video, Bundle &bundle); +		void readMotionValues(VideoFrame &video, Bundle &bundle); +		void readBlockTypes  (VideoFrame &video, Bundle &bundle); +		void readPatterns    (VideoFrame &video, Bundle &bundle); +		void readColors      (VideoFrame &video, Bundle &bundle); +		void readDCS         (VideoFrame &video, Bundle &bundle, int startBits, bool hasSign); +		void readDCTCoeffs   (VideoFrame &video, int16 *block, bool isIntra); +		void readResidue     (VideoFrame &video, int16 *block, int masksCount); + +		// Bink video IDCT +		void IDCT(int16 *block); +		void IDCTPut(DecodeContext &ctx, int16 *block); +		void IDCTAdd(DecodeContext &ctx, int16 *block);  	}; -	Common::SeekableReadStream *_bink; +	class BinkAudioTrack : public AudioTrack { +	public: +		BinkAudioTrack(AudioInfo &audio); +		~BinkAudioTrack(); + +		/** Decode an audio packet. */ +		void decodePacket(); -	uint32 _id; ///< The BIK FourCC. 
+	protected: +		Audio::AudioStream *getAudioStream() const; -	Common::Rational _frameRate; +	private: +		AudioInfo *_audioInfo; +		Audio::QueuingAudioStream *_audioStream; -	Graphics::Surface _surface; +		float getFloat(); -	Audio::SoundHandle _audioHandle; -	Audio::QueuingAudioStream *_audioStream; -	int32 _audioStartOffset; +		/** Decode an audio block. */ +		void audioBlock(int16 *out); +		/** Decode a DCT'd audio block. */ +		void audioBlockDCT(); +		/** Decode a RDFT'd audio block. */ +		void audioBlockRDFT(); -	uint32 _videoFlags; ///< Video frame features. +		void readAudioCoeffs(float *coeffs); -	bool _hasAlpha;   ///< Do video frames have alpha? -	bool _swapPlanes; ///< Are the planes ordered (A)YVU instead of (A)YUV? +		static void floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels); +	}; + +	Common::SeekableReadStream *_bink; -	Common::Array<AudioTrack> _audioTracks; ///< All audio tracks. +	Common::Array<AudioInfo> _audioTracks; ///< All audio tracks.  	Common::Array<VideoFrame> _frames;      ///< All video frames. -	uint32 _audioTrack; ///< Audio track to use. - -	Common::Huffman *_huffman[16]; ///< The 16 Huffman codebooks used in Bink decoding. - -	Bundle _bundles[kSourceMAX]; ///< Bundles for decoding all data types. - -	/** Huffman codebooks to use for decoding high nibbles in color data types. */ -	Huffman _colHighHuffman[16]; -	/** Value of the last decoded high nibble in color data types. */ -	int _colLastVal; - -	byte *_curPlanes[4]; ///< The 4 color planes, YUVA, current frame. -	byte *_oldPlanes[4]; ///< The 4 color planes, YUVA, last frame. - - -	/** Initialize the bundles. */ -	void initBundles(); -	/** Deinitialize the bundles. */ -	void deinitBundles(); - -	/** Initialize the Huffman decoders. */ -	void initHuffman(); - -	/** Decode an audio packet. */ -	void audioPacket(AudioTrack &audio); -	/** Decode a video packet. */ -	virtual void videoPacket(VideoFrame &video); - -	/** Decode a plane. 
*/ -	void decodePlane(VideoFrame &video, int planeIdx, bool isChroma); - -	/** Read/Initialize a bundle for decoding a plane. */ -	void readBundle(VideoFrame &video, Source source); - -	/** Read the symbols for a Huffman code. */ -	void readHuffman(VideoFrame &video, Huffman &huffman); -	/** Merge two Huffman symbol lists. */ -	void mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size); - -	/** Read and translate a symbol out of a Huffman code. */ -	byte getHuffmanSymbol(VideoFrame &video, Huffman &huffman); - -	/** Get a direct value out of a bundle. */ -	int32 getBundleValue(Source source); -	/** Read a count value out of a bundle. */ -	uint32 readBundleCount(VideoFrame &video, Bundle &bundle); - -	// Handle the block types -	void blockSkip         (DecodeContext &ctx); -	void blockScaledSkip   (DecodeContext &ctx); -	void blockScaledRun    (DecodeContext &ctx); -	void blockScaledIntra  (DecodeContext &ctx); -	void blockScaledFill   (DecodeContext &ctx); -	void blockScaledPattern(DecodeContext &ctx); -	void blockScaledRaw    (DecodeContext &ctx); -	void blockScaled       (DecodeContext &ctx); -	void blockMotion       (DecodeContext &ctx); -	void blockRun          (DecodeContext &ctx); -	void blockResidue      (DecodeContext &ctx); -	void blockIntra        (DecodeContext &ctx); -	void blockFill         (DecodeContext &ctx); -	void blockInter        (DecodeContext &ctx); -	void blockPattern      (DecodeContext &ctx); -	void blockRaw          (DecodeContext &ctx); - -	// Read the bundles -	void readRuns        (VideoFrame &video, Bundle &bundle); -	void readMotionValues(VideoFrame &video, Bundle &bundle); -	void readBlockTypes  (VideoFrame &video, Bundle &bundle); -	void readPatterns    (VideoFrame &video, Bundle &bundle); -	void readColors      (VideoFrame &video, Bundle &bundle); -	void readDCS         (VideoFrame &video, Bundle &bundle, int startBits, bool hasSign); -	void readDCTCoeffs   (VideoFrame &video, int16 *block, bool isIntra); -	
void readResidue     (VideoFrame &video, int16 *block, int masksCount); - -	void initAudioTrack(AudioTrack &audio); - -	float getFloat(AudioTrack &audio); - -	/** Decode an audio block. */ -	void audioBlock    (AudioTrack &audio, int16 *out); -	/** Decode a DCT'd audio block. */ -	void audioBlockDCT (AudioTrack &audio); -	/** Decode a RDFT'd audio block. */ -	void audioBlockRDFT(AudioTrack &audio); - -	void readAudioCoeffs(AudioTrack &audio, float *coeffs); - -	void floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels); - -	// Bink video IDCT -	void IDCT(int16 *block); -	void IDCTPut(DecodeContext &ctx, int16 *block); -	void IDCTAdd(DecodeContext &ctx, int16 *block); - -	/** Start playing the audio track */ -	void startAudio(); -	/** Stop playing the audio track */ -	void stopAudio(); +	void initAudioTrack(AudioInfo &audio);  };  } // End of namespace Video diff --git a/video/coktel_decoder.cpp b/video/coktel_decoder.cpp index 0c7ade1b8a..6a60b0e7d7 100644 --- a/video/coktel_decoder.cpp +++ b/video/coktel_decoder.cpp @@ -53,7 +53,8 @@ CoktelDecoder::CoktelDecoder(Audio::Mixer *mixer, Audio::Mixer::SoundType soundT  	_mixer(mixer), _soundType(soundType), _width(0), _height(0), _x(0), _y(0),  	_defaultX(0), _defaultY(0), _features(0), _frameCount(0), _paletteDirty(false),  	_ownSurface(true), _frameRate(12), _hasSound(false), _soundEnabled(false), -	_soundStage(kSoundNone), _audioStream(0) { +	_soundStage(kSoundNone), _audioStream(0), _startTime(0), _pauseStartTime(0), +	_isPaused(false) {  	assert(_mixer); @@ -261,6 +262,10 @@ bool CoktelDecoder::isPaletted() const {  	return true;  } +int CoktelDecoder::getCurFrame() const { +	return _curFrame; +} +  void CoktelDecoder::close() {  	disableSound();  	freeSurface(); @@ -273,9 +278,14 @@ void CoktelDecoder::close() {  	_features = 0; -	_frameCount = 0; +	_curFrame   = -1; +	_frameCount =  0; + +	_startTime = 0;  	_hasSound = false; + +	_isPaused = false;  }  uint16 
CoktelDecoder::getWidth() const { @@ -291,6 +301,7 @@ uint32 CoktelDecoder::getFrameCount() const {  }  const byte *CoktelDecoder::getPalette() { +	_paletteDirty = false;  	return _palette;  } @@ -625,14 +636,45 @@ Common::Rational CoktelDecoder::getFrameRate() const {  	return _frameRate;  } +uint32 CoktelDecoder::getTimeToNextFrame() const { +	if (endOfVideo() || _curFrame < 0) +		return 0; + +	uint32 elapsedTime        = g_system->getMillis() - _startTime; +	uint32 nextFrameStartTime = (Common::Rational((_curFrame + 1) * 1000) / getFrameRate()).toInt(); + +	if (nextFrameStartTime <= elapsedTime) +		return 0; + +	return nextFrameStartTime - elapsedTime; +} +  uint32 CoktelDecoder::getStaticTimeToNextFrame() const {  	return (1000 / _frameRate).toInt();  } +void CoktelDecoder::pauseVideo(bool pause) { +	if (_isPaused != pause) { +		if (_isPaused) { +			// Add the time we were paused to the initial starting time +			_startTime += g_system->getMillis() - _pauseStartTime; +		} else { +			// Store the time we paused for use later +			_pauseStartTime = g_system->getMillis(); +		} + +		_isPaused = pause; +	} +} +  inline void CoktelDecoder::unsignedToSigned(byte *buffer, int length) {  	while (length-- > 0) *buffer++ ^= 0x80;  } +bool CoktelDecoder::endOfVideo() const { +	return !isVideoLoaded() || (getCurFrame() >= (int32)getFrameCount() - 1); +} +  PreIMDDecoder::PreIMDDecoder(uint16 width, uint16 height,  	Audio::Mixer *mixer, Audio::Mixer::SoundType soundType) : CoktelDecoder(mixer, soundType), @@ -705,8 +747,6 @@ bool PreIMDDecoder::loadStream(Common::SeekableReadStream *stream) {  }  void PreIMDDecoder::close() { -	reset(); -  	CoktelDecoder::close();  	delete _stream; @@ -1159,8 +1199,6 @@ bool IMDDecoder::loadFrameTables(uint32 framePosPos, uint32 frameCoordsPos) {  }  void IMDDecoder::close() { -	reset(); -  	CoktelDecoder::close();  	delete _stream; @@ -1225,8 +1263,6 @@ void IMDDecoder::processFrame() {  	_dirtyRects.clear(); -	_paletteDirty = false; -  	
uint32 cmd = 0;  	bool hasNextCmd = false;  	bool startSound = false; @@ -1273,7 +1309,7 @@ void IMDDecoder::processFrame() {  		// Set palette  		if (cmd == kCommandPalette) {  			_stream->skip(2); - +	  			_paletteDirty = true;  			for (int i = 0; i < 768; i++) @@ -1322,7 +1358,7 @@ void IMDDecoder::processFrame() {  	// Start the audio stream if necessary  	if (startSound && _soundEnabled) {  			_mixer->playStream(_soundType, &_audioHandle, _audioStream, -					-1, getVolume(), getBalance(), DisposeAfterUse::NO); +					-1, Audio::Mixer::kMaxChannelVolume, 0, DisposeAfterUse::NO);  		_soundStage = kSoundPlaying;  	} @@ -1504,16 +1540,6 @@ Graphics::PixelFormat IMDDecoder::getPixelFormat() const {  	return Graphics::PixelFormat::createFormatCLUT8();  } -void IMDDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelVolume(_audioHandle, getVolume()); -} - -void IMDDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelBalance(_audioHandle, getBalance()); -} -  VMDDecoder::File::File() {  	offset   = 0; @@ -1552,7 +1578,7 @@ VMDDecoder::VMDDecoder(Audio::Mixer *mixer, Audio::Mixer::SoundType soundType) :  	_soundLastFilledFrame(0), _audioFormat(kAudioFormat8bitRaw),  	_hasVideo(false), _videoCodec(0), _blitMode(0), _bytesPerPixel(0),  	_firstFramePos(0), _videoBufferSize(0), _externalCodec(false), _codec(0), -	_subtitle(-1), _isPaletted(true) { +	_subtitle(-1), _isPaletted(true), _autoStartSound(true) {  	_videoBuffer   [0] = 0;  	_videoBuffer   [1] = 0; @@ -2014,8 +2040,6 @@ bool VMDDecoder::readFiles() {  }  void VMDDecoder::close() { -	reset(); -  	CoktelDecoder::close();  	delete _stream; @@ -2095,7 +2119,6 @@ void VMDDecoder::processFrame() {  	_dirtyRects.clear(); -	_paletteDirty = false;  	_subtitle     = -1;  	bool startSound = false; @@ -2215,8 +2238,9 @@ void VMDDecoder::processFrame() {  	if (startSound && 
_soundEnabled) {  		if (_hasSound && _audioStream) { -			_mixer->playStream(_soundType, &_audioHandle, _audioStream, -					-1, getVolume(), getBalance(), DisposeAfterUse::NO); +			if (_autoStartSound) +				_mixer->playStream(_soundType, &_audioHandle, _audioStream, +						-1, Audio::Mixer::kMaxChannelVolume, 0, DisposeAfterUse::NO);  			_soundStage = kSoundPlaying;  		} else  			_soundStage = kSoundNone; @@ -2742,14 +2766,92 @@ bool VMDDecoder::isPaletted() const {  	return _isPaletted;  } -void VMDDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelVolume(_audioHandle, getVolume()); +void VMDDecoder::setAutoStartSound(bool autoStartSound) { +	_autoStartSound = autoStartSound; +} + +AdvancedVMDDecoder::AdvancedVMDDecoder(Audio::Mixer::SoundType soundType) { +	_decoder = new VMDDecoder(g_system->getMixer(), soundType); +	_decoder->setAutoStartSound(false); +} + +AdvancedVMDDecoder::~AdvancedVMDDecoder() { +	close(); +	delete _decoder; +} + +bool AdvancedVMDDecoder::loadStream(Common::SeekableReadStream *stream) { +	close(); + +	if (!_decoder->loadStream(stream)) +		return false; + +	if (_decoder->hasVideo()) { +		_videoTrack = new VMDVideoTrack(_decoder); +		addTrack(_videoTrack); +	} + +	if (_decoder->hasSound()) { +		_audioTrack = new VMDAudioTrack(_decoder); +		addTrack(_audioTrack); +	} + +	return true; +} + +void AdvancedVMDDecoder::close() { +	VideoDecoder::close(); +	_decoder->close(); +} + +AdvancedVMDDecoder::VMDVideoTrack::VMDVideoTrack(VMDDecoder *decoder) : _decoder(decoder) { +} + +uint16 AdvancedVMDDecoder::VMDVideoTrack::getWidth() const { +	return _decoder->getWidth(); +} + +uint16 AdvancedVMDDecoder::VMDVideoTrack::getHeight() const { +	return _decoder->getHeight(); +} + +Graphics::PixelFormat AdvancedVMDDecoder::VMDVideoTrack::getPixelFormat() const { +	return _decoder->getPixelFormat(); +} + +int AdvancedVMDDecoder::VMDVideoTrack::getCurFrame() const { +	return 
_decoder->getCurFrame(); +} + +int AdvancedVMDDecoder::VMDVideoTrack::getFrameCount() const { +	return _decoder->getFrameCount(); +} + +const Graphics::Surface *AdvancedVMDDecoder::VMDVideoTrack::decodeNextFrame() { +	return _decoder->decodeNextFrame(); +} + +const byte *AdvancedVMDDecoder::VMDVideoTrack::getPalette() const { +	return _decoder->getPalette(); +} + +bool AdvancedVMDDecoder::VMDVideoTrack::hasDirtyPalette() const { +	return _decoder->hasDirtyPalette(); +} + +Common::Rational AdvancedVMDDecoder::VMDVideoTrack::getFrameRate() const { +	return _decoder->getFrameRate(); +} + +AdvancedVMDDecoder::VMDAudioTrack::VMDAudioTrack(VMDDecoder *decoder) : _decoder(decoder) { +} + +Audio::Mixer::SoundType AdvancedVMDDecoder::VMDAudioTrack::getSoundType() const { +	return _decoder->_soundType;  } -void VMDDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelBalance(_audioHandle, getBalance()); +Audio::AudioStream *AdvancedVMDDecoder::VMDAudioTrack::getAudioStream() const { +	return _decoder->_audioStream;  }  } // End of namespace Video diff --git a/video/coktel_decoder.h b/video/coktel_decoder.h index c88d982191..2a97eadf00 100644 --- a/video/coktel_decoder.h +++ b/video/coktel_decoder.h @@ -64,7 +64,7 @@ class Codec;   *  - gob   *  - sci   */ -class CoktelDecoder : public FixedRateVideoDecoder { +class CoktelDecoder {  public:  	struct State {  		/** Set accordingly to what was done. */ @@ -77,7 +77,7 @@ public:  	CoktelDecoder(Audio::Mixer *mixer,  			Audio::Mixer::SoundType soundType = Audio::Mixer::kPlainSoundType); -	~CoktelDecoder(); +	virtual ~CoktelDecoder();  	/** Replace the current video stream with this identical one. */  	virtual bool reloadStream(Common::SeekableReadStream *stream) = 0; @@ -138,21 +138,47 @@ public:  	/** Is the video paletted or true color? 
*/  	virtual bool isPaletted() const; +	/** +	 * Get the current frame +	 * @see VideoDecoder::getCurFrame() +	 */ +	int getCurFrame() const; -	// VideoDecoder interface +	/** +	 * Decode the next frame +	 * @see VideoDecoder::decodeNextFrame() +	 */ +	virtual const Graphics::Surface *decodeNextFrame() = 0; +	/** +	 * Load a video from a stream +	 * @see VideoDecoder::loadStream() +	 */ +	virtual bool loadStream(Common::SeekableReadStream *stream) = 0; + +	/** Has a video been loaded? */ +	virtual bool isVideoLoaded() const = 0; + +	/** Has the end of the video been reached? */ +	bool endOfVideo() const; + +	/** Close the video. */  	void close();  	uint16 getWidth()  const;  	uint16 getHeight() const; +	virtual Graphics::PixelFormat getPixelFormat() const = 0;  	uint32 getFrameCount() const;  	const byte *getPalette();  	bool  hasDirtyPalette() const; +	uint32 getTimeToNextFrame() const;  	uint32 getStaticTimeToNextFrame() const; +	void pauseVideo(bool pause); +  protected:  	enum SoundStage {  		kSoundNone     = 0, ///< No sound. 
@@ -186,8 +212,11 @@ protected:  	uint32 _features; +	 int32 _curFrame;  	uint32 _frameCount; +	uint32 _startTime; +  	byte _palette[768];  	bool _paletteDirty; @@ -208,6 +237,8 @@ protected:  	bool evaluateSeekFrame(int32 &frame, int whence) const; +	Common::Rational getFrameRate() const; +  	// Surface management  	bool hasSurface();  	void createSurface(); @@ -228,10 +259,9 @@ protected:  	// Sound helper functions  	inline void unsignedToSigned(byte *buffer, int length); - -	// FixedRateVideoDecoder interface - -	Common::Rational getFrameRate() const; +private: +	uint32 _pauseStartTime; +	bool   _isPaused;  };  class PreIMDDecoder : public CoktelDecoder { @@ -244,9 +274,6 @@ public:  	bool seek(int32 frame, int whence = SEEK_SET, bool restart = false); - -	// VideoDecoder interface -  	bool loadStream(Common::SeekableReadStream *stream);  	void close(); @@ -279,9 +306,6 @@ public:  	void setXY(uint16 x, uint16 y); - -	// VideoDecoder interface -  	bool loadStream(Common::SeekableReadStream *stream);  	void close(); @@ -291,11 +315,6 @@ public:  	Graphics::PixelFormat getPixelFormat() const; -protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); -  private:  	enum Command {  		kCommandNextSound   = 0xFF00, @@ -367,6 +386,8 @@ private:  };  class VMDDecoder : public CoktelDecoder { +friend class AdvancedVMDDecoder; +  public:  	VMDDecoder(Audio::Mixer *mixer, Audio::Mixer::SoundType soundType = Audio::Mixer::kPlainSoundType);  	~VMDDecoder(); @@ -390,9 +411,6 @@ public:  	bool hasVideo() const;  	bool isPaletted() const; - -	// VideoDecoder interface -  	bool loadStream(Common::SeekableReadStream *stream);  	void close(); @@ -403,9 +421,7 @@ public:  	Graphics::PixelFormat getPixelFormat() const;  protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); +	void setAutoStartSound(bool autoStartSound);  private:  	enum PartType { @@ -478,6 +494,7 @@ private:  	uint32 _soundDataSize;  	uint32 _soundLastFilledFrame;  	
AudioFormat _audioFormat; +	bool   _autoStartSound;  	// Video properties  	bool   _hasVideo; @@ -532,6 +549,57 @@ private:  	bool getPartCoords(int16 frame, PartType type, int16 &x, int16 &y, int16 &width, int16 &height);  }; +/** + * A wrapper around the VMD code that implements the VideoDecoder + * API. + */ +class AdvancedVMDDecoder : public VideoDecoder { +public: +	AdvancedVMDDecoder(Audio::Mixer::SoundType soundType = Audio::Mixer::kPlainSoundType); +	~AdvancedVMDDecoder(); + +	bool loadStream(Common::SeekableReadStream *stream); +	void close(); + +private: +	class VMDVideoTrack : public FixedRateVideoTrack { +	public: +		VMDVideoTrack(VMDDecoder *decoder); + +		uint16 getWidth() const; +		uint16 getHeight() const; +		Graphics::PixelFormat getPixelFormat() const; +		int getCurFrame() const; +		int getFrameCount() const; +		const Graphics::Surface *decodeNextFrame(); +		const byte *getPalette() const; +		bool hasDirtyPalette() const; + +	protected: +		Common::Rational getFrameRate() const; + +	private: +		VMDDecoder *_decoder; +	}; + +	class VMDAudioTrack : public AudioTrack { +	public: +		VMDAudioTrack(VMDDecoder *decoder); + +		Audio::Mixer::SoundType getSoundType() const; + +	protected: +		virtual Audio::AudioStream *getAudioStream() const; + +	private: +		VMDDecoder *_decoder; +	}; + +	VMDDecoder    *_decoder; +	VMDVideoTrack *_videoTrack; +	VMDAudioTrack *_audioTrack; +}; +  } // End of namespace Video  #endif // VIDEO_COKTELDECODER_H diff --git a/video/dxa_decoder.cpp b/video/dxa_decoder.cpp index 7d1112a59c..5ac9bd2088 100644 --- a/video/dxa_decoder.cpp +++ b/video/dxa_decoder.cpp @@ -37,41 +37,43 @@  namespace Video {  DXADecoder::DXADecoder() { -	_fileStream = 0; -	_surface = 0; -	_dirtyPalette = false; +} -	_frameBuffer1 = 0; -	_frameBuffer2 = 0; -	_scaledBuffer = 0; +DXADecoder::~DXADecoder() { +	close(); +} -	_inBuffer = 0; -	_inBufferSize = 0; +bool DXADecoder::loadStream(Common::SeekableReadStream *stream) { +	close(); -	_decompBuffer = 0; -	
_decompBufferSize = 0; +	uint32 tag = stream->readUint32BE(); -	_width = 0; -	_height = 0; +	if (tag != MKTAG('D','E','X','A')) { +		close(); +		return false; +	} -	_frameSize = 0; -	_frameCount = 0; -	_frameRate = 0; +	DXAVideoTrack *track = new DXAVideoTrack(stream); +	addTrack(track); -	_scaleMode = S_NONE; -} +	readSoundData(stream); -DXADecoder::~DXADecoder() { -	close(); +	track->setFrameStartPos(); +	return true;  } -bool DXADecoder::loadStream(Common::SeekableReadStream *stream) { -	close(); +void DXADecoder::readSoundData(Common::SeekableReadStream *stream) { +	// Skip over the tag by default +	stream->readUint32BE(); +} +DXADecoder::DXAVideoTrack::DXAVideoTrack(Common::SeekableReadStream *stream) {  	_fileStream = stream; - -	uint32 tag = _fileStream->readUint32BE(); -	assert(tag == MKTAG('D','E','X','A')); +	_curFrame = -1; +	_frameStartOffset = 0; +	_decompBuffer = 0; +	_inBuffer = 0; +	memset(_palette, 0, 256 * 3);  	uint8 flags = _fileStream->readByte();  	_frameCount = _fileStream->readUint16BE(); @@ -105,18 +107,14 @@ bool DXADecoder::loadStream(Common::SeekableReadStream *stream) {  	_frameSize = _width * _height;  	_decompBufferSize = _frameSize; -	_frameBuffer1 = (uint8 *)malloc(_frameSize); +	_frameBuffer1 = new byte[_frameSize];  	memset(_frameBuffer1, 0, _frameSize); -	_frameBuffer2 = (uint8 *)malloc(_frameSize); +	_frameBuffer2 = new byte[_frameSize];  	memset(_frameBuffer2, 0, _frameSize); -	if (!_frameBuffer1 || !_frameBuffer2) -		error("DXADecoder: Error allocating frame buffers (size %u)", _frameSize);  	_scaledBuffer = 0;  	if (_scaleMode != S_NONE) { -		_scaledBuffer = (uint8 *)malloc(_frameSize); -		if (!_scaledBuffer) -			error("Error allocating scale buffer (size %u)", _frameSize); +		_scaledBuffer = new byte[_frameSize];  		memset(_scaledBuffer, 0, _frameSize);  	} @@ -148,36 +146,33 @@ bool DXADecoder::loadStream(Common::SeekableReadStream *stream) {  		} while (tag != 0);  	}  #endif - -	// Read the sound header -	_soundTag = 
_fileStream->readUint32BE(); - -	return true;  } -void DXADecoder::close() { -	if (!_fileStream) -		return; - +DXADecoder::DXAVideoTrack::~DXAVideoTrack() {  	delete _fileStream; -	_fileStream = 0; -  	delete _surface; -	_surface = 0; +	delete[] _frameBuffer1; +	delete[] _frameBuffer2; +	delete[] _scaledBuffer; +	delete[] _inBuffer; +	delete[] _decompBuffer; +} -	free(_frameBuffer1); -	free(_frameBuffer2); -	free(_scaledBuffer); -	free(_inBuffer); -	free(_decompBuffer); +bool DXADecoder::DXAVideoTrack::rewind() { +	_curFrame = -1; +	_fileStream->seek(_frameStartOffset); +	return true; +} -	_inBuffer = 0; -	_decompBuffer = 0; +Graphics::PixelFormat DXADecoder::DXAVideoTrack::getPixelFormat() const { +	return _surface->format; +} -	reset(); +void DXADecoder::DXAVideoTrack::setFrameStartPos() { +	_frameStartOffset = _fileStream->pos();  } -void DXADecoder::decodeZlib(byte *data, int size, int totalSize) { +void DXADecoder::DXAVideoTrack::decodeZlib(byte *data, int size, int totalSize) {  #ifdef USE_ZLIB  	unsigned long dstLen = totalSize;  	Common::uncompress(data, &dstLen, _inBuffer, size); @@ -187,14 +182,13 @@ void DXADecoder::decodeZlib(byte *data, int size, int totalSize) {  #define BLOCKW 4  #define BLOCKH 4 -void DXADecoder::decode12(int size) { +void DXADecoder::DXAVideoTrack::decode12(int size) {  #ifdef USE_ZLIB -	if (_decompBuffer == NULL) { -		_decompBuffer = (byte *)malloc(_decompBufferSize); +	if (!_decompBuffer) { +		_decompBuffer = new byte[_decompBufferSize];  		memset(_decompBuffer, 0, _decompBufferSize); -		if (_decompBuffer == NULL) -			error("Error allocating decomp buffer (size %u)", _decompBufferSize);  	} +  	/* decompress the input data */  	decodeZlib(_decompBuffer, size, _decompBufferSize); @@ -287,15 +281,13 @@ void DXADecoder::decode12(int size) {  #endif  } -void DXADecoder::decode13(int size) { +void DXADecoder::DXAVideoTrack::decode13(int size) {  #ifdef USE_ZLIB  	uint8 *codeBuf, *dataBuf, *motBuf, *maskBuf; -	if (_decompBuffer == 
NULL) { -		_decompBuffer = (byte *)malloc(_decompBufferSize); +	if (!_decompBuffer) { +		_decompBuffer = new byte[_decompBufferSize];  		memset(_decompBuffer, 0, _decompBufferSize); -		if (_decompBuffer == NULL) -			error("Error allocating decomp buffer (size %u)", _decompBufferSize);  	}  	/* decompress the input data */ @@ -475,7 +467,7 @@ void DXADecoder::decode13(int size) {  #endif  } -const Graphics::Surface *DXADecoder::decodeNextFrame() { +const Graphics::Surface *DXADecoder::DXAVideoTrack::decodeNextFrame() {  	uint32 tag = _fileStream->readUint32BE();  	if (tag == MKTAG('C','M','A','P')) {  		_fileStream->read(_palette, 256 * 3); @@ -486,11 +478,10 @@ const Graphics::Surface *DXADecoder::decodeNextFrame() {  	if (tag == MKTAG('F','R','A','M')) {  		byte type = _fileStream->readByte();  		uint32 size = _fileStream->readUint32BE(); -		if ((_inBuffer == NULL) || (_inBufferSize < size)) { -			free(_inBuffer); -			_inBuffer = (byte *)malloc(size); -			if (_inBuffer == NULL) -				error("Error allocating input buffer (size %u)", size); + +		if (!_inBuffer || _inBufferSize < size) { +			delete[] _inBuffer; +			_inBuffer = new byte[size];  			memset(_inBuffer, 0, size);  			_inBufferSize = size;  		} @@ -551,9 +542,6 @@ const Graphics::Surface *DXADecoder::decodeNextFrame() {  	_curFrame++; -	if (_curFrame == 0) -		_startTime = g_system->getMillis(); -  	return _surface;  } diff --git a/video/dxa_decoder.h b/video/dxa_decoder.h index d13cd3076c..b3f2eca5e2 100644 --- a/video/dxa_decoder.h +++ b/video/dxa_decoder.h @@ -41,62 +41,74 @@ namespace Video {   *  - sword1   *  - sword2   */ -class DXADecoder : public FixedRateVideoDecoder { +class DXADecoder : public VideoDecoder {  public:  	DXADecoder();  	virtual ~DXADecoder();  	bool loadStream(Common::SeekableReadStream *stream); -	void close(); - -	bool isVideoLoaded() const { return _fileStream != 0; } -	uint16 getWidth() const { return _width; } -	uint16 getHeight() const { return _height; } -	uint32 
getFrameCount() const { return _frameCount; } -	const Graphics::Surface *decodeNextFrame(); -	Graphics::PixelFormat getPixelFormat() const { return Graphics::PixelFormat::createFormatCLUT8(); } -	const byte *getPalette() { _dirtyPalette = false; return _palette; } -	bool hasDirtyPalette() const { return _dirtyPalette; } +protected:  	/** -	 * Get the sound chunk tag of the loaded DXA file +	 * Read the sound data out of the given DXA stream  	 */ -	uint32 getSoundTag() { return _soundTag; } - -protected: -	Common::Rational getFrameRate() const { return _frameRate; } - -	Common::SeekableReadStream *_fileStream; +	virtual void readSoundData(Common::SeekableReadStream *stream);  private: -	void decodeZlib(byte *data, int size, int totalSize); -	void decode12(int size); -	void decode13(int size); - -	enum ScaleMode { -		S_NONE, -		S_INTERLACED, -		S_DOUBLE +	class DXAVideoTrack : public FixedRateVideoTrack { +	public: +		DXAVideoTrack(Common::SeekableReadStream *stream); +		~DXAVideoTrack(); + +		bool isRewindable() const { return true; } +		bool rewind(); + +		uint16 getWidth() const { return _width; } +		uint16 getHeight() const { return _height; } +		Graphics::PixelFormat getPixelFormat() const; +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		const Graphics::Surface *decodeNextFrame(); +		const byte *getPalette() const { _dirtyPalette = false; return _palette; } +		bool hasDirtyPalette() const { return _dirtyPalette; } + +		void setFrameStartPos(); + +	protected: +		Common::Rational getFrameRate() const { return _frameRate; } + +	private: +		void decodeZlib(byte *data, int size, int totalSize); +		void decode12(int size); +		void decode13(int size); + +		enum ScaleMode { +			S_NONE, +			S_INTERLACED, +			S_DOUBLE +		}; + +		Common::SeekableReadStream *_fileStream; +		Graphics::Surface *_surface; + +		byte *_frameBuffer1; +		byte *_frameBuffer2; +		byte *_scaledBuffer; +		byte *_inBuffer; +		uint32 
_inBufferSize; +		byte *_decompBuffer; +		uint32 _decompBufferSize; +		uint16 _curHeight; +		uint32 _frameSize; +		ScaleMode _scaleMode; +		uint16 _width, _height; +		uint32 _frameRate; +		uint32 _frameCount; +		byte _palette[256 * 3]; +		mutable bool _dirtyPalette; +		int _curFrame; +		uint32 _frameStartOffset;  	}; - -	Graphics::Surface *_surface; -	byte _palette[256 * 3]; -	bool _dirtyPalette; - -	byte *_frameBuffer1; -	byte *_frameBuffer2; -	byte *_scaledBuffer; -	byte *_inBuffer; -	uint32 _inBufferSize; -	byte *_decompBuffer; -	uint32 _decompBufferSize; -	uint16 _curHeight; -	uint32 _frameSize; -	ScaleMode _scaleMode; -	uint32 _soundTag; -	uint16 _width, _height; -	uint32 _frameRate; -	uint32 _frameCount;  };  } // End of namespace Video diff --git a/video/flic_decoder.cpp b/video/flic_decoder.cpp index bdcdedc142..1a0627615b 100644 --- a/video/flic_decoder.cpp +++ b/video/flic_decoder.cpp @@ -26,13 +26,11 @@  #include "common/stream.h"  #include "common/system.h"  #include "common/textconsole.h" +#include "graphics/surface.h"  namespace Video {  FlicDecoder::FlicDecoder() { -	_paletteChanged = false; -	_fileStream = 0; -	_surface = 0;  }  FlicDecoder::~FlicDecoder() { @@ -42,35 +40,59 @@ FlicDecoder::~FlicDecoder() {  bool FlicDecoder::loadStream(Common::SeekableReadStream *stream) {  	close(); -	_fileStream = stream; - -	/* uint32 frameSize = */ _fileStream->readUint32LE(); -	uint16 frameType = _fileStream->readUint16LE(); +	/* uint32 frameSize = */ stream->readUint32LE(); +	uint16 frameType = stream->readUint16LE();  	// Check FLC magic number  	if (frameType != 0xAF12) { -		warning("FlicDecoder::FlicDecoder(): attempted to load non-FLC data (type = 0x%04X)", frameType); -		delete _fileStream; -		_fileStream = 0; +		warning("FlicDecoder::loadStream(): attempted to load non-FLC data (type = 0x%04X)", frameType);  		return false;  	} - -	_frameCount = _fileStream->readUint16LE(); -	uint16 width = _fileStream->readUint16LE(); -	uint16 height = 
_fileStream->readUint16LE(); -	uint16 colorDepth = _fileStream->readUint16LE(); +	uint16 frameCount = stream->readUint16LE(); +	uint16 width = stream->readUint16LE(); +	uint16 height = stream->readUint16LE(); +	uint16 colorDepth = stream->readUint16LE();  	if (colorDepth != 8) { -		warning("FlicDecoder::FlicDecoder(): attempted to load an FLC with a palette of color depth %d. Only 8-bit color palettes are supported", frameType); -		delete _fileStream; -		_fileStream = 0; +		warning("FlicDecoder::loadStream(): attempted to load an FLC with a palette of color depth %d. Only 8-bit color palettes are supported", colorDepth);  		return false;  	} +	addTrack(new FlicVideoTrack(stream, frameCount, width, height)); +	return true; +} + +const Common::List<Common::Rect> *FlicDecoder::getDirtyRects() const { +	const Track *track = getTrack(0); + +	if (track) +		return ((const FlicVideoTrack *)track)->getDirtyRects(); + +	return 0; +} + +void FlicDecoder::clearDirtyRects() { +	Track *track = getTrack(0); + +	if (track) +		((FlicVideoTrack *)track)->clearDirtyRects(); +} + +void FlicDecoder::copyDirtyRectsToBuffer(uint8 *dst, uint pitch) { +	Track *track = getTrack(0); + +	if (track) +		((FlicVideoTrack *)track)->copyDirtyRectsToBuffer(dst, pitch); +} + +FlicDecoder::FlicVideoTrack::FlicVideoTrack(Common::SeekableReadStream *stream, uint16 frameCount, uint16 width, uint16 height) { +	_fileStream = stream; +	_frameCount = frameCount; +  	_fileStream->readUint16LE();	// flags  	// Note: The normal delay is a 32-bit integer (dword), whereas the overridden delay is a 16-bit integer (word)  	// the frame delay is the FLIC "speed", in milliseconds. 
-	_frameRate = Common::Rational(1000, _fileStream->readUint32LE()); +	_frameDelay = _startFrameDelay = _fileStream->readUint32LE();  	_fileStream->seek(80);  	_offsetFrame1 = _fileStream->readUint32LE(); @@ -78,112 +100,53 @@ bool FlicDecoder::loadStream(Common::SeekableReadStream *stream) {  	_surface = new Graphics::Surface();  	_surface->create(width, height, Graphics::PixelFormat::createFormatCLUT8()); -	_palette = (byte *)malloc(3 * 256); +	_palette = new byte[3 * 256];  	memset(_palette, 0, 3 * 256); -	_paletteChanged = false; +	_dirtyPalette = false; + +	_curFrame = -1; +	_nextFrameStartTime = 0; +	_atRingFrame = false;  	// Seek to the first frame  	_fileStream->seek(_offsetFrame1); -	return true;  } -void FlicDecoder::close() { -	if (!_fileStream) -		return; - +FlicDecoder::FlicVideoTrack::~FlicVideoTrack() {  	delete _fileStream; -	_fileStream = 0; +	delete[] _palette;  	_surface->free();  	delete _surface; -	_surface = 0; - -	free(_palette); -	_dirtyRects.clear(); - -	reset();  } -void FlicDecoder::decodeByteRun(uint8 *data) { -	byte *ptr = (byte *)_surface->pixels; -	while ((int32)(ptr - (byte *)_surface->pixels) < (getWidth() * getHeight())) { -		int chunks = *data++; -		while (chunks--) { -			int count = (int8)*data++; -			if (count > 0) { -				memset(ptr, *data++, count); -			} else { -				count = -count; -				memcpy(ptr, data, count); -				data += count; -			} -			ptr += count; -		} -	} - -	// Redraw -	_dirtyRects.clear(); -	_dirtyRects.push_back(Common::Rect(0, 0, getWidth(), getHeight())); +bool FlicDecoder::FlicVideoTrack::endOfTrack() const { +	return getCurFrame() >= getFrameCount() - 1;  } -#define OP_PACKETCOUNT   0 -#define OP_UNDEFINED     1 -#define OP_LASTPIXEL     2 -#define OP_LINESKIPCOUNT 3 - -void FlicDecoder::decodeDeltaFLC(uint8 *data) { -	uint16 linesInChunk = READ_LE_UINT16(data); data += 2; -	uint16 currentLine = 0; -	uint16 packetCount = 0; - -	while (linesInChunk--) { -		uint16 opcode; +bool 
FlicDecoder::FlicVideoTrack::rewind() { +	_curFrame = -1; +	_nextFrameStartTime = 0; -		// First process all the opcodes. -		do { -			opcode = READ_LE_UINT16(data); data += 2; +	if (endOfTrack() && _fileStream->pos() < _fileStream->size()) +		_atRingFrame = true; +	else +		_fileStream->seek(_offsetFrame1); -			switch ((opcode >> 14) & 3) { -			case OP_PACKETCOUNT: -				packetCount = opcode; -				break; -			case OP_UNDEFINED: -				break; -			case OP_LASTPIXEL: -				*((byte *)_surface->pixels + currentLine * getWidth() + getWidth() - 1) = (opcode & 0xFF); -				_dirtyRects.push_back(Common::Rect(getWidth() - 1, currentLine, getWidth(), currentLine + 1)); -				break; -			case OP_LINESKIPCOUNT: -				currentLine += -(int16)opcode; -				break; -			} -		} while (((opcode >> 14) & 3) != OP_PACKETCOUNT); +	_frameDelay = _startFrameDelay; +	return true; +} -		uint16 column = 0; +uint16 FlicDecoder::FlicVideoTrack::getWidth() const { +	return _surface->w; +} -		// Now interpret the RLE data -		while (packetCount--) { -			column += *data++; -			int rleCount = (int8)*data++; -			if (rleCount > 0) { -				memcpy((byte *)_surface->pixels + (currentLine * getWidth()) + column, data, rleCount * 2); -				data += rleCount * 2; -				_dirtyRects.push_back(Common::Rect(column, currentLine, column + rleCount * 2, currentLine + 1)); -			} else if (rleCount < 0) { -				rleCount = -rleCount; -				uint16 dataWord = READ_UINT16(data); data += 2; -				for (int i = 0; i < rleCount; ++i) { -					WRITE_UINT16((byte *)_surface->pixels + currentLine * getWidth() + column + i * 2, dataWord); -				} -				_dirtyRects.push_back(Common::Rect(column, currentLine, column + rleCount * 2, currentLine + 1)); -			} else { // End of cutscene ? 
-				return; -			} -			column += rleCount * 2; -		} +uint16 FlicDecoder::FlicVideoTrack::getHeight() const { +	return _surface->h; +} -		currentLine++; -	} +Graphics::PixelFormat FlicDecoder::FlicVideoTrack::getPixelFormat() const { +	return _surface->format;  }  #define FLI_SETPAL 4 @@ -192,7 +155,7 @@ void FlicDecoder::decodeDeltaFLC(uint8 *data) {  #define PSTAMP     18  #define FRAME_TYPE 0xF1FA -const Graphics::Surface *FlicDecoder::decodeNextFrame() { +const Graphics::Surface *FlicDecoder::FlicVideoTrack::decodeNextFrame() {  	// Read chunk  	uint32 frameSize = _fileStream->readUint32LE();  	uint16 frameType = _fileStream->readUint16LE(); @@ -201,15 +164,12 @@ const Graphics::Surface *FlicDecoder::decodeNextFrame() {  	switch (frameType) {  	case FRAME_TYPE:  		{ -			// FIXME: FLIC should be switched over to a variable frame rate VideoDecoder to handle -			// this properly. -  			chunkCount = _fileStream->readUint16LE();  			// Note: The overridden delay is a 16-bit integer (word), whereas the normal delay is a 32-bit integer (dword)  			// the frame delay is the FLIC "speed", in milliseconds.  			
uint16 newFrameDelay = _fileStream->readUint16LE();	// "speed", in milliseconds  			if (newFrameDelay > 0) -				_frameRate = Common::Rational(1000, newFrameDelay); +				_frameDelay = newFrameDelay;  			_fileStream->readUint16LE();	// reserved, always 0  			uint16 newWidth = _fileStream->readUint16LE(); @@ -240,10 +200,11 @@ const Graphics::Surface *FlicDecoder::decodeNextFrame() {  			frameType = _fileStream->readUint16LE();  			uint8 *data = new uint8[frameSize - 6];  			_fileStream->read(data, frameSize - 6); +  			switch (frameType) {  			case FLI_SETPAL:  				unpackPalette(data); -				_paletteChanged = true; +				_dirtyPalette = true;  				break;  			case FLI_SS2:  				decodeDeltaFLC(data); @@ -264,26 +225,111 @@ const Graphics::Surface *FlicDecoder::decodeNextFrame() {  	}  	_curFrame++; +	_nextFrameStartTime += _frameDelay; -	// If we just processed the ring frame, set the next frame -	if (_curFrame == (int32)_frameCount) { -		_curFrame = 0; +	if (_atRingFrame) { +		// If we decoded the ring frame, seek to the second frame +		_atRingFrame = false;  		_fileStream->seek(_offsetFrame2);  	} -	if (_curFrame == 0) -		_startTime = g_system->getMillis(); -  	return _surface;  } -void FlicDecoder::reset() { -	FixedRateVideoDecoder::reset(); -	if (_fileStream) -		_fileStream->seek(_offsetFrame1); +void FlicDecoder::FlicVideoTrack::copyDirtyRectsToBuffer(uint8 *dst, uint pitch) { +	for (Common::List<Common::Rect>::const_iterator it = _dirtyRects.begin(); it != _dirtyRects.end(); ++it) { +		for (int y = (*it).top; y < (*it).bottom; ++y) { +			const int x = (*it).left; +			memcpy(dst + y * pitch + x, (byte *)_surface->pixels + y * getWidth() + x, (*it).right - x); +		} +	} + +	clearDirtyRects();  } -void FlicDecoder::unpackPalette(uint8 *data) { +void FlicDecoder::FlicVideoTrack::decodeByteRun(uint8 *data) { +	byte *ptr = (byte *)_surface->pixels; +	while ((int32)(ptr - (byte *)_surface->pixels) < (getWidth() * getHeight())) { +		int chunks = *data++; +		while (chunks--) 
{ +			int count = (int8)*data++; +			if (count > 0) { +				memset(ptr, *data++, count); +			} else { +				count = -count; +				memcpy(ptr, data, count); +				data += count; +			} +			ptr += count; +		} +	} + +	// Redraw +	_dirtyRects.clear(); +	_dirtyRects.push_back(Common::Rect(0, 0, getWidth(), getHeight())); +} + +#define OP_PACKETCOUNT   0 +#define OP_UNDEFINED     1 +#define OP_LASTPIXEL     2 +#define OP_LINESKIPCOUNT 3 + +void FlicDecoder::FlicVideoTrack::decodeDeltaFLC(uint8 *data) { +	uint16 linesInChunk = READ_LE_UINT16(data); data += 2; +	uint16 currentLine = 0; +	uint16 packetCount = 0; + +	while (linesInChunk--) { +		uint16 opcode; + +		// First process all the opcodes. +		do { +			opcode = READ_LE_UINT16(data); data += 2; + +			switch ((opcode >> 14) & 3) { +			case OP_PACKETCOUNT: +				packetCount = opcode; +				break; +			case OP_UNDEFINED: +				break; +			case OP_LASTPIXEL: +				*((byte *)_surface->pixels + currentLine * getWidth() + getWidth() - 1) = (opcode & 0xFF); +				_dirtyRects.push_back(Common::Rect(getWidth() - 1, currentLine, getWidth(), currentLine + 1)); +				break; +			case OP_LINESKIPCOUNT: +				currentLine += -(int16)opcode; +				break; +			} +		} while (((opcode >> 14) & 3) != OP_PACKETCOUNT); + +		uint16 column = 0; + +		// Now interpret the RLE data +		while (packetCount--) { +			column += *data++; +			int rleCount = (int8)*data++; +			if (rleCount > 0) { +				memcpy((byte *)_surface->pixels + (currentLine * getWidth()) + column, data, rleCount * 2); +				data += rleCount * 2; +				_dirtyRects.push_back(Common::Rect(column, currentLine, column + rleCount * 2, currentLine + 1)); +			} else if (rleCount < 0) { +				rleCount = -rleCount; +				uint16 dataWord = READ_UINT16(data); data += 2; +				for (int i = 0; i < rleCount; ++i) { +					WRITE_UINT16((byte *)_surface->pixels + currentLine * getWidth() + column + i * 2, dataWord); +				} +				_dirtyRects.push_back(Common::Rect(column, currentLine, column + rleCount * 2, currentLine + 
1)); +			} else { // End of cutscene ? +				return; +			} +			column += rleCount * 2; +		} + +		currentLine++; +	} +} + +void FlicDecoder::FlicVideoTrack::unpackPalette(uint8 *data) {  	uint16 numPackets = READ_LE_UINT16(data); data += 2;  	if (0 == READ_LE_UINT16(data)) { //special case @@ -308,14 +354,4 @@ void FlicDecoder::unpackPalette(uint8 *data) {  	}  } -void FlicDecoder::copyDirtyRectsToBuffer(uint8 *dst, uint pitch) { -	for (Common::List<Common::Rect>::const_iterator it = _dirtyRects.begin(); it != _dirtyRects.end(); ++it) { -		for (int y = (*it).top; y < (*it).bottom; ++y) { -			const int x = (*it).left; -			memcpy(dst + y * pitch + x, (byte *)_surface->pixels + y * getWidth() + x, (*it).right - x); -		} -	} -	_dirtyRects.clear(); -} -  } // End of namespace Video diff --git a/video/flic_decoder.h b/video/flic_decoder.h index 9badc3da2e..9037af05d6 100644 --- a/video/flic_decoder.h +++ b/video/flic_decoder.h @@ -25,15 +25,17 @@  #include "video/video_decoder.h"  #include "common/list.h" -#include "common/rational.h"  #include "common/rect.h" -#include "graphics/pixelformat.h" -#include "graphics/surface.h"  namespace Common {  class SeekableReadStream;  } +namespace Graphics { +struct PixelFormat; +struct Surface; +} +  namespace Video {  /** @@ -42,58 +44,63 @@ namespace Video {   * Video decoder used in engines:   *  - tucker   */ -class FlicDecoder : public FixedRateVideoDecoder { +class FlicDecoder : public VideoDecoder {  public:  	FlicDecoder();  	virtual ~FlicDecoder(); -	/** -	 * Load a video file -	 * @param stream  the stream to load -	 */  	bool loadStream(Common::SeekableReadStream *stream); -	void close(); - -	/** -	 * Decode the next frame and return the frame's surface -	 * @note the return surface should *not* be freed -	 * @note this may return 0, in which case the last frame should be kept on screen -	 */ -	const Graphics::Surface *decodeNextFrame(); - -	bool isVideoLoaded() const { return _fileStream != 0; } -	uint16 getWidth() const { 
return _surface->w; } -	uint16 getHeight() const { return _surface->h; } -	uint32 getFrameCount() const { return _frameCount; } -	Graphics::PixelFormat getPixelFormat() const { return Graphics::PixelFormat::createFormatCLUT8(); } - -	const Common::List<Common::Rect> *getDirtyRects() const { return &_dirtyRects; } -	void clearDirtyRects() { _dirtyRects.clear(); } -	void copyDirtyRectsToBuffer(uint8 *dst, uint pitch); - -	const byte *getPalette() { _paletteChanged = false; return _palette; } -	bool hasDirtyPalette() const { return _paletteChanged; } -	void reset(); -protected: -	Common::Rational getFrameRate() const { return _frameRate; } +	const Common::List<Common::Rect> *getDirtyRects() const; +	void clearDirtyRects(); +	void copyDirtyRectsToBuffer(uint8 *dst, uint pitch);  private: -	uint16 _offsetFrame1; -	uint16 _offsetFrame2; -	byte *_palette; -	bool _paletteChanged; - -	void decodeByteRun(uint8 *data); -	void decodeDeltaFLC(uint8 *data); -	void unpackPalette(uint8 *mem); - -	Common::SeekableReadStream *_fileStream; -	Graphics::Surface *_surface; -	uint32 _frameCount; -	Common::Rational _frameRate; - -	Common::List<Common::Rect> _dirtyRects; +	class FlicVideoTrack : public VideoTrack { +	public: +		FlicVideoTrack(Common::SeekableReadStream *stream, uint16 frameCount, uint16 width, uint16 height); +		~FlicVideoTrack(); + +		bool endOfTrack() const; +		bool isRewindable() const { return true; } +		bool rewind(); + +		uint16 getWidth() const; +		uint16 getHeight() const; +		Graphics::PixelFormat getPixelFormat() const; +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		uint32 getNextFrameStartTime() const { return _nextFrameStartTime; } +		const Graphics::Surface *decodeNextFrame(); +		const byte *getPalette() const { _dirtyPalette = false; return _palette; } +		bool hasDirtyPalette() const { return _dirtyPalette; } + +		const Common::List<Common::Rect> *getDirtyRects() const { return &_dirtyRects; } +		void 
clearDirtyRects() { _dirtyRects.clear(); } +		void copyDirtyRectsToBuffer(uint8 *dst, uint pitch); + +	private: +		Common::SeekableReadStream *_fileStream; +		Graphics::Surface *_surface; + +		int _curFrame; +		bool _atRingFrame; + +		uint16 _offsetFrame1; +		uint16 _offsetFrame2; +		byte *_palette; +		mutable bool _dirtyPalette; + +		uint32 _frameCount; +		uint32 _frameDelay, _startFrameDelay; +		uint32 _nextFrameStartTime; + +		Common::List<Common::Rect> _dirtyRects; + +		void decodeByteRun(uint8 *data); +		void decodeDeltaFLC(uint8 *data); +		void unpackPalette(uint8 *mem); +	};  };  } // End of namespace Video diff --git a/video/module.mk b/video/module.mk index cebd403ca2..287e14ce18 100644 --- a/video/module.mk +++ b/video/module.mk @@ -26,5 +26,10 @@ MODULE_OBJS += \  	bink_decoder.o  endif +ifdef USE_THEORADEC +MODULE_OBJS += \ +	theora_decoder.o +endif +  # Include common rules  include $(srcdir)/rules.mk diff --git a/video/psx_decoder.cpp b/video/psx_decoder.cpp index df91a2badd..fa7f1e8cfe 100644 --- a/video/psx_decoder.cpp +++ b/video/psx_decoder.cpp @@ -149,22 +149,12 @@ static const uint32 s_huffmanACSymbols[AC_CODE_COUNT] = {  	END_OF_BLOCK  }; -PSXStreamDecoder::PSXStreamDecoder(CDSpeed speed, uint32 frameCount) : _nextFrameStartTime(0, speed), _frameCount(frameCount) { +PSXStreamDecoder::PSXStreamDecoder(CDSpeed speed, uint32 frameCount) : _speed(speed), _frameCount(frameCount) {  	_stream = 0; -	_audStream = 0; -	_surface = new Graphics::Surface(); -	_yBuffer = _cbBuffer = _crBuffer = 0; -	_acHuffman = new Common::Huffman(0, AC_CODE_COUNT, s_huffmanACCodes, s_huffmanACLengths, s_huffmanACSymbols); -	_dcHuffmanChroma = new Common::Huffman(0, DC_CODE_COUNT, s_huffmanDCChromaCodes, s_huffmanDCChromaLengths, s_huffmanDCSymbols); -	_dcHuffmanLuma = new Common::Huffman(0, DC_CODE_COUNT, s_huffmanDCLumaCodes, s_huffmanDCLumaLengths, s_huffmanDCSymbols);  }  PSXStreamDecoder::~PSXStreamDecoder() {  	close(); -	delete _surface; -	delete _acHuffman; -	
delete _dcHuffmanLuma; -	delete _dcHuffmanChroma;  }  #define RAW_CD_SECTOR_SIZE 2352 @@ -178,95 +168,30 @@ bool PSXStreamDecoder::loadStream(Common::SeekableReadStream *stream) {  	close();  	_stream = stream; - -	Common::SeekableReadStream *sector = readSector(); - -	if (!sector) { -		close(); -		return false; -	} - -	// Rip out video info from the first frame -	sector->seek(18); -	byte sectorType = sector->readByte() & CDXA_TYPE_MASK; - -	if (sectorType != CDXA_TYPE_VIDEO && sectorType != CDXA_TYPE_DATA) { -		close(); -		return false; -	} - -	sector->seek(40); - -	uint16 width = sector->readUint16LE(); -	uint16 height = sector->readUint16LE(); -	_surface->create(width, height, g_system->getScreenFormat()); - -	_macroBlocksW = (width + 15) / 16; -	_macroBlocksH = (height + 15) / 16; -	_yBuffer = new byte[_macroBlocksW * _macroBlocksH * 16 * 16]; -	_cbBuffer = new byte[_macroBlocksW * _macroBlocksH * 8 * 8]; -	_crBuffer = new byte[_macroBlocksW * _macroBlocksH * 8 * 8]; - -	delete sector; -	_stream->seek(0); +	readNextPacket();  	return true;  }  void PSXStreamDecoder::close() { -	if (!_stream) -		return; +	VideoDecoder::close(); +	_audioTrack = 0; +	_videoTrack = 0; +	_frameCount = 0;  	delete _stream;  	_stream = 0; - -	// Deinitialize sound -	g_system->getMixer()->stopHandle(_audHandle); -	_audStream = 0; - -	_surface->free(); - -	memset(&_adpcmStatus, 0, sizeof(_adpcmStatus)); - -	_macroBlocksW = _macroBlocksH = 0; -	delete[] _yBuffer; _yBuffer = 0; -	delete[] _cbBuffer; _cbBuffer = 0; -	delete[] _crBuffer; _crBuffer = 0; - -	reset(); -} - -uint32 PSXStreamDecoder::getTime() const { -	// TODO: Currently, the audio is always after the video so using this -	// can often lead to gaps in the audio... 
-	//if (_audStream) -	//	return _mixer->getSoundElapsedTime(_audHandle); - -	return VideoDecoder::getTime(); -} - -uint32 PSXStreamDecoder::getTimeToNextFrame() const { -	if (!isVideoLoaded() || endOfVideo()) -		return 0; - -	uint32 nextTimeMillis = _nextFrameStartTime.msecs(); -	uint32 elapsedTime = getTime(); - -	if (elapsedTime > nextTimeMillis) -		return 0; - -	return nextTimeMillis - elapsedTime;  }  #define VIDEO_DATA_CHUNK_SIZE   2016  #define VIDEO_DATA_HEADER_SIZE  56 -const Graphics::Surface *PSXStreamDecoder::decodeNextFrame() { +void PSXStreamDecoder::readNextPacket() {  	Common::SeekableReadStream *sector = 0;  	byte *partialFrame = 0;  	int sectorsRead = 0; -	while (!endOfVideo()) { +	while (_stream->pos() < _stream->size()) {  		sector = readSector();  		sectorsRead++; @@ -284,6 +209,11 @@ const Graphics::Surface *PSXStreamDecoder::decodeNextFrame() {  		case CDXA_TYPE_DATA:  		case CDXA_TYPE_VIDEO:  			if (track == 1) { +				if (!_videoTrack) { +					_videoTrack = new PSXVideoTrack(sector, _speed, _frameCount); +					addTrack(_videoTrack); +				} +  				sector->seek(28);  				uint16 curSector = sector->readUint16LE();  				uint16 sectorCount = sector->readUint16LE(); @@ -303,35 +233,27 @@ const Graphics::Surface *PSXStreamDecoder::decodeNextFrame() {  					// Done assembling the frame  					Common::SeekableReadStream *frame = new Common::MemoryReadStream(partialFrame, frameSize, DisposeAfterUse::YES); -					decodeFrame(frame); +					_videoTrack->decodeFrame(frame, sectorsRead);  					delete frame;  					delete sector; - -					_curFrame++; -					if (_curFrame == 0) -						_startTime = g_system->getMillis(); - -					// Increase the time by the amount of sectors we read -					// One may notice that this is still not the most precise -					// method since a frame takes up the time its sectors took -					// up instead of the amount of time it takes the next frame -					// to be read from the sectors. 
The actual frame rate should -					// be constant instead of variable, so the slight difference -					// in a frame's showing time is negligible (1/150 of a second). -					_nextFrameStartTime = _nextFrameStartTime.addFrames(sectorsRead); - -					return _surface; +					return;  				}  			} else  				error("Unhandled multi-track video");  			break;  		case CDXA_TYPE_AUDIO:  			// We only handle one audio channel so far -			if (track == 1) -				queueAudioFromSector(sector); -			else +			if (track == 1) { +				if (!_audioTrack) { +					_audioTrack = new PSXAudioTrack(sector); +					addTrack(_audioTrack); +				} + +				_audioTrack->queueAudioFromSector(sector); +			} else {  				warning("Unhandled multi-track audio"); +			}  			break;  		default:  			// This shows up way too often, but the other sectors @@ -343,7 +265,19 @@ const Graphics::Surface *PSXStreamDecoder::decodeNextFrame() {  		delete sector;  	} -	return 0; +	if (_stream->pos() >= _stream->size()) { +		if (_videoTrack) +			_videoTrack->setEndOfTrack(); + +		if (_audioTrack) +			_audioTrack->setEndOfTrack(); +	} +} + +bool PSXStreamDecoder::useAudioSync() const { +	// Audio sync is disabled since most audio data comes after video +	// data. +	return false;  }  static const byte s_syncHeader[12] = { 0x00, 0xff ,0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; @@ -373,20 +307,29 @@ static const int s_xaTable[5][2] = {     { 122, -60 }  }; -void PSXStreamDecoder::queueAudioFromSector(Common::SeekableReadStream *sector) { +PSXStreamDecoder::PSXAudioTrack::PSXAudioTrack(Common::SeekableReadStream *sector) {  	assert(sector); +	_endOfTrack = false; -	if (!_audStream) { -		// Initialize audio stream -		sector->seek(19); -		byte format = sector->readByte(); +	sector->seek(19); +	byte format = sector->readByte(); +	bool stereo = (format & (1 << 0)) != 0; +	uint rate = (format & (1 << 2)) ? 
18900 : 37800; +	_audStream = Audio::makeQueuingAudioStream(rate, stereo); -		bool stereo = (format & (1 << 0)) != 0; -		uint rate = (format & (1 << 2)) ? 18900 : 37800; +	memset(&_adpcmStatus, 0, sizeof(_adpcmStatus)); +} -		_audStream = Audio::makeQueuingAudioStream(rate, stereo); -		g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, &_audHandle, _audStream, -1, getVolume(), getBalance()); -	} +PSXStreamDecoder::PSXAudioTrack::~PSXAudioTrack() { +	delete _audStream; +} + +bool PSXStreamDecoder::PSXAudioTrack::endOfTrack() const { +	return AudioTrack::endOfTrack() && _endOfTrack; +} + +void PSXStreamDecoder::PSXAudioTrack::queueAudioFromSector(Common::SeekableReadStream *sector) { +	assert(sector);  	sector->seek(24); @@ -472,7 +415,54 @@ void PSXStreamDecoder::queueAudioFromSector(Common::SeekableReadStream *sector)  	delete[] buf;  } -void PSXStreamDecoder::decodeFrame(Common::SeekableReadStream *frame) { +Audio::AudioStream *PSXStreamDecoder::PSXAudioTrack::getAudioStream() const { +	return _audStream; +} + + +PSXStreamDecoder::PSXVideoTrack::PSXVideoTrack(Common::SeekableReadStream *firstSector, CDSpeed speed, int frameCount) : _nextFrameStartTime(0, speed), _frameCount(frameCount) { +	assert(firstSector); + +	firstSector->seek(40); +	uint16 width = firstSector->readUint16LE(); +	uint16 height = firstSector->readUint16LE(); +	_surface = new Graphics::Surface(); +	_surface->create(width, height, g_system->getScreenFormat()); + +	_macroBlocksW = (width + 15) / 16; +	_macroBlocksH = (height + 15) / 16; +	_yBuffer = new byte[_macroBlocksW * _macroBlocksH * 16 * 16]; +	_cbBuffer = new byte[_macroBlocksW * _macroBlocksH * 8 * 8]; +	_crBuffer = new byte[_macroBlocksW * _macroBlocksH * 8 * 8]; + +	_endOfTrack = false; +	_curFrame = -1; +	_acHuffman = new Common::Huffman(0, AC_CODE_COUNT, s_huffmanACCodes, s_huffmanACLengths, s_huffmanACSymbols); +	_dcHuffmanChroma = new Common::Huffman(0, DC_CODE_COUNT, s_huffmanDCChromaCodes, s_huffmanDCChromaLengths, 
s_huffmanDCSymbols); +	_dcHuffmanLuma = new Common::Huffman(0, DC_CODE_COUNT, s_huffmanDCLumaCodes, s_huffmanDCLumaLengths, s_huffmanDCSymbols); +} + +PSXStreamDecoder::PSXVideoTrack::~PSXVideoTrack() { +	_surface->free(); +	delete _surface; + +	delete[] _yBuffer; +	delete[] _cbBuffer; +	delete[] _crBuffer; +	delete _acHuffman; +	delete _dcHuffmanChroma; +	delete _dcHuffmanLuma; +} + +uint32 PSXStreamDecoder::PSXVideoTrack::getNextFrameStartTime() const { +	return _nextFrameStartTime.msecs(); +} + +const Graphics::Surface *PSXStreamDecoder::PSXVideoTrack::decodeNextFrame() { +	return _surface; +} + +void PSXStreamDecoder::PSXVideoTrack::decodeFrame(Common::SeekableReadStream *frame, uint sectorCount) {  	// A frame is essentially an MPEG-1 intra frame  	Common::BitStream16LEMSB bits(frame); @@ -494,9 +484,20 @@ void PSXStreamDecoder::decodeFrame(Common::SeekableReadStream *frame) {  	// Output data onto the frame  	Graphics::convertYUV420ToRGB(_surface, _yBuffer, _cbBuffer, _crBuffer, _surface->w, _surface->h, _macroBlocksW * 16, _macroBlocksW * 8); + +	_curFrame++; + +	// Increase the time by the amount of sectors we read +	// One may notice that this is still not the most precise +	// method since a frame takes up the time its sectors took +	// up instead of the amount of time it takes the next frame +	// to be read from the sectors. The actual frame rate should +	// be constant instead of variable, so the slight difference +	// in a frame's showing time is negligible (1/150 of a second). 
+	_nextFrameStartTime = _nextFrameStartTime.addFrames(sectorCount);  } -void PSXStreamDecoder::decodeMacroBlock(Common::BitStream *bits, int mbX, int mbY, uint16 scale, uint16 version) { +void PSXStreamDecoder::PSXVideoTrack::decodeMacroBlock(Common::BitStream *bits, int mbX, int mbY, uint16 scale, uint16 version) {  	int pitchY = _macroBlocksW * 16;  	int pitchC = _macroBlocksW * 8; @@ -533,7 +534,7 @@ static const byte s_quantizationTable[8 * 8] = {  	27, 29, 35, 38, 46, 56, 69, 83  }; -void PSXStreamDecoder::dequantizeBlock(int *coefficients, float *block, uint16 scale) { +void PSXStreamDecoder::PSXVideoTrack::dequantizeBlock(int *coefficients, float *block, uint16 scale) {  	// Dequantize the data, un-zig-zagging as we go along  	for (int i = 0; i < 8 * 8; i++) {  		if (i == 0) // Special case for the DC coefficient @@ -543,7 +544,7 @@ void PSXStreamDecoder::dequantizeBlock(int *coefficients, float *block, uint16 s  	}  } -int PSXStreamDecoder::readDC(Common::BitStream *bits, uint16 version, PlaneType plane) { +int PSXStreamDecoder::PSXVideoTrack::readDC(Common::BitStream *bits, uint16 version, PlaneType plane) {  	// Version 2 just has its coefficient as 10-bits  	if (version == 2)  		return readSignedCoefficient(bits); @@ -573,7 +574,7 @@ int PSXStreamDecoder::readDC(Common::BitStream *bits, uint16 version, PlaneType  	if (count > 63) \  		error("PSXStreamDecoder::readAC(): Too many coefficients") -void PSXStreamDecoder::readAC(Common::BitStream *bits, int *block) { +void PSXStreamDecoder::PSXVideoTrack::readAC(Common::BitStream *bits, int *block) {  	// Clear the block first  	for (int i = 0; i < 63; i++)  		block[i] = 0; @@ -608,7 +609,7 @@ void PSXStreamDecoder::readAC(Common::BitStream *bits, int *block) {  	}  } -int PSXStreamDecoder::readSignedCoefficient(Common::BitStream *bits) { +int PSXStreamDecoder::PSXVideoTrack::readSignedCoefficient(Common::BitStream *bits) {  	uint val = bits->getBits(10);  	// extend the sign @@ -630,7 +631,7 @@ static const 
double s_idct8x8[8][8] = {  	{ 0.353553390593274, -0.490392640201615,  0.461939766255643, -0.415734806151273,  0.353553390593273, -0.277785116509801,  0.191341716182545, -0.097545161008064 }  }; -void PSXStreamDecoder::idct(float *dequantData, float *result) { +void PSXStreamDecoder::PSXVideoTrack::idct(float *dequantData, float *result) {  	// IDCT code based on JPEG's IDCT code  	// TODO: Switch to the integer-based one mentioned in the docs  	// This is by far the costliest operation here @@ -669,7 +670,7 @@ void PSXStreamDecoder::idct(float *dequantData, float *result) {  	}  } -void PSXStreamDecoder::decodeBlock(Common::BitStream *bits, byte *block, int pitch, uint16 scale, uint16 version, PlaneType plane) { +void PSXStreamDecoder::PSXVideoTrack::decodeBlock(Common::BitStream *bits, byte *block, int pitch, uint16 scale, uint16 version, PlaneType plane) {  	// Version 2 just has signed 10 bits for DC  	// Version 3 has them huffman coded  	int coefficients[8 * 8]; @@ -686,22 +687,13 @@ void PSXStreamDecoder::decodeBlock(Common::BitStream *bits, byte *block, int pit  	// Now output the data  	for (int y = 0; y < 8; y++) { -		byte *start = block + pitch * y; +		byte *dst = block + pitch * y;  		// Convert the result to be in the range [0, 255]  		for (int x = 0; x < 8; x++) -			*start++ = (int)CLIP<float>(idctData[y * 8 + x], -128.0f, 127.0f) + 128; +			*dst++ = (int)CLIP<float>(idctData[y * 8 + x], -128.0f, 127.0f) + 128;  	}  } -void PSXStreamDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(_audHandle)) -		g_system->getMixer()->setChannelVolume(_audHandle, getVolume()); -} - -void PSXStreamDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(_audHandle)) -		g_system->getMixer()->setChannelBalance(_audHandle, getBalance()); -}  } // End of namespace Video diff --git a/video/psx_decoder.h b/video/psx_decoder.h index 4364ec4bbb..11f311594d 100644 --- a/video/psx_decoder.h +++ b/video/psx_decoder.h @@ -71,59 +71,85 @@ 
public:  	bool loadStream(Common::SeekableReadStream *stream);  	void close(); -	bool isVideoLoaded() const { return _stream != 0; } -	uint16 getWidth() const { return _surface->w; } -	uint16 getHeight() const { return _surface->h; } -	uint32 getFrameCount() const { return _frameCount; } -	uint32 getTime() const; -	uint32 getTimeToNextFrame() const; -	const Graphics::Surface *decodeNextFrame(); -	Graphics::PixelFormat getPixelFormat() const { return _surface->format; } -	bool endOfVideo() const { return _stream->pos() >= _stream->size(); } -  protected: -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); +	void readNextPacket(); +	bool useAudioSync() const;  private: -	void initCommon(); -	Common::SeekableReadStream *_stream; -	Graphics::Surface *_surface; +	class PSXVideoTrack : public VideoTrack { +	public: +		PSXVideoTrack(Common::SeekableReadStream *firstSector, CDSpeed speed, int frameCount); +		~PSXVideoTrack(); + +		uint16 getWidth() const { return _surface->w; } +		uint16 getHeight() const { return _surface->h; } +		Graphics::PixelFormat getPixelFormat() const { return _surface->format; } +		bool endOfTrack() const { return _endOfTrack; } +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		uint32 getNextFrameStartTime() const; +		const Graphics::Surface *decodeNextFrame(); + +		void setEndOfTrack() { _endOfTrack = true; } +		void decodeFrame(Common::SeekableReadStream *frame, uint sectorCount); + +	private: +		Graphics::Surface *_surface; +		uint32 _frameCount; +		Audio::Timestamp _nextFrameStartTime; +		bool _endOfTrack; +		int _curFrame; + +		enum PlaneType { +			kPlaneY = 0, +			kPlaneU = 1, +			kPlaneV = 2 +		}; + +		uint16 _macroBlocksW, _macroBlocksH; +		byte *_yBuffer, *_cbBuffer, *_crBuffer; +		void decodeMacroBlock(Common::BitStream *bits, int mbX, int mbY, uint16 scale, uint16 version); +		void decodeBlock(Common::BitStream *bits, byte *block, int pitch, uint16 scale, uint16 
version, PlaneType plane); + +		void readAC(Common::BitStream *bits, int *block); +		Common::Huffman *_acHuffman; + +		int readDC(Common::BitStream *bits, uint16 version, PlaneType plane); +		Common::Huffman *_dcHuffmanLuma, *_dcHuffmanChroma; +		int _lastDC[3]; + +		void dequantizeBlock(int *coefficients, float *block, uint16 scale); +		void idct(float *dequantData, float *result); +		int readSignedCoefficient(Common::BitStream *bits); +	}; -	uint32 _frameCount; -	Audio::Timestamp _nextFrameStartTime; +	class PSXAudioTrack : public AudioTrack { +	public: +		PSXAudioTrack(Common::SeekableReadStream *sector); +		~PSXAudioTrack(); -	Audio::SoundHandle _audHandle; -	Audio::QueuingAudioStream *_audStream; -	void queueAudioFromSector(Common::SeekableReadStream *sector); +		bool endOfTrack() const; -	enum PlaneType { -		kPlaneY = 0, -		kPlaneU = 1, -		kPlaneV = 2 -	}; +		void setEndOfTrack() { _endOfTrack = true; } +		void queueAudioFromSector(Common::SeekableReadStream *sector); -	uint16 _macroBlocksW, _macroBlocksH; -	byte *_yBuffer, *_cbBuffer, *_crBuffer; -	void decodeFrame(Common::SeekableReadStream *frame); -	void decodeMacroBlock(Common::BitStream *bits, int mbX, int mbY, uint16 scale, uint16 version); -	void decodeBlock(Common::BitStream *bits, byte *block, int pitch, uint16 scale, uint16 version, PlaneType plane); +	private: +		Audio::AudioStream *getAudioStream() const; -	void readAC(Common::BitStream *bits, int *block); -	Common::Huffman *_acHuffman; +		Audio::QueuingAudioStream *_audStream; -	int readDC(Common::BitStream *bits, uint16 version, PlaneType plane); -	Common::Huffman *_dcHuffmanLuma, *_dcHuffmanChroma; -	int _lastDC[3]; +		struct ADPCMStatus { +			int16 sample[2]; +		} _adpcmStatus[2]; -	void dequantizeBlock(int *coefficients, float *block, uint16 scale); -	void idct(float *dequantData, float *result); -	int readSignedCoefficient(Common::BitStream *bits); +		bool _endOfTrack; +	}; -	struct ADPCMStatus { -		int16 sample[2]; -	} _adpcmStatus[2]; +	
CDSpeed _speed; +	uint32 _frameCount; +	Common::SeekableReadStream *_stream; +	PSXVideoTrack *_videoTrack; +	PSXAudioTrack *_audioTrack;	  	Common::SeekableReadStream *readSector();  }; diff --git a/video/qt_decoder.cpp b/video/qt_decoder.cpp index aba545abc0..87c530dba0 100644 --- a/video/qt_decoder.cpp +++ b/video/qt_decoder.cpp @@ -33,14 +33,12 @@  #include "audio/audiostream.h"  #include "common/debug.h" -#include "common/endian.h"  #include "common/memstream.h"  #include "common/system.h"  #include "common/textconsole.h"  #include "common/util.h"  // Video codecs -#include "video/codecs/codec.h"  #include "video/codecs/cinepak.h"  #include "video/codecs/mjpeg.h"  #include "video/codecs/qtrle.h" @@ -56,97 +54,43 @@ namespace Video {  ////////////////////////////////////////////  QuickTimeDecoder::QuickTimeDecoder() { -	_setStartTime = false;  	_scaledSurface = 0; -	_dirtyPalette = false; -	_palette = 0;  	_width = _height = 0; -	_needUpdate = false;  }  QuickTimeDecoder::~QuickTimeDecoder() {  	close();  } -int32 QuickTimeDecoder::getCurFrame() const { -	// TODO: This is rather simplistic and doesn't take edits that -	// repeat sections of the media into account. Doing that -	// over-complicates things and shouldn't be necessary, but -	// it would be nice to have in the future. 
- -	int32 frame = -1; - -	for (uint32 i = 0; i < _handlers.size(); i++) -		if (_handlers[i]->getTrackType() == TrackHandler::kTrackTypeVideo) -			frame += ((VideoTrackHandler *)_handlers[i])->getCurFrame() + 1; - -	return frame; -} - -uint32 QuickTimeDecoder::getFrameCount() const { -	uint32 count = 0; - -	for (uint32 i = 0; i < _handlers.size(); i++) -		if (_handlers[i]->getTrackType() == TrackHandler::kTrackTypeVideo) -			count += ((VideoTrackHandler *)_handlers[i])->getFrameCount(); - -	return count; -} - -void QuickTimeDecoder::startAudio() { -	updateAudioBuffer(); - -	for (uint32 i = 0; i < _audioTracks.size(); i++) { -		g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, &_audioHandles[i], _audioTracks[i], -1, getVolume(), getBalance(), DisposeAfterUse::NO); +bool QuickTimeDecoder::loadFile(const Common::String &filename) { +	if (!Common::QuickTimeParser::parseFile(filename)) +		return false; -		// Pause the audio again if we're still paused -		if (isPaused()) -			g_system->getMixer()->pauseHandle(_audioHandles[i], true); -	} +	init(); +	return true;  } -void QuickTimeDecoder::stopAudio() { -	for (uint32 i = 0; i < _audioHandles.size(); i++) -		g_system->getMixer()->stopHandle(_audioHandles[i]); -} +bool QuickTimeDecoder::loadStream(Common::SeekableReadStream *stream) { +	if (!Common::QuickTimeParser::parseStream(stream)) +		return false; -void QuickTimeDecoder::pauseVideoIntern(bool pause) { -	for (uint32 i = 0; i < _audioHandles.size(); i++) -		g_system->getMixer()->pauseHandle(_audioHandles[i], pause); +	init(); +	return true;  } -QuickTimeDecoder::VideoTrackHandler *QuickTimeDecoder::findNextVideoTrack() const { -	VideoTrackHandler *bestTrack = 0; -	uint32 bestTime = 0xffffffff; - -	for (uint32 i = 0; i < _handlers.size(); i++) { -		if (_handlers[i]->getTrackType() == TrackHandler::kTrackTypeVideo && !_handlers[i]->endOfTrack()) { -			VideoTrackHandler *track = (VideoTrackHandler *)_handlers[i]; -			uint32 time = 
track->getNextFrameStartTime(); +void QuickTimeDecoder::close() { +	VideoDecoder::close(); +	Common::QuickTimeParser::close(); -			if (time < bestTime) { -				bestTime = time; -				bestTrack = track; -			} -		} +	if (_scaledSurface) { +		_scaledSurface->free(); +		delete _scaledSurface; +		_scaledSurface = 0;  	} - -	return bestTrack;  }  const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() { -	if (!_nextVideoTrack) -		return 0; - -	const Graphics::Surface *frame = _nextVideoTrack->decodeNextFrame(); - -	if (!_setStartTime) { -		_startTime = g_system->getMillis(); -		_setStartTime = true; -	} - -	_nextVideoTrack = findNextVideoTrack(); -	_needUpdate = false; +	const Graphics::Surface *frame = VideoDecoder::decodeNextFrame();  	// Update audio buffers too  	// (needs to be done after we find the next track) @@ -166,138 +110,7 @@ const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {  	return frame;  } -void QuickTimeDecoder::scaleSurface(const Graphics::Surface *src, Graphics::Surface *dst, Common::Rational scaleFactorX, Common::Rational scaleFactorY) { -	assert(src && dst); - -	for (int32 j = 0; j < dst->h; j++) -		for (int32 k = 0; k < dst->w; k++) -			memcpy(dst->getBasePtr(k, j), src->getBasePtr((k * scaleFactorX).toInt() , (j * scaleFactorY).toInt()), src->format.bytesPerPixel); -} - -bool QuickTimeDecoder::endOfVideo() const { -	if (!isVideoLoaded()) -		return true; - -	for (uint32 i = 0; i < _handlers.size(); i++) -		if (!_handlers[i]->endOfTrack()) -			return false; - -	return true; -} - -uint32 QuickTimeDecoder::getTime() const { -	// Try to base sync off an active audio track -	for (uint32 i = 0; i < _audioHandles.size(); i++) { -		if (g_system->getMixer()->isSoundHandleActive(_audioHandles[i])) { -			uint32 time = g_system->getMixer()->getSoundElapsedTime(_audioHandles[i]) + _audioStartOffset.msecs(); -			if (Audio::Timestamp(time, 1000) < _audioTracks[i]->getLength()) -				return time; -		} -	} - -	// Just use time elapsed since the 
beginning -	return SeekableVideoDecoder::getTime(); -} - -uint32 QuickTimeDecoder::getTimeToNextFrame() const { -	if (_needUpdate) -		return 0; - -	if (_nextVideoTrack) { -		uint32 nextFrameStartTime = _nextVideoTrack->getNextFrameStartTime(); - -		if (nextFrameStartTime == 0) -			return 0; - -		// TODO: Add support for rate modification - -		uint32 elapsedTime = getTime(); - -		if (elapsedTime < nextFrameStartTime) -			return nextFrameStartTime - elapsedTime; -	} - -	return 0; -} - -bool QuickTimeDecoder::loadFile(const Common::String &filename) { -	if (!Common::QuickTimeParser::parseFile(filename)) -		return false; - -	init(); -	return true; -} - -bool QuickTimeDecoder::loadStream(Common::SeekableReadStream *stream) { -	if (!Common::QuickTimeParser::parseStream(stream)) -		return false; - -	init(); -	return true; -} - -void QuickTimeDecoder::updateVolume() { -	for (uint32 i = 0; i < _audioHandles.size(); i++) -		if (g_system->getMixer()->isSoundHandleActive(_audioHandles[i])) -			g_system->getMixer()->setChannelVolume(_audioHandles[i], getVolume()); -} - -void QuickTimeDecoder::updateBalance() { -	for (uint32 i = 0; i < _audioHandles.size(); i++) -		if (g_system->getMixer()->isSoundHandleActive(_audioHandles[i])) -			g_system->getMixer()->setChannelBalance(_audioHandles[i], getBalance()); -} - -void QuickTimeDecoder::init() { -	Audio::QuickTimeAudioDecoder::init(); - -	_startTime = 0; -	_setStartTime = false; - -	// Initialize all the audio tracks -	if (!_audioTracks.empty()) { -		_audioHandles.resize(_audioTracks.size()); - -		for (uint32 i = 0; i < _audioTracks.size(); i++) -			_handlers.push_back(new AudioTrackHandler(this, _audioTracks[i])); -	} - -	// Initialize all the video tracks -	for (uint32 i = 0; i < _tracks.size(); i++) { -		if (_tracks[i]->codecType == CODEC_TYPE_VIDEO) { -			for (uint32 j = 0; j < _tracks[i]->sampleDescs.size(); j++) -				((VideoSampleDesc *)_tracks[i]->sampleDescs[j])->initCodec(); - -			_handlers.push_back(new 
VideoTrackHandler(this, _tracks[i])); -		} -	} - -	// Prepare the first video track -	_nextVideoTrack = findNextVideoTrack(); - -	if (_nextVideoTrack) { -		if (_scaleFactorX != 1 || _scaleFactorY != 1) { -			// We have to take the scale into consideration when setting width/height -			_width = (_nextVideoTrack->getWidth() / _scaleFactorX).toInt(); -			_height = (_nextVideoTrack->getHeight() / _scaleFactorY).toInt(); -		} else { -			_width = _nextVideoTrack->getWidth().toInt(); -			_height = _nextVideoTrack->getHeight().toInt(); -		} - -		_needUpdate = true; -	} else { -		_needUpdate = false; -	} - -	// Now start any audio -	if (!_audioTracks.empty()) { -		startAudio(); -		_audioStartOffset = Audio::Timestamp(0); -	} -} - -Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(Track *track, uint32 format) { +Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(Common::QuickTimeParser::Track *track, uint32 format) {  	if (track->codecType == CODEC_TYPE_VIDEO) {  		debug(0, "Video Codec FourCC: \'%s\'", tag2str(format)); @@ -395,61 +208,52 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(Track *tra  	return Audio::QuickTimeAudioDecoder::readSampleDesc(track, format);  } -void QuickTimeDecoder::close() { -	stopAudio(); -	freeAllTrackHandlers(); - -	if (_scaledSurface) { -		_scaledSurface->free(); -		delete _scaledSurface; -		_scaledSurface = 0; -	} - -	_width = _height = 0; - -	Common::QuickTimeParser::close(); -	SeekableVideoDecoder::reset(); -} - -void QuickTimeDecoder::freeAllTrackHandlers() { -	for (uint32 i = 0; i < _handlers.size(); i++) -		delete _handlers[i]; - -	_handlers.clear(); -} +void QuickTimeDecoder::init() { +	Audio::QuickTimeAudioDecoder::init(); -void QuickTimeDecoder::seekToTime(const Audio::Timestamp &time) { -	stopAudio(); -	_audioStartOffset = time; +	// Initialize all the audio tracks +	for (uint32 i = 0; i < _audioTracks.size(); i++) +		addTrack(new AudioTrackHandler(this, 
_audioTracks[i])); -	// Sets all tracks to this time -	for (uint32 i = 0; i < _handlers.size(); i++) -		_handlers[i]->seekToTime(time); +	// Initialize all the video tracks +	Common::Array<Common::QuickTimeParser::Track *> &tracks = Common::QuickTimeParser::_tracks; +	for (uint32 i = 0; i < tracks.size(); i++) { +		if (tracks[i]->codecType == CODEC_TYPE_VIDEO) { +			for (uint32 j = 0; j < tracks[i]->sampleDescs.size(); j++) +				((VideoSampleDesc *)tracks[i]->sampleDescs[j])->initCodec(); -	startAudio(); +			addTrack(new VideoTrackHandler(this, tracks[i])); +		} +	} -	// Reset our start time -	_startTime = g_system->getMillis() - time.msecs(); -	_setStartTime = true; -	resetPauseStartTime(); +	// Prepare the first video track +	VideoTrackHandler *nextVideoTrack = (VideoTrackHandler *)findNextVideoTrack(); -	// Reset the next video track too -	_nextVideoTrack = findNextVideoTrack(); -	_needUpdate = _nextVideoTrack != 0; +	if (nextVideoTrack) { +		if (_scaleFactorX != 1 || _scaleFactorY != 1) { +			// We have to take the scale into consideration when setting width/height +			_width = (nextVideoTrack->getScaledWidth() / _scaleFactorX).toInt(); +			_height = (nextVideoTrack->getScaledHeight() / _scaleFactorY).toInt(); +		} else { +			_width = nextVideoTrack->getWidth(); +			_height = nextVideoTrack->getHeight(); +		} +	}  }  void QuickTimeDecoder::updateAudioBuffer() {  	// Updates the audio buffers for all audio tracks -	for (uint32 i = 0; i < _handlers.size(); i++) -		if (_handlers[i]->getTrackType() == TrackHandler::kTrackTypeAudio) -			((AudioTrackHandler *)_handlers[i])->updateBuffer(); +	for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++) +		if ((*it)->getTrackType() == VideoDecoder::Track::kTrackTypeAudio) +			((AudioTrackHandler *)*it)->updateBuffer();  } -Graphics::PixelFormat QuickTimeDecoder::getPixelFormat() const { -	if (_nextVideoTrack) -		return _nextVideoTrack->getPixelFormat(); +void QuickTimeDecoder::scaleSurface(const 
Graphics::Surface *src, Graphics::Surface *dst, const Common::Rational &scaleFactorX, const Common::Rational &scaleFactorY) { +	assert(src && dst); -	return Graphics::PixelFormat(); +	for (int32 j = 0; j < dst->h; j++) +		for (int32 k = 0; k < dst->w; k++) +			memcpy(dst->getBasePtr(k, j), src->getBasePtr((k * scaleFactorX).toInt() , (j * scaleFactorY).toInt()), src->format.bytesPerPixel);  }  QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) { @@ -504,25 +308,8 @@ void QuickTimeDecoder::VideoSampleDesc::initCodec() {  	}  } -bool QuickTimeDecoder::endOfVideoTracks() const { -	for (uint32 i = 0; i < _handlers.size(); i++) -		if (_handlers[i]->getTrackType() == TrackHandler::kTrackTypeVideo && !_handlers[i]->endOfTrack()) -			return false; - -	return true; -} - -QuickTimeDecoder::TrackHandler::TrackHandler(QuickTimeDecoder *decoder, Track *parent) : _decoder(decoder), _parent(parent), _fd(_decoder->_fd) { -	_curEdit = 0; -} - -bool QuickTimeDecoder::TrackHandler::endOfTrack() { -	// A track is over when we've finished going through all edits -	return _curEdit == _parent->editCount; -} -  QuickTimeDecoder::AudioTrackHandler::AudioTrackHandler(QuickTimeDecoder *decoder, QuickTimeAudioTrack *audioTrack) -		: TrackHandler(decoder, audioTrack->getParent()), _audioTrack(audioTrack) { +		: _decoder(decoder), _audioTrack(audioTrack) {  }  void QuickTimeDecoder::AudioTrackHandler::updateBuffer() { @@ -532,21 +319,20 @@ void QuickTimeDecoder::AudioTrackHandler::updateBuffer() {  		_audioTrack->queueAudio(Audio::Timestamp(_decoder->getTimeToNextFrame() + 500, 1000));  } -bool QuickTimeDecoder::AudioTrackHandler::endOfTrack() { -	return _audioTrack->endOfData(); -} - -void QuickTimeDecoder::AudioTrackHandler::seekToTime(Audio::Timestamp time) { -	_audioTrack->seek(time); +Audio::SeekableAudioStream 
*QuickTimeDecoder::AudioTrackHandler::getSeekableAudioStream() const { +	return _audioTrack;  } -QuickTimeDecoder::VideoTrackHandler::VideoTrackHandler(QuickTimeDecoder *decoder, Common::QuickTimeParser::Track *parent) : TrackHandler(decoder, parent) { +QuickTimeDecoder::VideoTrackHandler::VideoTrackHandler(QuickTimeDecoder *decoder, Common::QuickTimeParser::Track *parent) : _decoder(decoder), _parent(parent) { +	_curEdit = 0;  	enterNewEditList(false);  	_holdNextFrameStartTime = false;  	_curFrame = -1;  	_durationOverride = -1;  	_scaledSurface = 0; +	_curPalette = 0; +	_dirtyPalette = false;  }  QuickTimeDecoder::VideoTrackHandler::~VideoTrackHandler() { @@ -556,6 +342,88 @@ QuickTimeDecoder::VideoTrackHandler::~VideoTrackHandler() {  	}  } +bool QuickTimeDecoder::VideoTrackHandler::endOfTrack() const { +	// A track is over when we've finished going through all edits +	return _curEdit == _parent->editCount; +} + +bool QuickTimeDecoder::VideoTrackHandler::seek(const Audio::Timestamp &requestedTime) { +	// First, figure out what edit we're in +	Audio::Timestamp time = requestedTime.convertToFramerate(_parent->timeScale); + +	// Continue until we get to where we need to be +	for (_curEdit = 0; !endOfTrack(); _curEdit++) +		if ((uint32)time.totalNumberOfFrames() >= getCurEditTimeOffset() && (uint32)time.totalNumberOfFrames() < getCurEditTimeOffset() + getCurEditTrackDuration()) +			break; + +	// This track is done +	if (endOfTrack()) +		return true; + +	enterNewEditList(false); + +	// One extra check for the end of a track +	if (endOfTrack()) +		return true; + +	// Now we're in the edit and need to figure out what frame we need +	while (getRateAdjustedFrameTime() < (uint32)time.totalNumberOfFrames()) { +		_curFrame++; +		if (_durationOverride >= 0) { +			_nextFrameStartTime += _durationOverride; +			_durationOverride = -1; +		} else { +			_nextFrameStartTime += getFrameDuration(); +		} +	} + +	// All that's left is to figure out what our starting time is going to 
be +	// Compare the starting point for the frame to where we need to be +	_holdNextFrameStartTime = getRateAdjustedFrameTime() != (uint32)time.totalNumberOfFrames(); + +	// If we went past the time, go back a frame +	if (_holdNextFrameStartTime) +		_curFrame--; + +	// Handle the keyframe here +	int32 destinationFrame = _curFrame + 1; + +	assert(destinationFrame < (int32)_parent->frameCount); +	_curFrame = findKeyFrame(destinationFrame) - 1; +	while (_curFrame < destinationFrame - 1) +		bufferNextFrame(); + +	return true; +} + +Audio::Timestamp QuickTimeDecoder::VideoTrackHandler::getDuration() const { +	return Audio::Timestamp(0, _parent->duration, _decoder->_timeScale); +} + +uint16 QuickTimeDecoder::VideoTrackHandler::getWidth() const { +	return getScaledWidth().toInt(); +} + +uint16 QuickTimeDecoder::VideoTrackHandler::getHeight() const { +	return getScaledHeight().toInt(); +} + +Graphics::PixelFormat QuickTimeDecoder::VideoTrackHandler::getPixelFormat() const { +	return ((VideoSampleDesc *)_parent->sampleDescs[0])->_videoCodec->getPixelFormat(); +} + +int QuickTimeDecoder::VideoTrackHandler::getFrameCount() const { +	return _parent->frameCount; +} + +uint32 QuickTimeDecoder::VideoTrackHandler::getNextFrameStartTime() const { +	if (endOfTrack()) +		return 0; + +	// Convert to milliseconds so the tracks can be compared +	return getRateAdjustedFrameTime() * 1000 / _parent->timeScale; +} +  const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame() {  	if (endOfTrack())  		return 0; @@ -586,7 +454,7 @@ const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame()  	if (frame && (_parent->scaleFactorX != 1 || _parent->scaleFactorY != 1)) {  		if (!_scaledSurface) {  			_scaledSurface = new Graphics::Surface(); -			_scaledSurface->create(getWidth().toInt(), getHeight().toInt(), getPixelFormat()); +			_scaledSurface->create(getScaledWidth().toInt(), getScaledHeight().toInt(), getPixelFormat());  		}  		
_decoder->scaleSurface(frame, _scaledSurface, _parent->scaleFactorX, _parent->scaleFactorY); @@ -596,6 +464,85 @@ const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame()  	return frame;  } +Common::Rational QuickTimeDecoder::VideoTrackHandler::getScaledWidth() const { +	return Common::Rational(_parent->width) / _parent->scaleFactorX; +} + +Common::Rational QuickTimeDecoder::VideoTrackHandler::getScaledHeight() const { +	return Common::Rational(_parent->height) / _parent->scaleFactorY; +} + +Common::SeekableReadStream *QuickTimeDecoder::VideoTrackHandler::getNextFramePacket(uint32 &descId) { +	// First, we have to track down which chunk holds the sample and which sample in the chunk contains the frame we are looking for. +	int32 totalSampleCount = 0; +	int32 sampleInChunk = 0; +	int32 actualChunk = -1; +	uint32 sampleToChunkIndex = 0; + +	for (uint32 i = 0; i < _parent->chunkCount; i++) { +		if (sampleToChunkIndex < _parent->sampleToChunkCount && i >= _parent->sampleToChunk[sampleToChunkIndex].first) +			sampleToChunkIndex++; + +		totalSampleCount += _parent->sampleToChunk[sampleToChunkIndex - 1].count; + +		if (totalSampleCount > _curFrame) { +			actualChunk = i; +			descId = _parent->sampleToChunk[sampleToChunkIndex - 1].id; +			sampleInChunk = _parent->sampleToChunk[sampleToChunkIndex - 1].count - totalSampleCount + _curFrame; +			break; +		} +	} + +	if (actualChunk < 0) { +		warning("Could not find data for frame %d", _curFrame); +		return 0; +	} + +	// Next seek to that frame +	Common::SeekableReadStream *stream = _decoder->_fd; +	stream->seek(_parent->chunkOffsets[actualChunk]); + +	// Then, if the chunk holds more than one frame, seek to where the frame we want is located +	for (int32 i = _curFrame - sampleInChunk; i < _curFrame; i++) { +		if (_parent->sampleSize != 0) +			stream->skip(_parent->sampleSize); +		else +			stream->skip(_parent->sampleSizes[i]); +	} + +	// Finally, read in the raw data for the frame +	//debug("Frame 
Data[%d]: Offset = %d, Size = %d", _curFrame, stream->pos(), _parent->sampleSizes[_curFrame]); + +	if (_parent->sampleSize != 0) +		return stream->readStream(_parent->sampleSize); + +	return stream->readStream(_parent->sampleSizes[_curFrame]); +} + +uint32 QuickTimeDecoder::VideoTrackHandler::getFrameDuration() { +	uint32 curFrameIndex = 0; +	for (int32 i = 0; i < _parent->timeToSampleCount; i++) { +		curFrameIndex += _parent->timeToSample[i].count; +		if ((uint32)_curFrame < curFrameIndex) { +			// Ok, now we have what duration this frame has. +			return _parent->timeToSample[i].duration; +		} +	} + +	// This should never occur +	error("Cannot find duration for frame %d", _curFrame); +	return 0; +} + +uint32 QuickTimeDecoder::VideoTrackHandler::findKeyFrame(uint32 frame) const { +	for (int i = _parent->keyframeCount - 1; i >= 0; i--) +		if (_parent->keyframes[i] <= frame) +			return _parent->keyframes[i]; + +	// If none found, we'll assume the requested frame is a key frame +	return frame; +} +  void QuickTimeDecoder::VideoTrackHandler::enterNewEditList(bool bufferFrames) {  	// Bypass all empty edit lists first  	while (!endOfTrack() && _parent->editList[_curEdit].mediaTime == -1) @@ -667,166 +614,25 @@ const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::bufferNextFrame()  	if (entry->_videoCodec->containsPalette()) {  		// The codec itself contains a palette  		if (entry->_videoCodec->hasDirtyPalette()) { -			_decoder->_palette = entry->_videoCodec->getPalette(); -			_decoder->_dirtyPalette = true; +			_curPalette = entry->_videoCodec->getPalette(); +			_dirtyPalette = true;  		}  	} else {  		// Check if the video description has been updated  		byte *palette = entry->_palette; -		if (palette !=_decoder-> _palette) { -			_decoder->_palette = palette; -			_decoder->_dirtyPalette = true; -		} -	} - -	return frame; -} - -uint32 QuickTimeDecoder::VideoTrackHandler::getNextFrameStartTime() { -	if (endOfTrack()) -		return 0; - -	// Convert to milliseconds 
so the tracks can be compared -	return getRateAdjustedFrameTime() * 1000 / _parent->timeScale; -} - -uint32 QuickTimeDecoder::VideoTrackHandler::getFrameCount() { -	return _parent->frameCount; -} - -uint32 QuickTimeDecoder::VideoTrackHandler::getFrameDuration() { -	uint32 curFrameIndex = 0; -	for (int32 i = 0; i < _parent->timeToSampleCount; i++) { -		curFrameIndex += _parent->timeToSample[i].count; -		if ((uint32)_curFrame < curFrameIndex) { -			// Ok, now we have what duration this frame has. -			return _parent->timeToSample[i].duration; -		} -	} - -	// This should never occur -	error("Cannot find duration for frame %d", _curFrame); -	return 0; -} - -Common::SeekableReadStream *QuickTimeDecoder::VideoTrackHandler::getNextFramePacket(uint32 &descId) { -	// First, we have to track down which chunk holds the sample and which sample in the chunk contains the frame we are looking for. -	int32 totalSampleCount = 0; -	int32 sampleInChunk = 0; -	int32 actualChunk = -1; -	uint32 sampleToChunkIndex = 0; - -	for (uint32 i = 0; i < _parent->chunkCount; i++) { -		if (sampleToChunkIndex < _parent->sampleToChunkCount && i >= _parent->sampleToChunk[sampleToChunkIndex].first) -			sampleToChunkIndex++; - -		totalSampleCount += _parent->sampleToChunk[sampleToChunkIndex - 1].count; - -		if (totalSampleCount > _curFrame) { -			actualChunk = i; -			descId = _parent->sampleToChunk[sampleToChunkIndex - 1].id; -			sampleInChunk = _parent->sampleToChunk[sampleToChunkIndex - 1].count - totalSampleCount + _curFrame; -			break; +		if (palette != _curPalette) { +			_curPalette = palette; +			_dirtyPalette = true;  		}  	} -	if (actualChunk < 0) { -		warning("Could not find data for frame %d", _curFrame); -		return 0; -	} - -	// Next seek to that frame -	_fd->seek(_parent->chunkOffsets[actualChunk]); - -	// Then, if the chunk holds more than one frame, seek to where the frame we want is located -	for (int32 i = _curFrame - sampleInChunk; i < _curFrame; i++) { -		if (_parent->sampleSize != 0) -	
		_fd->skip(_parent->sampleSize); -		else -			_fd->skip(_parent->sampleSizes[i]); -	} - -	// Finally, read in the raw data for the frame -	//debug("Frame Data[%d]: Offset = %d, Size = %d", _curFrame, _fd->pos(), _parent->sampleSizes[_curFrame]); - -	if (_parent->sampleSize != 0) -		return _fd->readStream(_parent->sampleSize); - -	return _fd->readStream(_parent->sampleSizes[_curFrame]); -} - -uint32 QuickTimeDecoder::VideoTrackHandler::findKeyFrame(uint32 frame) const { -	for (int i = _parent->keyframeCount - 1; i >= 0; i--) -		if (_parent->keyframes[i] <= frame) -			return _parent->keyframes[i]; - -	// If none found, we'll assume the requested frame is a key frame  	return frame;  } -void QuickTimeDecoder::VideoTrackHandler::seekToTime(Audio::Timestamp time) { -	// First, figure out what edit we're in -	time = time.convertToFramerate(_parent->timeScale); - -	// Continue until we get to where we need to be -	for (_curEdit = 0; !endOfTrack(); _curEdit++) -		if ((uint32)time.totalNumberOfFrames() >= getCurEditTimeOffset() && (uint32)time.totalNumberOfFrames() < getCurEditTimeOffset() + getCurEditTrackDuration()) -			break; - -	// This track is done -	if (endOfTrack()) -		return; - -	enterNewEditList(false); - -	// One extra check for the end of a track -	if (endOfTrack()) -		return; - -	// Now we're in the edit and need to figure out what frame we need -	while (getRateAdjustedFrameTime() < (uint32)time.totalNumberOfFrames()) { -		_curFrame++; -		if (_durationOverride >= 0) { -			_nextFrameStartTime += _durationOverride; -			_durationOverride = -1; -		} else { -			_nextFrameStartTime += getFrameDuration(); -		} -	} - -	// All that's left is to figure out what our starting time is going to be -	// Compare the starting point for the frame to where we need to be -	_holdNextFrameStartTime = getRateAdjustedFrameTime() != (uint32)time.totalNumberOfFrames(); - -	// If we went past the time, go back a frame -	if (_holdNextFrameStartTime) -		_curFrame--; - -	// Handle the 
keyframe here -	int32 destinationFrame = _curFrame + 1; - -	assert(destinationFrame < (int32)_parent->frameCount); -	_curFrame = findKeyFrame(destinationFrame) - 1; -	while (_curFrame < destinationFrame - 1) -		bufferNextFrame(); -} - -Common::Rational QuickTimeDecoder::VideoTrackHandler::getWidth() const { -	return Common::Rational(_parent->width) / _parent->scaleFactorX; -} - -Common::Rational QuickTimeDecoder::VideoTrackHandler::getHeight() const { -	return Common::Rational(_parent->height) / _parent->scaleFactorY; -} - -Graphics::PixelFormat QuickTimeDecoder::VideoTrackHandler::getPixelFormat() const { -	return ((VideoSampleDesc *)_parent->sampleDescs[0])->_videoCodec->getPixelFormat(); -} -  uint32 QuickTimeDecoder::VideoTrackHandler::getRateAdjustedFrameTime() const {  	// Figure out what time the next frame is at taking the edit list rate into account -	uint32 convertedTime =  (Common::Rational(_nextFrameStartTime - getCurEditTimeOffset()) / _parent->editList[_curEdit].mediaRate).toInt(); +	uint32 convertedTime = (Common::Rational(_nextFrameStartTime - getCurEditTimeOffset()) / _parent->editList[_curEdit].mediaRate).toInt();  	return convertedTime + getCurEditTimeOffset();  } diff --git a/video/qt_decoder.h b/video/qt_decoder.h index ce32562d64..71d33711a6 100644 --- a/video/qt_decoder.h +++ b/video/qt_decoder.h @@ -31,16 +31,17 @@  #ifndef VIDEO_QT_DECODER_H  #define VIDEO_QT_DECODER_H -#include "audio/mixer.h"  #include "audio/decoders/quicktime_intern.h"  #include "common/scummsys.h" -#include "common/rational.h" -#include "graphics/pixelformat.h"  #include "video/video_decoder.h"  namespace Common { -	class Rational; +class Rational; +} + +namespace Graphics { +struct PixelFormat;  }  namespace Video { @@ -54,68 +55,33 @@ class Codec;   *  - mohawk   *  - sci   */ -class QuickTimeDecoder : public SeekableVideoDecoder, public Audio::QuickTimeAudioDecoder { +class QuickTimeDecoder : public VideoDecoder, public Audio::QuickTimeAudioDecoder {  public:  	
QuickTimeDecoder();  	virtual ~QuickTimeDecoder(); -	/** -	 * Returns the width of the video -	 * @return the width of the video -	 */ -	uint16 getWidth() const { return _width; } - -	/** -	 * Returns the height of the video -	 * @return the height of the video -	 */ -	uint16 getHeight() const { return _height; } - -	/** -	 * Returns the amount of frames in the video -	 * @return the amount of frames in the video -	 */ -	uint32 getFrameCount() const; - -	/** -	 * Load a video file -	 * @param filename	the filename to load -	 */  	bool loadFile(const Common::String &filename); - -	/** -	 * Load a QuickTime video file from a SeekableReadStream -	 * @param stream	the stream to load -	 */  	bool loadStream(Common::SeekableReadStream *stream); - -	/** -	 * Close a QuickTime encoded video file -	 */  	void close(); +	uint16 getWidth() const { return _width; } +	uint16 getHeight() const { return _height; } +	const Graphics::Surface *decodeNextFrame(); +	Audio::Timestamp getDuration() const { return Audio::Timestamp(0, _duration, _timeScale); } -	/** -	 * Returns the palette of the video -	 * @return the palette of the video -	 */ -	const byte *getPalette() { _dirtyPalette = false; return _palette; } -	bool hasDirtyPalette() const { return _dirtyPalette; } +protected: +	Common::QuickTimeParser::SampleDesc *readSampleDesc(Common::QuickTimeParser::Track *track, uint32 format); -	int32 getCurFrame() const; +private: +	void init(); -	bool isVideoLoaded() const { return isOpen(); } -	const Graphics::Surface *decodeNextFrame(); -	bool endOfVideo() const; -	uint32 getTime() const; -	uint32 getTimeToNextFrame() const; -	Graphics::PixelFormat getPixelFormat() const; +	void updateAudioBuffer(); -	// SeekableVideoDecoder API -	void seekToFrame(uint32 frame); -	void seekToTime(const Audio::Timestamp &time); -	uint32 getDuration() const { return _duration * 1000 / _timeScale; } +	uint16 _width, _height; + +	Graphics::Surface *_scaledSurface; +	void scaleSurface(const Graphics::Surface 
*src, Graphics::Surface *dst, +			const Common::Rational &scaleFactorX, const Common::Rational &scaleFactorY); -protected:  	class VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {  	public:  		VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag); @@ -131,110 +97,59 @@ protected:  		Codec *_videoCodec;  	}; -	Common::QuickTimeParser::SampleDesc *readSampleDesc(Track *track, uint32 format); - -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); - -private: -	void init(); - -	void startAudio(); -	void stopAudio(); -	void updateAudioBuffer(); -	void readNextAudioChunk(); -	Common::Array<Audio::SoundHandle> _audioHandles; -	Audio::Timestamp _audioStartOffset; - -	Codec *createCodec(uint32 codecTag, byte bitsPerPixel); -	uint32 findKeyFrame(uint32 frame) const; - -	bool _dirtyPalette; -	const byte *_palette; -	bool _setStartTime; -	bool _needUpdate; - -	uint16 _width, _height; - -	Graphics::Surface *_scaledSurface; -	void scaleSurface(const Graphics::Surface *src, Graphics::Surface *dst, -			Common::Rational scaleFactorX, Common::Rational scaleFactorY); - -	void pauseVideoIntern(bool pause); -	bool endOfVideoTracks() const; - -	// The TrackHandler is a class that wraps around a QuickTime Track -	// and handles playback in this decoder class. -	class TrackHandler { -	public: -		TrackHandler(QuickTimeDecoder *decoder, Track *parent); -		virtual ~TrackHandler() {} - -		enum TrackType { -			kTrackTypeAudio, -			kTrackTypeVideo -		}; - -		virtual TrackType getTrackType() const = 0; - -		virtual void seekToTime(Audio::Timestamp time) = 0; - -		virtual bool endOfTrack(); - -	protected: -		uint32 _curEdit; -		QuickTimeDecoder *_decoder; -		Common::SeekableReadStream *_fd; -		Track *_parent; -	}; -  	// The AudioTrackHandler is currently just a wrapper around some  	// QuickTimeDecoder functions. 
-	class AudioTrackHandler : public TrackHandler { +	class AudioTrackHandler : public SeekableAudioTrack {  	public:  		AudioTrackHandler(QuickTimeDecoder *decoder, QuickTimeAudioTrack *audioTrack); -		TrackType getTrackType() const { return kTrackTypeAudio; }  		void updateBuffer(); -		void seekToTime(Audio::Timestamp time); -		bool endOfTrack(); + +	protected: +		Audio::SeekableAudioStream *getSeekableAudioStream() const;  	private: +		QuickTimeDecoder *_decoder;  		QuickTimeAudioTrack *_audioTrack;  	};  	// The VideoTrackHandler is the bridge between the time of playback  	// and the media for the given track. It calculates when to start  	// tracks and at what rate to play the media using the edit list. -	class VideoTrackHandler : public TrackHandler { +	class VideoTrackHandler : public VideoTrack {  	public: -		VideoTrackHandler(QuickTimeDecoder *decoder, Track *parent); +		VideoTrackHandler(QuickTimeDecoder *decoder, Common::QuickTimeParser::Track *parent);  		~VideoTrackHandler(); -		TrackType getTrackType() const { return kTrackTypeVideo; } - -		const Graphics::Surface *decodeNextFrame(); - -		uint32 getNextFrameStartTime(); - -		uint32 getFrameCount(); - -		int32 getCurFrame() { return _curFrame; } +		bool endOfTrack() const; +		bool isSeekable() const { return true; } +		bool seek(const Audio::Timestamp &time); +		Audio::Timestamp getDuration() const; +		uint16 getWidth() const; +		uint16 getHeight() const;  		Graphics::PixelFormat getPixelFormat() const; +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const; +		uint32 getNextFrameStartTime() const; +		const Graphics::Surface *decodeNextFrame(); +		const byte *getPalette() const { _dirtyPalette = false; return _curPalette; } +		bool hasDirtyPalette() const { return _curPalette; } -		void seekToTime(Audio::Timestamp time); - -		Common::Rational getWidth() const; -		Common::Rational getHeight() const; +		Common::Rational getScaledWidth() const; +		Common::Rational getScaledHeight() 
const;  	private: +		QuickTimeDecoder *_decoder; +		Common::QuickTimeParser::Track *_parent; +		uint32 _curEdit;  		int32 _curFrame;  		uint32 _nextFrameStartTime;  		Graphics::Surface *_scaledSurface;  		bool _holdNextFrameStartTime;  		int32 _durationOverride; +		const byte *_curPalette; +		mutable bool _dirtyPalette;  		Common::SeekableReadStream *getNextFramePacket(uint32 &descId);  		uint32 getFrameDuration(); @@ -245,12 +160,6 @@ private:  		uint32 getCurEditTimeOffset() const;  		uint32 getCurEditTrackDuration() const;  	}; - -	Common::Array<TrackHandler *> _handlers; -	VideoTrackHandler *_nextVideoTrack; -	VideoTrackHandler *findNextVideoTrack() const; - -	void freeAllTrackHandlers();  };  } // End of namespace Video diff --git a/video/smk_decoder.cpp b/video/smk_decoder.cpp index 359f4cb9bd..bea65142a1 100644 --- a/video/smk_decoder.cpp +++ b/video/smk_decoder.cpp @@ -204,8 +204,7 @@ BigHuffmanTree::BigHuffmanTree(Common::BitStream &bs, int allocSize)  	delete _hiBytes;  } -BigHuffmanTree::~BigHuffmanTree() -{ +BigHuffmanTree::~BigHuffmanTree() {  	delete[] _tree;  } @@ -278,24 +277,17 @@ uint32 BigHuffmanTree::getCode(Common::BitStream &bs) {  	return v;  } -SmackerDecoder::SmackerDecoder(Audio::Mixer *mixer, Audio::Mixer::SoundType soundType) -	: _audioStarted(false), _audioStream(0), _mixer(mixer), _soundType(soundType) { -	_surface = 0; +SmackerDecoder::SmackerDecoder(Audio::Mixer::SoundType soundType) : _soundType(soundType) {  	_fileStream = 0; -	_dirtyPalette = false; +	_firstFrameStart = 0; +	_frameTypes = 0; +	_frameSizes = 0;  }  SmackerDecoder::~SmackerDecoder() {  	close();  } -uint32 SmackerDecoder::getTime() const { -	if (_audioStream && _audioStarted) -		return _mixer->getSoundElapsedTime(_audioHandle); - -	return FixedRateVideoDecoder::getTime(); -} -  bool SmackerDecoder::loadStream(Common::SeekableReadStream *stream) {  	close(); @@ -309,16 +301,17 @@ bool SmackerDecoder::loadStream(Common::SeekableReadStream *stream) {  	uint32 width = 
_fileStream->readUint32LE();  	uint32 height = _fileStream->readUint32LE(); -	_frameCount = _fileStream->readUint32LE(); -	int32 frameRate = _fileStream->readSint32LE(); - -	// framerate contains 2 digits after the comma, so 1497 is actually 14.97 fps -	if (frameRate > 0) -		_frameRate = Common::Rational(1000, frameRate); -	else if (frameRate < 0) -		_frameRate = Common::Rational(100000, -frameRate); +	uint32 frameCount = _fileStream->readUint32LE(); +	int32 frameDelay = _fileStream->readSint32LE(); + +	// frame rate contains 2 digits after the comma, so 1497 is actually 14.97 fps +	Common::Rational frameRate; +	if (frameDelay > 0) +		frameRate = Common::Rational(1000, frameDelay); +	else if (frameDelay < 0) +		frameRate = Common::Rational(100000, -frameDelay);  	else -		_frameRate = 1000; +		frameRate = 1000;  	// Flags are determined by which bit is set, which can be one of the following:  	// 0 - set to 1 if file contains a ring frame. @@ -328,6 +321,9 @@ bool SmackerDecoder::loadStream(Common::SeekableReadStream *stream) {      // before it is displayed.  	_header.flags = _fileStream->readUint32LE(); +	SmackerVideoTrack *videoTrack = createVideoTrack(width, height, frameCount, frameRate, _header.flags, _header.signature); +	addTrack(videoTrack); +  	// TODO: should we do any extra processing for Smacker files with ring frames?  	// TODO: should we do any extra processing for Y-doubled videos? 
Are they the @@ -374,92 +370,77 @@ bool SmackerDecoder::loadStream(Common::SeekableReadStream *stream) {  				warning("Unhandled Smacker v2 audio compression");  			if (i == 0) -				_audioStream = Audio::makeQueuingAudioStream(_header.audioInfo[0].sampleRate, _header.audioInfo[0].isStereo); +				addTrack(new SmackerAudioTrack(_header.audioInfo[i], _soundType));  		}  	}  	_header.dummy = _fileStream->readUint32LE(); -	_frameSizes = new uint32[_frameCount]; -	for (i = 0; i < _frameCount; ++i) +	_frameSizes = new uint32[frameCount]; +	for (i = 0; i < frameCount; ++i)  		_frameSizes[i] = _fileStream->readUint32LE(); -	_frameTypes = new byte[_frameCount]; -	for (i = 0; i < _frameCount; ++i) +	_frameTypes = new byte[frameCount]; +	for (i = 0; i < frameCount; ++i)  		_frameTypes[i] = _fileStream->readByte();  	byte *huffmanTrees = (byte *) malloc(_header.treesSize);  	_fileStream->read(huffmanTrees, _header.treesSize);  	Common::BitStream8LSB bs(new Common::MemoryReadStream(huffmanTrees, _header.treesSize, DisposeAfterUse::YES), true); +	videoTrack->readTrees(bs, _header.mMapSize, _header.mClrSize, _header.fullSize, _header.typeSize); -	_MMapTree = new BigHuffmanTree(bs, _header.mMapSize); -	_MClrTree = new BigHuffmanTree(bs, _header.mClrSize); -	_FullTree = new BigHuffmanTree(bs, _header.fullSize); -	_TypeTree = new BigHuffmanTree(bs, _header.typeSize); - -	_surface = new Graphics::Surface(); +	_firstFrameStart = _fileStream->pos(); -	// Height needs to be doubled if we have flags (Y-interlaced or Y-doubled) -	_surface->create(width, height * (_header.flags ? 2 : 1), Graphics::PixelFormat::createFormatCLUT8()); - -	memset(_palette, 0, 3 * 256);  	return true;  }  void SmackerDecoder::close() { -	if (!_fileStream) -		return; - -	if (_audioStream) { -		if (_audioStarted) { -			// The mixer will delete the stream. 
-			_mixer->stopHandle(_audioHandle); -			_audioStarted = false; -		} else { -			delete _audioStream; -		} -		_audioStream = 0; -	} +	VideoDecoder::close();  	delete _fileStream;  	_fileStream = 0; -	_surface->free(); -	delete _surface; -	_surface = 0; - -	delete _MMapTree; -	delete _MClrTree; -	delete _FullTree; -	delete _TypeTree; +	delete[] _frameTypes; +	_frameTypes = 0;  	delete[] _frameSizes; -	delete[] _frameTypes; +	_frameSizes = 0; +} + +bool SmackerDecoder::rewind() { +	// Call the parent method to rewind the tracks first +	if (!VideoDecoder::rewind()) +		return false; -	reset(); +	// And seek back to where the first frame begins +	_fileStream->seek(_firstFrameStart); +	return true;  } -const Graphics::Surface *SmackerDecoder::decodeNextFrame() { +void SmackerDecoder::readNextPacket() { +	SmackerVideoTrack *videoTrack = (SmackerVideoTrack *)getTrack(0); + +	if (videoTrack->endOfTrack()) +		return; + +	videoTrack->increaseCurFrame(); +  	uint i;  	uint32 chunkSize = 0;  	uint32 dataSizeUnpacked = 0;  	uint32 startPos = _fileStream->pos(); -	_curFrame++; -  	// Check if we got a frame with palette data, and  	// call back the virtual setPalette function to set  	// the current palette -	if (_frameTypes[_curFrame] & 1) { -		unpackPalette(); -		_dirtyPalette = true; -	} +	if (_frameTypes[videoTrack->getCurFrame()] & 1) +		videoTrack->unpackPalette(_fileStream);  	// Load audio tracks  	for (i = 0; i < 7; ++i) { -		if (!(_frameTypes[_curFrame] & (2 << i))) +		if (!(_frameTypes[videoTrack->getCurFrame()] & (2 << i)))  			continue;  		chunkSize = _fileStream->readUint32LE(); @@ -475,29 +456,109 @@ const Graphics::Surface *SmackerDecoder::decodeNextFrame() {  		handleAudioTrack(i, chunkSize, dataSizeUnpacked);  	} -	uint32 frameSize = _frameSizes[_curFrame] & ~3; -//	uint32 remainder =  _frameSizes[_curFrame] & 3; +	uint32 frameSize = _frameSizes[videoTrack->getCurFrame()] & ~3; +//	uint32 remainder =  _frameSizes[videoTrack->getCurFrame()] & 3;  	if 
(_fileStream->pos() - startPos > frameSize)  		error("Smacker actual frame size exceeds recorded frame size");  	uint32 frameDataSize = frameSize - (_fileStream->pos() - startPos); -	_frameData = (byte *)malloc(frameDataSize + 1); +	byte *frameData = (byte *)malloc(frameDataSize + 1);  	// Padding to keep the BigHuffmanTrees from reading past the data end -	_frameData[frameDataSize] = 0x00; +	frameData[frameDataSize] = 0x00; + +	_fileStream->read(frameData, frameDataSize); + +	Common::BitStream8LSB bs(new Common::MemoryReadStream(frameData, frameDataSize + 1, DisposeAfterUse::YES), true); +	videoTrack->decodeFrame(bs); + +	_fileStream->seek(startPos + frameSize); +} -	_fileStream->read(_frameData, frameDataSize); +void SmackerDecoder::handleAudioTrack(byte track, uint32 chunkSize, uint32 unpackedSize) { +	if (_header.audioInfo[track].hasAudio && chunkSize > 0 && track == 0) { +		// Get the audio track, which start at offset 1 (first track is video) +		SmackerAudioTrack *audioTrack = (SmackerAudioTrack *)getTrack(track + 1); -	Common::BitStream8LSB bs(new Common::MemoryReadStream(_frameData, frameDataSize + 1, DisposeAfterUse::YES), true); +		// If it's track 0, play the audio data +		byte *soundBuffer = (byte *)malloc(chunkSize + 1); +		// Padding to keep the SmallHuffmanTrees from reading past the data end +		soundBuffer[chunkSize] = 0x00; +		_fileStream->read(soundBuffer, chunkSize); + +		if (_header.audioInfo[track].compression == kCompressionRDFT || _header.audioInfo[track].compression == kCompressionDCT) { +			// TODO: Compressed audio (Bink RDFT/DCT encoded) +			free(soundBuffer); +			return; +		} else if (_header.audioInfo[track].compression == kCompressionDPCM) { +			// Compressed audio (Huffman DPCM encoded) +			audioTrack->queueCompressedBuffer(soundBuffer, chunkSize + 1, unpackedSize); +			free(soundBuffer); +		} else { +			// Uncompressed audio (PCM) +			audioTrack->queuePCM(soundBuffer, chunkSize); +		} +	} else { +		// Ignore the rest of the audio 
tracks, if they exist +		// TODO: Are there any Smacker videos with more than one audio stream? +		// If yes, we should play the rest of the audio streams as well +		if (chunkSize > 0) +			_fileStream->skip(chunkSize); +	} +} + +SmackerDecoder::SmackerVideoTrack::SmackerVideoTrack(uint32 width, uint32 height, uint32 frameCount, const Common::Rational &frameRate, uint32 flags, uint32 signature) { +	_surface = new Graphics::Surface(); +	_surface->create(width, height * (flags ? 2 : 1), Graphics::PixelFormat::createFormatCLUT8()); +	_frameCount = frameCount; +	_frameRate = frameRate; +	_flags = flags; +	_signature = signature; +	_curFrame = -1; +	_dirtyPalette = false; +	_MMapTree = _MClrTree = _FullTree = _TypeTree = 0; +	memset(_palette, 0, 3 * 256); +} + +SmackerDecoder::SmackerVideoTrack::~SmackerVideoTrack() { +	_surface->free(); +	delete _surface; + +	delete _MMapTree; +	delete _MClrTree; +	delete _FullTree; +	delete _TypeTree; +} + +uint16 SmackerDecoder::SmackerVideoTrack::getWidth() const { +	return _surface->w; +} + +uint16 SmackerDecoder::SmackerVideoTrack::getHeight() const { +	return _surface->h; +} + +Graphics::PixelFormat SmackerDecoder::SmackerVideoTrack::getPixelFormat() const { +	return _surface->format; +} + +void SmackerDecoder::SmackerVideoTrack::readTrees(Common::BitStream &bs, uint32 mMapSize, uint32 mClrSize, uint32 fullSize, uint32 typeSize) { +	_MMapTree = new BigHuffmanTree(bs, mMapSize); +	_MClrTree = new BigHuffmanTree(bs, mClrSize); +	_FullTree = new BigHuffmanTree(bs, fullSize); +	_TypeTree = new BigHuffmanTree(bs, typeSize); +} + +void SmackerDecoder::SmackerVideoTrack::decodeFrame(Common::BitStream &bs) {  	_MMapTree->reset();  	_MClrTree->reset();  	_FullTree->reset();  	_TypeTree->reset();  	// Height needs to be doubled if we have flags (Y-interlaced or Y-doubled) -	uint doubleY = _header.flags ? 2 : 1; +	uint doubleY = _flags ? 
2 : 1;  	uint bw = getWidth() / 4;  	uint bh = getHeight() / doubleY / 4; @@ -508,6 +569,7 @@ const Graphics::Surface *SmackerDecoder::decodeNextFrame() {  	uint type, run, j, mode;  	uint32 p1, p2, clr, map;  	byte hi, lo; +	uint i;  	while (block < blocks) {  		type = _TypeTree->getCode(bs); @@ -536,7 +598,7 @@ const Graphics::Surface *SmackerDecoder::decodeNextFrame() {  			break;  		case SMK_BLOCK_FULL:  			// Smacker v2 has one mode, Smacker v4 has three -			if (_header.signature == MKTAG('S','M','K','2')) { +			if (_signature == MKTAG('S','M','K','2')) {  				mode = 0;  			} else {  				// 00 - mode 0 @@ -628,60 +690,81 @@ const Graphics::Surface *SmackerDecoder::decodeNextFrame() {  			break;  		}  	} +} -	_fileStream->seek(startPos + frameSize); +void SmackerDecoder::SmackerVideoTrack::unpackPalette(Common::SeekableReadStream *stream) { +	uint startPos = stream->pos(); +	uint32 len = 4 * stream->readByte(); -	if (_curFrame == 0) -		_startTime = g_system->getMillis(); +	byte *chunk = (byte *)malloc(len); +	stream->read(chunk, len); +	byte *p = chunk; -	return _surface; -} +	byte oldPalette[3 * 256]; +	memcpy(oldPalette, _palette, 3 * 256); -void SmackerDecoder::handleAudioTrack(byte track, uint32 chunkSize, uint32 unpackedSize) { -	if (_header.audioInfo[track].hasAudio && chunkSize > 0 && track == 0) { -		// If it's track 0, play the audio data -		byte *soundBuffer = (byte *)malloc(chunkSize + 1); -		// Padding to keep the SmallHuffmanTrees from reading past the data end -		soundBuffer[chunkSize] = 0x00; +	byte *pal = _palette; -		_fileStream->read(soundBuffer, chunkSize); +	int sz = 0; +	byte b0; +	while (sz < 256) { +		b0 = *p++; +		if (b0 & 0x80) {               // if top bit is 1 (0x80 = 10000000) +			sz += (b0 & 0x7f) + 1;     // get lower 7 bits + 1 (0x7f = 01111111) +			pal += 3 * ((b0 & 0x7f) + 1); +		} else if (b0 & 0x40) {        // if top 2 bits are 01 (0x40 = 01000000) +			byte c = (b0 & 0x3f) + 1;  // get lower 6 bits + 1 (0x3f = 00111111) +			
uint s = 3 * *p++; +			sz += c; -		if (_header.audioInfo[track].compression == kCompressionRDFT || _header.audioInfo[track].compression == kCompressionDCT) { -			// TODO: Compressed audio (Bink RDFT/DCT encoded) -			free(soundBuffer); -			return; -		} else if (_header.audioInfo[track].compression == kCompressionDPCM) { -			// Compressed audio (Huffman DPCM encoded) -			queueCompressedBuffer(soundBuffer, chunkSize + 1, unpackedSize, track); -			free(soundBuffer); -		} else { -			// Uncompressed audio (PCM) -			byte flags = 0; -			if (_header.audioInfo[track].is16Bits) -				flags = flags | Audio::FLAG_16BITS; -			if (_header.audioInfo[track].isStereo) -				flags = flags | Audio::FLAG_STEREO; - -			_audioStream->queueBuffer(soundBuffer, chunkSize, DisposeAfterUse::YES, flags); -			// The sound buffer will be deleted by QueuingAudioStream -		} +			while (c--) { +				*pal++ = oldPalette[s + 0]; +				*pal++ = oldPalette[s + 1]; +				*pal++ = oldPalette[s + 2]; +				s += 3; +			} +		} else {                       // top 2 bits are 00 +			sz++; +			// get the lower 6 bits for each component (0x3f = 00111111) +			byte b = b0 & 0x3f; +			byte g = (*p++) & 0x3f; +			byte r = (*p++) & 0x3f; -		if (!_audioStarted) { -			_mixer->playStream(_soundType, &_audioHandle, _audioStream, -1, getVolume(), getBalance()); -			_audioStarted = true; +			assert(g < 0xc0 && b < 0xc0); + +			// upscale to full 8-bit color values by multiplying by 4 +			*pal++ = b * 4; +			*pal++ = g * 4; +			*pal++ = r * 4;  		} -	} else { -		// Ignore the rest of the audio tracks, if they exist -		// TODO: Are there any Smacker videos with more than one audio stream? 
-		// If yes, we should play the rest of the audio streams as well -		if (chunkSize > 0) -			_fileStream->skip(chunkSize);  	} + +	stream->seek(startPos + len); +	free(chunk); + +	_dirtyPalette = true; +} + +SmackerDecoder::SmackerAudioTrack::SmackerAudioTrack(const AudioInfo &audioInfo, Audio::Mixer::SoundType soundType) : +		_audioInfo(audioInfo), _soundType(soundType) { +	_audioStream = Audio::makeQueuingAudioStream(_audioInfo.sampleRate, _audioInfo.isStereo); +} + +SmackerDecoder::SmackerAudioTrack::~SmackerAudioTrack() { +	delete _audioStream;  } -void SmackerDecoder::queueCompressedBuffer(byte *buffer, uint32 bufferSize, -		uint32 unpackedSize, int streamNum) { +bool SmackerDecoder::SmackerAudioTrack::rewind() { +	delete _audioStream; +	_audioStream = Audio::makeQueuingAudioStream(_audioInfo.sampleRate, _audioInfo.isStereo); +	return true; +} +Audio::AudioStream *SmackerDecoder::SmackerAudioTrack::getAudioStream() const { +	return _audioStream; +} + +void SmackerDecoder::SmackerAudioTrack::queueCompressedBuffer(byte *buffer, uint32 bufferSize, uint32 unpackedSize) {  	Common::BitStream8LSB audioBS(new Common::MemoryReadStream(buffer, bufferSize), true);  	bool dataPresent = audioBS.getBit(); @@ -689,9 +772,9 @@ void SmackerDecoder::queueCompressedBuffer(byte *buffer, uint32 bufferSize,  		return;  	bool isStereo = audioBS.getBit(); -	assert(isStereo == _header.audioInfo[streamNum].isStereo); +	assert(isStereo == _audioInfo.isStereo);  	bool is16Bits = audioBS.getBit(); -	assert(is16Bits == _header.audioInfo[streamNum].is16Bits); +	assert(is16Bits == _audioInfo.is16Bits);  	int numBytes = 1 * (isStereo ? 2 : 1) * (is16Bits ? 
2 : 1); @@ -759,74 +842,21 @@ void SmackerDecoder::queueCompressedBuffer(byte *buffer, uint32 bufferSize,  	for (int k = 0; k < numBytes; k++)  		delete audioTrees[k]; -	byte flags = 0; -	if (_header.audioInfo[0].is16Bits) -		flags = flags | Audio::FLAG_16BITS; -	if (_header.audioInfo[0].isStereo) -		flags = flags | Audio::FLAG_STEREO; -	_audioStream->queueBuffer(unpackedBuffer, unpackedSize, DisposeAfterUse::YES, flags); -	// unpackedBuffer will be deleted by QueuingAudioStream +	queuePCM(unpackedBuffer, unpackedSize);  } -void SmackerDecoder::unpackPalette() { -	uint startPos = _fileStream->pos(); -	uint32 len = 4 * _fileStream->readByte(); - -	byte *chunk = (byte *)malloc(len); -	_fileStream->read(chunk, len); -	byte *p = chunk; - -	byte oldPalette[3*256]; -	memcpy(oldPalette, _palette, 3 * 256); - -	byte *pal = _palette; - -	int sz = 0; -	byte b0; -	while (sz < 256) { -		b0 = *p++; -		if (b0 & 0x80) {               // if top bit is 1 (0x80 = 10000000) -			sz += (b0 & 0x7f) + 1;     // get lower 7 bits + 1 (0x7f = 01111111) -			pal += 3 * ((b0 & 0x7f) + 1); -		} else if (b0 & 0x40) {        // if top 2 bits are 01 (0x40 = 01000000) -			byte c = (b0 & 0x3f) + 1;  // get lower 6 bits + 1 (0x3f = 00111111) -			uint s = 3 * *p++; -			sz += c; - -			while (c--) { -				*pal++ = oldPalette[s + 0]; -				*pal++ = oldPalette[s + 1]; -				*pal++ = oldPalette[s + 2]; -				s += 3; -			} -		} else {                       // top 2 bits are 00 -			sz++; -			// get the lower 6 bits for each component (0x3f = 00111111) -			byte b = b0 & 0x3f; -			byte g = (*p++) & 0x3f; -			byte r = (*p++) & 0x3f; - -			assert(g < 0xc0 && b < 0xc0); - -			// upscale to full 8-bit color values by multiplying by 4 -			*pal++ = b * 4; -			*pal++ = g * 4; -			*pal++ = r * 4; -		} -	} - -	_fileStream->seek(startPos + len); -	free(chunk); -} +void SmackerDecoder::SmackerAudioTrack::queuePCM(byte *buffer, uint32 bufferSize) { +	byte flags = 0; +	if (_audioInfo.is16Bits) +		flags |= Audio::FLAG_16BITS; +	
if (_audioInfo.isStereo) +		flags |= Audio::FLAG_STEREO; -void SmackerDecoder::updateVolume() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelVolume(_audioHandle, getVolume()); +	_audioStream->queueBuffer(buffer, bufferSize, DisposeAfterUse::YES, flags);  } -void SmackerDecoder::updateBalance() { -	if (g_system->getMixer()->isSoundHandleActive(_audioHandle)) -		g_system->getMixer()->setChannelBalance(_audioHandle, getBalance()); +SmackerDecoder::SmackerVideoTrack *SmackerDecoder::createVideoTrack(uint32 width, uint32 height, uint32 frameCount, const Common::Rational &frameRate, uint32 flags, uint32 signature) const { +	return new SmackerVideoTrack(width, height, frameCount, frameRate, flags, signature);  }  } // End of namespace Video diff --git a/video/smk_decoder.h b/video/smk_decoder.h index 516882e7c8..7227238373 100644 --- a/video/smk_decoder.h +++ b/video/smk_decoder.h @@ -34,6 +34,7 @@ class QueuingAudioStream;  }  namespace Common { +class BitStream;  class SeekableReadStream;  } @@ -56,42 +57,72 @@ class BigHuffmanTree;   *  - sword2   *  - toon   */ -class SmackerDecoder : public FixedRateVideoDecoder { +class SmackerDecoder : public VideoDecoder {  public: -	SmackerDecoder(Audio::Mixer *mixer, -			Audio::Mixer::SoundType soundType = Audio::Mixer::kSFXSoundType); +	SmackerDecoder(Audio::Mixer::SoundType soundType = Audio::Mixer::kSFXSoundType);  	virtual ~SmackerDecoder(); -	bool loadStream(Common::SeekableReadStream *stream); +	virtual bool loadStream(Common::SeekableReadStream *stream);  	void close(); -	bool isVideoLoaded() const { return _fileStream != 0; } -	uint16 getWidth() const { return _surface->w; } -	uint16 getHeight() const { return _surface->h; } -	uint32 getFrameCount() const { return _frameCount; } -	uint32 getTime() const; -	const Graphics::Surface *decodeNextFrame(); -	Graphics::PixelFormat getPixelFormat() const { return Graphics::PixelFormat::createFormatCLUT8(); } -	const byte 
*getPalette() { _dirtyPalette = false; return _palette; } -	bool hasDirtyPalette() const { return _dirtyPalette; } -	virtual void handleAudioTrack(byte track, uint32 chunkSize, uint32 unpackedSize); +	bool rewind();  protected: -	Common::SeekableReadStream *_fileStream; +	void readNextPacket(); + +	virtual void handleAudioTrack(byte track, uint32 chunkSize, uint32 unpackedSize); -	// VideoDecoder API -	void updateVolume(); -	void updateBalance(); +	class SmackerVideoTrack : public FixedRateVideoTrack { +	public: +		SmackerVideoTrack(uint32 width, uint32 height, uint32 frameCount, const Common::Rational &frameRate, uint32 flags, uint32 signature); +		~SmackerVideoTrack(); -	// FixedRateVideoDecoder API -	Common::Rational getFrameRate() const { return _frameRate; } +		bool isRewindable() const { return true; } +		bool rewind() { _curFrame = -1; return true; } -protected: -	void unpackPalette(); -	// Possible runs of blocks -	uint getBlockRun(int index) { return (index <= 58) ? index + 1 : 128 << (index - 59); } -	void queueCompressedBuffer(byte *buffer, uint32 bufferSize, uint32 unpackedSize, int streamNum); +		uint16 getWidth() const; +		uint16 getHeight() const; +		Graphics::PixelFormat getPixelFormat() const; +		int getCurFrame() const { return _curFrame; } +		int getFrameCount() const { return _frameCount; } +		const Graphics::Surface *decodeNextFrame() { return _surface; } +		const byte *getPalette() const { _dirtyPalette = false; return _palette; } +		bool hasDirtyPalette() const { return _dirtyPalette; } + +		void readTrees(Common::BitStream &bs, uint32 mMapSize, uint32 mClrSize, uint32 fullSize, uint32 typeSize); +		void increaseCurFrame() { _curFrame++; } +		void decodeFrame(Common::BitStream &bs); +		void unpackPalette(Common::SeekableReadStream *stream); + +	protected: +		Common::Rational getFrameRate() const { return _frameRate; } + +		Graphics::Surface *_surface; + +	private: +		Common::Rational _frameRate; +		uint32 _flags, _signature; + +		byte 
_palette[3 * 256]; +		mutable bool _dirtyPalette; + +		int _curFrame; +		uint32 _frameCount; + +		BigHuffmanTree *_MMapTree; +		BigHuffmanTree *_MClrTree; +		BigHuffmanTree *_FullTree; +		BigHuffmanTree *_TypeTree; +		// Possible runs of blocks +		static uint getBlockRun(int index) { return (index <= 58) ? index + 1 : 128 << (index - 59); } +	}; + +	virtual SmackerVideoTrack *createVideoTrack(uint32 width, uint32 height, uint32 frameCount, const Common::Rational &frameRate, uint32 flags, uint32 signature) const; + +	Common::SeekableReadStream *_fileStream; + +private:  	enum AudioCompression {  		kCompressionNone,  		kCompressionDPCM, @@ -120,6 +151,28 @@ protected:  		uint32 dummy;  	} _header; +	class SmackerAudioTrack : public AudioTrack { +	public: +		SmackerAudioTrack(const AudioInfo &audioInfo, Audio::Mixer::SoundType soundType); +		~SmackerAudioTrack(); + +		bool isRewindable() const { return true; } +		bool rewind(); + +		Audio::Mixer::SoundType getSoundType() const { return _soundType; } + +		void queueCompressedBuffer(byte *buffer, uint32 bufferSize, uint32 unpackedSize); +		void queuePCM(byte *buffer, uint32 bufferSize); + +	protected: +		Audio::AudioStream *getAudioStream() const; + +	private: +		Audio::Mixer::SoundType _soundType; +		Audio::QueuingAudioStream *_audioStream; +		AudioInfo _audioInfo; +	}; +  	uint32 *_frameSizes;  	// The FrameTypes section of a Smacker file contains an array of bytes, where  	// the 8 bits of each byte describe the contents of the corresponding frame. @@ -127,25 +180,10 @@ protected:  	// and so on), so there can be up to 7 different audio tracks. 
When the lowest bit  	// (bit 0) is set, it denotes a frame that contains a palette record  	byte *_frameTypes; -	byte *_frameData; -	// The RGB palette -	byte _palette[3 * 256]; -	bool _dirtyPalette; -	Common::Rational _frameRate; -	uint32 _frameCount; -	Graphics::Surface *_surface; +	uint32 _firstFrameStart;  	Audio::Mixer::SoundType _soundType; -	Audio::Mixer *_mixer; -	bool _audioStarted; -	Audio::QueuingAudioStream *_audioStream; -	Audio::SoundHandle _audioHandle; - -	BigHuffmanTree *_MMapTree; -	BigHuffmanTree *_MClrTree; -	BigHuffmanTree *_FullTree; -	BigHuffmanTree *_TypeTree;  };  } // End of namespace Video diff --git a/video/theora_decoder.cpp b/video/theora_decoder.cpp new file mode 100644 index 0000000000..d7260469e6 --- /dev/null +++ b/video/theora_decoder.cpp @@ -0,0 +1,487 @@ +/* ScummVM - Graphic Adventure Engine + * + * ScummVM is the legal property of its developers, whose names + * are too numerous to list here. Please refer to the COPYRIGHT + * file distributed with this source distribution. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/* + * Source is based on the player example from libvorbis package, + * available at: http://svn.xiph.org/trunk/theora/examples/player_example.c + * + * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. 
+ * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS + * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE + * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. + * + * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009 + * by the Xiph.Org Foundation and contributors http://www.xiph.org/ + * + */ + +#include "video/theora_decoder.h" + +#include "audio/audiostream.h" +#include "audio/decoders/raw.h" +#include "common/stream.h" +#include "common/system.h" +#include "common/textconsole.h" +#include "common/util.h" +#include "graphics/pixelformat.h" +#include "graphics/yuv_to_rgb.h" + +namespace Video { + +TheoraDecoder::TheoraDecoder(Audio::Mixer::SoundType soundType) : _soundType(soundType) { +	_fileStream = 0; + +	_videoTrack = 0; +	_audioTrack = 0; +	_hasVideo = _hasAudio = false; +} + +TheoraDecoder::~TheoraDecoder() { +	close(); +} + +bool TheoraDecoder::loadStream(Common::SeekableReadStream *stream) { +	close(); + +	_fileStream = stream; + +	// start up Ogg stream synchronization layer +	ogg_sync_init(&_oggSync); + +	// init supporting Vorbis structures needed in header parsing +	vorbis_info_init(&_vorbisInfo); +	vorbis_comment vorbisComment; +	vorbis_comment_init(&vorbisComment); + +	// init supporting Theora structures needed in header parsing +	th_info theoraInfo; +	th_info_init(&theoraInfo); +	th_comment theoraComment; +	th_comment_init(&theoraComment); +	th_setup_info *theoraSetup = 0; + +	uint theoraPackets = 0, vorbisPackets = 0; + +	// Ogg file open; parse the headers +	// Only interested in Vorbis/Theora streams +	bool foundHeader = false; +	while (!foundHeader) { +		int ret = bufferData(); + +		if (ret == 0) +			break; // FIXME: Shouldn't this error out? + +		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) { +			ogg_stream_state test; + +			// is this a mandated initial header? 
If not, stop parsing +			if (!ogg_page_bos(&_oggPage)) { +				// don't leak the page; get it into the appropriate stream +				queuePage(&_oggPage); +				foundHeader = true; +				break; +			} + +			ogg_stream_init(&test, ogg_page_serialno(&_oggPage)); +			ogg_stream_pagein(&test, &_oggPage); +			ogg_stream_packetout(&test, &_oggPacket); + +			// identify the codec: try theora +			if (theoraPackets == 0 && th_decode_headerin(&theoraInfo, &theoraComment, &theoraSetup, &_oggPacket) >= 0) { +				// it is theora +				memcpy(&_theoraOut, &test, sizeof(test)); +				theoraPackets = 1; +				_hasVideo = true; +			} else if (vorbisPackets == 0 && vorbis_synthesis_headerin(&_vorbisInfo, &vorbisComment, &_oggPacket) >= 0) { +				// it is vorbis +				memcpy(&_vorbisOut, &test, sizeof(test)); +				vorbisPackets = 1; +				_hasAudio = true; +			} else { +				// whatever it is, we don't care about it +				ogg_stream_clear(&test); +			} +		} +		// fall through to non-bos page parsing +	} + +	// we're expecting more header packets. 
+	while ((theoraPackets && theoraPackets < 3) || (vorbisPackets && vorbisPackets < 3)) { +		int ret; + +		// look for further theora headers +		while (theoraPackets && (theoraPackets < 3) && (ret = ogg_stream_packetout(&_theoraOut, &_oggPacket))) { +			if (ret < 0) +				error("Error parsing Theora stream headers; corrupt stream?"); + +			if (!th_decode_headerin(&theoraInfo, &theoraComment, &theoraSetup, &_oggPacket)) +				error("Error parsing Theora stream headers; corrupt stream?"); + +			theoraPackets++; +		} + +		// look for more vorbis header packets +		while (vorbisPackets && (vorbisPackets < 3) && (ret = ogg_stream_packetout(&_vorbisOut, &_oggPacket))) { +			if (ret < 0) +				error("Error parsing Vorbis stream headers; corrupt stream?"); + +			if (vorbis_synthesis_headerin(&_vorbisInfo, &vorbisComment, &_oggPacket)) +				error("Error parsing Vorbis stream headers; corrupt stream?"); + +			vorbisPackets++; + +			if (vorbisPackets == 3) +				break; +		} + +		// The header pages/packets will arrive before anything else we +		// care about, or the stream is not obeying spec + +		if (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) { +			queuePage(&_oggPage); // demux into the appropriate stream +		} else { +			ret = bufferData(); // someone needs more data + +			if (ret == 0) +				error("End of file while searching for codec headers."); +		} +	} + +	// And now we have it all. 
Initialize decoders next +	if (_hasVideo) { +		_videoTrack = new TheoraVideoTrack(getDefaultHighColorFormat(), theoraInfo, theoraSetup); +		addTrack(_videoTrack); +	} + +	th_info_clear(&theoraInfo); +	th_comment_clear(&theoraComment); +	th_setup_free(theoraSetup); + +	if (_hasAudio) { +		_audioTrack = new VorbisAudioTrack(_soundType, _vorbisInfo); + +		// Get enough audio data to start us off +		while (!_audioTrack->hasAudio()) { +			// Queue more data +			bufferData(); +			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) +				queuePage(&_oggPage); + +			queueAudio(); +		} + +		addTrack(_audioTrack); +	} + +	vorbis_comment_clear(&vorbisComment); + +	return true; +} + +void TheoraDecoder::close() { +	VideoDecoder::close(); + +	if (!_fileStream) +		return; + +	if (_videoTrack) { +		ogg_stream_clear(&_theoraOut); +		_videoTrack = 0; +	} + +	if (_audioTrack) { +		ogg_stream_clear(&_vorbisOut); +		_audioTrack = 0; +	} + +	ogg_sync_clear(&_oggSync); +	vorbis_info_clear(&_vorbisInfo); + +	delete _fileStream; +	_fileStream = 0; + +	_hasVideo = _hasAudio = false; +} + +void TheoraDecoder::readNextPacket() { +	// First, let's get our frame +	if (_hasVideo) { +		while (!_videoTrack->endOfTrack()) { +			// theora is one in, one out... +			if (ogg_stream_packetout(&_theoraOut, &_oggPacket) > 0) { +				if (_videoTrack->decodePacket(_oggPacket)) +					break; +			} else if (_theoraOut.e_o_s || _fileStream->eos()) { +				// If we can't get any more frames, we're done. 
+				_videoTrack->setEndOfVideo(); +			} else { +				// Queue more data +				bufferData(); +				while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) +					queuePage(&_oggPage); +			} + +			// Update audio if we can +			queueAudio(); +		} +	} + +	// Then make sure we have enough audio buffered +	ensureAudioBufferSize(); +} + +TheoraDecoder::TheoraVideoTrack::TheoraVideoTrack(const Graphics::PixelFormat &format, th_info &theoraInfo, th_setup_info *theoraSetup) { +	_theoraDecode = th_decode_alloc(&theoraInfo, theoraSetup); + +	if (theoraInfo.pixel_fmt != TH_PF_420) +		error("Only theora YUV420 is supported"); + +	int postProcessingMax; +	th_decode_ctl(_theoraDecode, TH_DECCTL_GET_PPLEVEL_MAX, &postProcessingMax, sizeof(postProcessingMax)); +	th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &postProcessingMax, sizeof(postProcessingMax)); + +	_surface.create(theoraInfo.frame_width, theoraInfo.frame_height, format); + +	// Set up a display surface +	_displaySurface.pixels = _surface.getBasePtr(theoraInfo.pic_x, theoraInfo.pic_y); +	_displaySurface.w = theoraInfo.pic_width; +	_displaySurface.h = theoraInfo.pic_height; +	_displaySurface.format = format; +	_displaySurface.pitch = _surface.pitch; + +	// Set the frame rate +	_frameRate = Common::Rational(theoraInfo.fps_numerator, theoraInfo.fps_denominator); + +	_endOfVideo = false; +	_nextFrameStartTime = 0.0; +	_curFrame = -1; +} + +TheoraDecoder::TheoraVideoTrack::~TheoraVideoTrack() { +	th_decode_free(_theoraDecode); + +	_surface.free(); +	_displaySurface.pixels = 0; +} + +bool TheoraDecoder::TheoraVideoTrack::decodePacket(ogg_packet &oggPacket) { +	if (th_decode_packetin(_theoraDecode, &oggPacket, 0) == 0) { +		_curFrame++; + +		// Convert YUV data to RGB data +		th_ycbcr_buffer yuv; +		th_decode_ycbcr_out(_theoraDecode, yuv); +		translateYUVtoRGBA(yuv); + +		double time = th_granule_time(_theoraDecode, oggPacket.granulepos); + +		// We need to calculate when the next frame should be shown +		// This is all in floating 
point because that's what the Ogg code gives us +		// Ogg is a lossy container format, so it doesn't always list the time to the +		// next frame. In such cases, we need to calculate it ourselves. +		if (time == -1.0) +			_nextFrameStartTime += _frameRate.getInverse().toDouble(); +		else +			_nextFrameStartTime = time; +	 +		return true; +	} + +	return false; +} + +enum TheoraYUVBuffers { +	kBufferY = 0, +	kBufferU = 1, +	kBufferV = 2 +}; + +void TheoraDecoder::TheoraVideoTrack::translateYUVtoRGBA(th_ycbcr_buffer &YUVBuffer) { +	// Width and height of all buffers have to be divisible by 2. +	assert((YUVBuffer[kBufferY].width & 1) == 0); +	assert((YUVBuffer[kBufferY].height & 1) == 0); +	assert((YUVBuffer[kBufferU].width & 1) == 0); +	assert((YUVBuffer[kBufferV].width & 1) == 0); + +	// UV images have to have a quarter of the Y image resolution +	assert(YUVBuffer[kBufferU].width == YUVBuffer[kBufferY].width >> 1); +	assert(YUVBuffer[kBufferV].width == YUVBuffer[kBufferY].width >> 1); +	assert(YUVBuffer[kBufferU].height == YUVBuffer[kBufferY].height >> 1); +	assert(YUVBuffer[kBufferV].height == YUVBuffer[kBufferY].height >> 1); + +	Graphics::convertYUV420ToRGB(&_surface, YUVBuffer[kBufferY].data, YUVBuffer[kBufferU].data, YUVBuffer[kBufferV].data, YUVBuffer[kBufferY].width, YUVBuffer[kBufferY].height, YUVBuffer[kBufferY].stride, YUVBuffer[kBufferU].stride); +} + +static vorbis_info *info = 0; + +TheoraDecoder::VorbisAudioTrack::VorbisAudioTrack(Audio::Mixer::SoundType soundType, vorbis_info &vorbisInfo) : _soundType(soundType) { +	vorbis_synthesis_init(&_vorbisDSP, &vorbisInfo); +	vorbis_block_init(&_vorbisDSP, &_vorbisBlock); +	info = &vorbisInfo; + +	_audStream = Audio::makeQueuingAudioStream(vorbisInfo.rate, vorbisInfo.channels); + +	_audioBufferFill = 0; +	_audioBuffer = 0; +	_endOfAudio = false; +} + +TheoraDecoder::VorbisAudioTrack::~VorbisAudioTrack() { +	vorbis_dsp_clear(&_vorbisDSP); +	vorbis_block_clear(&_vorbisBlock); +	delete _audStream; +	
free(_audioBuffer); +} + +Audio::AudioStream *TheoraDecoder::VorbisAudioTrack::getAudioStream() const { +	return _audStream; +} + +#define AUDIOFD_FRAGSIZE 10240 + +static double rint(double v) { +	return floor(v + 0.5); +} + +bool TheoraDecoder::VorbisAudioTrack::decodeSamples() { +	float **pcm; + +	// if there's pending, decoded audio, grab it +	int ret = vorbis_synthesis_pcmout(&_vorbisDSP, &pcm); + +	if (ret > 0) { +		if (!_audioBuffer) { +			_audioBuffer = (ogg_int16_t *)malloc(AUDIOFD_FRAGSIZE * sizeof(ogg_int16_t)); +			assert(_audioBuffer); +		} + +		int channels = _audStream->isStereo() ? 2 : 1; +		int count = _audioBufferFill / 2; +		int maxsamples = ((AUDIOFD_FRAGSIZE - _audioBufferFill) / channels) >> 1; +		int i; + +		for (i = 0; i < ret && i < maxsamples; i++) { +			for (int j = 0; j < channels; j++) { +				int val = CLIP((int)rint(pcm[j][i] * 32767.f), -32768, 32767); +				_audioBuffer[count++] = val; +			} +		} + +		vorbis_synthesis_read(&_vorbisDSP, i); +		_audioBufferFill += (i * channels) << 1; + +		if (_audioBufferFill == AUDIOFD_FRAGSIZE) { +			byte flags = Audio::FLAG_16BITS; + +			if (_audStream->isStereo()) +				flags |= Audio::FLAG_STEREO; + +#ifdef SCUMM_LITTLE_ENDIAN +			flags |= Audio::FLAG_LITTLE_ENDIAN; +#endif + +			_audStream->queueBuffer((byte *)_audioBuffer, AUDIOFD_FRAGSIZE, DisposeAfterUse::YES, flags); + +			// The audio mixer is now responsible for the old audio buffer. +			// We need to create a new one. +			_audioBuffer = 0; +			_audioBufferFill = 0; +		} + +		return true; +	} + +	return false; +} + +bool TheoraDecoder::VorbisAudioTrack::hasAudio() const { +	return _audStream->numQueuedStreams() > 0; +} + +bool TheoraDecoder::VorbisAudioTrack::needsAudio() const { +	// TODO: 5 is very arbitrary. We probably should do something like QuickTime does. 
+	return !_endOfAudio && _audStream->numQueuedStreams() < 5; +} + +void TheoraDecoder::VorbisAudioTrack::synthesizePacket(ogg_packet &oggPacket) { +	if (vorbis_synthesis(&_vorbisBlock, &oggPacket) == 0) // test for success +		vorbis_synthesis_blockin(&_vorbisDSP, &_vorbisBlock); +} + +void TheoraDecoder::queuePage(ogg_page *page) { +	if (_hasVideo) +		ogg_stream_pagein(&_theoraOut, page); + +	if (_hasAudio) +		ogg_stream_pagein(&_vorbisOut, page); +} + +int TheoraDecoder::bufferData() { +	char *buffer = ogg_sync_buffer(&_oggSync, 4096); +	int bytes = _fileStream->read(buffer, 4096); + +	ogg_sync_wrote(&_oggSync, bytes); + +	return bytes; +} + +bool TheoraDecoder::queueAudio() { +	if (!_hasAudio) +		return false; + +	bool queuedAudio = false; + +	for (;;) { +		if (_audioTrack->decodeSamples()) { +			// we queued some pending audio +			queuedAudio = true; +		} else if (ogg_stream_packetout(&_vorbisOut, &_oggPacket) > 0) { +			// no pending audio; is there a pending packet to decode? +			_audioTrack->synthesizePacket(_oggPacket); +		} else { +			// we've buffered all we have, break out for now +			break; +		} +	} + +	return queuedAudio; +} + +void TheoraDecoder::ensureAudioBufferSize() { +	if (!_hasAudio) +		return; + +	// Force at least some audio to be buffered +	while (_audioTrack->needsAudio()) { +		bufferData(); +		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) +			queuePage(&_oggPage); + +		bool queuedAudio = queueAudio(); +		if ((_vorbisOut.e_o_s  || _fileStream->eos()) && !queuedAudio) { +			_audioTrack->setEndOfAudio(); +			break; +		} +	} +} + +} // End of namespace Video diff --git a/video/theora_decoder.h b/video/theora_decoder.h new file mode 100644 index 0000000000..7e36d829e7 --- /dev/null +++ b/video/theora_decoder.h @@ -0,0 +1,157 @@ +/* ScummVM - Graphic Adventure Engine + * + * ScummVM is the legal property of its developers, whose names + * are too numerous to list here. 
Please refer to the COPYRIGHT + * file distributed with this source distribution. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#include "common/scummsys.h"	// for USE_THEORADEC + +#ifdef USE_THEORADEC + +#ifndef VIDEO_THEORA_DECODER_H +#define VIDEO_THEORA_DECODER_H + +#include "common/rational.h" +#include "video/video_decoder.h" +#include "audio/mixer.h" +#include "graphics/surface.h" + +#include <theora/theoradec.h> +#include <vorbis/codec.h> + +namespace Common { +class SeekableReadStream; +} + +namespace Audio { +class AudioStream; +class QueuingAudioStream; +} + +namespace Video { + +/** + * + * Decoder for Theora videos. 
+ * Video decoder used in engines: + *  - sword25 + */ +class TheoraDecoder : public VideoDecoder { +public: +	TheoraDecoder(Audio::Mixer::SoundType soundType = Audio::Mixer::kMusicSoundType); +	virtual ~TheoraDecoder(); + +	/** +	 * Load a video file +	 * @param stream  the stream to load +	 */ +	bool loadStream(Common::SeekableReadStream *stream); +	void close(); + +protected: +	void readNextPacket(); + +private: +	class TheoraVideoTrack : public VideoTrack { +	public: +		TheoraVideoTrack(const Graphics::PixelFormat &format, th_info &theoraInfo, th_setup_info *theoraSetup); +		~TheoraVideoTrack(); + +		bool endOfTrack() const { return _endOfVideo; } +		uint16 getWidth() const { return _displaySurface.w; } +		uint16 getHeight() const { return _displaySurface.h; } +		Graphics::PixelFormat getPixelFormat() const { return _displaySurface.format; } +		int getCurFrame() const { return _curFrame; } +		uint32 getNextFrameStartTime() const { return (uint32)(_nextFrameStartTime * 1000); } +		const Graphics::Surface *decodeNextFrame() { return &_displaySurface; } + +		bool decodePacket(ogg_packet &oggPacket); +		void setEndOfVideo() { _endOfVideo = true; } + +	private: +		int _curFrame; +		bool _endOfVideo; +		Common::Rational _frameRate; +		double _nextFrameStartTime; + +		Graphics::Surface _surface; +		Graphics::Surface _displaySurface; + +		th_dec_ctx *_theoraDecode; + +		void translateYUVtoRGBA(th_ycbcr_buffer &YUVBuffer); +	}; + +	class VorbisAudioTrack : public AudioTrack { +	public: +		VorbisAudioTrack(Audio::Mixer::SoundType soundType, vorbis_info &vorbisInfo); +		~VorbisAudioTrack(); + +		Audio::Mixer::SoundType getSoundType() const { return _soundType; } + +		bool decodeSamples(); +		bool hasAudio() const; +		bool needsAudio() const; +		void synthesizePacket(ogg_packet &oggPacket); +		void setEndOfAudio() { _endOfAudio = true; } + +	protected: +		Audio::AudioStream *getAudioStream() const; + +	private: +		// single audio fragment audio buffering +		int 
_audioBufferFill; +		ogg_int16_t *_audioBuffer; + +		Audio::Mixer::SoundType _soundType; +		Audio::QueuingAudioStream *_audStream; + +		vorbis_block _vorbisBlock; +		vorbis_dsp_state _vorbisDSP; + +		bool _endOfAudio; +	}; + +	void queuePage(ogg_page *page); +	int bufferData(); +	bool queueAudio(); +	void ensureAudioBufferSize(); + +	Common::SeekableReadStream *_fileStream; + +	Audio::Mixer::SoundType _soundType; + +	ogg_sync_state _oggSync; +	ogg_page _oggPage; +	ogg_packet _oggPacket; + +	ogg_stream_state _theoraOut, _vorbisOut; +	bool _hasVideo, _hasAudio; + +	vorbis_info _vorbisInfo; + +	TheoraVideoTrack *_videoTrack; +	VorbisAudioTrack *_audioTrack; +}; + +} // End of namespace Video + +#endif + +#endif diff --git a/video/video_decoder.cpp b/video/video_decoder.cpp index 44d7917652..559880acee 100644 --- a/video/video_decoder.cpp +++ b/video/video_decoder.cpp @@ -22,6 +22,7 @@  #include "video/video_decoder.h" +#include "audio/audiostream.h"  #include "audio/mixer.h" // for kMaxChannelVolume  #include "common/rational.h" @@ -33,7 +34,45 @@  namespace Video {  VideoDecoder::VideoDecoder() { -	reset(); +	_startTime = 0; +	_needsRewind = false; +	_dirtyPalette = false; +	_palette = 0; +	_isPlaying = false; +	_audioVolume = Audio::Mixer::kMaxChannelVolume; +	_audioBalance = 0; +	_pauseLevel = 0; +	_needsUpdate = false; +	_lastTimeChange = 0; +	_endTime = 0; +	_endTimeSet = false; + +	// Find the best format for output +	_defaultHighColorFormat = g_system->getScreenFormat(); + +	if (_defaultHighColorFormat.bytesPerPixel == 1) +		_defaultHighColorFormat = Graphics::PixelFormat(4, 8, 8, 8, 8, 8, 16, 24, 0); +} + +void VideoDecoder::close() { +	if (isPlaying()) +		stop(); + +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +		delete *it; + +	_tracks.clear(); +	_needsRewind = false; +	_dirtyPalette = false; +	_palette = 0; +	_startTime = 0; +	_audioVolume = Audio::Mixer::kMaxChannelVolume; +	_audioBalance = 0; +	_pauseLevel = 0; +	_needsUpdate 
= false; +	_lastTimeChange = 0; +	_endTime = 0; +	_endTimeSet = false;  }  bool VideoDecoder::loadFile(const Common::String &filename) { @@ -47,30 +86,10 @@ bool VideoDecoder::loadFile(const Common::String &filename) {  	return loadStream(file);  } -uint32 VideoDecoder::getTime() const { -	return g_system->getMillis() - _startTime; -} - -void VideoDecoder::setSystemPalette() { -	g_system->getPaletteManager()->setPalette(getPalette(), 0, 256); -} -  bool VideoDecoder::needsUpdate() const {  	return !endOfVideo() && getTimeToNextFrame() == 0;  } -void VideoDecoder::reset() { -	_curFrame = -1; -	_startTime = 0; -	_pauseLevel = 0; -	_audioVolume = Audio::Mixer::kMaxChannelVolume; -	_audioBalance = 0; -} - -bool VideoDecoder::endOfVideo() const { -	return !isVideoLoaded() || (getCurFrame() >= (int32)getFrameCount() - 1); -} -  void VideoDecoder::pauseVideo(bool pause) {  	if (pause) {  		_pauseLevel++; @@ -86,10 +105,14 @@ void VideoDecoder::pauseVideo(bool pause) {  	if (_pauseLevel == 1 && pause) {  		_pauseStartTime = g_system->getMillis(); // Store the starting time from pausing to keep it for later -		pauseVideoIntern(true); + +		for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +			(*it)->pause(true);  	} else if (_pauseLevel == 0) { -		pauseVideoIntern(false); -		addPauseTime(g_system->getMillis() - _pauseStartTime); +		for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +			(*it)->pause(false); + +		_startTime += (g_system->getMillis() - _pauseStartTime);  	}  } @@ -100,33 +123,560 @@ void VideoDecoder::resetPauseStartTime() {  void VideoDecoder::setVolume(byte volume) {  	_audioVolume = volume; -	updateVolume(); + +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeAudio) +			((AudioTrack *)*it)->setVolume(_audioVolume);  }  void VideoDecoder::setBalance(int8 balance) {  	_audioBalance = balance; -	updateBalance(); + +	for (TrackList::iterator 
it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeAudio) +			((AudioTrack *)*it)->setBalance(_audioBalance); +} + +bool VideoDecoder::isVideoLoaded() const { +	return !_tracks.empty(); +} + +uint16 VideoDecoder::getWidth() const { +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeVideo) +			return ((VideoTrack *)*it)->getWidth(); + +	return 0; +} + +uint16 VideoDecoder::getHeight() const { +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeVideo) +			return ((VideoTrack *)*it)->getHeight(); + +	return 0; +} + +Graphics::PixelFormat VideoDecoder::getPixelFormat() const { +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeVideo) +			return ((VideoTrack *)*it)->getPixelFormat(); + +	return Graphics::PixelFormat();  } -uint32 FixedRateVideoDecoder::getTimeToNextFrame() const { -	if (endOfVideo() || _curFrame < 0) +const Graphics::Surface *VideoDecoder::decodeNextFrame() { +	_needsUpdate = false; + +	readNextPacket(); +	VideoTrack *track = findNextVideoTrack(); + +	if (!track) +		return 0; + +	const Graphics::Surface *frame = track->decodeNextFrame(); + +	if (track->hasDirtyPalette()) { +		_palette = track->getPalette(); +		_dirtyPalette = true; +	} + +	return frame; +} + +const byte *VideoDecoder::getPalette() { +	_dirtyPalette = false; +	return _palette; +} + +int VideoDecoder::getCurFrame() const { +	int32 frame = -1; + +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeVideo) +			frame += ((VideoTrack *)*it)->getCurFrame() + 1; + +	return frame; +} + +uint32 VideoDecoder::getFrameCount() const { +	int count = 0; + +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if 
((*it)->getTrackType() == Track::kTrackTypeVideo) +			count += ((VideoTrack *)*it)->getFrameCount(); + +	return count; +} + +uint32 VideoDecoder::getTime() const { +	if (!isPlaying()) +		return _lastTimeChange.msecs(); + +	if (isPaused()) +		return _pauseStartTime - _startTime; + +	if (useAudioSync()) { +		for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) { +			if ((*it)->getTrackType() == Track::kTrackTypeAudio && !(*it)->endOfTrack()) { +				uint32 time = ((const AudioTrack *)*it)->getRunningTime(); + +				if (time != 0) +					return time + _lastTimeChange.msecs(); +			} +		} +	} + +	return g_system->getMillis() - _startTime; +} + +uint32 VideoDecoder::getTimeToNextFrame() const { +	if (endOfVideo() || _needsUpdate) +		return 0; + +	const VideoTrack *track = findNextVideoTrack(); + +	if (!track)  		return 0;  	uint32 elapsedTime = getTime(); -	uint32 nextFrameStartTime = getFrameBeginTime(_curFrame + 1); +	uint32 nextFrameStartTime = track->getNextFrameStartTime(); -	// If the time that the next frame should be shown has past -	// the frame should be shown ASAP.  	
if (nextFrameStartTime <= elapsedTime)  		return 0;  	return nextFrameStartTime - elapsedTime;  } -uint32 FixedRateVideoDecoder::getFrameBeginTime(uint32 frame) const { -	Common::Rational beginTime = frame * 1000; -	beginTime /= getFrameRate(); -	return beginTime.toInt(); +bool VideoDecoder::endOfVideo() const { +	if (!isVideoLoaded()) +		return true; + +	if (_endTimeSet) { +		const VideoTrack *track = findNextVideoTrack(); + +		if (track && track->getNextFrameStartTime() >= (uint)_endTime.msecs()) +			return true; +	} + +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if (!(*it)->endOfTrack()) +			return false; + +	return true; +} + +bool VideoDecoder::isRewindable() const { +	if (!isVideoLoaded()) +		return false; + +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if (!(*it)->isRewindable()) +			return false; + +	return true; +} + +bool VideoDecoder::rewind() { +	if (!isRewindable()) +		return false; + +	_needsRewind = false; + +	// Stop all tracks so they can be rewound +	if (isPlaying()) +		stopAudio(); + +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if (!(*it)->rewind()) +			return false; + +	// Now that we've rewound, start all tracks again +	if (isPlaying()) +		startAudio(); + +	_lastTimeChange = 0; +	_startTime = g_system->getMillis(); +	resetPauseStartTime(); +	return true; +} + +bool VideoDecoder::isSeekable() const { +	if (!isVideoLoaded()) +		return false; + +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if (!(*it)->isSeekable()) +			return false; + +	return true; +} + +bool VideoDecoder::seek(const Audio::Timestamp &time) { +	if (!isSeekable()) +		return false; + +	_needsRewind = false; + +	// Stop all tracks so they can be seeked +	if (isPlaying()) +		stopAudio(); + +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if (!(*it)->seek(time)) +			return false; + +	_lastTimeChange = 
time; + +	// Now that we've seeked, start all tracks again +	// Also reset our start time +	if (isPlaying()) { +		startAudio(); +		_startTime = g_system->getMillis() - time.msecs(); +	} + +	resetPauseStartTime(); +	_needsUpdate = true; +	return true; +} + +void VideoDecoder::start() { +	if (isPlaying() || !isVideoLoaded()) +		return; + +	_isPlaying = true; +	_startTime = g_system->getMillis(); + +	// If someone previously called stop(), we'll rewind it. +	if (_needsRewind) +		rewind(); + +	// Adjust start time if we've seeked to something besides zero time +	if (_lastTimeChange.totalNumberOfFrames() != 0) +		_startTime -= _lastTimeChange.msecs(); + +	startAudio(); +} + +void VideoDecoder::stop() { +	if (!isPlaying()) +		return; + +	_isPlaying = false; +	_startTime = 0; +	_palette = 0; +	_dirtyPalette = false; +	_needsUpdate = false; + +	stopAudio(); + +	// Also reset the pause state. +	_pauseLevel = 0; + +	// If this is a rewindable video, don't close it too. We'll just rewind() the video +	// the next time someone calls start(). Otherwise, since it can't be rewound, we +	// just close it. 
+	if (isRewindable()) { +		_lastTimeChange = getTime(); +		_needsRewind = true; +	} else { +		close(); +	} +} + +Audio::Timestamp VideoDecoder::getDuration() const { +	Audio::Timestamp maxDuration(0, 1000); + +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) { +		Audio::Timestamp duration = (*it)->getDuration(); + +		if (duration > maxDuration) +			maxDuration = duration; +	} + +	return maxDuration; +} + +VideoDecoder::Track::Track() { +	_paused = false; +} + +bool VideoDecoder::Track::isRewindable() const { +	return isSeekable(); +} + +bool VideoDecoder::Track::rewind() { +	return seek(Audio::Timestamp(0, 1000)); +} + +Audio::Timestamp VideoDecoder::Track::getDuration() const { +	return Audio::Timestamp(0, 1000); +} + +bool VideoDecoder::VideoTrack::endOfTrack() const { +	return getCurFrame() >= (getFrameCount() - 1); +} + +uint32 VideoDecoder::FixedRateVideoTrack::getNextFrameStartTime() const { +	if (endOfTrack() || getCurFrame() < 0) +		return 0; + +	Common::Rational time = (getCurFrame() + 1) * 1000; +	time /= getFrameRate(); +	return time.toInt(); +} + +Audio::Timestamp VideoDecoder::FixedRateVideoTrack::getDuration() const { +	// Since Audio::Timestamp doesn't support a fractional frame rate, we're currently +	// just converting to milliseconds. 
+	Common::Rational time = getFrameCount() * 1000; +	time /= getFrameRate(); +	return time.toInt(); +} + +bool VideoDecoder::AudioTrack::endOfTrack() const { +	Audio::AudioStream *stream = getAudioStream(); +	return !stream || !g_system->getMixer()->isSoundHandleActive(_handle) || stream->endOfData(); +} + +void VideoDecoder::AudioTrack::setVolume(byte volume) { +	_volume = volume; + +	if (g_system->getMixer()->isSoundHandleActive(_handle)) +		g_system->getMixer()->setChannelVolume(_handle, _volume); +} + +void VideoDecoder::AudioTrack::setBalance(int8 balance) { +	_balance = balance; + +	if (g_system->getMixer()->isSoundHandleActive(_handle)) +		g_system->getMixer()->setChannelBalance(_handle, _balance); +} + +void VideoDecoder::AudioTrack::start() { +	stop(); + +	Audio::AudioStream *stream = getAudioStream(); +	assert(stream); + +	g_system->getMixer()->playStream(getSoundType(), &_handle, stream, -1, getVolume(), getBalance(), DisposeAfterUse::NO); + +	// Pause the audio again if we're still paused +	if (isPaused()) +		g_system->getMixer()->pauseHandle(_handle, true); +} + +void VideoDecoder::AudioTrack::stop() { +	g_system->getMixer()->stopHandle(_handle); +} + +void VideoDecoder::AudioTrack::start(const Audio::Timestamp &limit) { +	stop(); + +	Audio::AudioStream *stream = getAudioStream(); +	assert(stream); + +	stream = Audio::makeLimitingAudioStream(stream, limit, DisposeAfterUse::NO); + +	g_system->getMixer()->playStream(getSoundType(), &_handle, stream, -1, getVolume(), getBalance(), DisposeAfterUse::YES); + +	// Pause the audio again if we're still paused +	if (isPaused()) +		g_system->getMixer()->pauseHandle(_handle, true); +} + +uint32 VideoDecoder::AudioTrack::getRunningTime() const { +	if (g_system->getMixer()->isSoundHandleActive(_handle)) +		return g_system->getMixer()->getSoundElapsedTime(_handle); + +	return 0; +} + +void VideoDecoder::AudioTrack::pauseIntern(bool shouldPause) { +	if (g_system->getMixer()->isSoundHandleActive(_handle)) +		
g_system->getMixer()->pauseHandle(_handle, shouldPause); +} + +Audio::AudioStream *VideoDecoder::RewindableAudioTrack::getAudioStream() const { +	return getRewindableAudioStream(); +} + +bool VideoDecoder::RewindableAudioTrack::rewind() { +	Audio::RewindableAudioStream *stream = getRewindableAudioStream(); +	assert(stream); +	return stream->rewind(); +} + +Audio::Timestamp VideoDecoder::SeekableAudioTrack::getDuration() const { +	Audio::SeekableAudioStream *stream = getSeekableAudioStream(); +	assert(stream); +	return stream->getLength(); +} + +Audio::AudioStream *VideoDecoder::SeekableAudioTrack::getAudioStream() const { +	return getSeekableAudioStream(); +} + +bool VideoDecoder::SeekableAudioTrack::seek(const Audio::Timestamp &time) { +	Audio::SeekableAudioStream *stream = getSeekableAudioStream(); +	assert(stream); +	return stream->seek(time); +} + +VideoDecoder::StreamFileAudioTrack::StreamFileAudioTrack() { +	_stream = 0; +} + +VideoDecoder::StreamFileAudioTrack::~StreamFileAudioTrack() { +	delete _stream; +} + +bool VideoDecoder::StreamFileAudioTrack::loadFromFile(const Common::String &baseName) { +	// TODO: Make sure the stream isn't being played +	delete _stream; +	_stream = Audio::SeekableAudioStream::openStreamFile(baseName); +	return _stream != 0; +} + +void VideoDecoder::addTrack(Track *track) { +	_tracks.push_back(track); + +	// Update volume settings if it's an audio track +	if (track->getTrackType() == Track::kTrackTypeAudio) { +		((AudioTrack *)track)->setVolume(_audioVolume); +		((AudioTrack *)track)->setBalance(_audioBalance); +	} + +	// Keep the track paused if we're paused +	if (isPaused()) +		track->pause(true); + +	// Start the track if we're playing +	if (isPlaying() && track->getTrackType() == Track::kTrackTypeAudio) +		((AudioTrack *)track)->start(); +} + +bool VideoDecoder::addStreamFileTrack(const Common::String &baseName) { +	// Only allow adding external tracks if a video is already loaded +	if (!isVideoLoaded()) +		return false; + +	
StreamFileAudioTrack *track = new StreamFileAudioTrack(); + +	bool result = track->loadFromFile(baseName); + +	if (result) +		addTrack(track); + +	return result; +} + +void VideoDecoder::setEndTime(const Audio::Timestamp &endTime) { +	Audio::Timestamp startTime = 0; + +	if (isPlaying()) { +		startTime = getTime(); +		stopAudio(); +	} + +	_endTime = endTime; +	_endTimeSet = true; + +	if (startTime > endTime) +		return; + +	if (isPlaying()) { +		// We'll assume the audio track is going to start up at the same time it just was +		// and therefore not do any seeking. +		// Might want to set it anyway if we're seekable. +		startAudioLimit(_endTime.msecs() - startTime.msecs()); +		_lastTimeChange = startTime; +	} +} + +VideoDecoder::Track *VideoDecoder::getTrack(uint track) { +	if (track > _tracks.size()) +		return 0; + +	return _tracks[track]; +} + +const VideoDecoder::Track *VideoDecoder::getTrack(uint track) const { +	if (track > _tracks.size()) +		return 0; + +	return _tracks[track]; +} + +bool VideoDecoder::endOfVideoTracks() const { +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack()) +			return false; + +	return true; +} + +VideoDecoder::VideoTrack *VideoDecoder::findNextVideoTrack() { +	VideoTrack *bestTrack = 0; +	uint32 bestTime = 0xFFFFFFFF; + +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) { +		if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack()) { +			VideoTrack *track = (VideoTrack *)*it; +			uint32 time = track->getNextFrameStartTime(); + +			if (time < bestTime) { +				bestTime = time; +				bestTrack = track; +			} +		} +	} + +	return bestTrack; +} + +const VideoDecoder::VideoTrack *VideoDecoder::findNextVideoTrack() const { +	const VideoTrack *bestTrack = 0; +	uint32 bestTime = 0xFFFFFFFF; + +	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) { +		if 
((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack()) { +			const VideoTrack *track = (const VideoTrack *)*it; +			uint32 time = track->getNextFrameStartTime(); + +			if (time < bestTime) { +				bestTime = time; +				bestTrack = track; +			} +		} +	} + +	return bestTrack; +} + +void VideoDecoder::startAudio() { +	if (_endTimeSet) { +		// HACK: Timestamp's subtraction asserts out when subtracting two times +		// with different rates. +		startAudioLimit(_endTime - _lastTimeChange.convertToFramerate(_endTime.framerate())); +		return; +	} + +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeAudio) +			((AudioTrack *)*it)->start(); +} + +void VideoDecoder::stopAudio() { +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeAudio) +			((AudioTrack *)*it)->stop(); +} + +void VideoDecoder::startAudioLimit(const Audio::Timestamp &limit) { +	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) +		if ((*it)->getTrackType() == Track::kTrackTypeAudio) +			((AudioTrack *)*it)->start(limit);  }  } // End of namespace Video diff --git a/video/video_decoder.h b/video/video_decoder.h index 3bb75ade09..5abe1d917c 100644 --- a/video/video_decoder.h +++ b/video/video_decoder.h @@ -23,10 +23,17 @@  #ifndef VIDEO_DECODER_H  #define VIDEO_DECODER_H -#include "common/str.h" - +#include "audio/mixer.h"  #include "audio/timestamp.h"	// TODO: Move this to common/ ? 
+#include "common/array.h" +#include "common/str.h" +#include "graphics/pixelformat.h" +namespace Audio { +class AudioStream; +class RewindableAudioStream; +class SeekableAudioStream; +}  namespace Common {  class Rational; @@ -34,7 +41,6 @@ class SeekableReadStream;  }  namespace Graphics { -struct PixelFormat;  struct Surface;  } @@ -48,10 +54,14 @@ public:  	VideoDecoder();  	virtual ~VideoDecoder() {} +	///////////////////////////////////////// +	// Opening/Closing a Video +	///////////////////////////////////////// +  	/**  	 * Load a video from a file with the given name.  	 * -	 * A default implementation using loadStream is provided. +	 * A default implementation using Common::File and loadStream is provided.  	 *  	 * @param filename	the filename to load  	 * @return whether loading the file succeeded @@ -62,6 +72,10 @@ public:  	 * Load a video from a generic read stream. The ownership of the  	 * stream object transfers to this VideoDecoder instance, which is  	 * hence also responsible for eventually deleting it. +	 * +	 * Implementations of this function are required to call addTrack() +	 * for each track in the video upon success. +	 *  	 * @param stream  the stream to load  	 * @return whether loading the stream succeeded  	 */ @@ -69,60 +83,133 @@ public:  	/**  	 * Close the active video stream and free any associated resources. +	 * +	 * All subclasses that need to close their own resources should still +	 * call the base class' close() function at the start of their function.  	 */ -	virtual void close() = 0; +	virtual void close();  	/**  	 * Returns if a video stream is currently loaded or not.  	 */ -	virtual bool isVideoLoaded() const = 0; +	bool isVideoLoaded() const; +	///////////////////////////////////////// +	// Playback Control +	/////////////////////////////////////////  	/** -	 * Returns the width of the video's frames. -	 * @return the width of the video's frames +	 * Begin playback of the video. 
+	 * +	 * @note This has no effect is the video is already playing.  	 */ -	virtual uint16 getWidth() const = 0; +	void start();  	/** -	 * Returns the height of the video's frames. -	 * @return the height of the video's frames +	 * Stop playback of the video. +	 * +	 * @note This will close() the video if it is not rewindable. +	 * @note If the video is rewindable, the video will be rewound on the +	 * next start() call unless rewind() or seek() is called before then.  	 */ -	virtual uint16 getHeight() const = 0; +	void stop();  	/** -	 * Get the pixel format of the currently loaded video. +	 * Returns if the video is currently playing or not. +	 * +	 * This is not equivalent to the inverse of endOfVideo(). A video keeps +	 * its playing status even after reaching the end of the video. This will +	 * return true after calling start() and will continue to return true +	 * until stop() (or close()) is called.  	 */ -	virtual Graphics::PixelFormat getPixelFormat() const = 0; +	bool isPlaying() const { return _isPlaying; }  	/** -	 * Get the palette for the video in RGB format (if 8bpp or less). +	 * Returns if a video is rewindable or not. The default implementation +	 * polls each track for rewindability.  	 */ -	virtual const byte *getPalette() { return 0; } +	virtual bool isRewindable() const;  	/** -	 * Returns if the palette is dirty or not. +	 * Rewind a video to its beginning. +	 * +	 * If the video is playing, it will continue to play. The default +	 * implementation will rewind each track. +	 * +	 * @return true on success, false otherwise +	 */ +	virtual bool rewind(); + +	/** +	 * Returns if a video is seekable or not. The default implementation +	 * polls each track for seekability. +	 */ +	virtual bool isSeekable() const; + +	/** +	 * Seek to a given time in the video. +	 * +	 * If the video is playing, it will continue to play. The default +	 * implementation will seek each track and must still be called +	 * from any other implementation. 
+	 * +	 * @param time The time to seek to +	 * @return true on success, false otherwise  	 */ -	virtual bool hasDirtyPalette() const { return false; } +	virtual bool seek(const Audio::Timestamp &time);  	/** -	 * Set the system palette to the palette returned by getPalette. -	 * @see getPalette +	 * Pause or resume the video. This should stop/resume any audio playback +	 * and other stuff. The initial pause time is kept so that any timing +	 * variables can be updated appropriately. +	 * +	 * This is a convenience method which automatically keeps track on how +	 * often the video has been paused, ensuring that after pausing an video +	 * e.g. twice, it has to be unpaused twice before actuallying resuming. +	 * +	 * @param pause		true to pause the video, false to resume it  	 */ -	void setSystemPalette(); +	void pauseVideo(bool pause); + +	/** +	 * Return whether the video is currently paused or not. +	 */ +	bool isPaused() const { return _pauseLevel != 0; } + +	/** +	 * Set the time for this video to end at. At this time in the video, +	 * all audio will stop and endOfVideo() will return true. +	 */ +	void setEndTime(const Audio::Timestamp &endTime); + +	/** +	 * Get the stop time of the video (if not set, zero) +	 */ +	Audio::Timestamp getEndTime() const { return _endTime; } + + +	///////////////////////////////////////// +	// Playback Status +	///////////////////////////////////////// + +	/** +	 * Returns if the video has reached the end or not. +	 * @return true if the video has finished playing or if none is loaded, false otherwise +	 */ +	bool endOfVideo() const;  	/**  	 * Returns the current frame number of the video.  	 * @return the last frame decoded by the video  	 */ -	virtual int32 getCurFrame() const { return _curFrame; } +	int32 getCurFrame() const;  	/**  	 * Returns the number of frames in the video.  	 
* @return the number of frames in the video  	 */ -	virtual uint32 getFrameCount() const = 0; +	uint32 getFrameCount() const;  	/**  	 * Returns the time position (in ms) of the current video. @@ -138,103 +225,456 @@ public:  	 * completely accurate (since our mixer does not have precise  	 * timing).  	 */ -	virtual uint32 getTime() const; +	uint32 getTime() const; + + +	///////////////////////////////////////// +	// Video Info +	///////////////////////////////////////// + +	/** +	 * Returns the width of the video's frames. +	 * +	 * By default, this finds the largest width between all of the loaded +	 * tracks. However, a subclass may override this if it does any kind +	 * of post-processing on it. +	 * +	 * @return the width of the video's frames +	 */ +	virtual uint16 getWidth() const; + +	/** +	 * Returns the height of the video's frames. +	 * +	 * By default, this finds the largest height between all of the loaded +	 * tracks. However, a subclass may override this if it does any kind +	 * of post-processing on it. +	 * +	 * @return the height of the video's frames +	 */ +	virtual uint16 getHeight() const; + +	/** +	 * Get the pixel format of the currently loaded video. +	 */ +	Graphics::PixelFormat getPixelFormat() const; + +	/** +	 * Get the duration of the video. +	 * +	 * If the duration is unknown, this will return 0. If this is not +	 * overriden, it will take the length of the longest track. +	 */ +	virtual Audio::Timestamp getDuration() const; + + +	///////////////////////////////////////// +	// Frame Decoding +	///////////////////////////////////////// + +	/** +	 * Get the palette for the video in RGB format (if 8bpp or less). +	 * +	 * The palette's format is the same as PaletteManager's palette +	 * (interleaved RGB values). +	 */ +	const byte *getPalette(); + +	/** +	 * Returns if the palette is dirty or not. +	 */ +	bool hasDirtyPalette() const { return _dirtyPalette; }  	/**  	 * Return the time (in ms) until the next frame should be displayed.  	
 */ -	virtual uint32 getTimeToNextFrame() const = 0; +	uint32 getTimeToNextFrame() const;  	/**  	 * Check whether a new frame should be decoded, i.e. because enough  	 * time has elapsed since the last frame was decoded.  	 * @return whether a new frame should be decoded or not  	 */ -	virtual bool needsUpdate() const; +	bool needsUpdate() const;  	/**  	 * Decode the next frame into a surface and return the latter. +	 * +	 * A subclass may override this, but must still call this function. As an +	 * example, a subclass may do this to apply some global video scale to +	 * individual track's frame. +	 * +	 * Note that this will call readNextPacket() internally first before calling +	 * the next video track's decodeNextFrame() function. +	 *  	 * @return a surface containing the decoded frame, or 0  	 * @note Ownership of the returned surface stays with the VideoDecoder,  	 *       hence the caller must *not* free it.  	 * @note this may return 0, in which case the last frame should be kept on screen  	 */ -	virtual const Graphics::Surface *decodeNextFrame() = 0; - -	/** -	 * Returns if the video has finished playing or not. -	 * @return true if the video has finished playing or if none is loaded, false otherwise -	 */ -	virtual bool endOfVideo() const; +	virtual const Graphics::Surface *decodeNextFrame();  	/** -	 * Pause or resume the video. This should stop/resume any audio playback -	 * and other stuff. The initial pause time is kept so that any timing -	 * variables can be updated appropriately. +	 * Set the default high color format for videos that convert from YUV.  	 * -	 * This is a convenience method which automatically keeps track on how -	 * often the video has been paused, ensuring that after pausing an video -	 * e.g. twice, it has to be unpaused twice before actuallying resuming. +	 * By default, VideoDecoder will attempt to use the screen format +	 * if it's >8bpp and use a 32bpp format when not.  	 
* -	 * @param pause		true to pause the video, false to resume it +	 * This must be set before calling loadStream().  	 */ -	void pauseVideo(bool pause); +	void setDefaultHighColorFormat(const Graphics::PixelFormat &format) { _defaultHighColorFormat = format; } -	/** -	 * Return whether the video is currently paused or not. -	 */ -	bool isPaused() const { return _pauseLevel != 0; } + +	///////////////////////////////////////// +	// Audio Control +	/////////////////////////////////////////  	/**  	 * Get the current volume at which the audio in the video is being played  	 * @return the current volume at which the audio in the video is being played  	 */ -	virtual byte getVolume() const { return _audioVolume; } +	byte getVolume() const { return _audioVolume; }  	/**  	 * Set the volume at which the audio in the video should be played. -	 * This setting remains until reset() is called (which may be called -	 * from loadStream() or close()). The default volume is the maximum. -	 * -	 * @note This function calls updateVolume() by default. +	 * This setting remains until close() is called (which may be called +	 * from loadStream()). The default volume is the maximum.  	 *  	 * @param volume The volume at which to play the audio in the video  	 */ -	virtual void setVolume(byte volume); +	void setVolume(byte volume);  	/**  	 * Get the current balance at which the audio in the video is being played  	 * @return the current balance at which the audio in the video is being played  	 */ -	virtual int8 getBalance() const { return _audioBalance; } +	int8 getBalance() const { return _audioBalance; }  	/**  	 * Set the balance at which the audio in the video should be played. -	 * This setting remains until reset() is called (which may be called -	 * from loadStream() or close()). The default balance is 0. -	 * -	 * @note This function calls updateBalance() by default. +	 * This setting remains until close() is called (which may be called +	 * from loadStream()). 
The default balance is 0.  	 *  	 * @param balance The balance at which to play the audio in the video  	 */ -	virtual void setBalance(int8 balance); +	void setBalance(int8 balance); + +	/** +	 * Add an audio track from a stream file. +	 * +	 * This calls SeekableAudioStream::openStreamFile() internally +	 */ +	bool addStreamFileTrack(const Common::String &baseName); + + +	// Future API +	//void setRate(const Common::Rational &rate); +	//Common::Rational getRate() const;  protected:  	/** -	 * Resets _curFrame and _startTime. Should be called from every close() function. +	 * An abstract representation of a track in a movie. +	 */ +	class Track { +	public: +		Track(); +		virtual ~Track() {} + +		/** +		 * The types of tracks this class can be. +		 */ +		enum TrackType { +			kTrackTypeNone, +			kTrackTypeVideo, +			kTrackTypeAudio +		}; + +		/** +		 * Get the type of track. +		 */ +		virtual TrackType getTrackType() const = 0; + +		/** +		 * Return if the track has finished. +		 */ +		virtual bool endOfTrack() const = 0; + +		/** +		 * Return if the track is rewindable. +		 * +		 * If a video is seekable, it does not need to implement this +		 * for it to also be rewindable. +		 */ +		virtual bool isRewindable() const; + +		/** +		 * Rewind the video to the beginning. +		 * +		 * If a video is seekable, it does not need to implement this +		 * for it to also be rewindable. +		 * +		 * @return true on success, false otherwise. +		 */ +		virtual bool rewind(); + +		/** +		 * Return if the track is seekable. +		 */ +		virtual bool isSeekable() const { return false; } + +		/** +		 * Seek to the given time. +		 * @param time The time to seek to, from the beginning of the video. +		 * @return true on success, false otherwise. +		 */ +		virtual bool seek(const Audio::Timestamp &time) { return false; } + +		/** +		 * Set the pause status of the track. +		 */ +		void pause(bool shouldPause) {} + +		/** +		 * Return if the track is paused. 
+		 */ +		bool isPaused() const { return _paused; } + +		/** +		 * Get the duration of the track (starting from this track's start time). +		 * +		 * By default, this returns 0 for unknown. +		 */ +		virtual Audio::Timestamp getDuration() const; + +	protected: +		/** +		 * Function called by pause() for subclasses to implement. +		 */ +		void pauseIntern(bool pause); + +	private: +		bool _paused; +	}; + +	/** +	 * An abstract representation of a video track. +	 */ +	class VideoTrack : public Track { +	public: +		VideoTrack() {} +		virtual ~VideoTrack() {} + +		TrackType getTrackType() const  { return kTrackTypeVideo; } +		virtual bool endOfTrack() const; + +		/** +		 * Get the width of this track +		 */ +		virtual uint16 getWidth() const = 0; + +		/** +		 * Get the height of this track +		 */ +		virtual uint16 getHeight() const = 0; + +		/** +		 * Get the pixel format of this track +		 */ +		virtual Graphics::PixelFormat getPixelFormat() const = 0; + +		/** +		 * Get the current frame of this track +		 * +		 * @see VideoDecoder::getCurFrame() +		 */ +		virtual int getCurFrame() const = 0; + +		/** +		 * Get the frame count of this track +		 * +		 * @note If the frame count is unknown, return 0 (which is also +		 * the default implementation of the function). However, one must +		 * also implement endOfTrack() in that case. +		 */ +		virtual int getFrameCount() const { return 0; } + +		/** +		 * Get the start time of the next frame in milliseconds since +		 * the start of the video +		 */ +		virtual uint32 getNextFrameStartTime() const = 0; + +		/** +		 * Decode the next frame +		 */ +		virtual const Graphics::Surface *decodeNextFrame() = 0; + +		/** +		 * Get the palette currently in use by this track +		 */ +		virtual const byte *getPalette() const { return 0; } + +		/** +		 * Does the palette currently in use by this track need to be updated? 
+		 */ +		virtual bool hasDirtyPalette() const { return false; } +	}; + +	/** +	 * A VideoTrack that is played at a constant rate. +	 * +	 * If the frame count is unknown, you must override endOfTrack(). +	 */ +	class FixedRateVideoTrack : public VideoTrack { +	public: +		FixedRateVideoTrack() {} +		virtual ~FixedRateVideoTrack() {} + +		uint32 getNextFrameStartTime() const; +		virtual Audio::Timestamp getDuration() const; + +	protected: +		/** +		 * Get the rate at which this track is played. +		 */ +		virtual Common::Rational getFrameRate() const = 0; +	}; + +	/** +	 * An abstract representation of an audio track.  	 */ -	void reset(); +	class AudioTrack : public Track { +	public: +		AudioTrack() {} +		virtual ~AudioTrack() {} + +		TrackType getTrackType() const { return kTrackTypeAudio; } + +		virtual bool endOfTrack() const; + +		/** +		 * Start playing this track +		 */ +		void start(); + +		/** +		 * Stop playing this track +		 */ +		void stop(); + +		void start(const Audio::Timestamp &limit); + +		/** +		 * Get the volume for this track +		 */ +		byte getVolume() const { return _volume; } + +		/** +		 * Set the volume for this track +		 */ +		void setVolume(byte volume); + +		/** +		 * Get the balance for this track +		 */ +		int8 getBalance() const { return _balance; } + +		/** +		 * Set the balance for this track +		 */ +		void setBalance(int8 balance); + +		/** +		 * Get the time the AudioStream behind this track has been +		 * running +		 */ +		uint32 getRunningTime() const; + +		/** +		 * Get the sound type to be used when playing this audio track +		 */ +		virtual Audio::Mixer::SoundType getSoundType() const { return Audio::Mixer::kPlainSoundType; } + +	protected: +		void pauseIntern(bool pause); + +		/** +		 * Get the AudioStream that is the representation of this AudioTrack +		 */ +		virtual Audio::AudioStream *getAudioStream() const = 0; + +	private: +		Audio::SoundHandle _handle; +		byte _volume; +		int8 _balance; +	};  	/** -	 * Actual 
implementation of pause by subclasses. See pause() -	 * for details. +	 * An AudioTrack that implements isRewindable() and rewind() using +	 * RewindableAudioStream.  	 */ -	virtual void pauseVideoIntern(bool pause) {} +	class RewindableAudioTrack : public AudioTrack { +	public: +		RewindableAudioTrack() {} +		virtual ~RewindableAudioTrack() {} + +		bool isRewindable() const { return true; } +		bool rewind(); + +	protected: +		Audio::AudioStream *getAudioStream() const; + +		/** +		 * Get the RewindableAudioStream pointer to be used by this class +		 * for rewind() and getAudioStream() +		 */ +		virtual Audio::RewindableAudioStream *getRewindableAudioStream() const = 0; +	};  	/** -	 * Add the time the video has been paused to maintain sync +	 * An AudioTrack that implements isSeekable() and seek() using +	 * SeekableAudioStream.  	 */ -	virtual void addPauseTime(uint32 ms) { _startTime += ms; } +	class SeekableAudioTrack : public AudioTrack { +	public: +		SeekableAudioTrack() {} +		virtual ~SeekableAudioTrack() {} + +		bool isSeekable() const { return true; } +		bool seek(const Audio::Timestamp &time); + +		Audio::Timestamp getDuration() const; + +	protected: +		Audio::AudioStream *getAudioStream() const; + +		/** +		 * Get the SeekableAudioStream pointer to be used by this class +		 * for seek(), getDuration(), and getAudioStream() +		 */ +		virtual Audio::SeekableAudioStream *getSeekableAudioStream() const = 0; +	}; + +	/** +	 * A SeekableAudioTrack that constructs its SeekableAudioStream using +	 * SeekableAudioStream::openStreamFile() +	 */ +	class StreamFileAudioTrack : public SeekableAudioTrack { +	public: +		StreamFileAudioTrack(); +		~StreamFileAudioTrack(); + +		/** +		 * Load the track from a file with the given base name. 
+		 * +		 * @return true on success, false otherwise +		 */ +		bool loadFromFile(const Common::String &baseName); + +	protected: +		Audio::SeekableAudioStream *_stream; +		Audio::SeekableAudioStream *getSeekableAudioStream() const { return _stream; } +	};  	/**  	 * Reset the pause start time (which should be called when seeking) @@ -242,79 +682,107 @@ protected:  	void resetPauseStartTime();  	/** -	 * Update currently playing audio tracks with the new volume setting +	 * Decode enough data for the next frame and enough audio to last that long. +	 * +	 * This function is used by the decodeNextFrame() function. A subclass +	 * of a Track may decide to just have its decodeNextFrame() function read +	 * and decode the frame.  	 */ -	virtual void updateVolume() {} +	virtual void readNextPacket() {}  	/** -	 * Update currently playing audio tracks with the new balance setting +	 * Define a track to be used by this class. +	 * +	 * The pointer is then owned by this base class.  	 */ -	virtual void updateBalance() {} +	void addTrack(Track *track); -	int32 _curFrame; -	int32 _startTime; +	/** +	 * Whether or not getTime() will sync with a playing audio track. +	 * +	 * A subclass can override this to disable this feature. +	 */ +	virtual bool useAudioSync() const { return true; } -private: -	uint32 _pauseLevel; -	uint32 _pauseStartTime; -	byte _audioVolume; -	int8 _audioBalance; -}; +	/** +	 * Get the given track based on its index. +	 * +	 * @return A valid track pointer on success, 0 otherwise +	 */ +	Track *getTrack(uint track); -/** - * A VideoDecoder wrapper that implements getTimeToNextFrame() based on getFrameRate(). - */ -class FixedRateVideoDecoder : public virtual VideoDecoder { -public: -	uint32 getTimeToNextFrame() const; +	/** +	 * Get the given track based on its index +	 * +	 * @return A valid track pointer on success, 0 otherwise +	 */ +	const Track *getTrack(uint track) const; -protected:  	/** -	 * Return the frame rate in frames per second. 
-	 * This returns a Rational because videos can have rates that are not integers and -	 * there are some videos with frame rates < 1. +	 * Find out if all video tracks have finished +	 * +	 * This is useful if one wants to figure out if they need to buffer all +	 * remaining audio in a file.  	 */ -	virtual Common::Rational getFrameRate() const = 0; +	bool endOfVideoTracks() const; -private: -	uint32 getFrameBeginTime(uint32 frame) const; -}; +	/** +	 * Get the default high color format +	 */ +	Graphics::PixelFormat getDefaultHighColorFormat() const { return _defaultHighColorFormat; } -/** - * A VideoDecoder that can be rewound back to the beginning. - */ -class RewindableVideoDecoder : public virtual VideoDecoder { -public:  	/** -	 * Rewind to the beginning of the video. +	 * Find the video track with the lowest start time for the next frame  	 */ -	virtual void rewind() = 0; -}; +	VideoTrack *findNextVideoTrack(); -/** - * A VideoDecoder that can seek to a frame or point in time. - */ -class SeekableVideoDecoder : public virtual RewindableVideoDecoder { -public:  	/** -	 * Seek to the specified time. +	 * Find the video track with the lowest start time for the next frame  	 */ -	virtual void seekToTime(const Audio::Timestamp &time) = 0; +	const VideoTrack *findNextVideoTrack() const;  	/** -	 * Seek to the specified time (in ms). +	 * Typedef helpers for accessing tracks  	 */ -	void seekToTime(uint32 msecs) { seekToTime(Audio::Timestamp(msecs, 1000)); } +	typedef Common::Array<Track *> TrackList; +	typedef TrackList::iterator TrackListIterator;  	/** -	 * Implementation of RewindableVideoDecoder::rewind(). +	 * Get the begin iterator of the tracks  	 */ -	virtual void rewind() { seekToTime(0); } +	TrackListIterator getTrackListBegin() { return _tracks.begin(); }  	/** -	 * Get the total duration of the video (in ms). 
+	 * Get the end iterator of the tracks  	 */ -	virtual uint32 getDuration() const = 0; +	TrackListIterator getTrackListEnd() { return _tracks.end(); } + +private: +	// Tracks owned by this VideoDecoder +	TrackList _tracks; + +	// Current playback status +	bool _isPlaying, _needsRewind, _needsUpdate; +	Audio::Timestamp _lastTimeChange, _endTime; +	bool _endTimeSet; + +	// Palette settings from individual tracks +	mutable bool _dirtyPalette; +	const byte *_palette; + +	// Default PixelFormat settings +	Graphics::PixelFormat _defaultHighColorFormat; + +	// Internal helper functions +	void stopAudio(); +	void startAudio(); +	void startAudioLimit(const Audio::Timestamp &limit); + +	int32 _startTime; +	uint32 _pauseLevel; +	uint32 _pauseStartTime; +	byte _audioVolume; +	int8 _audioBalance;  };  } // End of namespace Video  | 
