Diffstat (limited to 'engines')
-rw-r--r--  engines/sci/console.cpp               |    3
-rw-r--r--  engines/sci/engine/kernel.h           |   15
-rw-r--r--  engines/sci/engine/kernel_tables.h    |   18
-rw-r--r--  engines/sci/engine/kvideo.cpp         |   74
-rw-r--r--  engines/sci/graphics/frameout.cpp     |   29
-rw-r--r--  engines/sci/graphics/screen_item32.h  |    1
-rw-r--r--  engines/sci/graphics/video32.cpp      |    1
-rw-r--r--  engines/sci/graphics/video32.h        |    6
-rw-r--r--  engines/sci/sci.cpp                   |    9
-rw-r--r--  engines/sci/sci.h                     |    6
-rw-r--r--  engines/sci/sound/audio32.cpp         |  164
-rw-r--r--  engines/sci/sound/audio32.h           |   24
-rw-r--r--  engines/sci/sound/decoders/sol.cpp    |   19
-rw-r--r--  engines/sci/video/robot_decoder.cpp   | 1807
-rw-r--r--  engines/sci/video/robot_decoder.h     | 1454
15 files changed, 3186 insertions, 444 deletions
diff --git a/engines/sci/console.cpp b/engines/sci/console.cpp
index 3aaf13efdb..b20ed3f8be 100644
--- a/engines/sci/console.cpp
+++ b/engines/sci/console.cpp
@@ -54,7 +54,6 @@
#include "sci/graphics/frameout.h"
#include "sci/graphics/paint32.h"
#include "video/coktel_decoder.h"
-#include "sci/video/robot_decoder.h"
#endif
#include "common/file.h"
@@ -266,8 +265,6 @@ void Console::postEnter() {
#ifdef ENABLE_SCI32
} else if (_videoFile.hasSuffix(".vmd")) {
videoDecoder = new Video::AdvancedVMDDecoder();
- } else if (_videoFile.hasSuffix(".rbt")) {
- videoDecoder = new RobotDecoder(_engine->getPlatform() == Common::kPlatformMacintosh);
} else if (_videoFile.hasSuffix(".duk")) {
duckMode = true;
videoDecoder = new Video::AVIDecoder();
diff --git a/engines/sci/engine/kernel.h b/engines/sci/engine/kernel.h
index 2b488cba8a..fffb7c90e3 100644
--- a/engines/sci/engine/kernel.h
+++ b/engines/sci/engine/kernel.h
@@ -447,6 +447,19 @@ reg_t kDoAudioFade(EngineState *s, int argc, reg_t *argv);
reg_t kDoAudioHasSignal(EngineState *s, int argc, reg_t *argv);
reg_t kDoAudioSetLoop(EngineState *s, int argc, reg_t *argv);
+reg_t kRobot(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotOpen(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotShowFrame(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotGetFrameSize(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotPlay(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotGetIsFinished(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotGetIsPlaying(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotClose(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotGetCue(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotPause(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotGetFrameNo(EngineState *s, int argc, reg_t *argv);
+reg_t kRobotSetPriority(EngineState *s, int argc, reg_t *argv);
+
reg_t kPlayVMD(EngineState *s, int argc, reg_t *argv);
reg_t kPlayVMDOpen(EngineState *s, int argc, reg_t *argv);
reg_t kPlayVMDInit(EngineState *s, int argc, reg_t *argv);
@@ -593,8 +606,6 @@ reg_t kTextWidth(EngineState *s, int argc, reg_t *argv);
reg_t kSave(EngineState *s, int argc, reg_t *argv);
reg_t kAutoSave(EngineState *s, int argc, reg_t *argv);
reg_t kList(EngineState *s, int argc, reg_t *argv);
-reg_t kRobot(EngineState *s, int argc, reg_t *argv);
-reg_t kPlayVMD(EngineState *s, int argc, reg_t *argv);
reg_t kCD(EngineState *s, int argc, reg_t *argv);
reg_t kAddPicAt(EngineState *s, int argc, reg_t *argv);
reg_t kAddBefore(EngineState *s, int argc, reg_t *argv);
diff --git a/engines/sci/engine/kernel_tables.h b/engines/sci/engine/kernel_tables.h
index 77fe6e2e10..ae3162e3dc 100644
--- a/engines/sci/engine/kernel_tables.h
+++ b/engines/sci/engine/kernel_tables.h
@@ -461,6 +461,22 @@ static const SciKernelMapSubEntry kPlayVMD_subops[] = {
};
// version, subId, function-mapping, signature, workarounds
+static const SciKernelMapSubEntry kRobot_subops[] = {
+ { SIG_SINCE_SCI21, 0, MAP_CALL(RobotOpen), "ioiii(i)", NULL },
+ { SIG_SINCE_SCI21, 1, MAP_CALL(RobotShowFrame), "i(ii)", NULL },
+ { SIG_SINCE_SCI21, 2, MAP_CALL(RobotGetFrameSize), "r", NULL },
+ { SIG_SINCE_SCI21, 4, MAP_CALL(RobotPlay), "", NULL },
+ { SIG_SINCE_SCI21, 5, MAP_CALL(RobotGetIsFinished), "", NULL },
+ { SIG_SINCE_SCI21, 6, MAP_CALL(RobotGetIsPlaying), "", NULL },
+ { SIG_SINCE_SCI21, 7, MAP_CALL(RobotClose), "", NULL },
+ { SIG_SINCE_SCI21, 8, MAP_CALL(RobotGetCue), "o", NULL },
+ { SIG_SINCE_SCI21, 10, MAP_CALL(RobotPause), "", NULL },
+ { SIG_SINCE_SCI21, 11, MAP_CALL(RobotGetFrameNo), "", NULL },
+ { SIG_SINCE_SCI21, 12, MAP_CALL(RobotSetPriority), "i", NULL },
+ SCI_SUBOPENTRY_TERMINATOR
+};
+
+// version, subId, function-mapping, signature, workarounds
static const SciKernelMapSubEntry kRemapColors_subops[] = {
{ SIG_SCI32, 0, MAP_CALL(RemapColorsOff), "(i)", NULL },
{ SIG_SCI32, 1, MAP_CALL(RemapColorsByRange), "iiii(i)", NULL },
@@ -855,7 +871,7 @@ static SciKernelMapEntry s_kernelMap[] = {
{ MAP_CALL(List), SIG_SINCE_SCI21, SIGFOR_ALL, "(.*)", kList_subops, NULL },
{ MAP_CALL(MulDiv), SIG_EVERYWHERE, "iii", NULL, NULL },
{ MAP_CALL(PlayVMD), SIG_EVERYWHERE, "(.*)", kPlayVMD_subops, NULL },
- { MAP_EMPTY(Robot), SIG_EVERYWHERE, "(.*)", NULL, NULL },
+ { MAP_CALL(Robot), SIG_EVERYWHERE, "(.*)", kRobot_subops, NULL },
{ MAP_CALL(Save), SIG_EVERYWHERE, "i(.*)", kSave_subops, NULL },
{ MAP_CALL(Text), SIG_SINCE_SCI21MID, SIGFOR_ALL, "i(.*)", kText_subops, NULL },
{ MAP_CALL(AddPicAt), SIG_EVERYWHERE, "oiii(i)(i)", NULL, NULL },
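
For reference, the signature strings in the kRobot_subops table use the kernel's shorthand ('i' integer, 'o' object, 'r' reference, parentheses marking optional arguments), and argv[0] carries the subop number. A minimal sketch of the routing follows; it is not part of the patch, the dispatcher name is hypothetical, and it assumes the usual subop convention of passing the remaining arguments through:

    // Hypothetical illustration only; the real routing goes through the
    // generic kernel subop dispatcher driven by the kRobot_subops table above.
    reg_t dispatchRobotExample(EngineState *s, int argc, reg_t *argv) {
        switch (argv[0].toUint16()) {
        case 0:  return kRobotOpen(s, argc - 1, argv + 1);      // "ioiii(i)"
        case 1:  return kRobotShowFrame(s, argc - 1, argv + 1); // "i(ii)"
        case 7:  return kRobotClose(s, argc - 1, argv + 1);     // ""
        default: return s->r_acc;
        }
    }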
diff --git a/engines/sci/engine/kvideo.cpp b/engines/sci/engine/kvideo.cpp
index 86d8a4b817..9e5da20bbc 100644
--- a/engines/sci/engine/kvideo.cpp
+++ b/engines/sci/engine/kvideo.cpp
@@ -226,6 +226,80 @@ reg_t kShowMovie32(EngineState *s, int argc, reg_t *argv) {
return s->r_acc;
}
+reg_t kRobot(EngineState *s, int argc, reg_t *argv) {
+ if (!s)
+ return make_reg(0, getSciVersion());
+ error("not supposed to call this");
+}
+
+reg_t kRobotOpen(EngineState *s, int argc, reg_t *argv) {
+ const GuiResourceId robotId = argv[0].toUint16();
+ const reg_t plane = argv[1];
+ const int16 priority = argv[2].toSint16();
+ const int16 x = argv[3].toSint16();
+ const int16 y = argv[4].toSint16();
+ const int16 scale = argc > 5 ? argv[5].toSint16() : 128;
+ g_sci->_video32->getRobotPlayer().open(robotId, plane, priority, x, y, scale);
+ return make_reg(0, 0);
+}
+reg_t kRobotShowFrame(EngineState *s, int argc, reg_t *argv) {
+ const uint16 frameNo = argv[0].toUint16();
+ const uint16 newX = argc > 1 ? argv[1].toUint16() : (uint16)RobotDecoder::kUnspecified;
+ const uint16 newY = argc > 1 ? argv[2].toUint16() : (uint16)RobotDecoder::kUnspecified;
+ g_sci->_video32->getRobotPlayer().showFrame(frameNo, newX, newY, RobotDecoder::kUnspecified);
+ return s->r_acc;
+}
+
+reg_t kRobotGetFrameSize(EngineState *s, int argc, reg_t *argv) {
+ Common::Rect frameRect;
+ const uint16 numFramesTotal = g_sci->_video32->getRobotPlayer().getFrameSize(frameRect);
+
+ reg_t *outRect = s->_segMan->derefRegPtr(argv[0], 4);
+ outRect[0] = make_reg(0, frameRect.left);
+ outRect[1] = make_reg(0, frameRect.top);
+ outRect[2] = make_reg(0, frameRect.right - 1);
+ outRect[3] = make_reg(0, frameRect.bottom - 1);
+
+ return make_reg(0, numFramesTotal);
+}
+
+reg_t kRobotPlay(EngineState *s, int argc, reg_t *argv) {
+ g_sci->_video32->getRobotPlayer().resume();
+ return s->r_acc;
+}
+
+reg_t kRobotGetIsFinished(EngineState *s, int argc, reg_t *argv) {
+ return make_reg(0, g_sci->_video32->getRobotPlayer().getStatus() == RobotDecoder::kRobotStatusEnd);
+}
+
+reg_t kRobotGetIsPlaying(EngineState *s, int argc, reg_t *argv) {
+ return make_reg(0, g_sci->_video32->getRobotPlayer().getStatus() == RobotDecoder::kRobotStatusPlaying);
+}
+
+reg_t kRobotClose(EngineState *s, int argc, reg_t *argv) {
+ g_sci->_video32->getRobotPlayer().close();
+ return s->r_acc;
+}
+
+reg_t kRobotGetCue(EngineState *s, int argc, reg_t *argv) {
+ writeSelectorValue(s->_segMan, argv[0], SELECTOR(signal), g_sci->_video32->getRobotPlayer().getCue());
+ return s->r_acc;
+}
+
+reg_t kRobotPause(EngineState *s, int argc, reg_t *argv) {
+ g_sci->_video32->getRobotPlayer().pause();
+ return s->r_acc;
+}
+
+reg_t kRobotGetFrameNo(EngineState *s, int argc, reg_t *argv) {
+ return make_reg(0, g_sci->_video32->getRobotPlayer().getFrameNo());
+}
+
+reg_t kRobotSetPriority(EngineState *s, int argc, reg_t *argv) {
+ g_sci->_video32->getRobotPlayer().setPriority(argv[0].toSint16());
+ return s->r_acc;
+}
+
reg_t kShowMovieWin(EngineState *s, int argc, reg_t *argv) {
if (!s)
return make_reg(0, getSciVersion());
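
A note on kRobotGetFrameSize above: SCI rects are inclusive on their right and bottom edges, hence the -1 adjustment from Common::Rect's exclusive coordinates. A worked example with made-up numbers:

    // Hypothetical 64x64 maximum frame area whose top-left corner is (10, 20):
    Common::Rect frameRect(10, 20, 10 + 64, 20 + 64);  // exclusive: right = 74, bottom = 84
    // Values written back into the script's rect:
    //   { left, top, right - 1, bottom - 1 } == { 10, 20, 73, 83 }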
diff --git a/engines/sci/graphics/frameout.cpp b/engines/sci/graphics/frameout.cpp
index c0d8f15c74..333ed764b2 100644
--- a/engines/sci/graphics/frameout.cpp
+++ b/engines/sci/graphics/frameout.cpp
@@ -44,16 +44,17 @@
#include "sci/graphics/compare.h"
#include "sci/graphics/cursor32.h"
#include "sci/graphics/font.h"
-#include "sci/graphics/screen.h"
+#include "sci/graphics/frameout.h"
#include "sci/graphics/paint32.h"
#include "sci/graphics/palette32.h"
#include "sci/graphics/plane32.h"
#include "sci/graphics/remap32.h"
+#include "sci/graphics/screen.h"
#include "sci/graphics/screen_item32.h"
#include "sci/graphics/text32.h"
#include "sci/graphics/frameout.h"
-#include "sci/video/robot_decoder.h"
#include "sci/graphics/transitions32.h"
+#include "sci/graphics/video32.h"
namespace Sci {
@@ -501,10 +502,12 @@ void GfxFrameout::kernelAddPicAt(const reg_t planeObject, const GuiResourceId pi
#pragma mark Rendering
void GfxFrameout::frameOut(const bool shouldShowBits, const Common::Rect &eraseRect) {
-// TODO: Robot
-// if (_robot != nullptr) {
-// _robot.doRobot();
-// }
+ RobotDecoder &robotPlayer = g_sci->_video32->getRobotPlayer();
+ const bool robotIsActive = robotPlayer.getStatus() != RobotDecoder::kRobotStatusUninitialized;
+
+ if (robotIsActive) {
+ robotPlayer.doRobot();
+ }
// NOTE: The original engine allocated these as static arrays of 100
// pointers to ScreenItemList / RectList
@@ -542,10 +545,9 @@ void GfxFrameout::frameOut(const bool shouldShowBits, const Common::Rect &eraseR
drawScreenItemList(screenItemLists[i]);
}
-// TODO: Robot
-// if (_robot != nullptr) {
-// _robot->frameAlmostVisible();
-// }
+ if (robotIsActive) {
+ robotPlayer.frameAlmostVisible();
+ }
_palette->updateHardware(!shouldShowBits);
@@ -555,10 +557,9 @@ void GfxFrameout::frameOut(const bool shouldShowBits, const Common::Rect &eraseR
_frameNowVisible = true;
-// TODO: Robot
-// if (_robot != nullptr) {
-// robot->frameNowVisible();
-// }
+ if (robotIsActive) {
+ robotPlayer.frameNowVisible();
+ }
}
void GfxFrameout::palMorphFrameOut(const int8 *styleRanges, PlaneShowStyle *showStyle) {
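
The three robot hooks wired into frameOut() above run in a fixed order on every rendered frame; condensed from the hunks above, with the surrounding calls simplified and the descriptions approximate:

    // robotPlayer.doRobot();             advance playback, update robot screen items
    // drawScreenItemList(...);           draw all screen items, robot cels included
    // robotPlayer.frameAlmostVisible();  robot bookkeeping before the palette update
    // _palette->updateHardware(...);     the composed frame becomes visible
    // robotPlayer.frameNowVisible();     robot timing update once the frame is on screen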
diff --git a/engines/sci/graphics/screen_item32.h b/engines/sci/graphics/screen_item32.h
index 3d9d5ef3d7..4221c0ea52 100644
--- a/engines/sci/graphics/screen_item32.h
+++ b/engines/sci/graphics/screen_item32.h
@@ -31,6 +31,7 @@ namespace Sci {
enum ScaleSignals32 {
kScaleSignalNone = 0,
+ // TODO: rename to 'manual'
kScaleSignalDoScaling32 = 1, // enables scaling when drawing that cel (involves scaleX and scaleY)
kScaleSignalUseVanishingPoint = 2,
// TODO: Is this actually a thing? I have not seen it and
diff --git a/engines/sci/graphics/video32.cpp b/engines/sci/graphics/video32.cpp
index b9fc7061a7..b4b14d6d23 100644
--- a/engines/sci/graphics/video32.cpp
+++ b/engines/sci/graphics/video32.cpp
@@ -20,6 +20,7 @@
*
*/
+#include "audio/mixer.h" // for Audio::Mixer::kSFXSoundType
#include "common/config-manager.h" // for ConfMan
#include "common/textconsole.h" // for warning, error
#include "common/util.h" // for ARRAYSIZE
diff --git a/engines/sci/graphics/video32.h b/engines/sci/graphics/video32.h
index 0496f61d5d..3ea244292e 100644
--- a/engines/sci/graphics/video32.h
+++ b/engines/sci/graphics/video32.h
@@ -27,6 +27,7 @@
#include "common/scummsys.h" // for int16, uint8, uint16, int32
#include "common/str.h" // for String
#include "sci/engine/vm_types.h" // for reg_t
+#include "sci/video/robot_decoder.h" // for RobotDecoder
namespace Video {
class AdvancedVMDDecoder;
@@ -505,16 +506,19 @@ public:
Video32(SegManager *segMan, EventManager *eventMan) :
_SEQPlayer(segMan),
_AVIPlayer(segMan, eventMan),
- _VMDPlayer(segMan, eventMan) {}
+ _VMDPlayer(segMan, eventMan),
+ _robotPlayer(segMan) {}
SEQPlayer &getSEQPlayer() { return _SEQPlayer; }
AVIPlayer &getAVIPlayer() { return _AVIPlayer; }
VMDPlayer &getVMDPlayer() { return _VMDPlayer; }
+ RobotDecoder &getRobotPlayer() { return _robotPlayer; }
private:
SEQPlayer _SEQPlayer;
AVIPlayer _AVIPlayer;
VMDPlayer _VMDPlayer;
+ RobotDecoder _robotPlayer;
};
} // End of namespace Sci
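
With RobotDecoder now owned by Video32, callers reach the player through the new accessor, as the kvideo.cpp changes above already do:

    RobotDecoder &robot = g_sci->_video32->getRobotPlayer();
    robot.open(robotId, plane, priority, x, y, scale);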
diff --git a/engines/sci/sci.cpp b/engines/sci/sci.cpp
index 2c8683ab8c..bca1aaeb12 100644
--- a/engines/sci/sci.cpp
+++ b/engines/sci/sci.cpp
@@ -73,17 +73,12 @@
#include "sci/graphics/transitions32.h"
#include "sci/graphics/video32.h"
#include "sci/sound/audio32.h"
-// TODO: Move this to video32
-#include "sci/video/robot_decoder.h"
#endif
namespace Sci {
SciEngine *g_sci = 0;
-
-class GfxDriver;
-
SciEngine::SciEngine(OSystem *syst, const ADGameDescription *desc, SciGameId gameId)
: Engine(syst), _gameDescription(desc), _gameId(gameId), _rng("sci") {
@@ -132,6 +127,7 @@ SciEngine::SciEngine(OSystem *syst, const ADGameDescription *desc, SciGameId gam
DebugMan.addDebugChannel(kDebugLevelScripts, "Scripts", "Notifies when scripts are unloaded");
DebugMan.addDebugChannel(kDebugLevelScriptPatcher, "ScriptPatcher", "Notifies when scripts are patched");
DebugMan.addDebugChannel(kDebugLevelWorkarounds, "Workarounds", "Notifies when workarounds are triggered");
+ DebugMan.addDebugChannel(kDebugLevelVideo, "Video", "Video (SEQ, VMD, RBT) debugging");
DebugMan.addDebugChannel(kDebugLevelGC, "GC", "Garbage Collector debugging");
DebugMan.addDebugChannel(kDebugLevelResMan, "ResMan", "Resource manager debugging");
DebugMan.addDebugChannel(kDebugLevelOnStartup, "OnStartup", "Enter debugger at start of game");
@@ -173,7 +169,6 @@ SciEngine::~SciEngine() {
delete _gfxControls32;
delete _gfxPaint32;
delete _gfxText32;
- delete _robotDecoder;
// GfxFrameout and GfxPalette32 must be deleted after Video32 since
// destruction of screen items in the Video32 destructor relies on these
// components
@@ -708,7 +703,6 @@ void SciEngine::initGraphics() {
#ifdef ENABLE_SCI32
_gfxControls32 = 0;
_gfxText32 = 0;
- _robotDecoder = 0;
_gfxFrameout = 0;
_gfxPaint32 = 0;
_gfxPalette32 = 0;
@@ -742,7 +736,6 @@ void SciEngine::initGraphics() {
_gfxCursor32 = new GfxCursor32();
_gfxCompare = new GfxCompare(_gamestate->_segMan, _gfxCache, nullptr, _gfxCoordAdjuster);
_gfxPaint32 = new GfxPaint32(_gamestate->_segMan);
- _robotDecoder = new RobotDecoder(getPlatform() == Common::kPlatformMacintosh);
_gfxTransitions32 = new GfxTransitions32(_gamestate->_segMan);
_gfxFrameout = new GfxFrameout(_gamestate->_segMan, _resMan, _gfxCoordAdjuster, _gfxPalette32, _gfxTransitions32, _gfxCursor32);
_gfxCursor32->init(_gfxFrameout->getCurrentBuffer());
diff --git a/engines/sci/sci.h b/engines/sci/sci.h
index 881df3d87a..001430f783 100644
--- a/engines/sci/sci.h
+++ b/engines/sci/sci.h
@@ -92,8 +92,6 @@ class GfxText32;
class GfxTransitions;
#ifdef ENABLE_SCI32
-// TODO: Move RobotDecoder to Video32
-class RobotDecoder;
class GfxFrameout;
class Audio32;
class Video32;
@@ -126,7 +124,8 @@ enum kDebugLevels {
kDebugLevelOnStartup = 1 << 20,
kDebugLevelDebugMode = 1 << 21,
kDebugLevelScriptPatcher = 1 << 22,
- kDebugLevelWorkarounds = 1 << 23
+ kDebugLevelWorkarounds = 1 << 23,
+ kDebugLevelVideo = 1 << 24
};
enum SciGameId {
@@ -389,7 +388,6 @@ public:
#ifdef ENABLE_SCI32
Audio32 *_audio32;
Video32 *_video32;
- RobotDecoder *_robotDecoder;
GfxFrameout *_gfxFrameout; // kFrameout and the like for 32-bit gfx
GfxTransitions32 *_gfxTransitions32;
GfxCursor32 *_gfxCursor32;
diff --git a/engines/sci/sound/audio32.cpp b/engines/sci/sound/audio32.cpp
index 288b7c00f5..4af474b918 100644
--- a/engines/sci/sound/audio32.cpp
+++ b/engines/sci/sound/audio32.cpp
@@ -164,7 +164,7 @@ Audio32::~Audio32() {
#pragma mark -
#pragma mark AudioStream implementation
-int Audio32::writeAudioInternal(Audio::RewindableAudioStream *const sourceStream, Audio::RateConverter *const converter, Audio::st_sample_t *targetBuffer, const int numSamples, const Audio::st_volume_t leftVolume, const Audio::st_volume_t rightVolume, const bool loop) {
+int Audio32::writeAudioInternal(Audio::AudioStream *const sourceStream, Audio::RateConverter *const converter, Audio::st_sample_t *targetBuffer, const int numSamples, const Audio::st_volume_t leftVolume, const Audio::st_volume_t rightVolume, const bool loop) {
int samplesToRead = numSamples;
// The parent rate converter will request N * 2
@@ -182,7 +182,8 @@ int Audio32::writeAudioInternal(Audio::RewindableAudioStream *const sourceStream
do {
if (loop && sourceStream->endOfStream()) {
- sourceStream->rewind();
+ Audio::RewindableAudioStream *rewindableStream = dynamic_cast<Audio::RewindableAudioStream *>(sourceStream);
+ rewindableStream->rewind();
}
const int loopSamplesWritten = converter->flow(*sourceStream, targetBuffer, samplesToRead, leftVolume, rightVolume);
@@ -305,7 +306,14 @@ int Audio32::readBuffer(Audio::st_sample_t *buffer, const int numSamples) {
}
if (channel.robot) {
- // TODO: Robot audio into output buffer
+ if (channel.stream->endOfStream()) {
+ stop(channelIndex--);
+ } else {
+ const int channelSamplesWritten = writeAudioInternal(channel.stream, channel.converter, buffer, numSamples, kMaxVolume, kMaxVolume, channel.loop);
+ if (channelSamplesWritten > maxSamplesWritten) {
+ maxSamplesWritten = channelSamplesWritten;
+ }
+ }
continue;
}
@@ -443,9 +451,9 @@ void Audio32::freeUnusedChannels() {
Common::StackLock lock(_mutex);
for (int channelIndex = 0; channelIndex < _numActiveChannels; ++channelIndex) {
const AudioChannel &channel = getChannel(channelIndex);
- if (channel.stream->endOfStream()) {
+ if (!channel.robot && channel.stream->endOfStream()) {
if (channel.loop) {
- channel.stream->rewind();
+ dynamic_cast<Audio::SeekableAudioStream *>(channel.stream)->rewind();
} else {
stop(channelIndex--);
}
@@ -466,21 +474,29 @@ void Audio32::freeChannel(const int16 channelIndex) {
Common::StackLock lock(_mutex);
AudioChannel &channel = getChannel(channelIndex);
- // We cannot unlock resources from the audio thread
- // because ResourceManager is not thread-safe; instead,
- // we just record that the resource needs unlocking and
- // unlock it whenever we are on the main thread again
- if (_inAudioThread) {
- _resourcesToUnlock.push_back(channel.resource);
+ // Robots have no corresponding resource to free
+ if (channel.robot) {
+ delete channel.stream;
+ channel.stream = nullptr;
+ channel.robot = false;
} else {
- _resMan->unlockResource(channel.resource);
+ // We cannot unlock resources from the audio thread
+ // because ResourceManager is not thread-safe; instead,
+ // we just record that the resource needs unlocking and
+ // unlock it whenever we are on the main thread again
+ if (_inAudioThread) {
+ _resourcesToUnlock.push_back(channel.resource);
+ } else {
+ _resMan->unlockResource(channel.resource);
+ }
+
+ channel.resource = nullptr;
+ delete channel.stream;
+ channel.stream = nullptr;
+ delete channel.resourceStream;
+ channel.resourceStream = nullptr;
}
- channel.resource = nullptr;
- delete channel.stream;
- channel.stream = nullptr;
- delete channel.resourceStream;
- channel.resourceStream = nullptr;
delete channel.converter;
channel.converter = nullptr;
@@ -527,6 +543,111 @@ void Audio32::setNumOutputChannels(int16 numChannels) {
}
#pragma mark -
+#pragma mark Robot
+
+int16 Audio32::findRobotChannel() const {
+ Common::StackLock lock(_mutex);
+ for (int16 i = 0; i < _numActiveChannels; ++i) {
+ if (_channels[i].robot) {
+ return i;
+ }
+ }
+
+ return kNoExistingChannel;
+}
+
+bool Audio32::playRobotAudio(const RobotAudioStream::RobotAudioPacket &packet) {
+ // Stop immediately
+ if (packet.dataSize == 0) {
+ warning("Stopping robot stream by zero-length packet");
+ return stopRobotAudio();
+ }
+
+ // Flush and then stop
+ if (packet.dataSize == -1) {
+ warning("Stopping robot stream by negative-length packet");
+ return finishRobotAudio();
+ }
+
+ Common::StackLock lock(_mutex);
+ int16 channelIndex = findRobotChannel();
+
+ bool isNewChannel = false;
+ if (channelIndex == kNoExistingChannel) {
+ if (_numActiveChannels == _channels.size()) {
+ return false;
+ }
+
+ channelIndex = _numActiveChannels++;
+ isNewChannel = true;
+ }
+
+ AudioChannel &channel = getChannel(channelIndex);
+
+ if (isNewChannel) {
+ channel.id = ResourceId();
+ channel.resource = nullptr;
+ channel.loop = false;
+ channel.robot = true;
+ channel.fadeStartTick = 0;
+ channel.pausedAtTick = 0;
+ channel.soundNode = NULL_REG;
+ channel.volume = kMaxVolume;
+ // TODO: SCI3 introduces stereo audio
+ channel.pan = -1;
+ channel.converter = Audio::makeRateConverter(RobotAudioStream::kRobotSampleRate, getRate(), false);
+ // The RobotAudioStream buffer size is
+ // ((bytesPerSample * channels * sampleRate * 2000ms) / 1000ms) & ~3
+ // where bytesPerSample = 2, channels = 1, and sampleRate = 22050
+ channel.stream = new RobotAudioStream(88200);
+ _robotAudioPaused = false;
+
+ if (_numActiveChannels == 1) {
+ _startedAtTick = g_sci->getTickCount();
+ }
+ }
+
+ return static_cast<RobotAudioStream *>(channel.stream)->addPacket(packet);
+}
+
+bool Audio32::queryRobotAudio(RobotAudioStream::StreamState &status) const {
+ Common::StackLock lock(_mutex);
+
+ const int16 channelIndex = findRobotChannel();
+ if (channelIndex == kNoExistingChannel) {
+ status.bytesPlaying = 0;
+ return false;
+ }
+
+ status = static_cast<RobotAudioStream *>(getChannel(channelIndex).stream)->getStatus();
+ return true;
+}
+
+bool Audio32::finishRobotAudio() {
+ Common::StackLock lock(_mutex);
+
+ const int16 channelIndex = findRobotChannel();
+ if (channelIndex == kNoExistingChannel) {
+ return false;
+ }
+
+ static_cast<RobotAudioStream *>(getChannel(channelIndex).stream)->finish();
+ return true;
+}
+
+bool Audio32::stopRobotAudio() {
+ Common::StackLock lock(_mutex);
+
+ const int16 channelIndex = findRobotChannel();
+ if (channelIndex == kNoExistingChannel) {
+ return false;
+ }
+
+ stop(channelIndex);
+ return true;
+}
+
+#pragma mark -
#pragma mark Playback
uint16 Audio32::play(int16 channelIndex, const ResourceId resourceId, const bool autoPlay, const bool loop, const int16 volume, const reg_t soundNode, const bool monitor) {
@@ -536,14 +657,15 @@ uint16 Audio32::play(int16 channelIndex, const ResourceId resourceId, const bool
if (channelIndex != kNoExistingChannel) {
AudioChannel &channel = getChannel(channelIndex);
+ Audio::SeekableAudioStream *stream = dynamic_cast<Audio::SeekableAudioStream *>(channel.stream);
if (channel.pausedAtTick) {
resume(channelIndex);
- return MIN(65534, 1 + channel.stream->getLength().msecs() * 60 / 1000);
+ return MIN(65534, 1 + stream->getLength().msecs() * 60 / 1000);
}
warning("Tried to resume channel %s that was not paused", channel.id.toString().c_str());
- return MIN(65534, 1 + channel.stream->getLength().msecs() * 60 / 1000);
+ return MIN(65534, 1 + stream->getLength().msecs() * 60 / 1000);
}
if (_numActiveChannels == _channels.size()) {
@@ -642,7 +764,7 @@ uint16 Audio32::play(int16 channelIndex, const ResourceId resourceId, const bool
// use audio streams, and allocate and fill the monitoring buffer
// when reading audio data from the stream.
- channel.duration = /* round up */ 1 + (channel.stream->getLength().msecs() * 60 / 1000);
+ channel.duration = /* round up */ 1 + (dynamic_cast<Audio::SeekableAudioStream *>(channel.stream)->getLength().msecs() * 60 / 1000);
const uint32 now = g_sci->getTickCount();
channel.pausedAtTick = autoPlay ? 0 : now;
@@ -687,8 +809,6 @@ bool Audio32::resume(const int16 channelIndex) {
if (channel.robot) {
channel.startedAtTick += now - channel.pausedAtTick;
channel.pausedAtTick = 0;
- // TODO: Robot
- // StartRobot();
return true;
}
}
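
The 88200 passed to the RobotAudioStream constructor in playRobotAudio() follows directly from the formula quoted in its comment, i.e. two seconds of 16-bit mono audio at 22050 Hz, masked down to a multiple of four:

    // ((bytesPerSample * channels * sampleRate * 2000ms) / 1000ms) & ~3
    //   = ((2 * 1 * 22050 * 2000) / 1000) & ~3
    //   = 88200 & ~3
    //   = 88200   (already a multiple of 4)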
diff --git a/engines/sci/sound/audio32.h b/engines/sci/sound/audio32.h
index ac3176cc5a..a9905ab6bf 100644
--- a/engines/sci/sound/audio32.h
+++ b/engines/sci/sound/audio32.h
@@ -30,8 +30,10 @@
#include "common/scummsys.h" // for int16, uint8, uint32, uint16
#include "engines/sci/resource.h" // for ResourceId
#include "sci/engine/vm_types.h" // for reg_t, NULL_REG
+#include "sci/video/robot_decoder.h" // for RobotAudioStream
namespace Sci {
+#pragma mark AudioChannel
/**
* An audio channel used by the software SCI mixer.
@@ -53,14 +55,11 @@ struct AudioChannel {
Common::SeekableReadStream *resourceStream;
/**
- * The audio stream loaded into this channel.
- * `SeekableAudioStream` is used here instead of
- * `RewindableAudioStream` because
- * `RewindableAudioStream` does not include the
- * `getLength` function, which is needed to tell the
- * game engine the duration of audio streams.
+ * The audio stream loaded into this channel. Can cast
+ * to `SeekableAudioStream` for normal channels and
+ * `RobotAudioStream` for robot channels.
*/
- Audio::SeekableAudioStream *stream;
+ Audio::AudioStream *stream;
/**
* The converter used to transform and merge the input
@@ -188,7 +187,7 @@ private:
* Mixes audio from the given source stream into the
* target buffer using the given rate converter.
*/
- int writeAudioInternal(Audio::RewindableAudioStream *const sourceStream, Audio::RateConverter *const converter, Audio::st_sample_t *targetBuffer, const int numSamples, const Audio::st_volume_t leftVolume, const Audio::st_volume_t rightVolume, const bool loop);
+ int writeAudioInternal(Audio::AudioStream *const sourceStream, Audio::RateConverter *const converter, Audio::st_sample_t *targetBuffer, const int numSamples, const Audio::st_volume_t leftVolume, const Audio::st_volume_t rightVolume, const bool loop);
#pragma mark -
#pragma mark Channel management
@@ -395,9 +394,18 @@ private:
#pragma mark -
#pragma mark Robot
public:
+ bool playRobotAudio(const RobotAudioStream::RobotAudioPacket &packet);
+ bool queryRobotAudio(RobotAudioStream::StreamState &outStatus) const;
+ bool finishRobotAudio();
+ bool stopRobotAudio();
private:
/**
+ * Finds a channel that is configured for robot playback.
+ */
+ int16 findRobotChannel() const;
+
+ /**
* When true, channels marked as robot audio will not be
* played.
*/
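
Because AudioChannel::stream is now a plain Audio::AudioStream, call sites cast back to the concrete type. A short sketch of the convention, mirroring what the audio32.cpp hunks above do (not part of the patch):

    if (channel.robot) {
        // Robot channels are fed packet-by-packet through the robot stream.
        static_cast<RobotAudioStream *>(channel.stream)->addPacket(packet);
    } else {
        // Normal channels still need getLength(), which only SeekableAudioStream provides.
        Audio::SeekableAudioStream *seekable =
            dynamic_cast<Audio::SeekableAudioStream *>(channel.stream);
        const uint32 duration = 1 + seekable->getLength().msecs() * 60 / 1000; // ticks, as returned by play()
    }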
diff --git a/engines/sci/sound/decoders/sol.cpp b/engines/sci/sound/decoders/sol.cpp
index e445403120..ee1ba35406 100644
--- a/engines/sci/sound/decoders/sol.cpp
+++ b/engines/sci/sound/decoders/sol.cpp
@@ -21,6 +21,7 @@
*/
#include "audio/audiostream.h"
+#include "audio/rate.h"
#include "audio/decoders/raw.h"
#include "common/substream.h"
#include "common/util.h"
@@ -52,7 +53,7 @@ static const byte tableDPCM8[8] = { 0, 1, 2, 3, 6, 10, 15, 21 };
* Decompresses 16-bit DPCM compressed audio. Each byte read
* outputs one sample into the decompression buffer.
*/
-static void deDPCM16(int16 *out, Common::ReadStream &audioStream, uint32 numBytes, int16 &sample) {
+static void deDPCM16(int16 *out, Common::ReadStream &audioStream, const uint32 numBytes, int16 &sample) {
for (uint32 i = 0; i < numBytes; ++i) {
const uint8 delta = audioStream.readByte();
if (delta & 0x80) {
@@ -65,6 +66,19 @@ static void deDPCM16(int16 *out, Common::ReadStream &audioStream, uint32 numByte
}
}
+void deDPCM16(int16 *out, const byte *in, const uint32 numBytes, int16 &sample) {
+ for (uint32 i = 0; i < numBytes; ++i) {
+ const uint8 delta = *in++;
+ if (delta & 0x80) {
+ sample -= tableDPCM16[delta & 0x7f];
+ } else {
+ sample += tableDPCM16[delta];
+ }
+ sample = CLIP<int16>(sample, -32768, 32767);
+ *out++ = TO_LE_16(sample);
+ }
+}
+
/**
* Decompresses one half of an 8-bit DPCM compressed audio
* byte.
@@ -178,7 +192,7 @@ int SOLStream<STEREO, S16BIT>::getRate() const {
template <bool STEREO, bool S16BIT>
bool SOLStream<STEREO, S16BIT>::endOfData() const {
- return _stream->eos() || _stream->pos() >= _dataOffset + _rawDataSize;
+ return _stream->eos() || _stream->pos() >= _rawDataSize;
}
template <bool STEREO, bool S16BIT>
@@ -269,5 +283,4 @@ Audio::SeekableAudioStream *makeSOLStream(Common::SeekableReadStream *headerStre
return Audio::makeRawStream(dataStream, sampleRate, rawFlags, disposeAfterUse);
}
-
}
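
A minimal usage sketch of the new buffer-based deDPCM16 overload added above; the input bytes are made up for illustration:

    // Four hypothetical DPCM deltas; `carry` keeps the running sample across
    // calls, which is how the robot audio code chains consecutive packets.
    const byte packed[4] = { 0x01, 0x02, 0x81, 0x00 };
    int16 samples[4];
    int16 carry = 0;
    deDPCM16(samples, packed, sizeof(packed), carry);
    // Each delta adds (or subtracts, when bit 7 is set) tableDPCM16[delta & 0x7f]
    // to the running sample, clips it to the int16 range, and stores it little-endian.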
diff --git a/engines/sci/video/robot_decoder.cpp b/engines/sci/video/robot_decoder.cpp
index a2795d21f9..f3354f9e44 100644
--- a/engines/sci/video/robot_decoder.cpp
+++ b/engines/sci/video/robot_decoder.cpp
@@ -20,391 +20,1598 @@
*
*/
-#include "common/archive.h"
-#include "common/stream.h"
-#include "common/substream.h"
-#include "common/system.h"
-#include "common/textconsole.h"
-#include "common/util.h"
-
-#include "graphics/surface.h"
-#include "audio/audiostream.h"
-#include "audio/decoders/raw.h"
-
-#include "sci/resource.h"
-#include "sci/util.h"
-#include "sci/sound/audio.h"
#include "sci/video/robot_decoder.h"
+#include "common/archive.h" // for SearchMan
+#include "common/debug.h" // for debugC
+#include "common/endian.h" // for MKTAG
+#include "common/memstream.h" // for MemoryReadStream
+#include "common/platform.h" // for Platform::kPlatformMacintosh
+#include "common/rational.h" // for operator*, Rational
+#include "common/str.h" // for String
+#include "common/stream.h" // for SeekableReadStream
+#include "common/substream.h" // for SeekableSubReadStreamEndian
+#include "common/textconsole.h" // for error, warning
+#include "common/types.h" // for Flag::NO, Flag::YES
+#include "sci/engine/seg_manager.h" // for SegManager
+#include "sci/graphics/celobj32.h" // for Ratio, ::kLowResX, ::kLowResY
+#include "sci/graphics/text32.h" // for BitmapResource
+#include "sci/sound/audio32.h" // for Audio32
+#include "sci/sci.h" // for kDebugLevels::kDebugLevelVideo
+#include "sci/util.h" // for READ_SCI11ENDIAN_UINT16, READ_SC...
namespace Sci {
-// TODO:
-// - Positioning
-// - Proper handling of frame scaling - scaled frames look squashed
-// (probably because both dimensions should be scaled)
-// - Transparency support
-// - Timing - the arbitrary 100ms delay between each frame is not quite right
-// - Proper handling of sound chunks in some cases, so that the frame size
-// table can be ignored (it's only used to determine the correct sound chunk
-// size at the moment, cause it can be wrong in some cases)
-// - Fix audio "hiccups" - probably data that shouldn't be in the audio frames
-
-
-// Some non technical information on robot files, from an interview with
-// Greg Tomko-Pavia of Sierra On-Line
-// Taken from http://anthonylarme.tripod.com/phantas/phintgtp.html
-//
-// (...) What we needed was a way of playing video, but have it blend into
-// normal room art instead of occupying its own rectangular area. Room art
-// consists of a background pic overlaid with various animating cels
-// (traditional lingo: sprites). The cels each have a priority that determines
-// who is on top and who is behind in the drawing order. Cels are read from
-// *.v56 files (another proprietary format). A Robot is video frames with
-// transparent background including priority and x,y information. Thus, it is
-// like a cel, except it comes from an RBT - not a v56. Because it blends into
-// our graphics engine, it looks just like a part of the room. A RBT can move
-// around the screen and go behind other objects. (...)
-
-enum RobotPalTypes {
- kRobotPalVariable = 0,
- kRobotPalConstant = 1
-};
-
-RobotDecoder::RobotDecoder(bool isBigEndian) {
- _fileStream = 0;
- _pos = Common::Point(0, 0);
- _isBigEndian = isBigEndian;
- _frameTotalSize = 0;
+#pragma mark RobotAudioStream
+
+extern void deDPCM16(int16 *out, const byte *in, const uint32 numBytes, int16 &sample);
+
+RobotAudioStream::RobotAudioStream(const int32 bufferSize) :
+ _loopBuffer((byte *)malloc(bufferSize)),
+ _loopBufferSize(bufferSize),
+ _decompressionBuffer(nullptr),
+ _decompressionBufferSize(0),
+ _decompressionBufferPosition(-1),
+ _waiting(true),
+ _finished(false),
+ _firstPacketPosition(-1) {}
+
+RobotAudioStream::~RobotAudioStream() {
+ free(_loopBuffer);
+ free(_decompressionBuffer);
}
-RobotDecoder::~RobotDecoder() {
- close();
+static void interpolateChannel(int16 *buffer, int32 numSamples, const int8 bufferIndex) {
+ if (numSamples <= 0) {
+ return;
+ }
+
+ if (bufferIndex) {
+ int16 lastSample = *buffer;
+ int sample = lastSample;
+ int16 *target = buffer + 1;
+ const int16 *source = buffer + 2;
+ --numSamples;
+
+ while (numSamples--) {
+ sample = *source + lastSample;
+ lastSample = *source;
+ sample /= 2;
+ *target = sample;
+ source += 2;
+ target += 2;
+ }
+
+ *target = sample;
+ } else {
+ int16 *target = buffer;
+ const int16 *source = buffer + 1;
+ int16 lastSample = *source;
+
+ while (numSamples--) {
+ int sample = *source + lastSample;
+ lastSample = *source;
+ sample /= 2;
+ *target = sample;
+ source += 2;
+ target += 2;
+ }
+ }
}
-bool RobotDecoder::loadStream(Common::SeekableReadStream *stream) {
- close();
+static void copyEveryOtherSample(int16 *out, const int16 *in, int numSamples) {
+ while (numSamples--) {
+ *out = *in++;
+ out += 2;
+ }
+}
+
+bool RobotAudioStream::addPacket(const RobotAudioPacket &packet) {
+ Common::StackLock lock(_mutex);
+
+ if (_finished) {
+ warning("Packet %d sent to finished robot audio stream", packet.position);
+ return false;
+ }
+
+ // `packet.position` is the decompressed (doubled) position of the packet, so
+ // it is always even: positions divisible by 4 belong to the even half of the
+ // stream (buffer index 0), and positions divisible only by 2 belong to the
+ // odd half (buffer index 1).
+ const int8 bufferIndex = packet.position % 4 ? 1 : 0;
+
+ // Packet 0 is the first primer, packet 2 is the second primer,
+ // packet 4+ are regular audio data
+ if (packet.position <= 2 && _firstPacketPosition == -1) {
+ _readHead = 0;
+ _readHeadAbs = 0;
+ _maxWriteAbs = _loopBufferSize;
+ _writeHeadAbs = 2;
+ _jointMin[0] = 0;
+ _jointMin[1] = 2;
+ _waiting = true;
+ _finished = false;
+ _firstPacketPosition = packet.position;
+ fillRobotBuffer(packet, bufferIndex);
+ return true;
+ }
- _fileStream = new Common::SeekableSubReadStreamEndian(stream, 0, stream->size(), _isBigEndian, DisposeAfterUse::YES);
+ const int32 packetEndByte = packet.position + (packet.dataSize * sizeof(int16) * kEOSExpansion);
- readHeaderChunk();
+ // Already read all the way past this packet (or already wrote valid samples
+ // to this channel all the way past this packet), so discard it
+ if (packetEndByte <= MAX(_readHeadAbs, _jointMin[bufferIndex])) {
+ debugC(kDebugLevelVideo, "Rejecting packet %d, read past %d / %d", packet.position, _readHeadAbs, _jointMin[bufferIndex]);
+ return true;
+ }
- // There are several versions of robot files, ranging from 3 to 6.
- // v3: no known examples
- // v4: PQ:SWAT demo
- // v5: SCI2.1 and SCI3 games
- // v6: SCI3 games
- if (_header.version < 4 || _header.version > 6)
- error("Unknown robot version: %d", _header.version);
+ // The loop buffer is full, so tell the caller to send the packet again
+ // later
+ if (_maxWriteAbs <= _jointMin[bufferIndex]) {
+ debugC(kDebugLevelVideo, "Rejecting packet %d, full buffer", packet.position);
+ return false;
+ }
- RobotVideoTrack *videoTrack = new RobotVideoTrack(_header.frameCount);
- addTrack(videoTrack);
+ fillRobotBuffer(packet, bufferIndex);
- if (_header.hasSound)
- addTrack(new RobotAudioTrack());
+ // This packet is the second primer, so allow playback to begin
+ if (_firstPacketPosition != -1 && _firstPacketPosition != packet.position) {
+ debugC(kDebugLevelVideo, "Done waiting. Robot audio begins");
+ _waiting = false;
+ _firstPacketPosition = -1;
+ }
- videoTrack->readPaletteChunk(_fileStream, _header.paletteDataSize);
- readFrameSizesChunk();
- videoTrack->calculateVideoDimensions(_fileStream, _frameTotalSize);
+ // Only part of the packet could be read into the loop buffer before it was
+ // full, so tell the caller to send the packet again later
+ if (packetEndByte > _maxWriteAbs) {
+ debugC(kDebugLevelVideo, "Partial read of packet %d (%d / %d)", packet.position, packetEndByte - _maxWriteAbs, packetEndByte - packet.position);
+ return false;
+ }
+
+ // The entire packet was successfully read into the loop buffer
return true;
}
-bool RobotDecoder::load(GuiResourceId id) {
- // TODO: RAMA's robot 1003 cannot be played (shown at the menu screen) -
- // its drawn at odd coordinates. SV can't play it either (along with some
- // others), so it must be some new functionality added in RAMA's robot
- // videos. Skip it for now.
- if (g_sci->getGameId() == GID_RAMA && id == 1003)
- return false;
+void RobotAudioStream::fillRobotBuffer(const RobotAudioPacket &packet, const int8 bufferIndex) {
+ int32 sourceByte = 0;
- // Robots for the options in the RAMA menu
- if (g_sci->getGameId() == GID_RAMA && (id >= 1004 && id <= 1009))
- return false;
+ const int32 decompressedSize = packet.dataSize * sizeof(int16);
+ if (_decompressionBufferPosition != packet.position) {
+ if (decompressedSize != _decompressionBufferSize) {
+ _decompressionBuffer = (byte *)realloc(_decompressionBuffer, decompressedSize);
+ _decompressionBufferSize = decompressedSize;
+ }
- // TODO: The robot video in the Lighthouse demo gets stuck
- if (g_sci->getGameId() == GID_LIGHTHOUSE && id == 16)
- return false;
+ int16 carry = 0;
+ deDPCM16((int16 *)_decompressionBuffer, packet.data, packet.dataSize, carry);
+ _decompressionBufferPosition = packet.position;
+ }
+
+ int32 numBytes = decompressedSize;
+ int32 packetPosition = packet.position;
+ int32 endByte = packet.position + decompressedSize * kEOSExpansion;
+ int32 startByte = MAX(_readHeadAbs + bufferIndex * 2, _jointMin[bufferIndex]);
+ int32 maxWriteByte = _maxWriteAbs + bufferIndex * 2;
+ if (packetPosition < startByte) {
+ sourceByte = (startByte - packetPosition) / kEOSExpansion;
+ numBytes -= sourceByte;
+ packetPosition = startByte;
+ }
+ if (packetPosition > maxWriteByte) {
+ numBytes += (packetPosition - maxWriteByte) / kEOSExpansion;
+ packetPosition = maxWriteByte;
+ }
+ if (endByte > maxWriteByte) {
+ numBytes -= (endByte - maxWriteByte) / kEOSExpansion;
+ endByte = maxWriteByte;
+ }
- Common::String fileName = Common::String::format("%d.rbt", id);
+ const int32 maxJointMin = MAX(_jointMin[0], _jointMin[1]);
+ if (endByte > maxJointMin) {
+ _writeHeadAbs += endByte - maxJointMin;
+ }
+
+ if (packetPosition > _jointMin[bufferIndex]) {
+ int32 packetEndByte = packetPosition % _loopBufferSize;
+ int32 targetBytePosition;
+ int32 numBytesToEnd;
+ if ((packetPosition & ~3) > (_jointMin[1 - bufferIndex] & ~3)) {
+ targetBytePosition = _jointMin[1 - bufferIndex] % _loopBufferSize;
+ if (targetBytePosition >= packetEndByte) {
+ numBytesToEnd = _loopBufferSize - targetBytePosition;
+ memset(_loopBuffer + targetBytePosition, 0, numBytesToEnd);
+ targetBytePosition = (1 - bufferIndex) ? 2 : 0;
+ }
+ numBytesToEnd = packetEndByte - targetBytePosition;
+ if (numBytesToEnd > 0) {
+ memset(_loopBuffer + targetBytePosition, 0, numBytesToEnd);
+ }
+ }
+ targetBytePosition = _jointMin[bufferIndex] % _loopBufferSize;
+ if (targetBytePosition >= packetEndByte) {
+ numBytesToEnd = _loopBufferSize - targetBytePosition;
+ interpolateChannel((int16 *)(_loopBuffer + targetBytePosition), numBytesToEnd / sizeof(int16) / kEOSExpansion, 0);
+ targetBytePosition = bufferIndex ? 2 : 0;
+ }
+ numBytesToEnd = packetEndByte - targetBytePosition;
+ if (numBytesToEnd > 0) {
+ interpolateChannel((int16 *)(_loopBuffer + targetBytePosition), numBytesToEnd / sizeof(int16) / kEOSExpansion, 0);
+ }
+ }
+
+ if (numBytes > 0) {
+ int32 targetBytePosition = packetPosition % _loopBufferSize;
+ int32 packetEndByte = endByte % _loopBufferSize;
+ int32 numBytesToEnd = 0;
+ if (targetBytePosition >= packetEndByte) {
+ numBytesToEnd = (_loopBufferSize - (targetBytePosition & ~3)) / kEOSExpansion;
+ copyEveryOtherSample((int16 *)(_loopBuffer + targetBytePosition), (int16 *)(_decompressionBuffer + sourceByte), numBytesToEnd / kEOSExpansion);
+ targetBytePosition = bufferIndex ? 2 : 0;
+ }
+ copyEveryOtherSample((int16 *)(_loopBuffer + targetBytePosition), (int16 *)(_decompressionBuffer + sourceByte + numBytesToEnd), (packetEndByte - targetBytePosition) / sizeof(int16) / kEOSExpansion);
+ }
+ _jointMin[bufferIndex] = endByte;
+}
+
+void RobotAudioStream::interpolateMissingSamples(int32 numSamples) {
+ int32 numBytes = numSamples * sizeof(int16) * kEOSExpansion;
+ int32 targetPosition = _readHead;
+
+ if (_readHeadAbs > _jointMin[1]) {
+ if (_readHeadAbs > _jointMin[0]) {
+ if (targetPosition + numBytes >= _loopBufferSize) {
+ const int32 numBytesToEdge = (_loopBufferSize - targetPosition);
+ memset(_loopBuffer + targetPosition, 0, numBytesToEdge);
+ numBytes -= numBytesToEdge;
+ targetPosition = 0;
+ }
+ memset(_loopBuffer + targetPosition, 0, numBytes);
+ _jointMin[0] += numBytes;
+ _jointMin[1] += numBytes;
+ } else {
+ if (targetPosition + numBytes >= _loopBufferSize) {
+ const int32 numSamplesToEdge = (_loopBufferSize - targetPosition) / sizeof(int16) / kEOSExpansion;
+ interpolateChannel((int16 *)(_loopBuffer + targetPosition), numSamplesToEdge, 1);
+ numSamples -= numSamplesToEdge;
+ targetPosition = 0;
+ }
+ interpolateChannel((int16 *)(_loopBuffer + targetPosition), numSamples, 1);
+ _jointMin[1] += numBytes;
+ }
+ } else if (_readHeadAbs > _jointMin[0]) {
+ if (targetPosition + numBytes >= _loopBufferSize) {
+ const int32 numSamplesToEdge = (_loopBufferSize - targetPosition) / sizeof(int16) / kEOSExpansion;
+ interpolateChannel((int16 *)(_loopBuffer + targetPosition), numSamplesToEdge, 0);
+ numSamples -= numSamplesToEdge;
+ targetPosition = 2;
+ }
+ interpolateChannel((int16 *)(_loopBuffer + targetPosition), numSamples, 0);
+ _jointMin[0] += numBytes;
+ }
+}
+
+void RobotAudioStream::finish() {
+ Common::StackLock lock(_mutex);
+ _finished = true;
+}
+
+RobotAudioStream::StreamState RobotAudioStream::getStatus() const {
+ Common::StackLock lock(_mutex);
+ StreamState status;
+ status.bytesPlaying = _readHeadAbs;
+ status.rate = getRate();
+ status.bits = 8 * sizeof(int16);
+ return status;
+}
+
+int RobotAudioStream::readBuffer(Audio::st_sample_t *outBuffer, int numSamples) {
+ Common::StackLock lock(_mutex);
+
+ if (_waiting) {
+ return 0;
+ }
+
+ assert(!((_writeHeadAbs - _readHeadAbs) & 1));
+ const int maxNumSamples = (_writeHeadAbs - _readHeadAbs) / sizeof(Audio::st_sample_t);
+ numSamples = MIN(numSamples, maxNumSamples);
+
+ if (!numSamples) {
+ return 0;
+ }
+
+ interpolateMissingSamples(numSamples);
+
+ Audio::st_sample_t *inBuffer = (Audio::st_sample_t *)(_loopBuffer + _readHead);
+
+ assert(!((_loopBufferSize - _readHead) & 1));
+ const int numSamplesToEnd = (_loopBufferSize - _readHead) / sizeof(Audio::st_sample_t);
+
+ int numSamplesToRead = MIN(numSamples, numSamplesToEnd);
+ Common::copy(inBuffer, inBuffer + numSamplesToRead, outBuffer);
+
+ if (numSamplesToRead < numSamples) {
+ inBuffer = (Audio::st_sample_t *)_loopBuffer;
+ outBuffer += numSamplesToRead;
+ numSamplesToRead = numSamples - numSamplesToRead;
+ Common::copy(inBuffer, inBuffer + numSamplesToRead, outBuffer);
+ }
+
+ const int32 numBytes = numSamples * sizeof(Audio::st_sample_t);
+
+ _readHead += numBytes;
+ if (_readHead > _loopBufferSize) {
+ _readHead -= _loopBufferSize;
+ }
+ _readHeadAbs += numBytes;
+ _maxWriteAbs += numBytes;
+ assert(!(_readHead & 1));
+ assert(!(_readHeadAbs & 1));
+
+ return numSamples;
+}
+
+#pragma mark -
+#pragma mark RobotDecoder
+
+RobotDecoder::RobotDecoder(SegManager *segMan) :
+ _delayTime(this),
+ _segMan(segMan),
+ _status(kRobotStatusUninitialized),
+ _audioBuffer(nullptr),
+ _rawPalette((uint8 *)malloc(kRawPaletteSize)) {}
+
+RobotDecoder::~RobotDecoder() {
+ close();
+ free(_rawPalette);
+ free(_audioBuffer);
+}
+
+#pragma mark -
+#pragma mark RobotDecoder - Initialization
+
+void RobotDecoder::initStream(const GuiResourceId robotId) {
+ const Common::String fileName = Common::String::format("%d.rbt", robotId);
Common::SeekableReadStream *stream = SearchMan.createReadStreamForMember(fileName);
+ _fileOffset = 0;
- if (!stream) {
- warning("Unable to open robot file %s", fileName.c_str());
- return false;
+ if (stream == nullptr) {
+ error("Unable to open robot file %s", fileName.c_str());
+ }
+
+ const uint16 id = stream->readUint16LE();
+ if (id != 0x16) {
+ error("Invalid robot file %s", fileName.c_str());
+ }
+
+ // TODO: Mac version not tested, so this could be totally wrong
+ _stream = new Common::SeekableSubReadStreamEndian(stream, 0, stream->size(), g_sci->getPlatform() == Common::kPlatformMacintosh, DisposeAfterUse::YES);
+ _stream->seek(2, SEEK_SET);
+ if (_stream->readUint32BE() != MKTAG('S', 'O', 'L', 0)) {
+ error("Resource %s is not Robot type!", fileName.c_str());
+ }
+}
+
+void RobotDecoder::initPlayback() {
+ _startFrameNo = 0;
+ _startTime = -1;
+ _startingFrameNo = -1;
+ _cueForceShowFrame = -1;
+ _previousFrameNo = -1;
+ _currentFrameNo = 0;
+ _status = kRobotStatusPaused;
+}
+
+void RobotDecoder::initAudio() {
+ _syncFrame = true;
+
+ _audioRecordInterval = RobotAudioStream::kRobotSampleRate / _frameRate;
+
+ // TODO: Might actually be for all games newer than Lighthouse; check to
+ // see which games have this condition.
+ if (g_sci->getGameId() != GID_LIGHTHOUSE && !(_audioRecordInterval & 1)) {
+ ++_audioRecordInterval;
+ }
+
+ _expectedAudioBlockSize = _audioBlockSize - kAudioBlockHeaderSize;
+ _audioBuffer = (byte *)realloc(_audioBuffer, kRobotZeroCompressSize + _expectedAudioBlockSize);
+
+ if (_primerReservedSize != 0) {
+ const int32 primerHeaderPosition = _stream->pos();
+ _totalPrimerSize = _stream->readSint32();
+ const int16 compressionType = _stream->readSint16();
+ _evenPrimerSize = _stream->readSint32();
+ _oddPrimerSize = _stream->readSint32();
+ _primerPosition = _stream->pos();
+
+ if (compressionType) {
+ error("Unknown audio header compression type %d", compressionType);
+ }
+
+ if (_evenPrimerSize + _oddPrimerSize != _primerReservedSize) {
+ _stream->seek(primerHeaderPosition + _primerReservedSize, SEEK_SET);
+ }
+ } else if (_primerZeroCompressFlag) {
+ _evenPrimerSize = 19922;
+ _oddPrimerSize = 21024;
+ }
+
+ _firstAudioRecordPosition = _evenPrimerSize * 2;
+
+ const int usedEachFrame = (RobotAudioStream::kRobotSampleRate / 2) / _frameRate;
+ _maxSkippablePackets = MAX(0, _audioBlockSize / usedEachFrame - 1);
+}
+
+void RobotDecoder::initVideo(const int16 x, const int16 y, const int16 scale, const reg_t plane, const bool hasPalette, const uint16 paletteSize) {
+ _position = Common::Point(x, y);
+
+ if (scale != 128) {
+ _scaleInfo.x = scale;
+ _scaleInfo.y = scale;
+ _scaleInfo.signal = kScaleSignalDoScaling32;
+ }
+
+ _plane = g_sci->_gfxFrameout->getPlanes().findByObject(plane);
+ if (_plane == nullptr) {
+ error("Invalid plane %04x:%04x passed to RobotDecoder::open", PRINT_REG(plane));
+ }
+
+ _minFrameRate = _frameRate - kMaxFrameRateDrift;
+ _maxFrameRate = _frameRate + kMaxFrameRateDrift;
+
+ if (_xResolution == 0 || _yResolution == 0) {
+ // TODO: Default values were taken from RESOURCE.CFG hires property
+ // if it exists, so need to check games' configuration files for those
+ _xResolution = g_sci->_gfxFrameout->getCurrentBuffer().screenWidth;
+ _yResolution = g_sci->_gfxFrameout->getCurrentBuffer().screenHeight;
}
- return loadStream(stream);
+ if (hasPalette) {
+ _stream->read(_rawPalette, paletteSize);
+ } else {
+ _stream->seek(paletteSize, SEEK_CUR);
+ }
+
+ _screenItemList.reserve(kScreenItemListSize);
+ _maxCelArea.reserve(kFixedCelListSize);
+
+ // Fixed cel buffers are for version 5 and newer
+ _fixedCels.reserve(MIN(_maxCelsPerFrame, (int16)kFixedCelListSize));
+ _celDecompressionBuffer.reserve(_maxCelArea[0] + SciBitmap::getBitmapHeaderSize() + kRawPaletteSize);
+ _celDecompressionArea = _maxCelArea[0];
+}
+
+void RobotDecoder::initRecordAndCuePositions() {
+ PositionList recordSizes;
+ _videoSizes.reserve(_numFramesTotal);
+ _recordPositions.reserve(_numFramesTotal);
+ recordSizes.reserve(_numFramesTotal);
+
+ switch(_version) {
+ case 5: // 16-bit sizes and positions
+ for (int i = 0; i < _numFramesTotal; ++i) {
+ _videoSizes.push_back(_stream->readUint16());
+ }
+ for (int i = 0; i < _numFramesTotal; ++i) {
+ recordSizes.push_back(_stream->readUint16());
+ }
+ break;
+ case 6: // 32-bit sizes and positions
+ for (int i = 0; i < _numFramesTotal; ++i) {
+ _videoSizes.push_back(_stream->readSint32());
+ }
+ for (int i = 0; i < _numFramesTotal; ++i) {
+ recordSizes.push_back(_stream->readSint32());
+ }
+ break;
+ default:
+ error("Unknown Robot version %d", _version);
+ }
+
+ for (int i = 0; i < kCueListSize; ++i) {
+ _cueTimes[i] = _stream->readSint32();
+ }
+
+ for (int i = 0; i < kCueListSize; ++i) {
+ _cueValues[i] = _stream->readUint16();
+ }
+
+ Common::copy(_cueTimes, _cueTimes + kCueListSize, _masterCueTimes);
+
+ int bytesRemaining = (_stream->pos() - _fileOffset) % kRobotFrameSize;
+ if (bytesRemaining != 0) {
+ _stream->seek(kRobotFrameSize - bytesRemaining, SEEK_CUR);
+ }
+
+ int position = _stream->pos();
+ _recordPositions.push_back(position);
+ for (int i = 0; i < _numFramesTotal - 1; ++i) {
+ position += recordSizes[i];
+ _recordPositions.push_back(position);
+ }
+}
+
+#pragma mark -
+#pragma mark RobotDecoder - Playback
+
+void RobotDecoder::open(const GuiResourceId robotId, const reg_t plane, const int16 priority, const int16 x, const int16 y, const int16 scale) {
+ if (_status != kRobotStatusUninitialized) {
+ warning("Last robot was not closed");
+ close();
+ }
+
+ initStream(robotId);
+
+ _version = _stream->readUint16();
+
+ // TODO: Version 4 for PQ:SWAT demo?
+ if (_version < 5 || _version > 6) {
+ error("Unsupported version %d of Robot resource", _version);
+ }
+
+ debugC(kDebugLevelVideo, "Opening version %d robot %d", _version, robotId);
+
+ initPlayback();
+
+ _audioBlockSize = _stream->readUint16();
+ _primerZeroCompressFlag = _stream->readSint16();
+ _stream->seek(2, SEEK_CUR); // unused
+ _numFramesTotal = _stream->readUint16();
+ const uint16 paletteSize = _stream->readUint16();
+ _primerReservedSize = _stream->readUint16();
+ _xResolution = _stream->readSint16();
+ _yResolution = _stream->readSint16();
+ const bool hasPalette = (bool)_stream->readByte();
+ _hasAudio = (bool)_stream->readByte();
+ _stream->seek(2, SEEK_CUR); // unused
+ _frameRate = _normalFrameRate = _stream->readSint16();
+ _isHiRes = (bool)_stream->readSint16();
+ _maxSkippablePackets = _stream->readSint16();
+ _maxCelsPerFrame = _stream->readSint16();
+
+ // used for memory preallocation of fixed cels
+ _maxCelArea.push_back(_stream->readSint32());
+ _maxCelArea.push_back(_stream->readSint32());
+ _maxCelArea.push_back(_stream->readSint32());
+ _maxCelArea.push_back(_stream->readSint32());
+ _stream->seek(8, SEEK_CUR); // reserved
+
+ if (_hasAudio) {
+ initAudio();
+ } else {
+ _stream->seek(_primerReservedSize, SEEK_CUR);
+ }
+
+ _priority = priority;
+ initVideo(x, y, scale, plane, hasPalette, paletteSize);
+ initRecordAndCuePositions();
}
void RobotDecoder::close() {
- VideoDecoder::close();
+ if (_status == kRobotStatusUninitialized) {
+ return;
+ }
+
+ debugC(kDebugLevelVideo, "Closing robot");
- delete _fileStream;
- _fileStream = 0;
+ _status = kRobotStatusUninitialized;
+ _videoSizes.clear();
+ _recordPositions.clear();
+ _celDecompressionBuffer.clear();
+ _doVersion5Scratch.clear();
+ delete _stream;
+ _stream = nullptr;
+
+ for (CelHandleList::size_type i = 0; i < _celHandles.size(); ++i) {
+ if (_celHandles[i].status == CelHandleInfo::kFrameLifetime) {
+ _segMan->freeBitmap(_celHandles[i].bitmapId);
+ }
+ }
+ _celHandles.clear();
+
+ for (FixedCelsList::size_type i = 0; i < _fixedCels.size(); ++i) {
+ _segMan->freeBitmap(_fixedCels[i]);
+ }
+ _fixedCels.clear();
+
+ if (g_sci->_gfxFrameout->getPlanes().findByObject(_plane->_object) != nullptr) {
+ for (RobotScreenItemList::size_type i = 0; i < _screenItemList.size(); ++i) {
+ if (_screenItemList[i] != nullptr) {
+ g_sci->_gfxFrameout->deleteScreenItem(*_screenItemList[i]);
+ }
+ }
+ }
+ _screenItemList.clear();
- delete[] _frameTotalSize;
- _frameTotalSize = 0;
+ if (_hasAudio) {
+ _audioList.reset();
+ }
}
-void RobotDecoder::readNextPacket() {
- // Get our track
- RobotVideoTrack *videoTrack = (RobotVideoTrack *)getTrack(0);
- videoTrack->increaseCurFrame();
- Graphics::Surface *surface = videoTrack->getSurface();
+void RobotDecoder::pause() {
+ if (_status != kRobotStatusPlaying) {
+ return;
+ }
+
+ if (_hasAudio) {
+ _audioList.stopAudioNow();
+ }
+
+ _status = kRobotStatusPaused;
+ _frameRate = _normalFrameRate;
+}
- if (videoTrack->endOfTrack())
+void RobotDecoder::resume() {
+ if (_status != kRobotStatusPaused) {
return;
+ }
+
+ _startingFrameNo = _currentFrameNo;
+ _status = kRobotStatusPlaying;
+ if (_hasAudio) {
+ primeAudio(_currentFrameNo * 60 / _frameRate);
+ _syncFrame = true;
+ }
+
+ setRobotTime(_currentFrameNo);
+ for (int i = 0; i < kCueListSize; ++i) {
+ if (_masterCueTimes[i] != -1 && _masterCueTimes[i] < _currentFrameNo) {
+ _cueTimes[i] = -1;
+ } else {
+ _cueTimes[i] = _masterCueTimes[i];
+ }
+ }
+}
- // Read frame image header (24 bytes)
- _fileStream->skip(3);
- byte frameScale = _fileStream->readByte();
- uint16 frameWidth = _fileStream->readUint16();
- uint16 frameHeight = _fileStream->readUint16();
- _fileStream->skip(4); // unknown, almost always 0
- uint16 frameX = _fileStream->readUint16();
- uint16 frameY = _fileStream->readUint16();
-
- // TODO: In v4 robot files, frameX and frameY have a different meaning.
- // Set them both to 0 for v4 for now, so that robots in PQ:SWAT show up
- // correctly.
- if (_header.version == 4)
- frameX = frameY = 0;
-
- uint16 compressedSize = _fileStream->readUint16();
- uint16 frameFragments = _fileStream->readUint16();
- _fileStream->skip(4); // unknown
- uint32 decompressedSize = frameWidth * frameHeight * frameScale / 100;
-
- // FIXME: A frame's height + position can go off limits... why? With the
- // following, we cut the contents to fit the frame
- uint16 scaledHeight = CLIP<uint16>(decompressedSize / frameWidth, 0, surface->h - frameY);
-
- // FIXME: Same goes for the frame's width + position. In this case, we
- // modify the position to fit the contents on screen.
- if (frameWidth + frameX > surface->w)
- frameX = surface->w - frameWidth;
-
- assert(frameWidth + frameX <= surface->w && scaledHeight + frameY <= surface->h);
-
- DecompressorLZS lzs;
- byte *decompressedFrame = new byte[decompressedSize];
- byte *outPtr = decompressedFrame;
-
- if (_header.version == 4) {
- // v4 has just the one fragment, it seems, and ignores the fragment count
- Common::SeekableSubReadStream fragmentStream(_fileStream, _fileStream->pos(), _fileStream->pos() + compressedSize);
- lzs.unpack(&fragmentStream, outPtr, compressedSize, decompressedSize);
+void RobotDecoder::showFrame(const uint16 frameNo, const uint16 newX, const uint16 newY, const uint16 newPriority) {
+ debugC(kDebugLevelVideo, "Show frame %d (%d %d %d)", frameNo, newX, newY, newPriority);
+
+ if (newX != kUnspecified) {
+ _position.x = newX;
+ }
+
+ if (newY != kUnspecified) {
+ _position.y = newY;
+ }
+
+ if (newPriority != kUnspecified) {
+ _priority = newPriority;
+ }
+
+ _currentFrameNo = frameNo;
+ pause();
+
+ if (frameNo != _previousFrameNo) {
+ seekToFrame(frameNo);
+ doVersion5(false);
} else {
- for (uint16 i = 0; i < frameFragments; ++i) {
- uint32 compressedFragmentSize = _fileStream->readUint32();
- uint32 decompressedFragmentSize = _fileStream->readUint32();
- uint16 compressionType = _fileStream->readUint16();
-
- if (compressionType == 0) {
- Common::SeekableSubReadStream fragmentStream(_fileStream, _fileStream->pos(), _fileStream->pos() + compressedFragmentSize);
- lzs.unpack(&fragmentStream, outPtr, compressedFragmentSize, decompressedFragmentSize);
- } else if (compressionType == 2) { // untested
- _fileStream->read(outPtr, compressedFragmentSize);
+ for (RobotScreenItemList::size_type i = 0; i < _screenItemList.size(); ++i) {
+ if (_isHiRes) {
+ SciBitmap &bitmap = *_segMan->lookupBitmap(_celHandles[i].bitmapId);
+
+ const int16 scriptWidth = g_sci->_gfxFrameout->getCurrentBuffer().scriptWidth;
+ const int16 scriptHeight = g_sci->_gfxFrameout->getCurrentBuffer().scriptHeight;
+ const int16 screenWidth = g_sci->_gfxFrameout->getCurrentBuffer().screenWidth;
+ const int16 screenHeight = g_sci->_gfxFrameout->getCurrentBuffer().screenHeight;
+
+ if (scriptWidth == kLowResX && scriptHeight == kLowResY) {
+ const Ratio lowResToScreenX(screenWidth, kLowResX);
+ const Ratio lowResToScreenY(screenHeight, kLowResY);
+ const Ratio screenToLowResX(kLowResX, screenWidth);
+ const Ratio screenToLowResY(kLowResY, screenHeight);
+
+ const int16 scaledX = _originalScreenItemX[i] + (_position.x * lowResToScreenX).toInt();
+ const int16 scaledY1 = _originalScreenItemY[i] + (_position.y * lowResToScreenY).toInt();
+ const int16 scaledY2 = scaledY1 + bitmap.getHeight() - 1;
+
+ const int16 lowResX = (scaledX * screenToLowResX).toInt();
+ const int16 lowResY = (scaledY2 * screenToLowResY).toInt();
+
+ bitmap.setDisplace(Common::Point(
+ (scaledX - (lowResX * lowResToScreenX).toInt()) * -1,
+ (lowResY * lowResToScreenY).toInt() - scaledY1
+ ));
+
+ _screenItemX[i] = lowResX;
+ _screenItemY[i] = lowResY;
+ } else {
+ const int16 scaledX = _originalScreenItemX[i] + _position.x;
+ const int16 scaledY = _originalScreenItemY[i] + _position.y + bitmap.getHeight() - 1;
+ bitmap.setDisplace(Common::Point(0, bitmap.getHeight() - 1));
+ _screenItemX[i] = scaledX;
+ _screenItemY[i] = scaledY;
+ }
+ } else {
+ _screenItemX[i] = _originalScreenItemX[i] + _position.x;
+ _screenItemY[i] = _originalScreenItemY[i] + _position.y;
+ }
+
+ if (_screenItemList[i] == nullptr) {
+ CelInfo32 celInfo;
+ celInfo.type = kCelTypeMem;
+ celInfo.bitmap = _celHandles[i].bitmapId;
+ ScreenItem *screenItem = new ScreenItem(_plane->_object, celInfo);
+ _screenItemList[i] = screenItem;
+ screenItem->_position = Common::Point(_screenItemX[i], _screenItemY[i]);
+ if (_priority == -1) {
+ screenItem->_fixedPriority = false;
+ } else {
+ screenItem->_priority = _priority;
+ screenItem->_fixedPriority = true;
+ }
+ g_sci->_gfxFrameout->addScreenItem(*screenItem);
} else {
- error("Unknown frame compression found: %d", compressionType);
+ ScreenItem *screenItem = _screenItemList[i];
+ screenItem->_celInfo.bitmap = _celHandles[i].bitmapId;
+ screenItem->_position = Common::Point(_screenItemX[i], _screenItemY[i]);
+ if (_priority == -1) {
+ screenItem->_fixedPriority = false;
+ } else {
+ screenItem->_priority = _priority;
+ screenItem->_fixedPriority = true;
+ }
+ g_sci->_gfxFrameout->updateScreenItem(*screenItem);
+ }
+ }
+ }
+
+ _previousFrameNo = frameNo;
+}
+
+int16 RobotDecoder::getCue() const {
+ if (_status == kRobotStatusUninitialized ||
+ _status == kRobotStatusPaused ||
+ _syncFrame) {
+ return 0;
+ }
+
+ if (_status == kRobotStatusEnd) {
+ return -1;
+ }
+
+ const uint16 estimatedNextFrameNo = MIN(calculateNextFrameNo(_delayTime.predictedTicks()), _numFramesTotal);
+
+ for (int i = 0; i < kCueListSize; ++i) {
+ if (_cueTimes[i] != -1 && _cueTimes[i] <= estimatedNextFrameNo) {
+ if (_cueTimes[i] >= _previousFrameNo) {
+ _cueForceShowFrame = _cueTimes[i] + 1;
}
- outPtr += decompressedFragmentSize;
+ _cueTimes[i] = -1;
+ return _cueValues[i];
}
}
- // Copy over the decompressed frame
- byte *inFrame = decompressedFrame;
- byte *outFrame = (byte *)surface->getPixels();
+ return 0;
+}
- // Black out the surface
- memset(outFrame, 0, surface->w * surface->h);
+int16 RobotDecoder::getFrameNo() const {
+ if (_status == kRobotStatusUninitialized) {
+ return 0;
+ }
- // Move to the correct y coordinate
- outFrame += surface->w * frameY;
+ return _currentFrameNo;
+}
+
+RobotDecoder::RobotStatus RobotDecoder::getStatus() const {
+ return _status;
+}
- for (uint16 y = 0; y < scaledHeight; y++) {
- memcpy(outFrame + frameX, inFrame, frameWidth);
- inFrame += frameWidth;
- outFrame += surface->w;
+bool RobotDecoder::seekToFrame(const int frameNo) {
+ return _stream->seek(_recordPositions[frameNo], SEEK_SET);
+}
+
+void RobotDecoder::setRobotTime(const int frameNo) {
+ _startTime = getTickCount();
+ _startFrameNo = frameNo;
+}
+
+#pragma mark -
+#pragma mark RobotDecoder - Timing
+
+RobotDecoder::DelayTime::DelayTime(RobotDecoder *decoder) :
+ _decoder(decoder) {
+ for (int i = 0; i < kDelayListSize; ++i) {
+ _timestamps[i] = i;
+ _delays[i] = 0;
}
- delete[] decompressedFrame;
+ _oldestTimestamp = 0;
+ _newestTimestamp = kDelayListSize - 1;
+ _startTime = 0;
+}
- uint32 audioChunkSize = _frameTotalSize[videoTrack->getCurFrame()] - (24 + compressedSize);
+void RobotDecoder::DelayTime::startTiming() {
+ _startTime = _decoder->getTickCount();
+}
-// TODO: The audio chunk size below is usually correct, but there are some
-// exceptions (e.g. robot 4902 in Phantasmagoria, towards its end)
-#if 0
- // Read frame audio header (14 bytes)
- _fileStream->skip(2); // buffer position
- _fileStream->skip(2); // unknown (usually 1)
- _fileStream->skip(2); /*uint16 audioChunkSize = _fileStream->readUint16() + 8;*/
- _fileStream->skip(2);
-#endif
+void RobotDecoder::DelayTime::endTiming() {
+ const int timeDelta = _decoder->getTickCount() - _startTime;
+ for (uint i = 0; i < kDelayListSize; ++i) {
+ if (_timestamps[i] == _oldestTimestamp) {
+ _timestamps[i] = ++_newestTimestamp;
+ _delays[i] = timeDelta;
+ break;
+ }
+ }
+	++_oldestTimestamp;
+ _startTime = 0;
+ sortList();
+}
- // Queue the next audio frame
- // FIXME: For some reason, there are audio hiccups/gaps
- if (_header.hasSound) {
- RobotAudioTrack *audioTrack = (RobotAudioTrack *)getTrack(1);
- _fileStream->skip(8); // header
- audioChunkSize -= 8;
- audioTrack->queueBuffer(g_sci->_audio->getDecodedRobotAudioFrame(_fileStream, audioChunkSize), audioChunkSize * 2);
+bool RobotDecoder::DelayTime::timingInProgress() const {
+ return _startTime != 0;
+}
+
+int RobotDecoder::DelayTime::predictedTicks() const {
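+	// `_delays` is kept sorted by sortList(), so the middle entry is the
+	// median of the most recent kDelayListSize frame render times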
+ return _delays[kDelayListSize / 2];
+}
+
+void RobotDecoder::DelayTime::sortList() {
+ for (uint i = 0; i < kDelayListSize - 1; ++i) {
+ int smallestDelay = _delays[i];
+ uint smallestIndex = i;
+
+		for (uint j = i + 1; j < kDelayListSize; ++j) {
+ if (_delays[j] < smallestDelay) {
+ smallestDelay = _delays[j];
+ smallestIndex = j;
+ }
+ }
+
+ if (smallestIndex != i) {
+ SWAP(_delays[i], _delays[smallestIndex]);
+ SWAP(_timestamps[i], _timestamps[smallestIndex]);
+ }
+ }
+}
+
+uint16 RobotDecoder::calculateNextFrameNo(const uint32 extraTicks) const {
+ return ticksToFrames(getTickCount() + extraTicks - _startTime) + _startFrameNo;
+}
+
+uint32 RobotDecoder::ticksToFrames(const uint32 ticks) const {
+ return (ticks * _frameRate) / 60;
+}
+
+uint32 RobotDecoder::getTickCount() const {
+ return g_sci->getTickCount();
+}
+
+#pragma mark -
+#pragma mark RobotDecoder - Audio
+
+RobotDecoder::AudioList::AudioList() :
+ _blocks(),
+ _blocksSize(0),
+ _oldestBlockIndex(0),
+ _newestBlockIndex(0),
+ _startOffset(0),
+ _status(kRobotAudioReady) {}
+
+void RobotDecoder::AudioList::startAudioNow() {
+ submitDriverMax();
+ g_sci->_audio32->resume(kRobotChannel);
+ _status = kRobotAudioPlaying;
+}
+
+void RobotDecoder::AudioList::stopAudio() {
+ g_sci->_audio32->finishRobotAudio();
+ freeAudioBlocks();
+ _status = kRobotAudioStopping;
+}
+
+void RobotDecoder::AudioList::stopAudioNow() {
+ if (_status == kRobotAudioPlaying || _status == kRobotAudioStopping || _status == kRobotAudioPaused) {
+ g_sci->_audio32->stopRobotAudio();
+ _status = kRobotAudioStopped;
+ }
+
+ freeAudioBlocks();
+}
+
+void RobotDecoder::AudioList::submitDriverMax() {
+ while (_blocksSize != 0) {
+ if (!_blocks[_oldestBlockIndex]->submit(_startOffset)) {
+ return;
+ }
+
+ delete _blocks[_oldestBlockIndex];
+ _blocks[_oldestBlockIndex] = nullptr;
+ ++_oldestBlockIndex;
+ if (_oldestBlockIndex == kAudioListSize) {
+ _oldestBlockIndex = 0;
+ }
+
+ --_blocksSize;
+ }
+}
+
+void RobotDecoder::AudioList::addBlock(const int position, const int size, const byte *data) {
+ assert(data != nullptr);
+ assert(size >= 0);
+ assert(position >= -1);
+
+ if (_blocksSize == kAudioListSize) {
+ delete _blocks[_oldestBlockIndex];
+ _blocks[_oldestBlockIndex] = nullptr;
+ ++_oldestBlockIndex;
+ if (_oldestBlockIndex == kAudioListSize) {
+ _oldestBlockIndex = 0;
+ }
+ --_blocksSize;
+ }
+
+ if (_blocksSize == 0) {
+ _oldestBlockIndex = _newestBlockIndex = 0;
} else {
- _fileStream->skip(audioChunkSize);
- }
-}
-
-void RobotDecoder::readHeaderChunk() {
- // Header (60 bytes)
- _fileStream->skip(6);
- _header.version = _fileStream->readUint16();
- _header.audioChunkSize = _fileStream->readUint16();
- _header.audioSilenceSize = _fileStream->readUint16();
- _fileStream->skip(2);
- _header.frameCount = _fileStream->readUint16();
- _header.paletteDataSize = _fileStream->readUint16();
- _header.unkChunkDataSize = _fileStream->readUint16();
- _fileStream->skip(5);
- _header.hasSound = _fileStream->readByte();
- _fileStream->skip(34);
-
- // Some videos (e.g. robot 1305 in Phantasmagoria and
- // robot 184 in Lighthouse) have an unknown chunk before
- // the palette chunk (probably used for sound preloading).
- // Skip it here.
- if (_header.unkChunkDataSize)
- _fileStream->skip(_header.unkChunkDataSize);
-}
-
-void RobotDecoder::readFrameSizesChunk() {
- // The robot video file contains 2 tables, with one entry for each frame:
- // - A table containing the size of the image in each video frame
- // - A table containing the total size of each video frame.
- // In v5 robots, the tables contain 16-bit integers, whereas in v6 robots,
- // they contain 32-bit integers.
-
- _frameTotalSize = new uint32[_header.frameCount];
-
- // TODO: The table reading code can probably be removed once the
- // audio chunk size is figured out (check the TODO inside processNextFrame())
-#if 0
- // We don't need any of the two tables to play the video, so we ignore
- // both of them.
- uint16 wordSize = _header.version == 6 ? 4 : 2;
- _fileStream->skip(_header.frameCount * wordSize * 2);
-#else
- switch (_header.version) {
- case 4:
- case 5: // sizes are 16-bit integers
- // Skip table with frame image sizes, as we don't need it
- _fileStream->skip(_header.frameCount * 2);
- for (int i = 0; i < _header.frameCount; ++i)
- _frameTotalSize[i] = _fileStream->readUint16();
- break;
- case 6: // sizes are 32-bit integers
- // Skip table with frame image sizes, as we don't need it
- _fileStream->skip(_header.frameCount * 4);
- for (int i = 0; i < _header.frameCount; ++i)
- _frameTotalSize[i] = _fileStream->readUint32();
- break;
- default:
- error("Can't yet handle index table for robot version %d", _header.version);
+ ++_newestBlockIndex;
+ if (_newestBlockIndex == kAudioListSize) {
+ _newestBlockIndex = 0;
+ }
}
-#endif
- // 2 more unknown tables
- _fileStream->skip(1024 + 512);
+ _blocks[_newestBlockIndex] = new AudioBlock(position, size, data);
+ ++_blocksSize;
+}
- // Pad to nearest 2 kilobytes
- uint32 curPos = _fileStream->pos();
- if (curPos & 0x7ff)
- _fileStream->seek((curPos & ~0x7ff) + 2048);
+void RobotDecoder::AudioList::reset() {
+ stopAudioNow();
+ _startOffset = 0;
+ _status = kRobotAudioReady;
}
-RobotDecoder::RobotVideoTrack::RobotVideoTrack(int frameCount) : _frameCount(frameCount) {
- _surface = new Graphics::Surface();
- _curFrame = -1;
- _dirtyPalette = false;
+void RobotDecoder::AudioList::prepareForPrimer() {
+ g_sci->_audio32->pause(kRobotChannel);
+ _status = kRobotAudioPaused;
}
-RobotDecoder::RobotVideoTrack::~RobotVideoTrack() {
- _surface->free();
- delete _surface;
+void RobotDecoder::AudioList::setAudioOffset(const int offset) {
+ _startOffset = offset;
}
-uint16 RobotDecoder::RobotVideoTrack::getWidth() const {
- return _surface->w;
+RobotDecoder::AudioList::AudioBlock::AudioBlock(const int position, const int size, const byte* const data) :
+ _position(position),
+ _size(size) {
+ _data = (byte *)malloc(size);
+ memcpy(_data, data, size);
}
-uint16 RobotDecoder::RobotVideoTrack::getHeight() const {
- return _surface->h;
+RobotDecoder::AudioList::AudioBlock::~AudioBlock() {
+ free(_data);
}
-Graphics::PixelFormat RobotDecoder::RobotVideoTrack::getPixelFormat() const {
- return _surface->format;
+bool RobotDecoder::AudioList::AudioBlock::submit(const int startOffset) {
+ assert(_data != nullptr);
+ RobotAudioStream::RobotAudioPacket packet(_data, _size, (_position - startOffset) * 2);
+ return g_sci->_audio32->playRobotAudio(packet);
}
-void RobotDecoder::RobotVideoTrack::readPaletteChunk(Common::SeekableSubReadStreamEndian *stream, uint16 chunkSize) {
- byte *paletteData = new byte[chunkSize];
- stream->read(paletteData, chunkSize);
+void RobotDecoder::AudioList::freeAudioBlocks() {
+ while (_blocksSize != 0) {
+ delete _blocks[_oldestBlockIndex];
+ _blocks[_oldestBlockIndex] = nullptr;
+ ++_oldestBlockIndex;
+ if (_oldestBlockIndex == kAudioListSize) {
+ _oldestBlockIndex = 0;
+ }
+
+ --_blocksSize;
+ }
+}
- // SCI1.1 palette
- byte palFormat = paletteData[32];
- uint16 palColorStart = paletteData[25];
- uint16 palColorCount = READ_SCI11ENDIAN_UINT16(paletteData + 29);
+bool RobotDecoder::primeAudio(const uint32 startTick) {
+ bool success = true;
+ _audioList.reset();
+
+ if (startTick == 0) {
+ _audioList.prepareForPrimer();
+ byte *evenPrimerBuff = new byte[_evenPrimerSize];
+ byte *oddPrimerBuff = new byte[_oddPrimerSize];
+
+ success = readPrimerData(evenPrimerBuff, oddPrimerBuff);
+ if (success) {
+ if (_evenPrimerSize != 0) {
+ _audioList.addBlock(0, _evenPrimerSize, evenPrimerBuff);
+ }
+ if (_oddPrimerSize != 0) {
+ _audioList.addBlock(1, _oddPrimerSize, oddPrimerBuff);
+ }
+ }
+
+ delete[] evenPrimerBuff;
+ delete[] oddPrimerBuff;
+ } else {
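+		// Playback is starting mid-stream: locate the audio record that
+		// overlaps the starting tick, then queue any overlapping primer data
+		// and audio records from that point up to the starting video frame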
+ assert(_evenPrimerSize * 2 >= _audioRecordInterval || _oddPrimerSize * 2 >= _audioRecordInterval);
+
+ int audioStartFrame = 0;
+ int videoStartFrame = startTick * _frameRate / 60;
+ assert(videoStartFrame < _numFramesTotal);
+
+ int audioStartPosition = (startTick * RobotAudioStream::kRobotSampleRate) / 60;
+ if (audioStartPosition & 1) {
+ audioStartPosition--;
+ }
+ _audioList.setAudioOffset(audioStartPosition);
+ _audioList.prepareForPrimer();
+
+ if (audioStartPosition < _evenPrimerSize * 2 ||
+ audioStartPosition + 1 < _oddPrimerSize * 2) {
+
+ byte *evenPrimerBuffer = new byte[_evenPrimerSize];
+ byte *oddPrimerBuffer = new byte[_oddPrimerSize];
+ success = readPrimerData(evenPrimerBuffer, oddPrimerBuffer);
+ if (success) {
+ int halfAudioStartPosition = audioStartPosition / 2;
+ if (audioStartPosition < _evenPrimerSize * 2) {
+ _audioList.addBlock(audioStartPosition, _evenPrimerSize - halfAudioStartPosition, &evenPrimerBuffer[halfAudioStartPosition]);
+ }
+
+ if (audioStartPosition + 1 < _oddPrimerSize * 2) {
+ _audioList.addBlock(audioStartPosition + 1, _oddPrimerSize - halfAudioStartPosition, &oddPrimerBuffer[halfAudioStartPosition]);
+ }
+ }
+
+ delete[] evenPrimerBuffer;
+ delete[] oddPrimerBuffer;
+ }
- int palOffset = 37;
- memset(_palette, 0, 256 * 3);
+ if (audioStartPosition >= _firstAudioRecordPosition) {
+ int audioRecordSize = _expectedAudioBlockSize;
+ assert(audioRecordSize > 0);
+ assert(_audioRecordInterval > 0);
+ assert(_firstAudioRecordPosition >= 0);
- for (uint16 colorNo = palColorStart; colorNo < palColorStart + palColorCount; colorNo++) {
- if (palFormat == kRobotPalVariable)
- palOffset++;
- _palette[colorNo * 3 + 0] = paletteData[palOffset++];
- _palette[colorNo * 3 + 1] = paletteData[palOffset++];
- _palette[colorNo * 3 + 2] = paletteData[palOffset++];
+ audioStartFrame = (audioStartPosition - _firstAudioRecordPosition) / _audioRecordInterval;
+ assert(audioStartFrame < videoStartFrame);
+
+ if (audioStartFrame > 0) {
+ int lastAudioFrame = audioStartFrame - 1;
+ int oddRemainder = lastAudioFrame & 1;
+ int audioRecordStart = (lastAudioFrame * _audioRecordInterval) + oddRemainder + _firstAudioRecordPosition;
+ int audioRecordEnd = (audioRecordStart + ((audioRecordSize - 1) * 2)) + oddRemainder + _firstAudioRecordPosition;
+
+ if (audioStartPosition >= audioRecordStart && audioStartPosition <= audioRecordEnd) {
+ --audioStartFrame;
+ }
+ }
+
+ assert(!(audioStartPosition & 1));
+ if (audioStartFrame & 1) {
+ ++audioStartPosition;
+ }
+
+ if (!readPartialAudioRecordAndSubmit(audioStartFrame, audioStartPosition)) {
+ return false;
+ }
+
+ ++audioStartFrame;
+ assert(audioStartFrame < videoStartFrame);
+
+ int oddRemainder = audioStartFrame & 1;
+ int audioRecordStart = (audioStartFrame * _audioRecordInterval) + oddRemainder + _firstAudioRecordPosition;
+ int audioRecordEnd = (audioRecordStart + ((audioRecordSize - 1) * 2)) + oddRemainder + _firstAudioRecordPosition;
+
+ if (audioStartPosition >= audioRecordStart && audioStartPosition <= audioRecordEnd) {
+ if (!readPartialAudioRecordAndSubmit(audioStartFrame, audioStartPosition + 1)) {
+ return false;
+ }
+
+ ++audioStartFrame;
+ }
+ }
+
+ int audioPosition, audioSize;
+ for (int i = audioStartFrame; i < videoStartFrame; i++) {
+ if (!readAudioDataFromRecord(i, _audioBuffer, audioPosition, audioSize)) {
+ break;
+ }
+
+ _audioList.addBlock(audioPosition, audioSize, _audioBuffer);
+ }
}
- _dirtyPalette = true;
- delete[] paletteData;
+ return success;
}
-void RobotDecoder::RobotVideoTrack::calculateVideoDimensions(Common::SeekableSubReadStreamEndian *stream, uint32 *frameSizes) {
- // This is an O(n) operation, as each frame has a different size.
- // We need to know the actual frame size to have a constant video size.
- uint32 pos = stream->pos();
+bool RobotDecoder::readPrimerData(byte *outEvenBuffer, byte *outOddBuffer) {
+ if (_primerReservedSize != 0) {
+ if (_totalPrimerSize != 0) {
+ _stream->seek(_primerPosition, SEEK_SET);
+ if (_evenPrimerSize > 0) {
+ _stream->read(outEvenBuffer, _evenPrimerSize);
+ }
+
+ if (_oddPrimerSize > 0) {
+ _stream->read(outOddBuffer, _oddPrimerSize);
+ }
+ }
+ } else if (_primerZeroCompressFlag) {
+ memset(outEvenBuffer, 0, _evenPrimerSize);
+ memset(outOddBuffer, 0, _oddPrimerSize);
+ } else {
+ error("ReadPrimerData - Flags corrupt");
+ }
+
+ return !_stream->err();
+}
+
+bool RobotDecoder::readAudioDataFromRecord(const int frameNo, byte *outBuffer, int &outAudioPosition, int &outAudioSize) {
+ _stream->seek(_recordPositions[frameNo] + _videoSizes[frameNo], SEEK_SET);
+ _audioList.submitDriverMax();
+
+ // Compressed absolute position of the audio block in the audio stream
+ const int position = _stream->readSint32();
- uint16 width = 0, height = 0;
+ // Size of the block of audio, excluding the audio block header
+ int size = _stream->readSint32();
- for (int curFrame = 0; curFrame < _frameCount; curFrame++) {
- stream->skip(4);
- uint16 frameWidth = stream->readUint16();
- uint16 frameHeight = stream->readUint16();
- if (frameWidth > width)
- width = frameWidth;
- if (frameHeight > height)
- height = frameHeight;
- stream->skip(frameSizes[curFrame] - 8);
+ assert(size <= _expectedAudioBlockSize);
+
+ if (position == 0) {
+ return false;
}
- stream->seek(pos);
+ if (size != _expectedAudioBlockSize) {
+ memset(outBuffer, 0, kRobotZeroCompressSize);
+ _stream->read(outBuffer + kRobotZeroCompressSize, size);
+ size += kRobotZeroCompressSize;
+ } else {
+ _stream->read(outBuffer, size);
+ }
- _surface->create(width, height, Graphics::PixelFormat::createFormatCLUT8());
+ outAudioPosition = position;
+ outAudioSize = size;
+ return !_stream->err();
}
-RobotDecoder::RobotAudioTrack::RobotAudioTrack() {
- _audioStream = Audio::makeQueuingAudioStream(11025, false);
+bool RobotDecoder::readPartialAudioRecordAndSubmit(const int startFrame, const int startPosition) {
+ int audioPosition, audioSize;
+ bool success = readAudioDataFromRecord(startFrame, _audioBuffer, audioPosition, audioSize);
+ if (success) {
+ const int relativeStartOffset = (startPosition - audioPosition) / 2;
+ _audioList.addBlock(startPosition, audioSize - relativeStartOffset, _audioBuffer + relativeStartOffset);
+ }
+
+ return success;
}
-RobotDecoder::RobotAudioTrack::~RobotAudioTrack() {
- delete _audioStream;
+#pragma mark -
+#pragma mark RobotDecoder - Rendering
+
+uint16 RobotDecoder::getFrameSize(Common::Rect &outRect) const {
+ outRect.clip(0, 0);
+ for (RobotScreenItemList::size_type i = 0; i < _screenItemList.size(); ++i) {
+ ScreenItem &screenItem = *_screenItemList[i];
+ outRect.extend(screenItem.getNowSeenRect(*_plane));
+ }
+
+ return _numFramesTotal;
}
-void RobotDecoder::RobotAudioTrack::queueBuffer(byte *buffer, int size) {
- _audioStream->queueBuffer(buffer, size, DisposeAfterUse::YES, Audio::FLAG_16BITS | Audio::FLAG_LITTLE_ENDIAN);
+void RobotDecoder::doRobot() {
+ if (_status != kRobotStatusPlaying) {
+ return;
+ }
+
+ if (!_syncFrame) {
+ if (_cueForceShowFrame != -1) {
+ _currentFrameNo = _cueForceShowFrame;
+ _cueForceShowFrame = -1;
+ } else {
+ const int nextFrameNo = calculateNextFrameNo(_delayTime.predictedTicks());
+ if (nextFrameNo < _currentFrameNo) {
+ return;
+ }
+ _currentFrameNo = nextFrameNo;
+ }
+ }
+
+ if (_currentFrameNo >= _numFramesTotal) {
+ const int finalFrameNo = _numFramesTotal - 1;
+ if (_previousFrameNo == finalFrameNo) {
+ _status = kRobotStatusEnd;
+ if (_hasAudio) {
+ _audioList.stopAudio();
+ _frameRate = _normalFrameRate;
+ _hasAudio = false;
+ }
+ return;
+ } else {
+ _currentFrameNo = finalFrameNo;
+ }
+ }
+
+ if (_currentFrameNo == _previousFrameNo) {
+ _audioList.submitDriverMax();
+ return;
+ }
+
+ if (_hasAudio) {
+ for (int candidateFrameNo = _previousFrameNo + _maxSkippablePackets + 1; candidateFrameNo < _currentFrameNo; candidateFrameNo += _maxSkippablePackets + 1) {
+
+ _audioList.submitDriverMax();
+
+ int audioPosition, audioSize;
+ if (readAudioDataFromRecord(candidateFrameNo, _audioBuffer, audioPosition, audioSize)) {
+ _audioList.addBlock(audioPosition, audioSize, _audioBuffer);
+ }
+ }
+ _audioList.submitDriverMax();
+ }
+
+ _delayTime.startTiming();
+ seekToFrame(_currentFrameNo);
+ doVersion5();
+ if (_hasAudio) {
+ _audioList.submitDriverMax();
+ }
+}
+
+void RobotDecoder::frameAlmostVisible() {
+ if (_status == kRobotStatusPlaying && !_syncFrame) {
+ if (_previousFrameNo != _currentFrameNo) {
+ while (calculateNextFrameNo() < _currentFrameNo) {
+ _audioList.submitDriverMax();
+ }
+ }
+ }
+}
+
+void RobotDecoder::frameNowVisible() {
+ if (_status != kRobotStatusPlaying) {
+ return;
+ }
+
+ if (_syncFrame) {
+ _syncFrame = false;
+ if (_hasAudio) {
+ _audioList.startAudioNow();
+ _checkAudioSyncTime = _startTime + kAudioSyncCheckInterval;
+ }
+
+ setRobotTime(_currentFrameNo);
+ }
+
+ if (_delayTime.timingInProgress()) {
+ _delayTime.endTiming();
+ }
+
+ if (_hasAudio) {
+ _audioList.submitDriverMax();
+ }
+
+ if (_previousFrameNo != _currentFrameNo) {
+ _previousFrameNo = _currentFrameNo;
+ }
+
+ if (!_syncFrame && _hasAudio && getTickCount() >= _checkAudioSyncTime) {
+ RobotAudioStream::StreamState status;
+ const bool success = g_sci->_audio32->queryRobotAudio(status);
+ if (!success) {
+ return;
+ }
+
+ const int bytesPerFrame = status.rate / _normalFrameRate * (status.bits == 16 ? 2 : 1);
+ // check again in 1/3rd second
+ _checkAudioSyncTime = getTickCount() + 60 / 3;
+
+ const int currentVideoFrameNo = calculateNextFrameNo() - _startingFrameNo;
+ const int currentAudioFrameNo = status.bytesPlaying / bytesPerFrame;
+ debugC(kDebugLevelVideo, "Video frame %d %s audio frame %d", currentVideoFrameNo, currentVideoFrameNo == currentAudioFrameNo ? "=" : currentVideoFrameNo < currentAudioFrameNo ? "<" : ">", currentAudioFrameNo);
+ if (currentVideoFrameNo < _numFramesTotal &&
+ currentAudioFrameNo < _numFramesTotal) {
+
+ bool shouldResetRobotTime = false;
+
+ if (currentAudioFrameNo < currentVideoFrameNo - 1 && _frameRate != _minFrameRate) {
+ debugC(kDebugLevelVideo, "[v] Reducing frame rate");
+ _frameRate = _minFrameRate;
+ shouldResetRobotTime = true;
+ } else if (currentAudioFrameNo > currentVideoFrameNo + 1 && _frameRate != _maxFrameRate) {
+ debugC(kDebugLevelVideo, "[^] Increasing frame rate");
+ _frameRate = _maxFrameRate;
+ shouldResetRobotTime = true;
+ } else if (_frameRate != _normalFrameRate) {
+ debugC(kDebugLevelVideo, "[=] Setting to normal frame rate");
+ _frameRate = _normalFrameRate;
+ shouldResetRobotTime = true;
+ }
+
+ if (shouldResetRobotTime) {
+ if (currentAudioFrameNo < _currentFrameNo) {
+ setRobotTime(_currentFrameNo);
+ } else {
+ setRobotTime(currentAudioFrameNo);
+ }
+ }
+ }
+ }
+}
+
+void RobotDecoder::expandCel(byte* target, const byte* source, const int16 celWidth, const int16 celHeight) const {
+ assert(source != nullptr && target != nullptr);
+
+ const int sourceHeight = (celHeight * _verticalScaleFactor) / 100;
+ assert(sourceHeight > 0);
+
+ const int16 numerator = celHeight;
+ const int16 denominator = sourceHeight;
+ int remainder = 0;
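+	// Line-doubling via an integer error accumulator: each of the sourceHeight
+	// decimated source lines is copied one or more times so that exactly
+	// celHeight lines are written to the target buffer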
+ for (int16 y = sourceHeight - 1; y >= 0; --y) {
+ remainder += numerator;
+ int16 linesToDraw = remainder / denominator;
+ remainder %= denominator;
+
+ while (linesToDraw--) {
+ memcpy(target, source, celWidth);
+ target += celWidth;
+ }
+
+ source += celWidth;
+ }
+}
+
+void RobotDecoder::setPriority(const int16 newPriority) {
+ _priority = newPriority;
+}
+
+void RobotDecoder::doVersion5(const bool shouldSubmitAudio) {
+ const RobotScreenItemList::size_type oldScreenItemCount = _screenItemList.size();
+ const int videoSize = _videoSizes[_currentFrameNo];
+ _doVersion5Scratch.resize(videoSize);
+
+ byte *videoFrameData = _doVersion5Scratch.begin();
+
+	if (_stream->read(videoFrameData, videoSize) != (uint32)videoSize) {
+ error("RobotDecoder::doVersion5: Read error");
+ }
+
+ const RobotScreenItemList::size_type screenItemCount = READ_SCI11ENDIAN_UINT16(videoFrameData);
+
+ if (screenItemCount > kScreenItemListSize) {
+ return;
+ }
+
+ if (_hasAudio &&
+ (getSciVersion() < SCI_VERSION_3 || shouldSubmitAudio)) {
+ int audioPosition, audioSize;
+ if (readAudioDataFromRecord(_currentFrameNo, _audioBuffer, audioPosition, audioSize)) {
+ _audioList.addBlock(audioPosition, audioSize, _audioBuffer);
+ }
+ }
+
+ if (screenItemCount > oldScreenItemCount) {
+ _screenItemList.resize(screenItemCount);
+ _screenItemX.resize(screenItemCount);
+ _screenItemY.resize(screenItemCount);
+ _originalScreenItemX.resize(screenItemCount);
+ _originalScreenItemY.resize(screenItemCount);
+ }
+
+ createCels5(videoFrameData + 2, screenItemCount, true);
+ for (RobotScreenItemList::size_type i = 0; i < screenItemCount; ++i) {
+ Common::Point position(_screenItemX[i], _screenItemY[i]);
+
+// TODO: Version 6 robot?
+// int scaleXRemainder;
+ if (_scaleInfo.signal == kScaleSignalDoScaling32) {
+ position.x = (position.x * _scaleInfo.x) / 128;
+// TODO: Version 6 robot?
+// scaleXRemainder = (position.x * _scaleInfo.x) % 128;
+ position.y = (position.y * _scaleInfo.y) / 128;
+ }
+
+ if (_screenItemList[i] == nullptr) {
+ CelInfo32 celInfo;
+ celInfo.bitmap = _celHandles[i].bitmapId;
+ ScreenItem *screenItem = new ScreenItem(_plane->_object, celInfo, position, _scaleInfo);
+ _screenItemList[i] = screenItem;
+ // TODO: Version 6 robot?
+ // screenItem->_field_30 = scaleXRemainder;
+
+ if (_priority == -1) {
+ screenItem->_fixedPriority = false;
+ } else {
+ screenItem->_fixedPriority = true;
+ screenItem->_priority = _priority;
+ }
+ g_sci->_gfxFrameout->addScreenItem(*screenItem);
+ } else {
+ ScreenItem *screenItem = _screenItemList[i];
+ screenItem->_celInfo.bitmap = _celHandles[i].bitmapId;
+ screenItem->_position = position;
+ // TODO: Version 6 robot?
+ // screenItem->_field_30 = scaleXRemainder;
+
+ if (_priority == -1) {
+ screenItem->_fixedPriority = false;
+ } else {
+ screenItem->_fixedPriority = true;
+ screenItem->_priority = _priority;
+ }
+ g_sci->_gfxFrameout->updateScreenItem(*screenItem);
+ }
+ }
+
+ for (RobotScreenItemList::size_type i = screenItemCount; i < oldScreenItemCount; ++i) {
+ if (_screenItemList[i] != nullptr) {
+ g_sci->_gfxFrameout->deleteScreenItem(*_screenItemList[i]);
+ _screenItemList[i] = nullptr;
+ }
+ }
+}
+
+void RobotDecoder::createCels5(const byte *rawVideoData, const int16 numCels, const bool usePalette) {
+ preallocateCelMemory(rawVideoData, numCels);
+ for (int16 i = 0; i < numCels; ++i) {
+ rawVideoData += createCel5(rawVideoData, i, usePalette);
+ }
+}
+
+uint32 RobotDecoder::createCel5(const byte *rawVideoData, const int16 screenItemIndex, const bool usePalette) {
+ _verticalScaleFactor = rawVideoData[1];
+ const int16 celWidth = (int16)READ_SCI11ENDIAN_UINT16(rawVideoData + 2);
+ const int16 celHeight = (int16)READ_SCI11ENDIAN_UINT16(rawVideoData + 4);
+ const Common::Point celPosition((int16)READ_SCI11ENDIAN_UINT16(rawVideoData + 10),
+ (int16)READ_SCI11ENDIAN_UINT16(rawVideoData + 12));
+ const uint16 dataSize = READ_SCI11ENDIAN_UINT16(rawVideoData + 14);
+ const int16 numDataChunks = (int16)READ_SCI11ENDIAN_UINT16(rawVideoData + 16);
+
+ rawVideoData += kCelHeaderSize;
+
+ const int16 scriptWidth = g_sci->_gfxFrameout->getCurrentBuffer().scriptWidth;
+ const int16 scriptHeight = g_sci->_gfxFrameout->getCurrentBuffer().scriptHeight;
+ const int16 screenWidth = g_sci->_gfxFrameout->getCurrentBuffer().screenWidth;
+ const int16 screenHeight = g_sci->_gfxFrameout->getCurrentBuffer().screenHeight;
+
+ Common::Point displace;
+ if (scriptWidth == kLowResX && scriptHeight == kLowResY) {
+ const Ratio lowResToScreenX(screenWidth, kLowResX);
+ const Ratio lowResToScreenY(screenHeight, kLowResY);
+ const Ratio screenToLowResX(kLowResX, screenWidth);
+ const Ratio screenToLowResY(kLowResY, screenHeight);
+
+ const int16 scaledX = celPosition.x + (_position.x * lowResToScreenX).toInt();
+ const int16 scaledY1 = celPosition.y + (_position.y * lowResToScreenY).toInt();
+ const int16 scaledY2 = scaledY1 + celHeight - 1;
+
+ const int16 lowResX = (scaledX * screenToLowResX).toInt();
+ const int16 lowResY = (scaledY2 * screenToLowResY).toInt();
+
+ displace.x = (scaledX - (lowResX * lowResToScreenX).toInt()) * -1;
+ displace.y = (lowResY * lowResToScreenY).toInt() - scaledY1;
+ _screenItemX[screenItemIndex] = lowResX;
+ _screenItemY[screenItemIndex] = lowResY;
+
+ debugC(kDebugLevelVideo, "Low resolution position c: %d %d l: %d/%d %d/%d d: %d %d s: %d/%d %d/%d x: %d y: %d", celPosition.x, celPosition.y, lowResX, scriptWidth, lowResY, scriptHeight, displace.x, displace.y, scaledX, screenWidth, scaledY2, screenHeight, scaledX - displace.x, scaledY2 - displace.y);
+ } else {
+ const int16 highResX = celPosition.x + _position.x;
+ const int16 highResY = celPosition.y + _position.y + celHeight - 1;
+
+ displace.x = 0;
+ displace.y = celHeight - 1;
+ _screenItemX[screenItemIndex] = highResX;
+ _screenItemY[screenItemIndex] = highResY;
+
+ debugC(kDebugLevelVideo, "High resolution position c: %d %d s: %d %d d: %d %d", celPosition.x, celPosition.y, highResX, highResY, displace.x, displace.y);
+ }
+
+ _originalScreenItemX[screenItemIndex] = celPosition.x;
+ _originalScreenItemY[screenItemIndex] = celPosition.y;
+
+ assert(_celHandles[screenItemIndex].area >= celWidth * celHeight);
+
+ SciBitmap &bitmap = *_segMan->lookupBitmap(_celHandles[screenItemIndex].bitmapId);
+ assert(bitmap.getWidth() == celWidth && bitmap.getHeight() == celHeight);
+ assert(bitmap.getScaledWidth() == _xResolution && bitmap.getScaledHeight() == _yResolution);
+ assert(bitmap.getHunkPaletteOffset() == (uint32)bitmap.getWidth() * bitmap.getHeight() + SciBitmap::getBitmapHeaderSize());
+ bitmap.setDisplace(displace);
+
+ byte *targetBuffer = nullptr;
+ if (_verticalScaleFactor == 100) {
+ // direct copy to bitmap
+ targetBuffer = bitmap.getPixels();
+ } else {
+ // go through squashed cel decompressor
+		_celDecompressionBuffer.resize(celWidth * (celHeight * _verticalScaleFactor / 100));
+ targetBuffer = _celDecompressionBuffer.begin();
+ }
+
+ for (int i = 0; i < numDataChunks; ++i) {
+ uint compressedSize = READ_SCI11ENDIAN_UINT32(rawVideoData);
+ uint decompressedSize = READ_SCI11ENDIAN_UINT32(rawVideoData + 4);
+ uint16 compressionType = READ_SCI11ENDIAN_UINT16(rawVideoData + 8);
+ rawVideoData += 10;
+
+ switch (compressionType) {
+ case kCompressionLZS: {
+ Common::MemoryReadStream videoDataStream(rawVideoData, compressedSize, DisposeAfterUse::NO);
+ _decompressor.unpack(&videoDataStream, targetBuffer, compressedSize, decompressedSize);
+ break;
+ }
+ case kCompressionNone:
+ Common::copy(rawVideoData, rawVideoData + decompressedSize, targetBuffer);
+ break;
+ default:
+ error("Unknown compression type %d!", compressionType);
+ }
+
+ rawVideoData += compressedSize;
+ targetBuffer += decompressedSize;
+ }
+
+ if (_verticalScaleFactor != 100) {
+ expandCel(bitmap.getPixels(), _celDecompressionBuffer.begin(), celWidth, celHeight);
+ }
+
+ if (usePalette) {
+ Common::copy(_rawPalette, _rawPalette + kRawPaletteSize, bitmap.getHunkPalette());
+ }
+
+ return kCelHeaderSize + dataSize;
}
-Audio::AudioStream *RobotDecoder::RobotAudioTrack::getAudioStream() const {
- return _audioStream;
+void RobotDecoder::preallocateCelMemory(const byte *rawVideoData, const int16 numCels) {
+ for (CelHandleList::size_type i = 0; i < _celHandles.size(); ++i) {
+ CelHandleInfo &celHandle = _celHandles[i];
+
+ if (celHandle.status == CelHandleInfo::kFrameLifetime) {
+ _segMan->freeBitmap(celHandle.bitmapId);
+ celHandle.bitmapId = NULL_REG;
+ celHandle.status = CelHandleInfo::kNoCel;
+ celHandle.area = 0;
+ }
+ }
+ _celHandles.resize(numCels);
+
+ const int numFixedCels = MIN(numCels, (int16)kFixedCelListSize);
+ for (int i = 0; i < numFixedCels; ++i) {
+ CelHandleInfo &celHandle = _celHandles[i];
+
+ // NOTE: There was a check to see if the cel handle was not allocated
+ // here, for some reason, which would mean that nothing was ever
+ // allocated from fixed cels, because the _celHandles array just got
+ // deleted and recreated...
+ if (celHandle.bitmapId == NULL_REG) {
+ break;
+ }
+
+ celHandle.bitmapId = _fixedCels[i];
+ celHandle.status = CelHandleInfo::kRobotLifetime;
+ celHandle.area = _maxCelArea[i];
+ }
+
+ uint maxFrameArea = 0;
+ for (int i = 0; i < numCels; ++i) {
+ const int16 celWidth = (int16)READ_SCI11ENDIAN_UINT16(rawVideoData + 2);
+ const int16 celHeight = (int16)READ_SCI11ENDIAN_UINT16(rawVideoData + 4);
+ const uint16 dataSize = READ_SCI11ENDIAN_UINT16(rawVideoData + 14);
+ const uint area = celWidth * celHeight;
+
+ if (area > maxFrameArea) {
+ maxFrameArea = area;
+ }
+
+ CelHandleInfo &celHandle = _celHandles[i];
+ if (celHandle.status == CelHandleInfo::kRobotLifetime) {
+ if (_maxCelArea[i] < area) {
+ _segMan->freeBitmap(celHandle.bitmapId);
+ _segMan->allocateBitmap(&celHandle.bitmapId, celWidth, celHeight, 255, 0, 0, _xResolution, _yResolution, kRawPaletteSize, false, false);
+ celHandle.area = area;
+ celHandle.status = CelHandleInfo::kFrameLifetime;
+ }
+ } else if (celHandle.status == CelHandleInfo::kNoCel) {
+ _segMan->allocateBitmap(&celHandle.bitmapId, celWidth, celHeight, 255, 0, 0, _xResolution, _yResolution, kRawPaletteSize, false, false);
+ celHandle.area = area;
+ celHandle.status = CelHandleInfo::kFrameLifetime;
+ } else {
+ error("Cel Handle has bad status");
+ }
+
+ rawVideoData += kCelHeaderSize + dataSize;
+ }
+
+ if (maxFrameArea > _celDecompressionBuffer.size()) {
+ _celDecompressionBuffer.resize(maxFrameArea);
+ }
}
} // End of namespace Sci
diff --git a/engines/sci/video/robot_decoder.h b/engines/sci/video/robot_decoder.h
index 4faea5008a..5fd6ad49c4 100644
--- a/engines/sci/video/robot_decoder.h
+++ b/engines/sci/video/robot_decoder.h
@@ -20,109 +20,1407 @@
*
*/
-#ifndef SCI_VIDEO_ROBOT_DECODER_H
-#define SCI_VIDEO_ROBOT_DECODER_H
+#ifndef SCI_SOUND_DECODERS_ROBOT_H
+#define SCI_SOUND_DECODERS_ROBOT_H
-#include "common/rational.h"
-#include "common/rect.h"
-#include "video/video_decoder.h"
+#include "audio/audiostream.h" // for AudioStream
+#include "audio/rate.h" // for st_sample_t
+#include "common/array.h" // for Array
+#include "common/mutex.h" // for StackLock, Mutex
+#include "common/rect.h" // for Point, Rect (ptr only)
+#include "common/scummsys.h" // for int16, int32, byte, uint16
+#include "sci/engine/vm_types.h" // for NULL_REG, reg_t
+#include "sci/graphics/helpers.h" // for GuiResourceId
+#include "sci/graphics/screen_item32.h" // for ScaleInfo, ScreenItem (ptr o...
-namespace Audio {
-class QueuingAudioStream;
-}
+namespace Common { class SeekableSubReadStreamEndian; }
+namespace Sci {
+class Plane;
+class SegManager;
-namespace Common {
-class SeekableSubReadStreamEndian;
-}
+// Notes on Robot v5/v6 format:
+//
+// Robot is a packetized streaming AV format that encodes multiple bitmaps +
+// positioning data, plus synchronised audio, for rendering in the SCI graphics
+// system.
+//
+// Unlike traditional AV formats, Robot videos almost always require playback
+// within the game engine because certain information (like the resolution of
+// the Robot coordinates and the background for the video) is dependent on data
+// that does not exist within the Robot file itself.
+//
+// The Robot container consists of a file header, an optional primer audio
+// section, an optional colour palette, a frame seek index, a set of cuepoints,
+// and variable-sized packets of compressed video+audio data.
+//
+// Integers in Robot files are coded using native endianness (LSB for x86
+// versions, MSB for 68k/PPC versions).
+//
+// Robot video coding is a relatively simple variable-length compression with no
+// interframe compression. Each cel in a frame is constructed from multiple
+// contiguous data blocks, each of which can be independently compressed with
+// LZS or left uncompressed. An entire cel can also be line decimated, where
+// lines are deleted from the source bitmap at compression time and are
+// reconstructed by decompression using line doubling. Each cel also includes
+// coordinates where it should be placed within the video frame, relative to the
+// top-left corner of the frame.
+//
+// Audio coding is fixed-length, and all audio blocks except for the primer
+// audio are the same size. Audio is encoded with Sierra SOL DPCM16 compression,
+// and is split into two channels ('even' and 'odd'), each at an 11025Hz sample
+// rate. The original signal is restored by interleaving samples from the two
+// channels together. Channel packets are 'even' if they have an ''absolute
+// position of audio'' that is evenly divisible by 2; otherwise, they are 'odd'.
+// Because the channels use DPCM compression, there is an 8-byte runway at the
+// start of every audio block that is never written to the output stream, which
+// is used to move the signal to the correct location by the 9th sample.
+//
+// File header (v5/v6):
+//
+// byte | description
+// 0 | signature 0x16
+// 1 | unused
+// 2-5 | signature 'SOL\0'
+// 6-7 | version (4, 5, and 6 are the only known versions)
+// 8-9 | size of audio blocks
+// 10-11 | primer is compressed flag
+// 12-13 | unused
+// 14-15 | total number of video frames
+// 16-17 | embedded palette size, in bytes
+// 18-19 | primer reserved size
+// 20-21 | coordinate X-resolution (if 0, uses game coordinates)
+// 22-23 | coordinate Y-resolution (if 0, uses game coordinates)
+// 24 | if non-zero, Robot includes a palette
+// 25 | if non-zero, Robot includes audio
+// 26-27 | unused
+// 28-29 | the frame rate, in frames per second
+// 30-31 | coordinate conversion flag; if true, screen item coordinates
+// | from the robot should be used as-is with NO conversion when
+// | explicitly displaying a specific frame
+// 32-33 | the maximum number of packets that can be skipped without causing
+// | audio drop-out
+// 34-35 | the maximum possible number of cels that will be displayed in any
+// | frame of the robot
+// 36-39 | the maximum possible size, in bytes, of the first fixed cel
+// 40-43 | the maximum possible size, in bytes, of the second fixed cel
+// 44-47 | the maximum possible size, in bytes, of the third fixed cel
+// 48-51 | the maximum possible size, in bytes, of the fourth fixed cel
+// 52-59 | unused
+//
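+// As an illustrative sketch only (not decoder code; the variable names are
+// invented here), the fixed 60-byte header can be read from a native-endian
+// SeekableReadStreamEndian `s` positioned at byte 0 roughly as follows:
+//
+//   s->skip(6);                                      // 0x16, unused, 'SOL\0'
+//   const uint16 version            = s->readUint16();
+//   const uint16 audioBlockSize     = s->readUint16();
+//   const uint16 primerIsCompressed = s->readUint16();
+//   s->skip(2);                                      // unused
+//   const uint16 numFramesTotal     = s->readUint16();
+//   const uint16 paletteSize        = s->readUint16();
+//   const uint16 primerReservedSize = s->readUint16();
+//   const uint16 xResolution        = s->readUint16();
+//   const uint16 yResolution        = s->readUint16();
+//   const bool hasPalette           = s->readByte() != 0;
+//   const bool hasAudio             = s->readByte() != 0;
+//   s->skip(2);                                      // unused
+//   const uint16 frameRate          = s->readUint16();
+//   const uint16 isHiRes            = s->readUint16();
+//   const uint16 maxSkippablePackets = s->readUint16();
+//   const uint16 maxCelsPerFrame    = s->readUint16();
+//   s->skip(4 * 4 + 8);                              // fixed cel sizes, unused
+//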
+// If the ''file includes audio'' flag is false, seek ''primer reserved size''
+// bytes from the end of the file header to get past a padding zone.
+//
+// If the ''file includes audio'' flag is true, and the ''primer reserved size''
+// is not zero, the data immediately after the file header consists of an audio
+// primer header plus compressed audio data:
+//
+// Audio primer header:
+//
+// byte | description
+// 0-3 | the size, in bytes, of the entire primer audio section
+// 4-5 | the compression format of the primer audio (must be zero)
+// 6-9 | the size, in bytes, of the "even" primer
+// 10-13 | the size, in bytes, of the "odd" primer
+//
+// If the combined sizes of the even and odd primers do not match the ''primer
+// reserved size'', the next header block can be found ''primer reserved size''
+// bytes from the *start* of the audio primer header.
+//
+// Otherwise, if the Robot has audio, and the ''primer reserved size'' is zero,
+// and the ''primer is compressed flag'' is set, the "even" primer size is
+// 19922, the "odd" primer size is 21024, and the "even" and "odd" buffers
+// should be zero-filled.
+//
+// Any other combination of these flags is an error.
+//
+// If the Robot has a palette, the next ''palette size'' bytes should be read
+// as a SCI HunkPalette. Otherwise, seek ''palette size'' bytes from the current
+// position to get to the frame index.
+//
+// The next section of the Robot is the video frame size index. In version 5
+// robots, read ''total number of frames'' 16-bit integers to get the size of
+// the compressed video for each frame. For version 6 robots, use 32-bit
+// integers.
+//
+// The next section of the Robot is the packet size index (combined compressed
+// size of video + audio for each frame). In version 5 Robots, read ''total
+// number of frames'' 16-bit integers. In version 6 robots, use 32-bit integers.
+//
+// The next section of the Robot is the cue times index. Read 256 32-bit
+// integers, which represent the number of ticks from the start of playback that
+// the given cue point falls on.
+//
+// The next section of the Robot is the cue values index. Read 256 16-bit
+// integers, which represent the actual cue values that will be passed back to
+// the game engine when a cue is requested.
+//
+// Finally, to get to the first frame packet, seek from the current position to
+// the start of the next 2048-byte-aligned sector.
+//
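+// Continuing the sketch above (again with invented names), the v5 index
+// tables and the alignment seek to the first frame packet look like this;
+// v6 robots use readUint32() for the two size tables:
+//
+//   Common::Array<uint32> videoSizes, packetSizes;
+//   for (uint16 i = 0; i < numFramesTotal; ++i)
+//       videoSizes.push_back(s->readUint16());
+//   for (uint16 i = 0; i < numFramesTotal; ++i)
+//       packetSizes.push_back(s->readUint16());
+//   s->skip(256 * 4);                                // cue times
+//   s->skip(256 * 2);                                // cue values
+//   if (s->pos() % 2048)
+//       s->seek(2048 - (s->pos() % 2048), SEEK_CUR); // next 2048-byte sector
+//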
+// Frame packet:
+//
+// byte | description
+// 0..n | video data (size is in the ''video frame size index'')
+// n+1.. | optional audio data (size is ''size of audio blocks'')
+//
+// Video data:
+//
+// byte | description
+// 0-1 | number of cels in the frame (max 10)
+// 2..n | cels
+//
+// Cel:
+//
+// 0-21 | cel header
+// 22..n | data chunks
+//
+// Cel header:
+//
+// byte | description
+// 0 | unused
+// 1 | vertical scale factor, in percent decimation (100 = no decimation,
+// | 50 = 50% of lines were removed)
+// 2-3 | cel width
+// 4-5 | cel height
+// 6-9 | unused
+// 10-11 | cel x-position, in Robot coordinates
+// 12-13 | cel y-position, in Robot coordinates
+// 14-15 | cel total data chunk size, in bytes
+// 16-17 | number of data chunks
+// 18-21 | unused
+//
+// Cel data chunk:
+//
+// 0-9 | cel data chunk header
+// 10..n | cel data
+//
+// Cel data chunk header:
+//
+// byte | description
+// 0-3 | compressed size
+// 4-7 | decompressed size
+// 8-9 | compression type (0 = LZS, 2 = uncompressed)
+//
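+// Because the cel header stores the total data chunk size, a cel can be
+// measured or skipped without decoding its chunks; for example (names
+// invented, using the 22-byte cel header described above):
+//
+//   const uint16 dataSize = READ_SCI11ENDIAN_UINT16(cel + 14);
+//   const byte *nextCel = cel + 22 + dataSize;
+//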
+// Random frame seeking can be done by calculating the address of the frame
+// packet by adding up the ''packet size index'' entries up to the current
+// frame. This will normally disable audio playback, as audio data in a packet
+// does not correspond to the video in the same packet.
+//
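+// As a sketch, the per-frame seek table (this is essentially what the
+// decoder's `_recordPositions` list used by `seekToFrame` holds) can be
+// accumulated from the packet size index, starting at the offset of the
+// first 2048-byte-aligned frame packet (`firstPacketPosition` here is an
+// invented name for that offset):
+//
+//   Common::Array<int> recordPositions;
+//   int position = firstPacketPosition;
+//   for (uint16 i = 0; i < numFramesTotal; ++i) {
+//       recordPositions.push_back(position);
+//       position += packetSizes[i];
+//   }
+//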
+// Audio data is placed immediately after the end of the video data in a packet,
+// and consists of an audio header plus compressed audio data:
+//
+// Audio data:
+//
+// byte | description
+// 0-7 | audio data header
+// 8-15 | DPCM runway
+// 16..n | compressed audio data
+//
+// Audio data header:
+//
+// byte | description
+// 0-3 | absolute position of audio in the audio stream
+// 4-7 | the size of the audio block, excluding the header
+//
+// When a block of audio is processed, first check to ensure that the
+// decompressed audio block's `position * 2 + length * 4` runs past the end of
+// the last packet of the same evenness/oddness. Discard the audio block
+// entirely if data has already been written past the end of this block for this
+// channel, or if the read head has already read past the end of this audio
+// block.
+//
+// If the block is not discarded, apply DPCM decompression to the entire block,
+// starting from beginning of the DPCM runway, using an initial sample value of
+// 0. Then, copy every sample from the decompressed source outside of the DPCM
+// runway into every *other* sample of the final audio buffer (1 -> 2, 2 -> 4,
+// 3 -> 6, etc.).
+//
+// Finally, for any skipped samples where the opposing (even/odd) channel did
+// not yet write, interpolate the skipped areas by adding together the
+// neighbouring samples from this audio block and dividing by two. (This allows
+// the audio quality to degrade to 11kHz in case it takes too long to decode all
+// the frames in the stream). Interpolated samples must not be written on top of
+// true data from the opposing channel. Audio from later packets must also not
+// be written on top of data in the same channel that was already written by an
+// earlier packet, in particular because the first 8 bytes of the next packet
+// are garbage data used to move the waveform to the correct position (due to
+// the use of DPCM compression).
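+//
+// As an illustrative sketch only (names invented), expanding one DPCM-decoded
+// block into the interleaved output, with the overwrite and bounds checks
+// described above omitted:
+//
+//   // `base` is the block's starting sample index in the output stream:
+//   // even blocks land on even indices, odd blocks on odd indices.
+//   for (int i = 0; i < numSamples; ++i)
+//       out[base + 2 * i] = block[i];                // every other sample
+//   for (int i = 0; i + 1 < numSamples; ++i) {
+//       const int gap = base + 2 * i + 1;            // slot owned by the
+//       if (!hasTrueSample[gap])                     // opposing channel
+//           out[gap] = (block[i] + block[i + 1]) / 2;
+//   }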
-namespace Sci {
+#pragma mark -
+#pragma mark RobotAudioStream
+
+/**
+ * A Robot audio stream is a simple loop buffer
+ * that accepts audio blocks from the Robot engine.
+ */
+class RobotAudioStream : public Audio::AudioStream {
+public:
+ enum {
+ /**
+ * The sample rate used for all robot audio.
+ */
+ kRobotSampleRate = 22050,
+
+ /**
+ * Multiplier for the size of a packet that
+ * is being expanded by writing to every other
+ * byte of the target buffer.
+ */
+ kEOSExpansion = 2
+ };
+
+ /**
+ * Playback state information. Used for framerate
+ * calculation.
+ */
+ struct StreamState {
+ /**
+ * The current position of the read head of
+ * the audio stream.
+ */
+ int bytesPlaying;
+
+ /**
+ * The sample rate of the audio stream.
+ * Always 22050.
+ */
+ uint16 rate;
+
+ /**
+ * The bit depth of the audio stream.
+ * Always 16.
+ */
+ uint8 bits;
+ };
+
+ /**
+ * A single packet of compressed audio from a
+ * Robot data stream.
+ */
+ struct RobotAudioPacket {
+ /**
+ * Raw DPCM-compressed audio data.
+ */
+ byte *data;
+
+ /**
+ * The size of the compressed audio data,
+ * in bytes.
+ */
+ int dataSize;
+
+ /**
+ * The uncompressed, file-relative position
+ * of this audio packet.
+ */
+ int position;
+
+ RobotAudioPacket(byte *data_, const int dataSize_, const int position_) :
+ data(data_), dataSize(dataSize_), position(position_) {}
+ };
+
+ RobotAudioStream(const int32 bufferSize);
+ virtual ~RobotAudioStream();
+
+ /**
+ * Adds a new audio packet to the stream.
+ * @returns `true` if the audio packet was fully
+ * consumed, otherwise `false`.
+ */
+ bool addPacket(const RobotAudioPacket &packet);
+
+ /**
+ * Prevents any additional audio packets from
+ * being added to the audio stream.
+ */
+ void finish();
+
+ /**
+ * Returns the current status of the audio
+ * stream.
+ */
+ StreamState getStatus() const;
+
+private:
+ Common::Mutex _mutex;
+
+ /**
+ * Loop buffer for playback. Contains decompressed
+ * 16-bit PCM samples.
+ */
+ byte *_loopBuffer;
+
+ /**
+ * The size of the loop buffer, in bytes.
+ */
+ int32 _loopBufferSize;
+
+ /**
+ * The position of the read head within the loop
+ * buffer, in bytes.
+ */
+ int32 _readHead;
+
+ /**
+ * The lowest file position that can be buffered,
+ * in uncompressed bytes.
+ */
+ int32 _readHeadAbs;
+
+ /**
+ * The highest file position that can be buffered,
+ * in uncompressed bytes.
+ */
+ int32 _maxWriteAbs;
+
+ /**
+ * The highest file position, in uncompressed bytes,
+ * that has been written to the stream.
+ * Different from `_maxWriteAbs`, which is the highest
+ * uncompressed position which *can* be written right
+ * now.
+ */
+ int32 _writeHeadAbs;
+
+ /**
+ * The highest file position, in uncompressed bytes,
+ * that has been written to the even & odd sides of
+ * the stream.
+ *
+ * Index 0 corresponds to the 'even' side; index
+	 * 1 corresponds to the 'odd' side.
+ */
+ int32 _jointMin[2];
+
+ /**
+ * When `true`, the stream is waiting for all primer
+ * blocks to be received before allowing playback to
+ * begin.
+ */
+ bool _waiting;
+
+ /**
+ * When `true`, the stream will accept no more audio
+ * blocks.
+ */
+ bool _finished;
+
+ /**
+ * The uncompressed position of the first packet of
+ * robot data. Used to decide whether all primer
+ * blocks have been received and the stream should
+ * be started.
+ */
+ int32 _firstPacketPosition;
+
+ /**
+ * Decompression buffer, used to temporarily store
+ * an uncompressed block of audio data.
+ */
+ byte *_decompressionBuffer;
+
+ /**
+ * The size of the decompression buffer, in bytes.
+ */
+ int32 _decompressionBufferSize;
+
+ /**
+ * The position of the packet currently in the
+ * decompression buffer. Used to avoid
+ * re-decompressing audio data that has already
+ * been decompressed during a partial packet read.
+ */
+ int32 _decompressionBufferPosition;
+
+ /**
+ * Calculates the absolute ranges for new fills
+ * into the loop buffer.
+ */
+ void fillRobotBuffer(const RobotAudioPacket &packet, const int8 bufferIndex);
+
+ /**
+ * Interpolates `numSamples` samples from the read
+ * head, if no true samples were written for one
+ * (or both) of the joint channels.
+ */
+ void interpolateMissingSamples(const int32 numSamples);
+
+#pragma mark -
+#pragma mark RobotAudioStream - AudioStream implementation
+public:
+ int readBuffer(Audio::st_sample_t *outBuffer, int numSamples) override;
+ virtual bool isStereo() const override { return false; };
+ virtual int getRate() const override { return 22050; };
+ virtual bool endOfData() const override {
+ Common::StackLock lock(_mutex);
+ return _readHeadAbs >= _writeHeadAbs;
+ };
+ virtual bool endOfStream() const override {
+ Common::StackLock lock(_mutex);
+ return _finished && endOfData();
+ }
+};
+
+#pragma mark -
+#pragma mark RobotDecoder
+
+/**
+ * RobotDecoder implements the logic required
+ * for Robot animations.
+ *
+ * @note A paused or finished RobotDecoder was
+ * classified as serializable in SCI3, but the
+ * save/load code would attempt to use uninitialised
+ * values, so it seems that robots were not ever
+ * actually able to be saved.
+ */
+class RobotDecoder {
+public:
+ RobotDecoder(SegManager *segMan);
+ ~RobotDecoder();
+
+private:
+ SegManager *_segMan;
+
+#pragma mark Constants
+public:
+ /**
+ * The playback status of the robot.
+ */
+ enum RobotStatus {
+ kRobotStatusUninitialized = 0,
+ kRobotStatusPlaying = 1,
+ kRobotStatusEnd = 2,
+ kRobotStatusPaused = 3
+ };
+
+ enum {
+ // Special high value used to represent
+ // parameters that should be left unchanged
+ // when calling `showFrame`
+ kUnspecified = 50000
+ };
+
+private:
+ enum {
+ /**
+ * Maximum number of on-screen screen items.
+ */
+ kScreenItemListSize = 10,
+
+ /**
+ * Maximum number of queued audio blocks.
+ */
+ kAudioListSize = 10,
+
+ /**
+ * Maximum number of samples used for frame timing.
+ */
+ kDelayListSize = 10,
+
+ /**
+ * Maximum number of cues.
+ */
+ kCueListSize = 256,
+
+ /**
+ * Maximum number of 'fixed' cels that never
+ * change for the duration of a robot.
+ */
+ kFixedCelListSize = 4,
+
+ /**
+ * The size of a hunk palette in the Robot stream.
+ */
+ kRawPaletteSize = 1200,
+
+ /**
+ * The size of a frame of Robot data. This
+ * value was used to align the first block of
+ * data after the main Robot header to the next
+ * CD sector.
+ */
+ kRobotFrameSize = 2048,
+
+ /**
+ * The size of a block of zero-compressed
+ * audio. Used to fill audio when the size of
+ * an audio packet does not match the expected
+ * packet size.
+ */
+ kRobotZeroCompressSize = 2048,
-class RobotDecoder : public Video::VideoDecoder {
+ /**
+ * The size of the audio block header, in bytes.
+ * The audio block header consists of the
+ * compressed size of the audio in the record,
+ * plus the position of the audio in the
+ * compressed data stream.
+ */
+ kAudioBlockHeaderSize = 8,
+
+ /**
+ * The size of a Robot cel header, in bytes.
+ */
+ kCelHeaderSize = 22,
+
+ /**
+ * The maximum amount that the frame rate is
+ * allowed to drift from the nominal frame rate
+ * in order to correct for AV drift or slow
+ * playback.
+ */
+ kMaxFrameRateDrift = 1
+ };
+
+ /**
+ * The version number for the currently loaded
+ * robot.
+ *
+ * There are several known versions of robot:
+ *
+ * v2: before Nov 1994; no known examples
+ * v3: before Nov 1994; no known examples
+ * v4: Jan 1995; PQ:SWAT demo
+ * v5: Mar 1995; SCI2.1 and SCI3 games
+ * v6: SCI3 games
+ */
+ uint16 _version;
+
+#pragma mark -
+#pragma mark Initialisation
+private:
+ /**
+ * Sets up the read stream for the robot.
+ */
+ void initStream(const GuiResourceId robotId);
+
+ /**
+ * Sets up the initial values for playback control.
+ */
+ void initPlayback();
+
+ /**
+ * Sets up the initial values for audio decoding.
+ */
+ void initAudio();
+
+ /**
+ * Sets up the initial values for video rendering.
+ */
+ void initVideo(const int16 x, const int16 y, const int16 scale, const reg_t plane, const bool hasPalette, const uint16 paletteSize);
+
+ /**
+ * Sets up the robot's data record and cue positions.
+ */
+ void initRecordAndCuePositions();
+
+#pragma mark -
+#pragma mark Playback
public:
- RobotDecoder(bool isBigEndian);
- virtual ~RobotDecoder();
+ /**
+ * Opens a robot file for playback.
+ * Newly opened robots are paused by default.
+ */
+ void open(const GuiResourceId robotId, const reg_t plane, const int16 priority, const int16 x, const int16 y, const int16 scale);
- bool loadStream(Common::SeekableReadStream *stream);
- bool load(GuiResourceId id);
+ /**
+ * Closes the currently open robot file.
+ */
void close();
- void setPos(uint16 x, uint16 y) { _pos = Common::Point(x, y); }
- Common::Point getPos() const { return _pos; }
+ /**
+ * Pauses the robot. Once paused, the audio for a robot
+ * is disabled until the end of playback.
+ */
+ void pause();
+
+ /**
+ * Resumes a paused robot.
+ */
+ void resume();
+
+ /**
+ * Moves robot to the specified frame and pauses playback.
+ *
+ * @note Called DisplayFrame in SSCI.
+ */
+ void showFrame(const uint16 frameNo, const uint16 newX, const uint16 newY, const uint16 newPriority);
+
+ /**
+ * Retrieves the value associated with the
+ * current cue point.
+ */
+ int16 getCue() const;
+
+ /**
+ * Gets the currently displayed frame.
+ */
+ int16 getFrameNo() const;
+
+ /**
+ * Gets the playback status of the player.
+ */
+ RobotStatus getStatus() const;
+
+private:
+ /**
+ * The read stream containing raw robot data.
+ */
+ Common::SeekableSubReadStreamEndian *_stream;
+
+ /**
+ * The current status of the player.
+ */
+ RobotStatus _status;
+
+ typedef Common::Array<int> PositionList;
+
+ /**
+ * A map of frame numbers to byte offsets within `_stream`.
+ */
+ PositionList _recordPositions;
+
+ /**
+ * The offset of the Robot file within a
+ * resource bundle.
+ */
+ int32 _fileOffset;
+
+ /**
+ * A list of cue times that is updated to
+ * prevent earlier cue values from being
+ * given to the game more than once.
+ */
+ mutable int32 _cueTimes[kCueListSize];
+
+ /**
+ * The original list of cue times from the
+ * raw Robot data.
+ */
+ int32 _masterCueTimes[kCueListSize];
+
+ /**
+ * The list of values to provide to a game
+ * when a cue value is requested.
+ */
+ int32 _cueValues[kCueListSize];
+
+ /**
+ * The current playback frame rate.
+ */
+ int16 _frameRate;
+
+ /**
+ * The nominal playback frame rate.
+ */
+ int16 _normalFrameRate;
+
+ /**
+ * The minimal playback frame rate. Used to
+ * correct for AV sync drift when the video
+ * is more than one frame ahead of the audio.
+ */
+ int16 _minFrameRate;
+
+ /**
+ * The maximum playback frame rate. Used to
+ * correct for AV sync drift when the video
+ * is more than one frame behind the audio.
+ */
+ int16 _maxFrameRate;
+
+ /**
+ * The maximum number of record blocks that
+ * can be skipped without causing audio to
+ * drop out.
+ */
+ int16 _maxSkippablePackets;
+
+ /**
+ * The currently displayed frame number.
+ */
+ int _currentFrameNo;
+
+ /**
+ * The last displayed frame number.
+ */
+ int _previousFrameNo;
+
+ /**
+ * The time, in ticks, when the robot was
+ * last started or resumed.
+ */
+ int32 _startTime;
+
+ /**
+ * The first frame displayed when the
+ * robot was resumed.
+ */
+ int32 _startFrameNo;
+
+ /**
+ * The last frame displayed when the robot
+ * was resumed.
+ */
+ int32 _startingFrameNo;
+
+ /**
+ * Seeks the raw data stream to the record for
+ * the given frame number.
+ */
+ bool seekToFrame(const int frameNo);
-protected:
- void readNextPacket();
+ /**
+ * Sets the start time and frame of the robot
+ * when the robot is started or resumed.
+ */
+ void setRobotTime(const int frameNo);
+#pragma mark -
+#pragma mark Timing
private:
- class RobotVideoTrack : public FixedRateVideoTrack {
+ /**
+ * This class tracks the amount of time it takes for
+ * a frame of robot animation to be rendered. This
+ * information is used by the player to speculatively
+ * skip rendering of future frames to keep the
+ * animation in sync with the robot audio.
+ */
+ class DelayTime {
public:
- RobotVideoTrack(int frameCount);
- ~RobotVideoTrack();
-
- uint16 getWidth() const;
- uint16 getHeight() const;
- Graphics::PixelFormat getPixelFormat() const;
- int getCurFrame() const { return _curFrame; }
- int getFrameCount() const { return _frameCount; }
- const Graphics::Surface *decodeNextFrame() { return _surface; }
- const byte *getPalette() const { _dirtyPalette = false; return _palette; }
- bool hasDirtyPalette() const { return _dirtyPalette; }
-
- void readPaletteChunk(Common::SeekableSubReadStreamEndian *stream, uint16 chunkSize);
- void calculateVideoDimensions(Common::SeekableSubReadStreamEndian *stream, uint32 *frameSizes);
- Graphics::Surface *getSurface() { return _surface; }
- void increaseCurFrame() { _curFrame++; }
-
- protected:
- Common::Rational getFrameRate() const { return Common::Rational(60, 10); }
+ DelayTime(RobotDecoder *decoder);
+
+ /**
+ * Starts performance timing.
+ */
+ void startTiming();
+
+ /**
+ * Ends performance timing.
+ */
+ void endTiming();
+
+ /**
+ * Returns whether or not timing is currently in
+ * progress.
+ */
+ bool timingInProgress() const;
+
+ /**
+ * Returns the median time, in ticks, of the
+ * currently stored timing samples.
+ */
+ int predictedTicks() const;
private:
- int _frameCount;
- int _curFrame;
- byte _palette[256 * 3];
- mutable bool _dirtyPalette;
- Graphics::Surface *_surface;
+ RobotDecoder *_decoder;
+
+ /**
+ * The start time, in ticks, of the current timing
+ * loop. If no loop is in progress, the value is 0.
+ *
+ * @note This differs slightly from SSCI, where
+ * the not-timing value was -1.
+ */
+ uint32 _startTime;
+
+ /**
+ * A sorted list containing the timing data for
+ * the last `kDelayListSize` frames, in ticks.
+ */
+ int _delays[kDelayListSize];
+
+ /**
+ * A list of monotonically increasing identifiers,
+ * one per entry in `_delays`, used to find and
+ * replace the oldest sample when the next timing
+ * operation finishes.
+ */
+ uint _timestamps[kDelayListSize];
+
+ /**
+ * The identifier of the oldest timing.
+ */
+ uint _oldestTimestamp;
+
+ /**
+ * The identifier of the newest timing.
+ */
+ uint _newestTimestamp;
+
+ /**
+ * Sorts the list of timings.
+ */
+ void sortList();
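+
+ /*
+ * A minimal sketch of how the median prediction
+ * could be derived from the members above, assuming
+ * `_delays` is kept sorted by sortList() and that
+ * fewer than kDelayListSize samples may have been
+ * recorded so far (the function name and its
+ * `sampleCount` parameter are illustrative only):
+ *
+ *   int exampleMedianTicks(const int *delays, const uint sampleCount) {
+ *       if (sampleCount == 0) {
+ *           return 0; // no data yet; assume rendering is instantaneous
+ *       }
+ *       return delays[sampleCount / 2]; // middle of a sorted list
+ *   }
+ */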
};
- class RobotAudioTrack : public AudioTrack {
+ /**
+ * Calculates the next frame number that needs
+ * to be rendered, using the timing data
+ * collected by DelayTime.
+ */
+ uint16 calculateNextFrameNo(const uint32 extraTicks = 0) const;
+
+ /**
+ * Calculates and returns the number of frames
+ * that should be rendered in `ticks` time,
+ * according to the current target frame rate
+ * of the robot.
+ */
+ uint32 ticksToFrames(const uint32 ticks) const;
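+
+ /*
+ * A worked sketch of the ticks-to-frames conversion,
+ * assuming SCI game ticks run at 60 per second, so a
+ * robot targeting `_frameRate` frames per second
+ * advances _frameRate / 60 frames per tick:
+ *
+ *   uint32 exampleTicksToFrames(const uint32 ticks) const {
+ *       return ticks * _frameRate / 60;
+ *   }
+ */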
+
+ /**
+ * Gets the current game time, in ticks.
+ */
+ uint32 getTickCount() const;
+
+ /**
+ * The performance timer for the robot.
+ */
+ DelayTime _delayTime;
+
+#pragma mark -
+#pragma mark Audio
+private:
+ enum {
+ /**
+ * The number of ticks that should elapse
+ * between each AV sync check.
+ */
+ kAudioSyncCheckInterval = 5 * 60 /* 5 seconds */
+ };
+
+ /**
+ * The status of the audio track of a Robot
+ * animation.
+ */
+ enum RobotAudioStatus {
+ kRobotAudioReady = 1,
+ kRobotAudioStopped = 2,
+ kRobotAudioPlaying = 3,
+ kRobotAudioPaused = 4,
+ kRobotAudioStopping = 5
+ };
+
+#pragma mark -
+#pragma mark Audio - AudioList
+private:
+ /**
+ * This class manages packetized audio playback
+ * for robots.
+ */
+ class AudioList {
public:
- RobotAudioTrack();
- ~RobotAudioTrack();
+ AudioList();
+
+ /**
+ * Starts playback of robot audio.
+ */
+ void startAudioNow();
+
+ /**
+ * Stops playback of robot audio, allowing
+ * any queued audio to finish playing back.
+ */
+ void stopAudio();
+
+ /**
+ * Stops playback of robot audio immediately.
+ */
+ void stopAudioNow();
+
+ /**
+ * Submits as many blocks of audio as possible
+ * to the audio engine.
+ */
+ void submitDriverMax();
+
+ /**
+ * Adds a new AudioBlock to the queue.
+ *
+ * @param position The absolute position of the
+ * audio for the block, in compressed bytes.
+ * @param size The size of the buffer.
+ * @param buffer A pointer to compressed audio
+ * data that will be copied into the new
+ * AudioBlock.
+ */
+ void addBlock(const int position, const int size, const byte *buffer);
- Audio::Mixer::SoundType getSoundType() const { return Audio::Mixer::kMusicSoundType; }
+ /**
+ * Immediately stops any active playback and
+ * purges all audio data in the audio list.
+ */
+ void reset();
- void queueBuffer(byte *buffer, int size);
+ /**
+ * Pauses the robot audio channel in
+ * preparation for the first block of audio
+ * data to be read.
+ */
+ void prepareForPrimer();
- protected:
- Audio::AudioStream *getAudioStream() const;
+ /**
+ * Sets the offset that is applied to the
+ * position of audio packets sent to the
+ * audio stream.
+ */
+ void setAudioOffset(const int offset);
+
+#pragma mark -
+#pragma mark Audio - AudioList - AudioBlock
private:
- Audio::QueuingAudioStream *_audioStream;
+ /**
+ * AudioBlock represents a block of audio
+ * from the Robot's audio track.
+ */
+ class AudioBlock {
+ public:
+ AudioBlock(const int position, const int size, const byte *const data);
+ ~AudioBlock();
+
+ /**
+ * Submits the block of audio to the
+ * audio manager.
+ * @returns true if the block was fully
+ * read, or false if the block was not
+ * read or only partially read.
+ */
+ bool submit(const int startOffset);
+
+ private:
+ /**
+ * The absolute position, in compressed
+ * bytes, of this audio block's audio
+ * data in the audio stream.
+ */
+ int _position;
+
+ /**
+ * The compressed size, in bytes, of
+ * this audio block's audio data.
+ */
+ int _size;
+
+ /**
+ * A buffer containing raw
+ * SOL-compressed audio data.
+ */
+ byte *_data;
+ };
+
+ /**
+ * The list of compressed audio blocks
+ * submitted for playback.
+ */
+ AudioBlock *_blocks[kAudioListSize];
+
+ /**
+ * The number of blocks in `_blocks` that are
+ * ready to be submitted.
+ */
+ uint8 _blocksSize;
+
+ /**
+ * The index of the oldest submitted audio block.
+ */
+ uint8 _oldestBlockIndex;
+
+ /**
+ * The index of the newest submitted audio block.
+ */
+ uint8 _newestBlockIndex;
+
+ /**
+ * The offset used when sending packets to the
+ * audio stream.
+ */
+ int _startOffset;
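+
+ /*
+ * The block list and the oldest/newest indices above
+ * imply a fixed-size circular queue. A minimal sketch
+ * of draining it in submission order, oldest block
+ * first (the function name is illustrative, and the
+ * real submitDriverMax also has to stop once the
+ * audio driver cannot accept more data):
+ *
+ *   void exampleSubmitAll() {
+ *       for (uint8 i = 0; i < _blocksSize; ++i) {
+ *           const uint8 index = (_oldestBlockIndex + i) % kAudioListSize;
+ *           _blocks[index]->submit(_startOffset);
+ *       }
+ *   }
+ */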
+
+ /**
+ * The status of robot audio playback.
+ */
+ RobotAudioStatus _status;
+
+ /**
+ * Frees all audio blocks in the `_blocks` list.
+ */
+ void freeAudioBlocks();
};
- struct RobotHeader {
- // 6 bytes, identifier bytes
- uint16 version;
- uint16 audioChunkSize;
- uint16 audioSilenceSize;
- // 2 bytes, unknown
- uint16 frameCount;
- uint16 paletteDataSize;
- uint16 unkChunkDataSize;
- // 5 bytes, unknown
- byte hasSound;
- // 34 bytes, unknown
- } _header;
-
- void readHeaderChunk();
- void readFrameSizesChunk();
-
- Common::Point _pos;
- bool _isBigEndian;
- uint32 *_frameTotalSize;
-
- Common::SeekableSubReadStreamEndian *_fileStream;
-};
+ /**
+ * Whether or not this robot animation has
+ * an audio track.
+ */
+ bool _hasAudio;
+
+ /**
+ * The audio list for the current robot.
+ */
+ AudioList _audioList;
+
+ /**
+ * The size, in bytes, of a block of audio data,
+ * excluding the audio block header.
+ */
+ uint16 _audioBlockSize;
+
+ /**
+ * The expected size of a block of audio data,
+ * in bytes, excluding the audio block header.
+ */
+ int16 _expectedAudioBlockSize;
+
+ /**
+ * The number of compressed audio bytes that are
+ * needed per frame to fill the audio buffer
+ * without causing audio to drop out.
+ */
+ int16 _audioRecordInterval;
+
+ /**
+ * If true, primer audio buffers should be filled
+ * with silence instead of trying to read buffers
+ * from the Robot data.
+ */
+ uint16 _primerZeroCompressFlag;
+
+ /**
+ * The size, in bytes, of the primer audio in the
+ * Robot, including any extra alignment padding.
+ */
+ uint16 _primerReservedSize;
+
+ /**
+ * The combined size, in bytes, of the even and odd
+ * primer channels.
+ */
+ int32 _totalPrimerSize;
+
+ /**
+ * The absolute offset of the primer audio data in
+ * the robot data stream.
+ */
+ int32 _primerPosition;
+
+ /**
+ * The size, in bytes, of the even primer.
+ */
+ int32 _evenPrimerSize;
+
+ /**
+ * The size, in bytes, of the odd primer.
+ */
+ int32 _oddPrimerSize;
+
+ /**
+ * The absolute position in the audio stream of
+ * the first audio packet.
+ */
+ int32 _firstAudioRecordPosition;
-} // End of namespace Sci
+ /**
+ * A temporary buffer used to hold one frame of
+ * raw (DPCM-compressed) audio when reading audio
+ * records from the robot stream.
+ */
+ byte *_audioBuffer;
+ /**
+ * The next tick count when AV sync should be
+ * checked and framerate adjustments made, if
+ * necessary.
+ */
+ uint32 _checkAudioSyncTime;
+
+ /**
+ * Primes the audio buffer with the first frame
+ * of audio data.
+ *
+ * @note `primeAudio` was `InitAudio` in SSCI
+ */
+ bool primeAudio(const uint32 startTick);
+
+ /**
+ * Reads primer data from the robot data stream
+ * and puts it into the given buffers.
+ */
+ bool readPrimerData(byte *outEvenBuffer, byte *outOddBuffer);
+
+ /**
+ * Reads audio data for the given frame number
+ * into the given buffer.
+ *
+ * @param outAudioPosition The position of the
+ * audio, in compressed bytes, in the data stream.
+ * @param outAudioSize The size of the audio data,
+ * in compressed bytes.
+ */
+ bool readAudioDataFromRecord(const int frameNo, byte *outBuffer, int &outAudioPosition, int &outAudioSize);
+
+ /**
+ * Submits part of the audio packet of the given
+ * frame to the audio list, starting `startPosition`
+ * bytes into the audio.
+ */
+ bool readPartialAudioRecordAndSubmit(const int startFrame, const int startPosition);
+
+#pragma mark -
+#pragma mark Rendering
+public:
+ /**
+ * Puts the current dimensions of the robot, in game script
+ * coordinates, into the given rect, and returns the total
+ * number of frames in the robot animation.
+ */
+ uint16 getFrameSize(Common::Rect &outRect) const;
+
+ /**
+ * Pumps the robot player for the next frame of video.
+ * This is the main rendering function.
+ */
+ void doRobot();
+
+ /**
+ * Submits any outstanding audio blocks that should
+ * be added to the queue before the robot frame
+ * becomes visible.
+ */
+ void frameAlmostVisible();
+
+ /**
+ * Evaluates frame drift and makes modifications to
+ * the player in order to ensure that future frames
+ * will arrive on time.
+ */
+ void frameNowVisible();
+
+ /**
+ * Scales a vertically compressed cel to its original
+ * uncompressed dimensions.
+ */
+ void expandCel(byte *target, const byte *source, const int16 celWidth, const int16 celHeight) const;
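+
+ /*
+ * A minimal sketch of the kind of nearest-neighbour
+ * row duplication this implies, written in terms of
+ * explicit source and target heights so it stays
+ * agnostic about how `_verticalScaleFactor` maps onto
+ * them (the function name and parameters are
+ * illustrative only):
+ *
+ *   void exampleExpandRows(byte *target, const byte *source, const int16 width, const int16 sourceHeight, const int16 targetHeight) {
+ *       for (int16 y = 0; y < targetHeight; ++y) {
+ *           const int16 sourceY = (int16)((int32)y * sourceHeight / targetHeight);
+ *           memcpy(target + y * width, source + sourceY * width, width);
+ *       }
+ *   }
+ */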
+
+ /**
+ * Sets the visual priority of the robot.
+ * @see Plane::_priority
+ */
+ void setPriority(const int16 newPriority);
+
+private:
+ enum CompressionType {
+ kCompressionLZS = 0,
+ kCompressionNone = 2
+ };
+
+ /**
+ * Describes the state of a Robot video cel.
+ */
+ struct CelHandleInfo {
+ /**
+ * The persistence level of Robot cels.
+ */
+ enum CelHandleLifetime {
+ kNoCel = 0,
+ kFrameLifetime = 1,
+ kRobotLifetime = 2
+ };
+
+ /**
+ * A reg_t pointer to an in-memory
+ * bitmap containing the cel.
+ */
+ reg_t bitmapId;
+
+ /**
+ * The lifetime of the cel, either just
+ * for this frame or for the entire
+ * duration of the robot playback.
+ */
+ CelHandleLifetime status;
+
+ /**
+ * The size, in pixels, of the decompressed
+ * cel.
+ */
+ int area;
+
+ CelHandleInfo() : bitmapId(NULL_REG), status(kNoCel), area(0) {}
+ };
+
+ typedef Common::Array<ScreenItem *> RobotScreenItemList;
+ typedef Common::Array<CelHandleInfo> CelHandleList;
+ typedef Common::Array<int> VideoSizeList;
+ typedef Common::Array<uint> MaxCelAreaList;
+ typedef Common::Array<reg_t> FixedCelsList;
+ typedef Common::Array<Common::Point> CelPositionsList;
+ typedef Common::Array<byte> ScratchMemory;
+
+ /**
+ * Renders a version 5/6 robot frame.
+ */
+ void doVersion5(const bool shouldSubmitAudio = true);
+
+ /**
+ * Creates screen items for a version 5/6 robot.
+ */
+ void createCels5(const byte *rawVideoData, const int16 numCels, const bool usePalette);
+
+ /**
+ * Creates a single screen item for a cel in a
+ * version 5/6 robot.
+ *
+ * @returns the size, in bytes, of the raw cel data.
+ */
+ uint32 createCel5(const byte *rawVideoData, const int16 screenItemIndex, const bool usePalette);
+
+ /**
+ * Preallocates memory for the next `numCels` cels
+ * in the robot data stream.
+ */
+ void preallocateCelMemory(const byte *rawVideoData, const int16 numCels);
+
+ /**
+ * The decompressor for LZS-compressed cels.
+ */
+ DecompressorLZS _decompressor;
+
+ /**
+ * The origin of the robot animation, in screen
+ * coordinates.
+ */
+ Common::Point _position;
+
+ /**
+ * Global scaling applied to the robot.
+ */
+ ScaleInfo _scaleInfo;
+
+ /**
+ * The native resolution of the robot.
+ */
+ int16 _xResolution, _yResolution;
+
+ /**
+ * Whether or not the coordinates read from robot
+ * data are high resolution.
+ */
+ bool _isHiRes;
+
+ /**
+ * The maximum number of cels that will be rendered
+ * on any given frame in this robot. Used for
+ * preallocation of cel memory.
+ */
+ int16 _maxCelsPerFrame;
+
+ /**
+ * The maximum areas, in pixels, for each of
+ * the fixed cels in the robot. Used for
+ * preallocation of cel memory.
+ */
+ MaxCelAreaList _maxCelArea;
+
+ /**
+ * The hunk palette to use when rendering the
+ * current frame, if the `usePalette` flag was set
+ * in the robot header.
+ */
+ uint8 *_rawPalette;
+
+ /**
+ * A list of the raw video data sizes, in bytes,
+ * for each frame of the robot.
+ */
+ VideoSizeList _videoSizes;
+
+ /**
+ * A list of cels that will be present for the
+ * entire duration of the robot animation.
+ */
+ FixedCelsList _fixedCels;
+
+ /**
+ * A list of handles for each cel in the current
+ * frame.
+ */
+ CelHandleList _celHandles;
+
+ /**
+ * Scratch memory used to temporarily store
+ * decompressed cel data for vertically squashed
+ * cels.
+ */
+ ScratchMemory _celDecompressionBuffer;
+
+ /**
+ * The size, in bytes, of the squashed cel
+ * decompression buffer.
+ */
+ int _celDecompressionArea;
+
+ /**
+ * If true, the robot just started playing and
+ * is awaiting output for the first frame.
+ */
+ bool _syncFrame;
+
+ /**
+ * Scratch memory used to store the compressed robot
+ * video data for the current frame.
+ */
+ ScratchMemory _doVersion5Scratch;
+
+ /**
+ * When set to a non-negative value, forces the next
+ * call to doRobot to render the given frame number
+ * instead of whatever frame would have normally been
+ * rendered.
+ */
+ mutable int _cueForceShowFrame;
+
+ /**
+ * The plane where the robot animation will be drawn.
+ */
+ Plane *_plane;
+
+ /**
+ * A list of pointers to ScreenItems used by the robot.
+ */
+ RobotScreenItemList _screenItemList;
+
+ /**
+ * The positions of the various screen items in this
+ * robot, in screen coordinates.
+ */
+ Common::Array<int16> _screenItemX, _screenItemY;
+
+ /**
+ * The raw position values from the cel header for
+ * each screen item currently on-screen.
+ */
+ Common::Array<int16> _originalScreenItemX, _originalScreenItemY;
+
+ /**
+ * The duration of the current robot, in frames.
+ */
+ uint16 _numFramesTotal;
+
+ /**
+ * The screen priority of the video.
+ * @see ScreenItem::_priority
+ */
+ int16 _priority;
+
+ /**
+ * The amount of visual vertical compression applied
+ * to the current cel. A value of 100 means no
+ * compression; a value above 100 indicates how much
+ * the cel needs to be scaled along the y-axis to
+ * return to its original dimensions.
+ */
+ uint8 _verticalScaleFactor;
+};
+} // End of namespace Sci
#endif