aboutsummaryrefslogtreecommitdiff
path: root/video
diff options
context:
space:
mode:
Diffstat (limited to 'video')
-rw-r--r--video/avi_decoder.cpp2
-rw-r--r--video/avi_decoder.h2
-rw-r--r--video/bink_decoder.cpp26
-rw-r--r--video/bink_decoder.h2
-rw-r--r--video/codecs/svq1.cpp43
-rw-r--r--video/coktel_decoder.cpp2
-rw-r--r--video/psx_decoder.cpp6
-rw-r--r--video/psx_decoder.h2
-rw-r--r--video/qt_decoder.cpp2
-rw-r--r--video/qt_decoder.h1
-rw-r--r--video/smk_decoder.cpp2
-rw-r--r--video/theora_decoder.cpp4
-rw-r--r--video/video_decoder.cpp63
-rw-r--r--video/video_decoder.h20
14 files changed, 106 insertions, 71 deletions
diff --git a/video/avi_decoder.cpp b/video/avi_decoder.cpp
index 0d51f5b130..6062049b72 100644
--- a/video/avi_decoder.cpp
+++ b/video/avi_decoder.cpp
@@ -379,7 +379,7 @@ void AVIDecoder::readNextPacket() {
}
}
-AVIDecoder::AVIVideoTrack::AVIVideoTrack(int frameCount, const AVIStreamHeader &streamHeader, const BitmapInfoHeader &bitmapInfoHeader)
+AVIDecoder::AVIVideoTrack::AVIVideoTrack(int frameCount, const AVIStreamHeader &streamHeader, const BitmapInfoHeader &bitmapInfoHeader)
: _frameCount(frameCount), _vidsHeader(streamHeader), _bmInfo(bitmapInfoHeader) {
memset(_palette, 0, sizeof(_palette));
_videoCodec = createCodec();
diff --git a/video/avi_decoder.h b/video/avi_decoder.h
index a3a262db36..3bdc0561d1 100644
--- a/video/avi_decoder.h
+++ b/video/avi_decoder.h
@@ -214,7 +214,7 @@ private:
};
OldIndex _ixInfo;
- AVIHeader _header;
+ AVIHeader _header;
Common::SeekableReadStream *_fileStream;
bool _decodedHeader;
diff --git a/video/bink_decoder.cpp b/video/bink_decoder.cpp
index 620316806f..1ece22c963 100644
--- a/video/bink_decoder.cpp
+++ b/video/bink_decoder.cpp
@@ -236,7 +236,7 @@ BinkDecoder::AudioInfo::~AudioInfo() {
BinkDecoder::BinkVideoTrack::BinkVideoTrack(uint32 width, uint32 height, const Graphics::PixelFormat &format, uint32 frameCount, const Common::Rational &frameRate, bool swapPlanes, bool hasAlpha, uint32 id) :
_frameCount(frameCount), _frameRate(frameRate), _swapPlanes(swapPlanes), _hasAlpha(hasAlpha), _id(id) {
- _curFrame = -1;
+ _curFrame = -1;
for (int i = 0; i < 16; i++)
_huffman[i] = 0;
@@ -260,7 +260,23 @@ BinkDecoder::BinkVideoTrack::BinkVideoTrack(uint32 width, uint32 height, const G
_colHighHuffman[i].symbols[j] = j;
}
- _surface.create(width, height, format);
+ // Make the surface even-sized:
+ _surfaceHeight = height;
+ _surfaceWidth = width;
+
+ if (height & 1) {
+ _surfaceHeight++;
+ }
+ if (width & 1) {
+ _surfaceWidth++;
+ }
+
+ _surface.create(_surfaceWidth, _surfaceHeight, format);
+ // Since we over-allocate to make surfaces even-sized
+ // we need to set the actual VIDEO size back into the
+ // surface.
+ _surface.h = height;
+ _surface.w = width;
// Give the planes a bit extra space
width = _surface.w + 32;
@@ -329,9 +345,11 @@ void BinkDecoder::BinkVideoTrack::decodePacket(VideoFrame &frame) {
// Convert the YUV data we have to our format
// We're ignoring alpha for now
+ // The width used here is the surface-width, and not the video-width
+ // to allow for odd-sized videos.
assert(_curPlanes[0] && _curPlanes[1] && _curPlanes[2]);
- Graphics::convertYUV420ToRGB(&_surface, _curPlanes[0], _curPlanes[1], _curPlanes[2],
- _surface.w, _surface.h, _surface.w, _surface.w >> 1);
+ YUVToRGBMan.convert420(&_surface, Graphics::YUVToRGBManager::kScaleITU, _curPlanes[0], _curPlanes[1], _curPlanes[2],
+ _surfaceWidth, _surfaceHeight, _surfaceWidth, _surfaceWidth >> 1);
// And swap the planes with the reference planes
for (int i = 0; i < 4; i++)
diff --git a/video/bink_decoder.h b/video/bink_decoder.h
index 150e91aab7..27d3aa3691 100644
--- a/video/bink_decoder.h
+++ b/video/bink_decoder.h
@@ -231,6 +231,8 @@ private:
int _frameCount;
Graphics::Surface _surface;
+ int _surfaceWidth; ///< The actual surface width
+ int _surfaceHeight; ///< The actual surface height
uint32 _id; ///< The BIK FourCC.
diff --git a/video/codecs/svq1.cpp b/video/codecs/svq1.cpp
index 14452ab15b..56b376f590 100644
--- a/video/codecs/svq1.cpp
+++ b/video/codecs/svq1.cpp
@@ -180,28 +180,29 @@ const Graphics::Surface *SVQ1Decoder::decodeImage(Common::SeekableReadStream *st
frameData.skip(8);
}
- int yWidth = ALIGN(_frameWidth, 16);
- int yHeight = ALIGN(_frameHeight, 16);
- int uvWidth = ALIGN(yWidth / 4, 16);
- int uvHeight = ALIGN(yHeight / 4, 16);
+ uint yWidth = ALIGN(_frameWidth, 16);
+ uint yHeight = ALIGN(_frameHeight, 16);
+ uint uvWidth = ALIGN(yWidth / 4, 16);
+ uint uvHeight = ALIGN(yHeight / 4, 16);
+ uint uvPitch = uvWidth + 4; // we need at least one extra column and pitch must be divisible by 4
byte *current[3];
// Decode Y, U and V component planes
for (int i = 0; i < 3; i++) {
- int width, height;
+ uint width, height, pitch;
if (i == 0) {
width = yWidth;
height = yHeight;
+ pitch = width;
current[i] = new byte[width * height];
} else {
width = uvWidth;
height = uvHeight;
+ pitch = uvPitch;
- // Add an extra row's worth of data to not go out-of-bounds in the
- // color conversion. Then fill that with "empty" data.
- current[i] = new byte[width * (height + 1)];
- memset(current[i] + width * height, 0x80, width);
+ // Add an extra row here. See below for more information.
+ current[i] = new byte[pitch * (height + 1)];
}
if (frameType == 0) { // I Frame
@@ -209,12 +210,12 @@ const Graphics::Surface *SVQ1Decoder::decodeImage(Common::SeekableReadStream *st
byte *currentP = current[i];
for (uint16 y = 0; y < height; y += 16) {
for (uint16 x = 0; x < width; x += 16) {
- if (!svq1DecodeBlockIntra(&frameData, &currentP[x], width)) {
+ if (!svq1DecodeBlockIntra(&frameData, &currentP[x], pitch)) {
warning("svq1DecodeBlockIntra decode failure");
return _surface;
}
}
- currentP += 16 * width;
+ currentP += 16 * pitch;
}
} else {
// Delta frame (P or B)
@@ -233,7 +234,7 @@ const Graphics::Surface *SVQ1Decoder::decodeImage(Common::SeekableReadStream *st
byte *currentP = current[i];
for (uint16 y = 0; y < height; y += 16) {
for (uint16 x = 0; x < width; x += 16) {
- if (!svq1DecodeDeltaBlock(&frameData, &currentP[x], previous, width, pmv, x, y)) {
+ if (!svq1DecodeDeltaBlock(&frameData, &currentP[x], previous, pitch, pmv, x, y)) {
warning("svq1DecodeDeltaBlock decode failure");
return _surface;
}
@@ -241,7 +242,7 @@ const Graphics::Surface *SVQ1Decoder::decodeImage(Common::SeekableReadStream *st
pmv[0].x = pmv[0].y = 0;
- currentP += 16 * width;
+ currentP += 16 * pitch;
}
delete[] pmv;
@@ -256,7 +257,21 @@ const Graphics::Surface *SVQ1Decoder::decodeImage(Common::SeekableReadStream *st
_surface->h = _height;
}
- convertYUV410ToRGB(_surface, current[0], current[1], current[2], yWidth, yHeight, yWidth, uvWidth);
+ // We need to massage the chrominance data a bit so the converter can use it.
+ // Since the converter peeks at values one column and one row beyond the data, we need to fill those in
+
+ // First, fill in the column-after-last with the last column's value
+ for (uint i = 0; i < uvHeight; i++) {
+ current[1][i * uvPitch + uvWidth] = current[1][i * uvPitch + uvWidth - 1];
+ current[2][i * uvPitch + uvWidth] = current[2][i * uvPitch + uvWidth - 1];
+ }
+
+ // Then, copy the last row to the one after the last row
+ memcpy(current[1] + uvHeight * uvPitch, current[1] + (uvHeight - 1) * uvPitch, uvWidth + 1);
+ memcpy(current[2] + uvHeight * uvPitch, current[2] + (uvHeight - 1) * uvPitch, uvWidth + 1);
+
+ // Finally, actually do the conversion ;)
+ YUVToRGBMan.convert410(_surface, Graphics::YUVToRGBManager::kScaleFull, current[0], current[1], current[2], yWidth, yHeight, yWidth, uvPitch);
// Store the current surfaces for later and free the old ones
for (int i = 0; i < 3; i++) {
diff --git a/video/coktel_decoder.cpp b/video/coktel_decoder.cpp
index 5d7efe87af..08340a19a6 100644
--- a/video/coktel_decoder.cpp
+++ b/video/coktel_decoder.cpp
@@ -1317,7 +1317,7 @@ void IMDDecoder::processFrame() {
// Set palette
if (cmd == kCommandPalette) {
_stream->skip(2);
-
+
_paletteDirty = true;
for (int i = 0; i < 768; i++)
diff --git a/video/psx_decoder.cpp b/video/psx_decoder.cpp
index fa7f1e8cfe..57c8972ee5 100644
--- a/video/psx_decoder.cpp
+++ b/video/psx_decoder.cpp
@@ -234,7 +234,7 @@ void PSXStreamDecoder::readNextPacket() {
Common::SeekableReadStream *frame = new Common::MemoryReadStream(partialFrame, frameSize, DisposeAfterUse::YES);
_videoTrack->decodeFrame(frame, sectorsRead);
-
+
delete frame;
delete sector;
return;
@@ -297,7 +297,7 @@ Common::SeekableReadStream *PSXStreamDecoder::readSector() {
// Ha! It's palindromic!
#define AUDIO_DATA_CHUNK_SIZE 2304
-#define AUDIO_DATA_SAMPLE_COUNT 4032
+#define AUDIO_DATA_SAMPLE_COUNT 4032
static const int s_xaTable[5][2] = {
{ 0, 0 },
@@ -483,7 +483,7 @@ void PSXStreamDecoder::PSXVideoTrack::decodeFrame(Common::SeekableReadStream *fr
decodeMacroBlock(&bits, mbX, mbY, scale, version);
// Output data onto the frame
- Graphics::convertYUV420ToRGB(_surface, _yBuffer, _cbBuffer, _crBuffer, _surface->w, _surface->h, _macroBlocksW * 16, _macroBlocksW * 8);
+ YUVToRGBMan.convert420(_surface, Graphics::YUVToRGBManager::kScaleFull, _yBuffer, _cbBuffer, _crBuffer, _surface->w, _surface->h, _macroBlocksW * 16, _macroBlocksW * 8);
_curFrame++;
diff --git a/video/psx_decoder.h b/video/psx_decoder.h
index 11f311594d..d1d5204e37 100644
--- a/video/psx_decoder.h
+++ b/video/psx_decoder.h
@@ -149,7 +149,7 @@ private:
uint32 _frameCount;
Common::SeekableReadStream *_stream;
PSXVideoTrack *_videoTrack;
- PSXAudioTrack *_audioTrack;
+ PSXAudioTrack *_audioTrack;
Common::SeekableReadStream *readSector();
};
diff --git a/video/qt_decoder.cpp b/video/qt_decoder.cpp
index 87c530dba0..b4dab9ddfb 100644
--- a/video/qt_decoder.cpp
+++ b/video/qt_decoder.cpp
@@ -216,7 +216,7 @@ void QuickTimeDecoder::init() {
addTrack(new AudioTrackHandler(this, _audioTracks[i]));
// Initialize all the video tracks
- Common::Array<Common::QuickTimeParser::Track *> &tracks = Common::QuickTimeParser::_tracks;
+ const Common::Array<Common::QuickTimeParser::Track *> &tracks = Common::QuickTimeParser::_tracks;
for (uint32 i = 0; i < tracks.size(); i++) {
if (tracks[i]->codecType == CODEC_TYPE_VIDEO) {
for (uint32 j = 0; j < tracks[i]->sampleDescs.size(); j++)
diff --git a/video/qt_decoder.h b/video/qt_decoder.h
index 71d33711a6..45ab155c2c 100644
--- a/video/qt_decoder.h
+++ b/video/qt_decoder.h
@@ -53,6 +53,7 @@ class Codec;
*
* Video decoder used in engines:
* - mohawk
+ * - pegasus
* - sci
*/
class QuickTimeDecoder : public VideoDecoder, public Audio::QuickTimeAudioDecoder {
diff --git a/video/smk_decoder.cpp b/video/smk_decoder.cpp
index bea65142a1..c49791100d 100644
--- a/video/smk_decoder.cpp
+++ b/video/smk_decoder.cpp
@@ -318,7 +318,7 @@ bool SmackerDecoder::loadStream(Common::SeekableReadStream *stream) {
// 1 - set to 1 if file is Y-interlaced
// 2 - set to 1 if file is Y-doubled
// If bits 1 or 2 are set, the frame should be scaled to twice its height
- // before it is displayed.
+ // before it is displayed.
_header.flags = _fileStream->readUint32LE();
SmackerVideoTrack *videoTrack = createVideoTrack(width, height, frameCount, frameRate, _header.flags, _header.signature);
diff --git a/video/theora_decoder.cpp b/video/theora_decoder.cpp
index d7260469e6..63aa93e2f5 100644
--- a/video/theora_decoder.cpp
+++ b/video/theora_decoder.cpp
@@ -302,7 +302,7 @@ bool TheoraDecoder::TheoraVideoTrack::decodePacket(ogg_packet &oggPacket) {
_nextFrameStartTime += _frameRate.getInverse().toDouble();
else
_nextFrameStartTime = time;
-
+
return true;
}
@@ -328,7 +328,7 @@ void TheoraDecoder::TheoraVideoTrack::translateYUVtoRGBA(th_ycbcr_buffer &YUVBuf
assert(YUVBuffer[kBufferU].height == YUVBuffer[kBufferY].height >> 1);
assert(YUVBuffer[kBufferV].height == YUVBuffer[kBufferY].height >> 1);
- Graphics::convertYUV420ToRGB(&_surface, YUVBuffer[kBufferY].data, YUVBuffer[kBufferU].data, YUVBuffer[kBufferV].data, YUVBuffer[kBufferY].width, YUVBuffer[kBufferY].height, YUVBuffer[kBufferY].stride, YUVBuffer[kBufferU].stride);
+ YUVToRGBMan.convert420(&_surface, Graphics::YUVToRGBManager::kScaleITU, YUVBuffer[kBufferY].data, YUVBuffer[kBufferU].data, YUVBuffer[kBufferV].data, YUVBuffer[kBufferY].width, YUVBuffer[kBufferY].height, YUVBuffer[kBufferY].stride, YUVBuffer[kBufferU].stride);
}
static vorbis_info *info = 0;
diff --git a/video/video_decoder.cpp b/video/video_decoder.cpp
index 559880acee..110afa7755 100644
--- a/video/video_decoder.cpp
+++ b/video/video_decoder.cpp
@@ -35,7 +35,6 @@ namespace Video {
VideoDecoder::VideoDecoder() {
_startTime = 0;
- _needsRewind = false;
_dirtyPalette = false;
_palette = 0;
_isPlaying = false;
@@ -62,7 +61,6 @@ void VideoDecoder::close() {
delete *it;
_tracks.clear();
- _needsRewind = false;
_dirtyPalette = false;
_palette = 0;
_startTime = 0;
@@ -87,7 +85,7 @@ bool VideoDecoder::loadFile(const Common::String &filename) {
}
bool VideoDecoder::needsUpdate() const {
- return !endOfVideo() && getTimeToNextFrame() == 0;
+ return hasFramesLeft() && getTimeToNextFrame() == 0;
}
void VideoDecoder::pauseVideo(bool pause) {
@@ -249,18 +247,8 @@ uint32 VideoDecoder::getTimeToNextFrame() const {
}
bool VideoDecoder::endOfVideo() const {
- if (!isVideoLoaded())
- return true;
-
- if (_endTimeSet) {
- const VideoTrack *track = findNextVideoTrack();
-
- if (track && track->getNextFrameStartTime() >= (uint)_endTime.msecs())
- return true;
- }
-
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
- if (!(*it)->endOfTrack())
+ if (!(*it)->endOfTrack() && (!isPlaying() || (*it)->getTrackType() != Track::kTrackTypeVideo || !_endTimeSet || ((VideoTrack *)*it)->getNextFrameStartTime() < (uint)_endTime.msecs()))
return false;
return true;
@@ -281,8 +269,6 @@ bool VideoDecoder::rewind() {
if (!isRewindable())
return false;
- _needsRewind = false;
-
// Stop all tracks so they can be rewound
if (isPlaying())
stopAudio();
@@ -316,8 +302,6 @@ bool VideoDecoder::seek(const Audio::Timestamp &time) {
if (!isSeekable())
return false;
- _needsRewind = false;
-
// Stop all tracks so they can be seeked
if (isPlaying())
stopAudio();
@@ -347,10 +331,6 @@ void VideoDecoder::start() {
_isPlaying = true;
_startTime = g_system->getMillis();
- // If someone previously called stop(), we'll rewind it.
- if (_needsRewind)
- rewind();
-
// Adjust start time if we've seeked to something besides zero time
if (_lastTimeChange.totalNumberOfFrames() != 0)
_startTime -= _lastTimeChange.msecs();
@@ -362,26 +342,27 @@ void VideoDecoder::stop() {
if (!isPlaying())
return;
+ // Stop audio here so we don't have it affect getTime()
+ stopAudio();
+
+ // Keep the time marked down in case we start up again.
+ // We do this before _isPlaying is cleared so getTime()
+ // doesn't just return _lastTimeChange, and before
+ // _pauseLevel is reset.
+ _lastTimeChange = getTime();
+
_isPlaying = false;
_startTime = 0;
_palette = 0;
_dirtyPalette = false;
_needsUpdate = false;
- stopAudio();
-
// Also reset the pause state.
_pauseLevel = 0;
- // If this is a rewindable video, don't close it too. We'll just rewind() the video
- // the next time someone calls start(). Otherwise, since it can't be rewound, we
- // just close it.
- if (isRewindable()) {
- _lastTimeChange = getTime();
- _needsRewind = true;
- } else {
- close();
- }
+ // Reset the pause state of the tracks too
+ for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
+ (*it)->pause(false);
}
Audio::Timestamp VideoDecoder::getDuration() const {
@@ -409,6 +390,11 @@ bool VideoDecoder::Track::rewind() {
return seek(Audio::Timestamp(0, 1000));
}
+void VideoDecoder::Track::pause(bool shouldPause) {
+ _paused = shouldPause;
+ pauseIntern(shouldPause);
+}
+
Audio::Timestamp VideoDecoder::Track::getDuration() const {
return Audio::Timestamp(0, 1000);
}
@@ -679,4 +665,15 @@ void VideoDecoder::startAudioLimit(const Audio::Timestamp &limit) {
((AudioTrack *)*it)->start(limit);
}
+bool VideoDecoder::hasFramesLeft() const {
+ // This is similar to endOfVideo(), except that it ignores audio tracks (and returns true while frames remain)
+ // At the moment it is only used by needsUpdate(), so that setEndTime() works properly
+ // And unlike endOfVideoTracks(), this takes into account _endTime
+ for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
+ if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack() && (!isPlaying() || !_endTimeSet || ((VideoTrack *)*it)->getNextFrameStartTime() < (uint)_endTime.msecs()))
+ return true;
+
+ return false;
+}
+
} // End of namespace Video
diff --git a/video/video_decoder.h b/video/video_decoder.h
index 5abe1d917c..cc7d1df51b 100644
--- a/video/video_decoder.h
+++ b/video/video_decoder.h
@@ -102,16 +102,14 @@ public:
/**
* Begin playback of the video.
*
- * @note This has no effect is the video is already playing.
+ * @note This has no effect if the video is already playing.
*/
void start();
/**
* Stop playback of the video.
*
- * @note This will close() the video if it is not rewindable.
- * @note If the video is rewindable, the video will be rewound on the
- * next start() call unless rewind() or seek() is called before then.
+ * @note This has no effect if the video is not playing.
*/
void stop();
@@ -180,6 +178,9 @@ public:
/**
* Set the time for this video to end at. At this time in the video,
* all audio will stop and endOfVideo() will return true.
+ *
+ * While the setting is stored even if a video is not playing,
+ * endOfVideo() is only affected when the video is playing.
*/
void setEndTime(const Audio::Timestamp &endTime);
@@ -203,7 +204,7 @@ public:
* Returns the current frame number of the video.
* @return the last frame decoded by the video
*/
- int32 getCurFrame() const;
+ int getCurFrame() const;
/**
* Returns the number of frames in the video.
@@ -432,7 +433,7 @@ protected:
/**
* Set the pause status of the track.
*/
- void pause(bool shouldPause) {}
+ void pause(bool shouldPause);
/**
* Return if the track is paused.
@@ -450,7 +451,7 @@ protected:
/**
* Function called by pause() for subclasses to implement.
*/
- void pauseIntern(bool pause);
+ virtual void pauseIntern(bool shouldPause) {}
private:
bool _paused;
@@ -596,7 +597,7 @@ protected:
virtual Audio::Mixer::SoundType getSoundType() const { return Audio::Mixer::kPlainSoundType; }
protected:
- void pauseIntern(bool pause);
+ void pauseIntern(bool shouldPause);
/**
* Get the AudioStream that is the representation of this AudioTrack
@@ -762,7 +763,7 @@ private:
TrackList _tracks;
// Current playback status
- bool _isPlaying, _needsRewind, _needsUpdate;
+ bool _isPlaying, _needsUpdate;
Audio::Timestamp _lastTimeChange, _endTime;
bool _endTimeSet;
@@ -777,6 +778,7 @@ private:
void stopAudio();
void startAudio();
void startAudioLimit(const Audio::Timestamp &limit);
+ bool hasFramesLeft() const;
int32 _startTime;
uint32 _pauseLevel;