diff options
author | Martin Kiewitz | 2010-07-19 14:50:37 +0000 |
---|---|---|
committer | Martin Kiewitz | 2010-07-19 14:50:37 +0000 |
commit | cda55fe770ed71f5d4b00690ba4ea196ad4b533f (patch) | |
tree | 60a96a2d7cba88142667316dc595f0c72c5d3874 /engines/sci/parser | |
parent | 9977e2db90bf26ae4d956d2a61063f2b62b18780 (diff) | |
download | scummvm-rg350-cda55fe770ed71f5d4b00690ba4ea196ad4b533f.tar.gz scummvm-rg350-cda55fe770ed71f5d4b00690ba4ea196ad4b533f.tar.bz2 scummvm-rg350-cda55fe770ed71f5d4b00690ba4ea196ad4b533f.zip |
SCI: parser now matches extended chars too
Fixes multilingual games not accepting words that contain extended characters (>= 0x80).
svn-id: r51029
Diffstat (limited to 'engines/sci/parser')
-rw-r--r-- | engines/sci/parser/vocabulary.cpp | 67 | ||||
-rw-r--r-- | engines/sci/parser/vocabulary.h | 4 |
2 files changed, 47 insertions, 24 deletions
diff --git a/engines/sci/parser/vocabulary.cpp b/engines/sci/parser/vocabulary.cpp index cb580017c4..82051a3ecd 100644 --- a/engines/sci/parser/vocabulary.cpp +++ b/engines/sci/parser/vocabulary.cpp @@ -83,9 +83,8 @@ Vocabulary::~Vocabulary() { } bool Vocabulary::loadParserWords() { - - char currentword[256] = ""; // They're not going to use words longer than 255 ;-) - int currentwordpos = 0; + char currentWord[VOCAB_MAX_WORDLENGTH] = ""; + int currentWordPos = 0; // First try to load the SCI0 vocab resource. Resource *resource = _resMan->findResource(ResourceId(kResourceTypeVocab, _resourceIdWords), 0); @@ -132,13 +131,13 @@ bool Vocabulary::loadParserWords() { while (seeker < resource->size) { byte c; - currentwordpos = resource->data[seeker++]; // Parts of previous words may be re-used + currentWordPos = resource->data[seeker++]; // Parts of previous words may be re-used if (resourceType == kVocabularySCI1) { c = 1; - while (seeker < resource->size && currentwordpos < 255 && c) { + while (seeker < resource->size && currentWordPos < 255 && c) { c = resource->data[seeker++]; - currentword[currentwordpos++] = c; + currentWord[currentWordPos++] = c; } if (seeker == resource->size) { warning("SCI1: Vocabulary not usable, disabling"); @@ -148,11 +147,11 @@ bool Vocabulary::loadParserWords() { } else { do { c = resource->data[seeker++]; - currentword[currentwordpos++] = c & 0x7f; // 0x80 is used to terminate the string + currentWord[currentWordPos++] = c & 0x7f; // 0x80 is used to terminate the string } while (c < 0x80); } - currentword[currentwordpos] = 0; + currentWord[currentWordPos] = 0; // Now decode class and group: c = resource->data[seeker + 1]; @@ -161,7 +160,7 @@ bool Vocabulary::loadParserWords() { newWord._group = (resource->data[seeker + 2]) | ((c & 0x0f) << 8); // Add the word to the list - _parserWords[currentword] = newWord; + _parserWords[currentWord] = newWord; seeker += 3; } @@ -259,7 +258,7 @@ bool Vocabulary::loadBranches() { return true; } - +// 
we assume that *word points to an already lowercased word ResultWord Vocabulary::lookupWord(const char *word, int word_len) { Common::String tempword(word, word_len); @@ -286,7 +285,7 @@ ResultWord Vocabulary::lookupWord(const char *word, int word_len) { int suff_index = word_len - suffix->alt_suffix_length; // Offset of the start of the suffix - if (scumm_strnicmp(suffix->alt_suffix, word + suff_index, suffix->alt_suffix_length) == 0) { // Suffix matched! + if (strncmp(suffix->alt_suffix, word + suff_index, suffix->alt_suffix_length) == 0) { // Suffix matched! // Terminate word at suffix start position...: Common::String tempword2(word, MIN(word_len, suff_index)); @@ -366,32 +365,55 @@ void Vocabulary::decipherSaidBlock(byte *addr) { printf("\n"); } +static const byte lowerCaseMap[256] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, // 0x00 + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, // 0x10 + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, // 0x20 + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, // 0x30 + 0x40, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', // 0x40 + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, // 0x50 + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, // 0x60 + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, // 0x70 + 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, // 0x80 + //^^ ^^^^ ^^^^ + 0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, // 0x90 + //^^ ^^^^ ^^^^ ^^^^ + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, // 0xa0 + // ^^^^ + 0xb0, 
0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, // 0xb0 + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, // 0xc0 + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, // 0xd0 + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, // 0xe0 + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff // 0xf0 +}; + bool Vocabulary::tokenizeString(ResultWordList &retval, const char *sentence, char **error) { - const char *lastword = sentence; + char currentWord[VOCAB_MAX_WORDLENGTH] = ""; int pos_in_sentence = 0; unsigned char c; - int wordlen = 0; + int wordLen = 0; *error = NULL; do { c = sentence[pos_in_sentence++]; - - if (isalnum(c) || (c == '-' && wordlen) || (c >= 0x80)) - ++wordlen; + if (isalnum(c) || (c == '-' && wordLen) || (c >= 0x80)) { + currentWord[wordLen] = lowerCaseMap[c]; + ++wordLen; + } // Continue on this word */ // Words may contain a '-', but may not // start with one. else { - if (wordlen) { // Finished a word? + if (wordLen) { // Finished a word? - ResultWord lookup_result = lookupWord(lastword, wordlen); + ResultWord lookup_result = lookupWord(currentWord, wordLen); // Look it up if (lookup_result._class == -1) { // Not found? 
- *error = (char *)calloc(wordlen + 1, 1); - strncpy(*error, lastword, wordlen); // Set the offending word + *error = (char *)calloc(wordLen + 1, 1); + strncpy(*error, currentWord, wordLen); // Set the offending word retval.clear(); return false; // And return with error } @@ -400,8 +422,7 @@ bool Vocabulary::tokenizeString(ResultWordList &retval, const char *sentence, ch retval.push_back(lookup_result); } - lastword = sentence + pos_in_sentence; - wordlen = 0; + wordLen = 0; } } while (c); // Until terminator is hit @@ -410,7 +431,7 @@ bool Vocabulary::tokenizeString(ResultWordList &retval, const char *sentence, ch } void Vocabulary::printSuffixes() const { - char word_buf[256], alt_buf[256]; + char word_buf[VOCAB_MAX_WORDLENGTH], alt_buf[VOCAB_MAX_WORDLENGTH]; Console *con = g_sci->getSciDebugger(); int i = 0; diff --git a/engines/sci/parser/vocabulary.h b/engines/sci/parser/vocabulary.h index e637d8088a..80ed68547d 100644 --- a/engines/sci/parser/vocabulary.h +++ b/engines/sci/parser/vocabulary.h @@ -73,6 +73,8 @@ enum { kParseNumber = 4 }; +#define VOCAB_MAX_WORDLENGTH 256 + /* Anywords are ignored by the parser */ #define VOCAB_CLASS_ANYWORD 0xff @@ -116,7 +118,7 @@ struct ResultWord { typedef Common::List<ResultWord> ResultWordList; -typedef Common::HashMap<Common::String, ResultWord, Common::IgnoreCase_Hash, Common::IgnoreCase_EqualTo> WordMap; +typedef Common::HashMap<Common::String, ResultWord, Common::CaseSensitiveString_Hash, Common::CaseSensitiveString_EqualTo> WordMap; struct ParseRuleList; |