author	Max Horn	2011-05-06 13:14:44 +0200
committer	Max Horn	2011-05-25 13:24:38 +0200
commit	d2e778bf0b4692e957c74a7749a75d6fd5ed214d (patch)
tree	b1a5a429c3c960d6f38ee6511e809f278137d6c2
parent	4b7f6dfa3c8ca3ee614810007728e49ba2fd9e6e (diff)
BUILD: Replace _need_memalign runtime test by hardcoded list
According to a discussion on -devel, this test cannot work reliably in general: it cannot determine whether unaligned access really works in all situations, nor on all implementations of the target CPU arch; nor can it determine whether unaligned access is supported efficiently (as opposed to, say, being emulated via a super-slow fault-handler mechanism).
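For context, the portable way to dodge the alignment question in C code altogether is to route unaligned loads through memcpy: with a constant size, compilers emit a single load instruction on targets where unaligned access is safe, and byte loads elsewhere. A minimal sketch, assuming a GCC-style compiler (read_uint32_le is a name invented here for illustration, not a ScummVM function):

#include <stdint.h>
#include <string.h>

/* Read a 32-bit little-endian value from a possibly unaligned pointer.
 * memcpy never dereferences a misaligned uint32_t*, so this is correct
 * regardless of what _need_memalign ends up being. */
static uint32_t read_uint32_le(const void *ptr) {
	uint32_t v;
	memcpy(&v, ptr, sizeof(v));
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	v = __builtin_bswap32(v);	/* GCC/Clang builtin */
#endif
	return v;
}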
-rwxr-xr-x	configure	56
1 file changed, 21 insertions(+), 35 deletions(-)
diff --git a/configure b/configure
index 6697fcc5d1..d230d588df 100755
--- a/configure
+++ b/configure
@@ -2061,48 +2061,34 @@ fi
# alignment can be a lot slower than regular access, so we don't want
# to use it if we don't have to.
#
-# So we do the following: First, for CPU families where we know whether
-# unaligned access is safe & fast, we enable / disable unaligned access
-# accordingly.
-# Otherwise, for cross compiled builds we just disable memory alignment.
-# For native builds, we run some test code that detects whether unaligned
-# access is supported (and is supported without an exception handler).
-#
-# NOTE: The only kinds of unaligned access we allow are for 2 byte and
-# 4 byte loads / stores. No promises are made for bigger sizes, such as
-# 8 or 16 byte loads, for which various architectures (e.g. x86 and PowerPC)
-# behave differently than for the smaller sizes).
+# So we do the following: For CPU families where we know whether unaligned
+# access is safe & fast, we enable / disable unaligned access accordingly.
+# Otherwise, we just disable memory alignment.
+#
+# NOTE: In the past, for non-cross compiled builds, we would also run some code
+# which would try to test whether unaligned access worked or not. But this test
+# could not reliably determine whether unaligned access really worked in all
+# situations (and across different implementations of the target CPU arch), nor
+# whether it was fast (as opposed to slowly emulated by fault handlers). Hence,
+# we do not use this approach anymore.
+#
+# NOTE: The only kinds of unaligned access we allow are for 2 byte and 4
+# byte loads / stores. No promises are made for bigger sizes, such as 8
+# or 16 byte loads, for which architectures may behave differently than
+# for the smaller sizes.
echo_n "Alignment required... "
case $_host_cpu in
+ i[3-6]86 | x86_64 | ppc*)
+ # Unaligned access should work
+ _need_memalign=no
+ ;;
alpha* | arm* | bfin* | hp* | mips* | sh* | sparc* | ia64 | nv1*)
# Unaligned access is not supported or extremely slow.
_need_memalign=yes
;;
- i[3-6]86 | x86_64 | ppc*)
- # Unaligned access should work reasonably well
- _need_memalign=no
- ;;
*)
- if test -z "$_host"; then
- # NOT in cross-compiling mode:
- # Try to auto-detect....
- cat > $TMPC << EOF
-#include <stdlib.h>
-#include <signal.h>
-int main(int argc, char **argv) {
- unsigned char test[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
- signal(SIGBUS, exit);
- signal(SIGABRT, exit);
- signal(SIGSEGV, exit);
- if (*((unsigned int *)(test + 1)) != 0x55443322 && *((unsigned int *)(test + 1)) != 0x22334455) {
- return 1;
- }
- return 0;
-}
-EOF
- cc_check_no_clean && $TMPO$HOSTEXEEXT && _need_memalign=no
- cc_check_clean
- fi
+ # Status of unaligned access is unknown, so assume the worst.
+ _need_memalign=yes
;;
esac
echo "$_need_memalign"