X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=perl.h;h=8f1cad367ef8c6e388023f7a70766b846fe63125;hb=40b7a5f5e789eb31046d021a15e48b502ad8e1e9;hp=27393f669edf01afb940039abd0cee0e0e4e0ea1;hpb=2765b840fca882ed4632588ab696d917cbd8f128;p=p5sagit%2Fp5-mst-13.2.git

diff --git a/perl.h b/perl.h
index 27393f6..8f1cad3 100644
--- a/perl.h
+++ b/perl.h
@@ -1131,23 +1131,9 @@ typedef UVTYPE UV;
 # endif
 #endif
 
-/*
- I've tracked down a weird bug in Perl5.6.1 to the UTS compiler's
- mishandling of MY_UV_MAX in util.c. It is defined as
- #ifndef MY_UV_MAX
- # define MY_UV_MAX ((UV)IV_MAX * (UV)2 + (UV)1)
- #endif
- The compiler handles {double floating point value} >= MY_UV_MAX as if
- MY_UV_MAX were the signed integer -1. In fact it will do the same
- thing with (UV)(0xffffffff), in place of MY_UV_MAX, though 0xffffffff
- *without* the typecast to UV works fine.
-
- hom00@utsglobal.com (Hal Morris) 2001-05-02
-
- */
-
-#ifdef UTS
-# define MY_UV_MAX 0xffffffff
+#if defined(uts) || defined(UTS)
+# undef UV_MAX
+# define UV_MAX (4294967295u)
 #endif
 
 #define IV_DIG (BIT_DIGITS(IVSIZE * 8))
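
The removed comment describes the UTS compiler mis-evaluating a UV-cast expression such as ((UV)IV_MAX * (UV)2 + (UV)1), or (UV)(0xffffffff), as the signed value -1 when compared against a double, which is why the replacement spells UV_MAX as the plain unsigned literal (4294967295u) on uts/UTS. Below is a minimal standalone C sketch of that double-vs-UV_MAX comparison pattern; the fits_in_uv() helper and the 32-bit UV typedef are illustrative assumptions, not code taken from util.c or perl.h.

#include <stdio.h>

/* Illustrative assumption: a 32-bit unsigned type standing in for Perl's UV
   on a platform where UVSIZE is 4; this is not Perl's real typedef machinery. */
typedef unsigned int UV;

/* UV_MAX as a plain unsigned literal, as the patched perl.h defines it for
   uts/UTS, rather than a cast expression such as ((UV)IV_MAX * (UV)2 + (UV)1)
   that the removed comment says the UTS compiler mis-evaluated as -1. */
#define UV_MAX (4294967295u)

/* Hypothetical helper (not from util.c): the double-vs-UV_MAX range check the
   removed comment describes. If UV_MAX were mis-evaluated as the signed value
   -1, nearly every positive double would wrongly appear to be out of range. */
static int fits_in_uv(double d)
{
    return d >= 0.0 && d <= (double)UV_MAX;
}

int main(void)
{
    printf("%d\n", fits_in_uv(42.0));          /* 1: fits in a 32-bit UV */
    printf("%d\n", fits_in_uv(4294967296.0));  /* 0: one past UV_MAX */
    return 0;
}

With a conforming compiler this prints 1 then 0; on the affected UTS compiler, with UV_MAX spelled as a UV-cast expression, the first check would reportedly fail as well, which is the misbehaviour the patch works around.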