Continued Chinese puzzles from Autrijus.
Jarkko Hietaniemi [Thu, 7 Mar 2002 01:48:46 +0000 (01:48 +0000)]
p4raw-id: //depot/perl@15075

MANIFEST
ext/Encode/lib/Encode/CN/HZ.pm
ext/Encode/t/CN.t [new file with mode: 0644]
ext/Encode/t/TW.t [new file with mode: 0644]

diff --git a/MANIFEST b/MANIFEST
index dfae962..0880337 100644 (file)
--- a/MANIFEST
+++ b/MANIFEST
@@ -357,12 +357,14 @@ ext/Encode/lib/EncodeFormat.pod           Encode extension
 ext/Encode/Makefile.PL         Encode extension makefile writer
 ext/Encode/MANIFEST            Encode extension
 ext/Encode/README              Encode extension
+ext/Encode/t/CN.t              Encode extension test
 ext/Encode/t/Encode.t          Encode extension test
 ext/Encode/t/japanese.pl       Encode extension
 ext/Encode/t/JP.t              Encode extension test
 ext/Encode/t/table.euc         Encode extension test
 ext/Encode/t/table.ref         Encode extension test
 ext/Encode/t/Tcl.t             Encode extension test
+ext/Encode/t/TW.t              Encode extension test
 ext/Encode/TW/Makefile.PL      Encode extension
 ext/Encode/TW/TW.pm            Encode extension
 ext/Errno/ChangeLog    Errno perl module change log
diff --git a/ext/Encode/lib/Encode/CN/HZ.pm b/ext/Encode/lib/Encode/CN/HZ.pm
index a57ae8a..e4c2a8f 100644 (file)
--- a/ext/Encode/lib/Encode/CN/HZ.pm
+++ b/ext/Encode/lib/Encode/CN/HZ.pm
@@ -1,11 +1,12 @@
 package Encode::CN::HZ;
 
+use strict;
+no warnings 'redefine'; # quell redefinition warnings from the "use Encode" below
+
 use Encode::CN;
 use Encode qw|encode decode|;
 use base 'Encode::Encoding';
 
-use strict;
-
 # HZ is but escaped GB, so we implement it with the
 # GB2312(raw) encoding here. Cf. RFC 1842 & 1843.
 
@@ -18,8 +19,30 @@ sub decode
     my ($obj,$str,$chk) = @_;
     my $gb = Encode::find_encoding('gb2312');
 
-    $str =~ s{~(?:(~)|\n|{([^~]*)~}|)}
-             {$1 ? '~' : defined $2 ? $gb->decode($2, $chk) : ''}eg;
+    $str =~ s{~                        # starting tilde
+       (?:
+           (~)                 # another tilde - escaped (set $1)
+               |               #     or
+           \n                  # \n - output nothing
+               |               #     or
+           \{                  # opening brace of GB data
+               (               #  set $2 to any number of...
+                   (?: 
+                       [^~]    #  non-tilde GB character
+                           |   #     or
+                       ~(?!\}) #  tilde not followed by a closing brace
+                   )*
+               )
+           ~\}                 # closing brace of GB data
+               |               # XXX: invalid escape - maybe die on $chk?
+       )
+    }{
+       (defined $1)    ? '~'                   # two tildes make one tilde
+           :
+       (defined $2)    ? $gb->decode($2, $chk) # decode the characters
+           :
+       ''                                      # '' on ~\n and invalid escape
+    }egx;
 
     return $str;
 }
@@ -27,23 +50,41 @@ sub decode
 sub encode
 {
     my ($obj,$str,$chk) = @_;
+    my ($out, $in_gb);
     my $gb = Encode::find_encoding('gb2312');
 
     $str =~ s/~/~~/g;
-    $str =~ s/((?:     
-       \p{InCJKCompatibility}|
-       \p{InCJKCompatibilityForms}|
-       \p{InCJKCompatibilityIdeographs}|
-       \p{InCJKCompatibilityIdeographsSupplement}|
-       \p{InCJKRadicalsSupplement}|
-       \p{InCJKSymbolsAndPunctuation}|
-       \p{InCJKUnifiedIdeographsExtensionA}|
-       \p{InCJKUnifiedIdeographs}|
-       \p{InCJKUnifiedIdeographsExtensionB}|
-       \p{InEnclosedCJKLettersAndMonths}
-    )+)/'~{'.$gb->encode($1, $chk).'~}'/egx;
 
-    return $str;
+    # XXX: Since CHECK and partial decoding have not been implemented yet,
+    #      we'll use a very crude way to test for GB2312ness.
+
+    for my $index (0 .. length($str) - 1) {
+       no warnings 'utf8';
+
+       my $char = substr($str, $index, 1);
+       my $try  = $gb->encode($char);  # try to encode this char
+
+       if (defined($try)) {            # is a GB character
+           if ($in_gb) {
+               $out .= $try;           # in GB mode - just append it
+           }
+           else {
+               $out .= "~{$try";       # enter GB mode, then append it
+               $in_gb = 1;
+           }
+       }
+       elsif ($in_gb) {
+           $out .= "~}$char";          # leave GB mode, then append it
+           $in_gb = 0;
+       }
+       else {
+           $out .= $char;              # not in GB mode - just append it
+       }
+    }
+
+    $out .= '~}' if $in_gb;            # add closing brace as needed
+
+    return $out;
 }
 
 1;
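
For reference, the escape rules that the rewritten decode()/encode() pair implements (RFC 1842/1843) can be exercised through the public Encode API. The snippet below is only a sketch, not part of the commit: it assumes the Encode extension built from this tree, with Encode::CN registering the 'hz' and 'gb2312' names just as the new tests expect; the '!6' byte pair and the two code points are taken from the CN.t fixture below.

    use strict;
    use Test::More tests => 4;
    use Encode qw(encode decode);
    use Encode::CN;                     # expected to register 'hz' and 'gb2312'

    # '~~' is a literal tilde; a tilde before a newline decodes to nothing.
    is(decode('hz', '~~'),  '~', 'doubled tilde');
    is(decode('hz', "~\n"), '',  'tilde-newline dropped');

    # '~{' ... '~}' brackets raw GB2312 data, so a bracketed run decodes
    # exactly like the bare bytes do under 'gb2312'; '!6' is the first byte
    # pair of the gb2312 fixture in CN.t below.
    is(decode('hz', '~{!6~}'), decode('gb2312', '!6'), 'bracketed GB run');

    # On encode, a run of GB-encodable characters comes out bracketed; these
    # two code points open the same CN.t fixture.
    my $han = chr(12298) . chr(26131);
    is(encode('hz', $han), '~{' . encode('gb2312', $han) . '~}', 'GB run bracketed');
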
diff --git a/ext/Encode/t/CN.t b/ext/Encode/t/CN.t
new file mode 100644 (file)
index 0000000..2185a07
--- /dev/null
+++ b/ext/Encode/t/CN.t
@@ -0,0 +1,145 @@
+BEGIN {
+    chdir 't' if -d 't';
+    @INC = '../lib';
+    require Config; import Config;
+    if ($Config{'extensions'} !~ /\bEncode\b/) {
+      print "1..0 # Skip: Encode was not built\n";
+      exit 0;
+    }
+    unless (find PerlIO::Layer 'perlio') {
+       print "1..0 # Skip: PerlIO was not built\n";
+       exit 0;
+    }
+    if (ord("A") == 193) {
+       print "1..0 # Skip: EBCDIC\n";
+       exit 0;
+    }
+    $| = 1;
+}
+
+use strict;
+use Test::More tests => 29;
+use Encode;
+
+use_ok('Encode::CN');
+
+# Since JP.t already tests basic file IO, we will just focus on
+# internal encode / decode tests here. Unfortunately, testing
+# against all the UniHan characters would take a huge amount of disk
+# space, not to mention the time it would take, and Perl does not
+# bundle UniHan.txt anyway.
+
+# So, here we just test a typical snippet spanning multiple Unicode
+# blocks, and hope it will catch any obvious errors.
+
+run_tests('Simplified Chinese only', {
+    'utf'      => (
+12298.26131.32463.12299.31532.19968.21350.
+24406.26352.65306.
+22823.21705.20094.20803.65292.19975.29289.36164.22987.65292.
+20035.32479.22825.12290.
+20113.34892.38632.26045.65292.21697.29289.27969.24418.12290.
+22823.26126.22987.32456.65292.20845.20301.26102.25104.65292.
+26102.20056.20845.40857.20197.24481.22825.12290.
+20094.36947.21464.21270.65292.21508.27491.24615.21629.65292.
+20445.21512.22823.21644.65292.20035.21033.36126.12290.
+39318.20986.24246.29289.65292.19975.22269.21688.23425.12290
+    ),
+
+    'euc-cn'   => join('',
+'¡¶Ò×¾­¡·µÚÒ»ØÔ',
+'åèÔ»£º',
+'´óÔÕǬԪ£¬ÍòÎï×Êʼ£¬',
+'ÄËͳÌì¡£',
+'ÔÆÐÐÓêÊ©£¬Æ·ÎïÁ÷ÐΡ£',
+'´óÃ÷ʼÖÕ£¬Áùλʱ³É£¬',
+'ʱ³ËÁùÁúÒÔÓùÌì¡£',
+'ǬµÀ±ä»¯£¬¸÷ÕýÐÔÃü£¬',
+'±£ºÏ´óºÍ£¬ÄËÀûÕê¡£',
+'Ê׳öÊüÎÍò¹úÏÌÄþ¡£',
+    ),
+
+    'gb2312'   => join('',
+'!6RW>-!75ZR;XT',
+'ehT;#:',
+'4sTUG,T*#,MrNoWJJ<#,',
+'DKM3Ll!#',
+'TFPPSjJ)#,F7NoAwPN!#',
+'4sCwJ<VU#,AyN;J13I#,',
+'J13KAyAzRTSyLl!#',
+'G,5@1d;/#,8wU}PTC|#,',
+'1#:O4s:M#,DK@{Uj!#',
+'JW3vJ|No#,Mr9zOLD~!#'
+    ),
+
+    'iso-ir-165'=> join('',
+'!6RW>-!75ZR;XT',
+'ehT;#:',
+'4sTUG,T*#,MrNoWJJ<#,',
+'DKM3Ll!#',
+'TFPPSjJ)#,F7NoAwPN!#',
+'4sCwJ<VU#,AyN;J13I#,',
+'J13KAyAzRTSyLl!#',
+'G,5@1d;/#,8wU}PTC|#,',
+'1#:O4s:M#,DK@{Uj!#',
+'JW3vJ|No#,Mr9zOLD~!#'
+    ),
+});
+
+run_tests('Simplified Chinese + ASCII', {
+    'utf'      => (
+35937.26352.65306.10.
+22825.34892.20581.65292.21531.23376.20197.33258.24378.19981.24687.12290.10.
+28508.40857.21247.29992.65292.38451.22312.19979.20063.12290.32.
+35265.40857.22312.30000.65292.24503.26045.26222.20063.12290.32.
+32456.26085.20094.20094.65292.21453.22797.36947.20063.12290.10.
+25110.36291.22312.28170.65292.36827.26080.21646.20063.12290.39134.
+40857.22312.22825.65292.22823.20154.36896.20063.12290.32.
+20130.40857.26377.24724.65292.30408.19981.21487.20037.20063.12290.10.
+29992.20061.65292.22825.24503.19981.21487.20026.39318.20063.12290
+    ),
+
+    'cp936'    => join(chr(10),
+'ÏóÔ»£º',
+'ÌìÐн¡£¬¾ý×ÓÒÔ×ÔÇ¿²»Ï¢¡£',
+'DZÁúÎðÓã¬ÑôÔÚÏÂÒ²¡£ ¼ûÁúÔÚÌµÂÊ©ÆÕÒ²¡£ ÖÕÈÕǬǬ£¬·´¸´µÀÒ²¡£',
+'»òÔ¾ÔÚÔ¨£¬½øÎÞ¾ÌÒ²¡£·ÉÁúÔÚÌ죬´óÈËÔìÒ²¡£ ¿ºÁúÓлڣ¬Ó¯²»¿É¾ÃÒ²¡£',
+'Óþţ¬ÌìµÂ²»¿ÉΪÊ×Ò²¡£',
+    ),
+
+    'hz'       => join(chr(10),
+'~{OsT;#:~}',
+'~{LlPP=!#,>}WSRTWTG?2;O"!#~}',
+'~{G1AzNpSC#,QtTZOBR2!#~} ~{<{AzTZLo#,5BJ)FUR2!#~} ~{VUHUG,G,#,74845@R2!#~}',
+'~{;rT>TZT(#,=xN^>LR2!#7IAzTZLl#,4sHKTlR2!#~} ~{?:AzSP;Z#,S/2;?I>CR2!#~}',
+'~{SC>E#,Ll5B2;?IN*JWR2!#~}',
+    ),
+});
+
+run_tests('Traditional Chinese', {
+    'utf'      => 20094.65306.20803.12289.20136.12289.21033.12289.35998,
+    'gb12345'  => 'G,#:T*!":`!"@{!"Uj',
+    'gbk'      => 'Ǭ£ºÔª¡¢ºà¡¢Àû¡¢Ø‘',
+});
+
+sub run_tests {
+    my ($title, $tests) = @_;
+    my $utf = delete $tests->{'utf'};
+
+    # $enc = encoding, $str = content
+    foreach my $enc (sort keys %{$tests}) {
+       my $str = $tests->{$enc};
+
+       is(Encode::decode($enc, $str), $utf, "[$enc] decode - $title");
+       is(Encode::encode($enc, $utf), $str, "[$enc] encode - $title");
+
+       my $str2 = $str;
+       my $utf8 = Encode::encode('utf-8', $utf);
+
+       Encode::from_to($str2, $enc, 'utf-8');
+       is($str2, $utf8, "[$enc] from_to => utf8 - $title");
+
+       Encode::from_to($utf8, 'utf-8', $enc); # convert $utf8 in place into $enc
+       is($utf8, $str,  "[$enc] utf8 => from_to - $title");
+    }
+}
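
A note on the fixture format shared by CN.t and TW.t: the dotted integer runs under the 'utf' key are Perl v-strings, so each decimal component becomes the character with that ordinal, while every other key pairs the same text with its byte form in the named encoding. The sketch below is not part of the commit; it only illustrates the equivalence, using the first three components of the 'Traditional Chinese' fixture above.

    use strict;
    use Test::More tests => 1;

    # A literal with two or more dots is a v-string: one character per
    # decimal component, so the fixture value is an ordinary Unicode string.
    is(20094.65306.20803, chr(20094) . chr(65306) . chr(20803), 'v-string fixture');
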
diff --git a/ext/Encode/t/TW.t b/ext/Encode/t/TW.t
new file mode 100644 (file)
index 0000000..b125a8f
--- /dev/null
+++ b/ext/Encode/t/TW.t
@@ -0,0 +1,94 @@
+BEGIN {
+    chdir 't' if -d 't';
+    @INC = '../lib';
+    require Config; import Config;
+    if ($Config{'extensions'} !~ /\bEncode\b/) {
+      print "1..0 # Skip: Encode was not built\n";
+      exit 0;
+    }
+    unless (find PerlIO::Layer 'perlio') {
+       print "1..0 # Skip: PerlIO was not built\n";
+       exit 0;
+    }
+    if (ord("A") == 193) {
+       print "1..0 # Skip: EBCDIC\n";
+       exit 0;
+    }
+    $| = 1;
+}
+
+use strict;
+use Test::More tests => 17;
+use Encode;
+
+use_ok('Encode::TW');
+
+# Since JP.t already tests basic file IO, we will just focus on
+# internal encode / decode tests here. Unfortunately, testing
+# against all the UniHan characters would take a huge amount of disk
+# space, not to mention the time it would take, and Perl does not
+# bundle UniHan.txt anyway.
+
+# So, here we just test a typical snippet spanning multiple Unicode
+# blocks, and hope it will catch any obvious errors.
+
+run_tests('Basic Big5 range', {
+    'utf'      => (
+24093.39640.38525.20043.33495.35028.20846.65292.
+26389.30343.32771.26352.20271.24248.65108.
+25885.25552.35998.20110.23391.38508.20846.65292.
+24799.24218.23493.21566.20197.38477.65108
+    ),
+
+    'big5'     => (join('',
+'«Ò°ª¶§¤§­]¸Ç¤¼¡A®Ó¬Ó¦Ò¤ê§B±e¡Q',
+'Äá´£­s¤_©s³µ¤¼¡A±©©°±G§^¥H­°¡Q',
+    )),
+
+    'big5-hkscs'=> (join('',
+'«Ò°ª¶§¤§­]¸Ç¤¼¡A®Ó¬Ó¦Ò¤ê§B±e¡Q',
+'Äá´£­s¤_©s³µ¤¼¡A±©©°±G§^¥H­°¡Q',
+    )),
+
+    'cp950'    => (join('',
+'«Ò°ª¶§¤§­]¸Ç¤¼¡A®Ó¬Ó¦Ò¤ê§B±e¡Q',
+'Äá´£­s¤_©s³µ¤¼¡A±©©°±G§^¥H­°¡Q',
+    )),
+});
+
+run_tests('Hong Kong Extensions', {
+    'utf'      => (
+24863.35613.25152.26377.20351.29992.32.80.101.114.108.32.
+22021.26379.21451.65292.32102.25105.21707.22021.
+25903.25345.12289.24847.35211.21644.40723.21237.
+22914.26524.32232.30908.26377.20219.20309.37679.28431.
+65292.35531.21578.35380.25105.21707.12290
+    ),
+
+    'big5-hkscs'       => join('',
+'·PÁ©Ҧ³¨Ï¥Î Perl ïªB¤Í¡Aµ¹§Ú’]ï¤ä«ù¡B·N¨£©M¹ªÀy',
+'¦pªG½s½X¦³¥ô¦ó¿ùº|¡A½Ð§i¶D§Ú’]¡C'
+    ),
+});
+
+sub run_tests {
+    my ($title, $tests) = @_;
+    my $utf = delete $tests->{'utf'};
+
+    # $enc = encoding, $str = content
+    foreach my $enc (sort keys %{$tests}) {
+       my $str = $tests->{$enc};
+
+       is(Encode::decode($enc, $str), $utf, "[$enc] decode - $title");
+       is(Encode::encode($enc, $utf), $str, "[$enc] encode - $title");
+
+       my $str2 = $str;
+       my $utf8 = Encode::encode('utf-8', $utf);
+
+       Encode::from_to($str2, $enc, 'utf-8');
+       is($str2, $utf8, "[$enc] from_to => utf8 - $title");
+
+       Encode::from_to($utf8, 'utf-8', $enc); # convert $utf8 in place into $enc
+       is($utf8, $str,  "[$enc] utf8 => from_to - $title");
+    }
+}
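
One detail behind the four assertions in run_tests(): Encode::from_to converts its first argument in place rather than returning a converted string, which is why the helper works on copies ($str2, $utf8) instead of the fixtures themselves. The sketch below is not part of the commit; it shows that behaviour with U+5E1D, the first character of the Big5 fixture above, and assumes the Encode extension from this tree is built.

    use strict;
    use Test::More tests => 1;
    use Encode;
    use Encode::TW;                       # registers 'big5', as TW.t expects

    my $text  = chr(24093);               # U+5E1D, first character of the Big5 fixture
    my $bytes = Encode::encode('big5', $text);
    Encode::from_to($bytes, 'big5', 'utf-8');   # converts $bytes in place; does not return a string
    is($bytes, Encode::encode('utf-8', $text), 'from_to converts its first argument in place');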