Merge pull request #151 from google/sr

Enable unittest on Windows
This commit is contained in:
Taku Kudo 2018-08-05 11:59:44 +09:00 committed by GitHub
Parents c93b4d15cd 9de12b5e7d
Commit 942baf3cda
No key matching this signature was found
GPG key ID: 4AEE18F83AFDEB23
14 changed files with 98 additions and 92 deletions

View file

@@ -37,7 +37,7 @@ if(MSVC)
string(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_MINSIZEREL ${CMAKE_CXX_FLAGS_MINSIZEREL})
string(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
string(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_RELWITHDEBINFO ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
add_definitions("/wd4267 /wd4244 /wd4305 /source-charset:utf-8")
add_definitions("/wd4267 /wd4244 /wd4305 /Zc:strictStrings /utf8")
endif(MSVC)
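
The CMake change above replaces MSVC's source-charset-only option with one that treats both the source and execution character sets as UTF-8 (MSVC's /utf-8 behavior). That is why the u8 prefixes are dropped from string literals throughout the test files below: once the execution charset is UTF-8, a plain narrow literal carries the same bytes as a u8 literal in C++11/14. A minimal standalone sketch, not part of this commit, illustrating the equivalence (it assumes the file is saved as UTF-8 and compiled with /utf-8 on MSVC or with the defaults on GCC/Clang):

// Illustrative sketch only -- not from this PR.
// With a UTF-8 execution charset, the two literals below hold identical bytes,
// so the u8 prefix is redundant (in C++11/14, where u8"..." is const char[]).
#include <cassert>
#include <string>

int main() {
  const std::string plain = "グーグル";       // narrow literal: UTF-8 bytes
  const std::string prefixed = u8"グーグル";  // u8 literal: same bytes here
  assert(plain == prefixed);
  assert(plain.size() == 12);  // 4 katakana code points, 3 bytes each
  return 0;
}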

View file

@@ -103,13 +103,13 @@ TEST(BPEModelTest, EncodeTest) {
EXPECT_EQ("d", result[6].first);
// all unknown.
result = model.Encode(u8"xyz東京");
result = model.Encode("xyz東京");
EXPECT_EQ(5, result.size());
EXPECT_EQ("x", result[0].first);
EXPECT_EQ("y", result[1].first);
EXPECT_EQ("z", result[2].first);
EXPECT_EQ(u8"", result[3].first);
EXPECT_EQ(u8"", result[4].first);
EXPECT_EQ("", result[3].first);
EXPECT_EQ("", result[4].first);
// User defined
result = model.Encode("ABC");
@@ -177,7 +177,7 @@ TEST(BPEModelTest, EncodeAmbiguousTest) {
EXPECT_EQ("a", result[2].first);
// makes a broken utf-8
const std::string broken_utf8 = std::string(u8"").substr(0, 1);
const std::string broken_utf8 = std::string("").substr(0, 1);
result = model.Encode(broken_utf8);
EXPECT_EQ(1, result.size());
EXPECT_EQ(broken_utf8, result[0].first);

View file

@@ -109,16 +109,16 @@ TEST(BPETrainerTest, EndToEndTest) {
EXPECT_TRUE(tok.empty());
EXPECT_OK(sp.Encode(
u8"吾輩《わがはい》は猫である。名前はまだ無い。"
u8"どこで生れたかとんと見当《けんとう》がつかぬ。"
u8"何でも薄暗いじめじめした所でニャーニャー泣いていた事だけは記憶している"
u8"",
"吾輩《わがはい》は猫である。名前はまだ無い。"
"どこで生れたかとんと見当《けんとう》がつかぬ。"
"何でも薄暗いじめじめした所でニャーニャー泣いていた事だけは記憶している"
"",
&tok));
EXPECT_EQ(WS
u8" 吾輩 《 わが はい 》 は猫 である 。 名前 はまだ 無い 。 "
u8"どこで 生 れた か とん と見 当 《 けんとう 》 が つかぬ 。 "
u8"何でも 薄 暗 いじ め じ め した 所で ニャー ニャー 泣 いていた "
u8"事 だけは 記憶 している 。",
" 吾輩 《 わが はい 》 は猫 である 。 名前 はまだ 無い 。 "
"どこで 生 れた か とん と見 当 《 けんとう 》 が つかぬ 。 "
"何でも 薄 暗 いじ め じ め した 所で ニャー ニャー 泣 いていた "
"事 だけは 記憶 している 。",
string_util::Join(tok, " "));
}

View file

@@ -66,16 +66,16 @@ TEST(BuilderTest, GetPrecompiledCharsMapTest) {
const NormalizerSpec spec =
SentencePieceTrainer::GetNormalizerSpec("nmt_nfkc");
const Normalizer normalizer(spec);
EXPECT_EQ(WS "ABC", normalizer.Normalize(u8""));
EXPECT_EQ(WS u8"(株)", normalizer.Normalize(u8""));
EXPECT_EQ(WS u8"グーグル", normalizer.Normalize(u8"グーグル"));
EXPECT_EQ(WS "ABC", normalizer.Normalize(""));
EXPECT_EQ(WS "(株)", normalizer.Normalize(""));
EXPECT_EQ(WS "グーグル", normalizer.Normalize("グーグル"));
}
{
const NormalizerSpec spec =
SentencePieceTrainer::GetNormalizerSpec("nfkc_cf");
const Normalizer normalizer(spec);
EXPECT_EQ(WS "abc", normalizer.Normalize(u8""));
EXPECT_EQ(WS "abc", normalizer.Normalize(""));
EXPECT_EQ(WS "abc", normalizer.Normalize("ABC"));
}
@@ -83,7 +83,7 @@ TEST(BuilderTest, GetPrecompiledCharsMapTest) {
const NormalizerSpec spec =
SentencePieceTrainer::GetNormalizerSpec("nmt_nfkc_cf");
const Normalizer normalizer(spec);
EXPECT_EQ(WS "abc", normalizer.Normalize(u8""));
EXPECT_EQ(WS "abc", normalizer.Normalize(""));
EXPECT_EQ(WS "abc", normalizer.Normalize("ABC"));
}
@@ -92,9 +92,9 @@ TEST(BuilderTest, GetPrecompiledCharsMapTest) {
SentencePieceTrainer::GetNormalizerSpec("identity");
EXPECT_TRUE(spec.precompiled_charsmap().empty());
const Normalizer normalizer(spec);
EXPECT_EQ(WS u8"", normalizer.Normalize(u8""));
EXPECT_EQ(WS u8"", normalizer.Normalize(u8""));
EXPECT_EQ(WS u8"グーグル", normalizer.Normalize(u8"グーグル"));
EXPECT_EQ(WS "", normalizer.Normalize(""));
EXPECT_EQ(WS "", normalizer.Normalize(""));
EXPECT_EQ(WS "グーグル", normalizer.Normalize("グーグル"));
}
}
@@ -129,11 +129,11 @@ TEST(BuilderTest, CompileCharsMap) {
EXPECT_EQ("ABC", normalizer.Normalize("ABC"));
EXPECT_EQ("XY" WS "Z", normalizer.Normalize("xy z"));
EXPECT_EQ(u8"", normalizer.Normalize(u8""));
EXPECT_EQ("abc", normalizer.Normalize(u8"あいう"));
EXPECT_EQ("abcえ", normalizer.Normalize(u8"あいうえ"));
EXPECT_EQ("", normalizer.Normalize(""));
EXPECT_EQ("abc", normalizer.Normalize("あいう"));
EXPECT_EQ("abcえ", normalizer.Normalize("あいうえ"));
EXPECT_EQ("ABCabcD", normalizer.Normalize("abcあいうd"));
EXPECT_EQ("abcか", normalizer.Normalize(u8"あいうえおか"));
EXPECT_EQ("abcか", normalizer.Normalize("あいうえおか"));
}
TEST(BuilderTest, LoadCharsMapTest) {

View file

@@ -93,7 +93,7 @@ TEST(ModelTest, EncodeTest) {
EXPECT_EQ("c", result[9].first);
// makes a broken utf-8
const std::string broken_utf8 = std::string(u8"").substr(0, 1);
const std::string broken_utf8 = std::string("").substr(0, 1);
result = model.Encode(broken_utf8);
EXPECT_EQ(1, result.size());
EXPECT_EQ(broken_utf8, result[0].first);

View file

@@ -187,7 +187,7 @@ TEST(ModelInterfaceTest, InvalidModelTest) {
std::string RandomString(int length) {
const char kAlphaNum[] =
"0123456789"
u8"!@#$%^&*"
"!@#$%^&*"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
const int kAlphaSize = sizeof(kAlphaNum) - 1;
@@ -266,7 +266,7 @@ TEST(ModelInterfaceTest, SplitIntoWordsTest) {
}
TEST(ModelInterfaceTest, PrefixMatcherTest) {
const PrefixMatcher matcher({"abc", "ab", "xy", u8"京都"});
const PrefixMatcher matcher({"abc", "ab", "xy", "京都"});
bool found;
EXPECT_EQ(1, matcher.PrefixMatch("test", &found));
EXPECT_FALSE(found);
@@ -278,14 +278,14 @@ TEST(ModelInterfaceTest, PrefixMatcherTest) {
EXPECT_FALSE(found);
EXPECT_EQ(2, matcher.PrefixMatch("xyz", &found));
EXPECT_TRUE(found);
EXPECT_EQ(6, matcher.PrefixMatch(u8"京都大学", &found));
EXPECT_EQ(6, matcher.PrefixMatch("京都大学", &found));
EXPECT_TRUE(found);
EXPECT_EQ(3, matcher.PrefixMatch(u8"東京大学", &found));
EXPECT_EQ(3, matcher.PrefixMatch("東京大学", &found));
EXPECT_FALSE(found);
EXPECT_EQ("", matcher.GlobalReplace("", ""));
EXPECT_EQ("", matcher.GlobalReplace("abc", ""));
EXPECT_EQ("--de-pqr", matcher.GlobalReplace("xyabcdeabpqr", u8"-"));
EXPECT_EQ("--de-pqr", matcher.GlobalReplace("xyabcdeabpqr", "-"));
}
TEST(ModelInterfaceTest, PrefixMatcherWithEmptyTest) {
@@ -301,9 +301,9 @@ TEST(ModelInterfaceTest, PrefixMatcherWithEmptyTest) {
EXPECT_FALSE(found);
EXPECT_EQ(1, matcher.PrefixMatch("xyz", &found));
EXPECT_FALSE(found);
EXPECT_EQ(3, matcher.PrefixMatch(u8"京都大学", &found));
EXPECT_EQ(3, matcher.PrefixMatch("京都大学", &found));
EXPECT_FALSE(found);
EXPECT_EQ(3, matcher.PrefixMatch(u8"東京大学", &found));
EXPECT_EQ(3, matcher.PrefixMatch("東京大学", &found));
EXPECT_FALSE(found);
EXPECT_EQ("", matcher.GlobalReplace("", ""));

View file

@@ -41,7 +41,7 @@ TEST(NormalizerTest, NormalizeTest) {
// Empty strings.
EXPECT_EQ("", normalizer.Normalize(""));
EXPECT_EQ("", normalizer.Normalize(" "));
EXPECT_EQ("", normalizer.Normalize(u8" "));
EXPECT_EQ("", normalizer.Normalize(" "));
// Sentence with heading/tailing/redundant spaces.
EXPECT_EQ(WS "ABC", normalizer.Normalize("ABC"));
@@ -53,13 +53,13 @@ TEST(NormalizerTest, NormalizeTest) {
EXPECT_EQ(WS "ABC", normalizer.Normalize("  ABC  "));
// NFKC char to char normalization.
EXPECT_EQ(WS "123", normalizer.Normalize(u8"①②③"));
EXPECT_EQ(WS "123", normalizer.Normalize("①②③"));
// NFKC char to multi-char normalization.
EXPECT_EQ(WS u8"株式会社", normalizer.Normalize(u8""));
EXPECT_EQ(WS "株式会社", normalizer.Normalize(""));
// Half width katakana, character composition happens.
EXPECT_EQ(WS u8"グーグル", normalizer.Normalize(" グーグル "));
EXPECT_EQ(WS "グーグル", normalizer.Normalize(" グーグル "));
EXPECT_EQ(WS "I" WS "saw" WS "a" WS "girl",
normalizer.Normalize(" I saw a   girl  "));
@@ -73,7 +73,7 @@ TEST(NormalizerTest, NormalizeWithoutDummyPrefixTest) {
// Empty strings.
EXPECT_EQ("", normalizer.Normalize(""));
EXPECT_EQ("", normalizer.Normalize(" "));
EXPECT_EQ("", normalizer.Normalize(u8" "));
EXPECT_EQ("", normalizer.Normalize(" "));
// Sentence with heading/tailing/redundant spaces.
EXPECT_EQ("ABC", normalizer.Normalize("ABC"));
@@ -93,7 +93,7 @@ TEST(NormalizerTest, NormalizeWithoutRemoveExtraWhitespacesTest) {
// Empty strings.
EXPECT_EQ("", normalizer.Normalize(""));
EXPECT_EQ(WS WS WS WS WS WS WS, normalizer.Normalize(" "));
EXPECT_EQ(WS WS, normalizer.Normalize(u8" "));
EXPECT_EQ(WS WS, normalizer.Normalize(" "));
// Sentence with heading/tailing/redundant spaces.
EXPECT_EQ(WS "ABC", normalizer.Normalize("ABC"));
@@ -112,7 +112,7 @@ TEST(NormalizerTest, NormalizeWithoutEscapeWhitespacesTest) {
// Empty strings.
EXPECT_EQ("", normalizer.Normalize(""));
EXPECT_EQ("", normalizer.Normalize(" "));
EXPECT_EQ("", normalizer.Normalize(u8" "));
EXPECT_EQ("", normalizer.Normalize(" "));
// Sentence with heading/tailing/redundant spaces.
EXPECT_EQ("ABC", normalizer.Normalize("ABC"));
@@ -291,7 +291,7 @@ TEST(NormalizerTest, NormalizeFullTest) {
{
const std::string input = " グーグル "; // halfwidth katakana
normalizer.Normalize(input, &output, &n2i);
EXPECT_EQ(WS u8"グーグル", output);
EXPECT_EQ(WS "グーグル", output);
const std::vector<size_t> expected = {1, 1, 1, // WS (3byte)
1, 1, 1, // グ
7, 7, 7, // ー
@@ -302,7 +302,7 @@ TEST(NormalizerTest, NormalizeFullTest) {
}
{
const std::string input = u8"①②③";
const std::string input = "①②③";
normalizer.Normalize(input, &output, &n2i);
EXPECT_EQ(WS "123", output);
const std::vector<size_t> expected = {0, 0, 0, // WS (3byte)
@@ -314,17 +314,17 @@ TEST(NormalizerTest, NormalizeFullTest) {
}
{
const std::string input = u8"";
const std::string input = "";
normalizer.Normalize(input, &output, &n2i);
EXPECT_EQ(WS u8"株式会社", output);
EXPECT_EQ(WS "株式会社", output);
const std::vector<size_t> expected = {0, 0, 0, // WS (3byte)
0, 0, 0, // 株
0, 0, 0, // 式
0, 0, 0, // 会
0, 0, 0, // 社
3};
// When u8"株式" is one piece, this has no alignment to the input.
// Sentencepieces which includes the last character (u8"会社" or u8"社")
// When "株式" is one piece, this has no alignment to the input.
// Sentencepieces which includes the last character ("会社" or "社")
// have the alignment to the input.
EXPECT_EQ(expected, n2i);
}

View file

@@ -259,23 +259,23 @@ TEST(SentencepieceProcessorTest, EncodeTest) {
// Halfwidth to Fullwidth katakana normalization.
{
auto mock = MakeUnique<MockModel>();
const EncodeResult result = {{WS u8"グー", 3}, {u8"グル", 4}, {"</s>", 2}};
const absl::string_view input = WS u8"グーグル";
const EncodeResult result = {{WS "グー", 3}, {"グル", 4}, {"</s>", 2}};
const absl::string_view input = WS "グーグル";
mock->SetEncodeResult(input, result);
sp.SetModel(std::move(mock));
std::vector<std::string> output;
EXPECT_OK(sp.Encode(u8"グーグル", &output));
EXPECT_OK(sp.Encode("グーグル", &output));
EXPECT_EQ(GetSpVec(result), output);
SentencePieceText spt;
EXPECT_OK(sp.Encode(u8"グーグル", &spt));
EXPECT_OK(sp.Encode("グーグル", &spt));
EXPECT_EQ(3, spt.pieces_size());
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(result[i].first, spt.pieces(i).piece());
}
EXPECT_EQ(u8"グー", spt.pieces(0).surface());
EXPECT_EQ(u8"グル", spt.pieces(1).surface());
EXPECT_EQ("グー", spt.pieces(0).surface());
EXPECT_EQ("グル", spt.pieces(1).surface());
EXPECT_EQ("", spt.pieces(2).surface());
EXPECT_EQ(3, spt.pieces(0).id());
@@ -293,23 +293,23 @@ TEST(SentencepieceProcessorTest, EncodeTest) {
// One to many normalization.
{
auto mock = MakeUnique<MockModel>();
const EncodeResult result = {{WS u8"株式", 3}, {u8"会社", 4}, {"</s>", 2}};
const absl::string_view input = WS u8"株式会社";
const EncodeResult result = {{WS "株式", 3}, {"会社", 4}, {"</s>", 2}};
const absl::string_view input = WS "株式会社";
mock->SetEncodeResult(input, result);
sp.SetModel(std::move(mock));
std::vector<std::string> output;
EXPECT_OK(sp.Encode(u8"", &output));
EXPECT_OK(sp.Encode("", &output));
EXPECT_EQ(GetSpVec(result), output);
SentencePieceText spt;
EXPECT_OK(sp.Encode(u8"", &spt));
EXPECT_OK(sp.Encode("", &spt));
EXPECT_EQ(3, spt.pieces_size());
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(result[i].first, spt.pieces(i).piece());
}
EXPECT_EQ("", spt.pieces(0).surface());
EXPECT_EQ(u8"", spt.pieces(1).surface());
EXPECT_EQ("", spt.pieces(1).surface());
EXPECT_EQ("", spt.pieces(2).surface());
EXPECT_EQ(3, spt.pieces(0).id());

View file

@@ -14,7 +14,10 @@
#include "testharness.h"
#ifndef OS_WIN
#include <unistd.h>
#endif
#include <fstream>
#include <iostream>
#include <memory>
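
The testharness.h hunk above wraps the POSIX-only <unistd.h> include in #ifndef OS_WIN so the test harness also compiles with MSVC. A minimal sketch of the same pattern, not taken from this repository; it assumes, as in the hunk, that the build system defines OS_WIN when targeting Windows:

// Illustrative sketch only -- not from this PR.
#ifndef OS_WIN
#include <unistd.h>   // POSIX: getpid()
#else
#include <process.h>  // MSVC CRT: _getpid()
#endif

#include <cstdio>

int main() {
  // Use the platform-appropriate call; both return the current process id.
#ifndef OS_WIN
  const int pid = static_cast<int>(getpid());
#else
  const int pid = _getpid();
#endif
  std::printf("running as pid %d\n", pid);
  return 0;
}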

View file

@@ -49,9 +49,9 @@ TEST(TrainerInterfaceTest, IsValidSentencePieceTest) {
EXPECT_FALSE(IsValid(WS "a" WS));
EXPECT_FALSE(IsValid("a" WS "b"));
EXPECT_FALSE(IsValid("a" WS "b" WS));
EXPECT_TRUE(IsValid(u8"あいう"));
EXPECT_TRUE(IsValid(u8"グーグル")); // u8"ー" is a part of Katakana
EXPECT_TRUE(IsValid(u8"食べる"));
EXPECT_TRUE(IsValid("あいう"));
EXPECT_TRUE(IsValid("グーグル")); // "ー" is a part of Katakana
EXPECT_TRUE(IsValid("食べる"));
EXPECT_FALSE(IsValid("漢字ABC")); // mixed CJK scripts
EXPECT_FALSE(IsValid("F1"));
EXPECT_TRUE(IsValid("$10")); // $ and 1 are both "common" script.
@@ -70,9 +70,9 @@ TEST(TrainerInterfaceTest, IsValidSentencePieceTest) {
EXPECT_FALSE(IsValid("a" WS "b" WS));
trainer_spec.set_split_by_unicode_script(false);
EXPECT_TRUE(IsValid(u8"あいう"));
EXPECT_TRUE(IsValid(u8"グーグル"));
EXPECT_TRUE(IsValid(u8"食べる"));
EXPECT_TRUE(IsValid("あいう"));
EXPECT_TRUE(IsValid("グーグル"));
EXPECT_TRUE(IsValid("食べる"));
EXPECT_TRUE(IsValid("漢字ABC"));
EXPECT_TRUE(IsValid("F1"));
EXPECT_TRUE(IsValid("$10"));

View file

@@ -27,17 +27,17 @@ ScriptType GetScriptType(absl::string_view s) {
}
TEST(UnicodeScript, GetScriptTypeTest) {
EXPECT_EQ(U_Han, GetScriptType(u8""));
EXPECT_EQ(U_Han, GetScriptType(u8""));
EXPECT_EQ(U_Hiragana, GetScriptType(u8""));
EXPECT_EQ(U_Katakana, GetScriptType(u8""));
EXPECT_EQ(U_Common, GetScriptType(u8""));
EXPECT_EQ(U_Han, GetScriptType(""));
EXPECT_EQ(U_Han, GetScriptType(""));
EXPECT_EQ(U_Hiragana, GetScriptType(""));
EXPECT_EQ(U_Katakana, GetScriptType(""));
EXPECT_EQ(U_Common, GetScriptType(""));
EXPECT_EQ(U_Latin, GetScriptType("a"));
EXPECT_EQ(U_Latin, GetScriptType("A"));
EXPECT_EQ(U_Common, GetScriptType("0"));
EXPECT_EQ(U_Common, GetScriptType(u8"$"));
EXPECT_EQ(U_Common, GetScriptType(u8"@"));
EXPECT_EQ(U_Common, GetScriptType(u8"-"));
EXPECT_EQ(U_Common, GetScriptType("$"));
EXPECT_EQ(U_Common, GetScriptType("@"));
EXPECT_EQ(U_Common, GetScriptType("-"));
}
} // namespace unicode_script
} // namespace sentencepiece

View file

@@ -87,19 +87,19 @@ TEST(LatticeTest, InsertTest) {
EXPECT_EQ("A", node[0]->piece);
EXPECT_EQ("B", node[1]->piece);
EXPECT_EQ(u8"", node[2]->piece);
EXPECT_EQ(u8"", node[3]->piece);
EXPECT_EQ("", node[2]->piece);
EXPECT_EQ("", node[3]->piece);
EXPECT_EQ("AB", node[4]->piece);
EXPECT_EQ("Bあ", node[5]->piece);
EXPECT_EQ(u8"あい", node[6]->piece);
EXPECT_EQ("あい", node[6]->piece);
EXPECT_EQ("A", node[0]->piece);
EXPECT_EQ("B", node[1]->piece);
EXPECT_EQ(u8"", node[2]->piece);
EXPECT_EQ(u8"", node[3]->piece);
EXPECT_EQ("", node[2]->piece);
EXPECT_EQ("", node[3]->piece);
EXPECT_EQ("AB", node[4]->piece);
EXPECT_EQ("Bあ", node[5]->piece);
EXPECT_EQ(u8"あい", node[6]->piece);
EXPECT_EQ("あい", node[6]->piece);
EXPECT_EQ(0, node[0]->pos);
EXPECT_EQ(1, node[1]->pos);
@@ -569,8 +569,8 @@ TEST(UnigramModelTest, EncodeTest) {
EXPECT_EQ("x", result[0].first);
EXPECT_EQ("y", result[1].first);
EXPECT_EQ("z", result[2].first);
EXPECT_EQ(u8"", result[3].first);
EXPECT_EQ(u8"", result[4].first);
EXPECT_EQ("", result[3].first);
EXPECT_EQ("", result[4].first);
// User defined
result = model.Encode("ABC");

View file

@@ -63,17 +63,20 @@ TEST(UnigramTrainerTest, EndToEndTest) {
EXPECT_TRUE(tok.empty());
EXPECT_OK(sp.Encode(
u8"吾輩《わがはい》は猫である。名前はまだ無い。"
u8"どこで生れたかとんと見当《けんとう》がつかぬ。"
u8"何でも薄暗いじめじめした所でニャーニャー泣いていた事だけは記憶している"
u8"",
"吾輩《わがはい》は猫である。名前はまだ無い。"
"どこで生れたかとんと見当《けんとう》がつかぬ。"
"何でも薄暗いじめじめした所でニャーニャー泣いていた事だけは記憶している"
"",
&tok));
// TODO(taku): Temporarily disable this test on Windows.
#ifndef OS_WIN
EXPECT_EQ(WS
u8" 吾輩 《 わが はい 》 は 猫 である 。 名前 はまだ 無い 。 "
u8"どこ で 生 れた か とん と 見当 《 けん とう 》 が つか ぬ 。 "
u8"何でも 薄 暗 い じめ じめ した 所で ニャーニャー "
u8"泣 い ていた 事 だけは 記憶 している 。",
" 吾輩 《 わが はい 》 は 猫 である 。 名前 はまだ 無い 。 "
"どこ で 生 れた か とん と 見当 《 けん とう 》 が つか ぬ 。 "
"何でも 薄 暗 い じめ じめ した 所で ニャーニャー "
"泣 い ていた 事 だけは 記憶 している 。",
string_util::Join(tok, " "));
#endif
}
} // namespace

View file

@@ -178,7 +178,7 @@ TEST(UtilTest, JoinTest) {
tokens.push_back("a");
tokens.push_back("test");
EXPECT_EQ(string_util::Join(tokens, " "), "this is a test");
EXPECT_EQ(string_util::Join(tokens, u8":"), "this:is:a:test");
EXPECT_EQ(string_util::Join(tokens, ":"), "this:is:a:test");
EXPECT_EQ(string_util::Join(tokens, ""), "thisisatest");
tokens[2] = "";
EXPECT_EQ(string_util::Join(tokens, " "), "this is test");
@@ -191,7 +191,7 @@ TEST(UtilTest, JoinIntTest) {
tokens.push_back(-4);
tokens.push_back(5);
EXPECT_EQ(string_util::Join(tokens, " "), "10 2 -4 5");
EXPECT_EQ(string_util::Join(tokens, u8":"), "10:2:-4:5");
EXPECT_EQ(string_util::Join(tokens, ":"), "10:2:-4:5");
EXPECT_EQ(string_util::Join(tokens, ""), "102-45");
}
@@ -285,7 +285,7 @@ TEST(UtilTest, ItoaTest) {
TEST(UtilTest, OneCharLenTest) {
EXPECT_EQ(1, string_util::OneCharLen("abc"));
EXPECT_EQ(3, string_util::OneCharLen(u8"テスト"));
EXPECT_EQ(3, string_util::OneCharLen("テスト"));
}
TEST(UtilTest, DecodeUTF8Test) {
@@ -449,8 +449,8 @@ TEST(UtilTest, UnicodeTextToUTF8Test) {
ut = string_util::UTF8ToUnicodeText("test");
EXPECT_EQ("test", string_util::UnicodeTextToUTF8(ut));
ut = string_util::UTF8ToUnicodeText(u8"テスト");
EXPECT_EQ(u8"テスト", string_util::UnicodeTextToUTF8(ut));
ut = string_util::UTF8ToUnicodeText("テスト");
EXPECT_EQ("テスト", string_util::UnicodeTextToUTF8(ut));
ut = string_util::UTF8ToUnicodeText("これはtest");
EXPECT_EQ("これはtest", string_util::UnicodeTextToUTF8(ut));