Modified tests to use tlogger. (#3343)
* Modified tests to use tlogger. * Fail on errors, with error expectations. * Added expects and MixedCapsed grpclb_config tests * Moved tlogger to grpctest, moved leakcheck tester to grpctest.go * Added ExpectErrorN() * Removed redundant leak checks * Fixed new test * Made tlogger globals into tlogger methods * ErrorsLeft -> EndTest * Removed some redundant lines * Fixed error in test and empty map in EndTest
This commit is contained in:
Родитель
cb03b9f65c
Коммит
132187f04c
|
@ -29,7 +29,7 @@ import (
|
|||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
func Test_Parse(t *testing.T) {
|
||||
func (s) TestParse(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s string
|
||||
|
@ -71,7 +71,7 @@ func Test_Parse(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func Test_childIsPickFirst(t *testing.T) {
|
||||
func (s) TestChildIsPickFirst(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s string
|
||||
|
|
|
@ -39,7 +39,7 @@ import (
|
|||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
_ "google.golang.org/grpc/grpclog/glogger"
|
||||
"google.golang.org/grpc/internal/leakcheck"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/resolver"
|
||||
|
@ -59,6 +59,14 @@ var (
|
|||
fakeName = "fake.Name"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
type serverNameCheckCreds struct {
|
||||
mu sync.Mutex
|
||||
sn string
|
||||
|
@ -383,9 +391,7 @@ func newLoadBalancer(numberOfBackends int, statsChan chan *lbpb.ClientStats) (ts
|
|||
return
|
||||
}
|
||||
|
||||
func TestGRPCLB(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
func (s) TestGRPCLB(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -429,9 +435,7 @@ func TestGRPCLB(t *testing.T) {
|
|||
}
|
||||
|
||||
// The remote balancer sends response with duplicates to grpclb client.
|
||||
func TestGRPCLBWeighted(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
func (s) TestGRPCLBWeighted(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -497,9 +501,7 @@ func TestGRPCLBWeighted(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDropRequest(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
func (s) TestDropRequest(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -650,9 +652,7 @@ func TestDropRequest(t *testing.T) {
|
|||
}
|
||||
|
||||
// When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list.
|
||||
func TestBalancerDisconnects(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
func (s) TestBalancerDisconnects(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -727,12 +727,10 @@ func TestBalancerDisconnects(t *testing.T) {
|
|||
t.Fatalf("No RPC sent to second backend after 1 second")
|
||||
}
|
||||
|
||||
func TestFallback(t *testing.T) {
|
||||
func (s) TestFallback(t *testing.T) {
|
||||
balancer.Register(newLBBuilderWithFallbackTimeout(100 * time.Millisecond))
|
||||
defer balancer.Register(newLBBuilder())
|
||||
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -860,9 +858,7 @@ func TestFallback(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestExplicitFallback(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
func (s) TestExplicitFallback(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -965,9 +961,7 @@ func TestExplicitFallback(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFallBackWithNoServerAddress(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
func (s) TestFallBackWithNoServerAddress(t *testing.T) {
|
||||
resolveNowCh := make(chan struct{}, 1)
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
r.ResolveNowCallback = func(resolver.ResolveNowOptions) {
|
||||
|
@ -1087,9 +1081,7 @@ func TestFallBackWithNoServerAddress(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGRPCLBPickFirst(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
|
||||
func (s) TestGRPCLBPickFirst(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -1301,8 +1293,7 @@ const (
|
|||
failtosendURI = "failtosend"
|
||||
)
|
||||
|
||||
func TestGRPCLBStatsUnarySuccess(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestGRPCLBStatsUnarySuccess(t *testing.T) {
|
||||
if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) {
|
||||
testC := testpb.NewTestServiceClient(cc)
|
||||
// The first non-failfast RPC succeeds, all connections are up.
|
||||
|
@ -1321,8 +1312,7 @@ func TestGRPCLBStatsUnarySuccess(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGRPCLBStatsUnaryDrop(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestGRPCLBStatsUnaryDrop(t *testing.T) {
|
||||
if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) {
|
||||
testC := testpb.NewTestServiceClient(cc)
|
||||
// The first non-failfast RPC succeeds, all connections are up.
|
||||
|
@ -1342,8 +1332,7 @@ func TestGRPCLBStatsUnaryDrop(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestGRPCLBStatsUnaryFailedToSend(t *testing.T) {
|
||||
if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) {
|
||||
testC := testpb.NewTestServiceClient(cc)
|
||||
// The first non-failfast RPC succeeds, all connections are up.
|
||||
|
@ -1363,8 +1352,7 @@ func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGRPCLBStatsStreamingSuccess(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestGRPCLBStatsStreamingSuccess(t *testing.T) {
|
||||
if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) {
|
||||
testC := testpb.NewTestServiceClient(cc)
|
||||
// The first non-failfast RPC succeeds, all connections are up.
|
||||
|
@ -1397,8 +1385,7 @@ func TestGRPCLBStatsStreamingSuccess(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGRPCLBStatsStreamingDrop(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestGRPCLBStatsStreamingDrop(t *testing.T) {
|
||||
if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) {
|
||||
testC := testpb.NewTestServiceClient(cc)
|
||||
// The first non-failfast RPC succeeds, all connections are up.
|
||||
|
@ -1432,8 +1419,7 @@ func TestGRPCLBStatsStreamingDrop(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestGRPCLBStatsStreamingFailedToSend(t *testing.T) {
|
||||
if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) {
|
||||
testC := testpb.NewTestServiceClient(cc)
|
||||
// The first non-failfast RPC succeeds, all connections are up.
|
||||
|
@ -1459,8 +1445,7 @@ func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGRPCLBStatsQuashEmpty(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestGRPCLBStatsQuashEmpty(t *testing.T) {
|
||||
ch := make(chan *lbpb.ClientStats)
|
||||
defer close(ch)
|
||||
if err := runAndCheckStats(t, false, ch, func(cc *grpc.ClientConn) {
|
||||
|
|
|
@ -83,7 +83,7 @@ func checkCacheCC(ccc *lbCacheClientConn, sccLen, sctaLen int) error {
|
|||
}
|
||||
|
||||
// Test that SubConn won't be immediately removed.
|
||||
func TestLBCacheClientConnExpire(t *testing.T) {
|
||||
func (s) TestLBCacheClientConnExpire(t *testing.T) {
|
||||
mcc := newMockClientConn()
|
||||
if err := checkMockCC(mcc, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -135,7 +135,7 @@ func TestLBCacheClientConnExpire(t *testing.T) {
|
|||
|
||||
// Test that NewSubConn with the same address of a SubConn being removed will
|
||||
// reuse the SubConn and cancel the removing.
|
||||
func TestLBCacheClientConnReuse(t *testing.T) {
|
||||
func (s) TestLBCacheClientConnReuse(t *testing.T) {
|
||||
mcc := newMockClientConn()
|
||||
if err := checkMockCC(mcc, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -220,7 +220,7 @@ func TestLBCacheClientConnReuse(t *testing.T) {
|
|||
|
||||
// Test that if the timer to remove a SubConn fires at the same time NewSubConn
|
||||
// cancels the timer, it doesn't cause deadlock.
|
||||
func TestLBCache_RemoveTimer_New_Race(t *testing.T) {
|
||||
func (s) TestLBCache_RemoveTimer_New_Race(t *testing.T) {
|
||||
mcc := newMockClientConn()
|
||||
if err := checkMockCC(mcc, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
|
|
|
@ -32,7 +32,7 @@ import (
|
|||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
_ "google.golang.org/grpc/grpclog/glogger"
|
||||
"google.golang.org/grpc/internal/leakcheck"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/resolver/manual"
|
||||
|
@ -40,6 +40,14 @@ import (
|
|||
testpb "google.golang.org/grpc/test/grpc_testing"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
type testServer struct {
|
||||
testpb.UnimplementedTestServiceServer
|
||||
}
|
||||
|
@ -90,8 +98,7 @@ func startTestServers(count int) (_ *test, err error) {
|
|||
return t, nil
|
||||
}
|
||||
|
||||
func TestOneBackend(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestOneBackend(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -121,8 +128,7 @@ func TestOneBackend(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBackendsRoundRobin(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestBackendsRoundRobin(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -181,8 +187,7 @@ func TestBackendsRoundRobin(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAddressesRemoved(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestAddressesRemoved(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -239,8 +244,7 @@ func TestAddressesRemoved(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func TestCloseWithPendingRPC(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestCloseWithPendingRPC(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -273,8 +277,7 @@ func TestCloseWithPendingRPC(t *testing.T) {
|
|||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestNewAddressWhileBlocking(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestNewAddressWhileBlocking(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -321,8 +324,7 @@ func TestNewAddressWhileBlocking(t *testing.T) {
|
|||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestOneServerDown(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestOneServerDown(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
@ -419,8 +421,7 @@ func TestOneServerDown(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAllServersDown(t *testing.T) {
|
||||
defer leakcheck.Check(t)
|
||||
func (s) TestAllServersDown(t *testing.T) {
|
||||
r, cleanup := manual.GenerateAndRegisterManualResolver()
|
||||
defer cleanup()
|
||||
|
||||
|
|
|
@ -23,9 +23,19 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
func TestStringWithAllowedValues(t *testing.T) {
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func (s) TestStringWithAllowedValues(t *testing.T) {
|
||||
const defaultVal = "default"
|
||||
tests := []struct {
|
||||
args string
|
||||
|
@ -54,7 +64,7 @@ func TestStringWithAllowedValues(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDurationSlice(t *testing.T) {
|
||||
func (s) TestDurationSlice(t *testing.T) {
|
||||
defaultVal := []time.Duration{time.Second, time.Nanosecond}
|
||||
tests := []struct {
|
||||
args string
|
||||
|
@ -83,7 +93,7 @@ func TestDurationSlice(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIntSlice(t *testing.T) {
|
||||
func (s) TestIntSlice(t *testing.T) {
|
||||
defaultVal := []int{1, 1024}
|
||||
tests := []struct {
|
||||
args string
|
||||
|
@ -112,7 +122,7 @@ func TestIntSlice(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestStringSlice(t *testing.T) {
|
||||
func (s) TestStringSlice(t *testing.T) {
|
||||
defaultVal := []string{"bar", "baz"}
|
||||
tests := []struct {
|
||||
args string
|
||||
|
|
|
@ -26,8 +26,18 @@ import (
|
|||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
// bufConn is a net.Conn implemented by a bytes.Buffer (which is a ReadWriter).
|
||||
type bufConn struct {
|
||||
*bytes.Buffer
|
||||
|
@ -49,7 +59,7 @@ func restoreHooks() func() {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConn(t *testing.T) {
|
||||
func (s) TestConn(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Constant time.
|
||||
|
@ -122,7 +132,7 @@ func TestConn(t *testing.T) {
|
|||
wantSleeps(pkt4Time)
|
||||
}
|
||||
|
||||
func TestSync(t *testing.T) {
|
||||
func (s) TestSync(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Infinitely fast CPU: time doesn't pass unless sleep is called.
|
||||
|
@ -145,7 +155,7 @@ func TestSync(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSyncTooSlow(t *testing.T) {
|
||||
func (s) TestSyncTooSlow(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Infinitely fast CPU: time doesn't pass unless sleep is called.
|
||||
|
@ -166,7 +176,7 @@ func TestSyncTooSlow(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestListenerAndDialer(t *testing.T) {
|
||||
func (s) TestListenerAndDialer(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
tn := time.Unix(123, 0)
|
||||
|
@ -292,7 +302,7 @@ func TestListenerAndDialer(t *testing.T) {
|
|||
read(clientConn, len(pkt1), pkt1, tn)
|
||||
}
|
||||
|
||||
func TestBufferBloat(t *testing.T) {
|
||||
func (s) TestBufferBloat(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Infinitely fast CPU: time doesn't pass unless sleep is called.
|
||||
|
|
|
@ -124,7 +124,7 @@ func protoToSocketOption(skopts []*channelzpb.SocketOption) *channelz.SocketOpti
|
|||
return skdata
|
||||
}
|
||||
|
||||
func TestGetSocketOptions(t *testing.T) {
|
||||
func (s) TestGetSocketOptions(t *testing.T) {
|
||||
czCleanup := channelz.NewChannelzStorage()
|
||||
defer cleanupWrapper(czCleanup, t)
|
||||
ss := []*dummySocket{
|
||||
|
|
|
@ -33,12 +33,21 @@ import (
|
|||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
func init() {
|
||||
channelz.TurnOn()
|
||||
}
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func cleanupWrapper(cleanup func() error, t *testing.T) {
|
||||
if err := cleanup(); err != nil {
|
||||
t.Error(err)
|
||||
|
@ -284,7 +293,7 @@ func init() {
|
|||
proto.RegisterType((*OtherSecurityValue)(nil), "grpc.credentials.OtherChannelzSecurityValue")
|
||||
}
|
||||
|
||||
func TestGetTopChannels(t *testing.T) {
|
||||
func (s) TestGetTopChannels(t *testing.T) {
|
||||
tcs := []*dummyChannel{
|
||||
{
|
||||
state: connectivity.Connecting,
|
||||
|
@ -337,7 +346,7 @@ func TestGetTopChannels(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetServers(t *testing.T) {
|
||||
func (s) TestGetServers(t *testing.T) {
|
||||
ss := []*dummyServer{
|
||||
{
|
||||
callsStarted: 6,
|
||||
|
@ -384,7 +393,7 @@ func TestGetServers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetServerSockets(t *testing.T) {
|
||||
func (s) TestGetServerSockets(t *testing.T) {
|
||||
czCleanup := channelz.NewChannelzStorage()
|
||||
defer cleanupWrapper(czCleanup, t)
|
||||
svrID := channelz.RegisterServer(&dummyServer{}, "")
|
||||
|
@ -423,7 +432,7 @@ func TestGetServerSockets(t *testing.T) {
|
|||
|
||||
// This test makes a GetServerSockets with a non-zero start ID, and expect only
|
||||
// sockets with ID >= the given start ID.
|
||||
func TestGetServerSocketsNonZeroStartID(t *testing.T) {
|
||||
func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) {
|
||||
czCleanup := channelz.NewChannelzStorage()
|
||||
defer cleanupWrapper(czCleanup, t)
|
||||
svrID := channelz.RegisterServer(&dummyServer{}, "")
|
||||
|
@ -453,7 +462,7 @@ func TestGetServerSocketsNonZeroStartID(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetChannel(t *testing.T) {
|
||||
func (s) TestGetChannel(t *testing.T) {
|
||||
czCleanup := channelz.NewChannelzStorage()
|
||||
defer cleanupWrapper(czCleanup, t)
|
||||
refNames := []string{"top channel 1", "nested channel 1", "sub channel 2", "nested channel 3"}
|
||||
|
@ -551,7 +560,7 @@ func TestGetChannel(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetSubChannel(t *testing.T) {
|
||||
func (s) TestGetSubChannel(t *testing.T) {
|
||||
var (
|
||||
subchanCreated = "SubChannel Created"
|
||||
subchanConnectivityChange = fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready)
|
||||
|
@ -628,7 +637,7 @@ func TestGetSubChannel(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetSocket(t *testing.T) {
|
||||
func (s) TestGetSocket(t *testing.T) {
|
||||
czCleanup := channelz.NewChannelzStorage()
|
||||
defer cleanupWrapper(czCleanup, t)
|
||||
ss := []*dummySocket{
|
||||
|
|
|
@ -24,9 +24,18 @@ import (
|
|||
"testing"
|
||||
|
||||
cpb "google.golang.org/genproto/googleapis/rpc/code"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
func TestUnmarshalJSON(t *testing.T) {
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func (s) TestUnmarshalJSON(t *testing.T) {
|
||||
for s, v := range cpb.Code_value {
|
||||
want := Code(v)
|
||||
var got Code
|
||||
|
@ -36,7 +45,7 @@ func TestUnmarshalJSON(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestJSONUnmarshal(t *testing.T) {
|
||||
func (s) TestJSONUnmarshal(t *testing.T) {
|
||||
var got []Code
|
||||
want := []Code{OK, NotFound, Internal, Canceled}
|
||||
in := `["OK", "NOT_FOUND", "INTERNAL", "CANCELLED"]`
|
||||
|
@ -46,7 +55,7 @@ func TestJSONUnmarshal(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalJSON_NilReceiver(t *testing.T) {
|
||||
func (s) TestUnmarshalJSON_NilReceiver(t *testing.T) {
|
||||
var got *Code
|
||||
in := OK.String()
|
||||
if err := got.UnmarshalJSON([]byte(in)); err == nil {
|
||||
|
@ -54,7 +63,7 @@ func TestUnmarshalJSON_NilReceiver(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalJSON_UnknownInput(t *testing.T) {
|
||||
func (s) TestUnmarshalJSON_UnknownInput(t *testing.T) {
|
||||
var got Code
|
||||
for _, in := range [][]byte{[]byte(""), []byte("xxx"), []byte("Code(17)"), nil} {
|
||||
if err := got.UnmarshalJSON([]byte(in)); err == nil {
|
||||
|
@ -63,7 +72,7 @@ func TestUnmarshalJSON_UnknownInput(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalJSON_MarshalUnmarshal(t *testing.T) {
|
||||
func (s) TestUnmarshalJSON_MarshalUnmarshal(t *testing.T) {
|
||||
for i := 0; i < _maxCode; i++ {
|
||||
var cUnMarshaled Code
|
||||
c := Code(i)
|
||||
|
|
|
@ -24,9 +24,18 @@ import (
|
|||
|
||||
"github.com/golang/protobuf/proto"
|
||||
altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
func TestInfoServerName(t *testing.T) {
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func (s) TestInfoServerName(t *testing.T) {
|
||||
// This is not testing any handshaker functionality, so it's fine to only
|
||||
// use NewServerCreds and not NewClientCreds.
|
||||
alts := NewServerCreds(DefaultServerOptions())
|
||||
|
@ -35,7 +44,7 @@ func TestInfoServerName(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestOverrideServerName(t *testing.T) {
|
||||
func (s) TestOverrideServerName(t *testing.T) {
|
||||
wantServerName := "server.name"
|
||||
// This is not testing any handshaker functionality, so it's fine to only
|
||||
// use NewServerCreds and not NewClientCreds.
|
||||
|
@ -46,7 +55,7 @@ func TestOverrideServerName(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCloneClient(t *testing.T) {
|
||||
func (s) TestCloneClient(t *testing.T) {
|
||||
wantServerName := "server.name"
|
||||
opt := DefaultClientOptions()
|
||||
opt.TargetServiceAccounts = []string{"not", "empty"}
|
||||
|
@ -78,7 +87,7 @@ func TestCloneClient(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCloneServer(t *testing.T) {
|
||||
func (s) TestCloneServer(t *testing.T) {
|
||||
wantServerName := "server.name"
|
||||
c := NewServerCreds(DefaultServerOptions())
|
||||
c.OverrideServerName(wantServerName)
|
||||
|
@ -108,7 +117,7 @@ func TestCloneServer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestInfo(t *testing.T) {
|
||||
func (s) TestInfo(t *testing.T) {
|
||||
// This is not testing any handshaker functionality, so it's fine to only
|
||||
// use NewServerCreds and not NewClientCreds.
|
||||
c := NewServerCreds(DefaultServerOptions())
|
||||
|
@ -127,7 +136,7 @@ func TestInfo(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCompareRPCVersions(t *testing.T) {
|
||||
func (s) TestCompareRPCVersions(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
v1 *altspb.RpcProtocolVersions_Version
|
||||
v2 *altspb.RpcProtocolVersions_Version
|
||||
|
@ -165,7 +174,7 @@ func TestCompareRPCVersions(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCheckRPCVersions(t *testing.T) {
|
||||
func (s) TestCheckRPCVersions(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
local *altspb.RpcProtocolVersions
|
||||
|
|
|
@ -23,8 +23,17 @@ import (
|
|||
"testing"
|
||||
|
||||
altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
const (
|
||||
testAppProtocol = "my_app"
|
||||
testRecordProtocol = "very_secure_protocol"
|
||||
|
@ -34,7 +43,7 @@ const (
|
|||
testLocalHostname = "local_hostname"
|
||||
)
|
||||
|
||||
func TestALTSAuthInfo(t *testing.T) {
|
||||
func (s) TestALTSAuthInfo(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
result *altspb.HandshakerResult
|
||||
outAppProtocol string
|
||||
|
|
|
@ -31,7 +31,7 @@ type rekeyAEADTestVector struct {
|
|||
}
|
||||
|
||||
// Test encrypt and decrypt using (adapted) test vectors for AES-GCM.
|
||||
func TestAES128GCMRekeyEncrypt(t *testing.T) {
|
||||
func (s) TestAES128GCMRekeyEncrypt(t *testing.T) {
|
||||
for _, test := range []rekeyAEADTestVector{
|
||||
// NIST vectors from:
|
||||
// http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
|
||||
|
|
|
@ -81,7 +81,7 @@ func testGCMEncryptionDecryption(sender ALTSRecordCrypto, receiver ALTSRecordCry
|
|||
}
|
||||
|
||||
// Test encrypt and decrypt using test vectors for aes128gcm.
|
||||
func TestAES128GCMEncrypt(t *testing.T) {
|
||||
func (s) TestAES128GCMEncrypt(t *testing.T) {
|
||||
for _, test := range []cryptoTestVector{
|
||||
{
|
||||
key: dehex("11754cd72aec309bf52f7687212e8957"),
|
||||
|
@ -215,7 +215,7 @@ func testGCMEncryptRoundtrip(client ALTSRecordCrypto, server ALTSRecordCrypto, t
|
|||
}
|
||||
|
||||
// Test encrypt and decrypt on roundtrip messages for aes128gcm.
|
||||
func TestAES128GCMEncryptRoundtrip(t *testing.T) {
|
||||
func (s) TestAES128GCMEncryptRoundtrip(t *testing.T) {
|
||||
// Test for aes128gcm.
|
||||
key := make([]byte, 16)
|
||||
client, server := getGCMCryptoPair(key, nil, t)
|
||||
|
|
|
@ -104,7 +104,7 @@ func testRekeyEncryptRoundtrip(client ALTSRecordCrypto, server ALTSRecordCrypto,
|
|||
}
|
||||
|
||||
// Test encrypt and decrypt on roundtrip messages for aes128gcmRekey.
|
||||
func TestAES128GCMRekeyEncryptRoundtrip(t *testing.T) {
|
||||
func (s) TestAES128GCMRekeyEncryptRoundtrip(t *testing.T) {
|
||||
// Test for aes128gcmRekey.
|
||||
key := make([]byte, 44)
|
||||
client, server := getRekeyCryptoPair(key, nil, t)
|
||||
|
|
|
@ -29,7 +29,7 @@ const (
|
|||
testOverflowLen = 5
|
||||
)
|
||||
|
||||
func TestCounterSides(t *testing.T) {
|
||||
func (s) TestCounterSides(t *testing.T) {
|
||||
for _, side := range []core.Side{core.ClientSide, core.ServerSide} {
|
||||
outCounter := NewOutCounter(side, testOverflowLen)
|
||||
inCounter := NewInCounter(side, testOverflowLen)
|
||||
|
@ -50,7 +50,7 @@ func TestCounterSides(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCounterInc(t *testing.T) {
|
||||
func (s) TestCounterInc(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
counter []byte
|
||||
want []byte
|
||||
|
@ -89,7 +89,7 @@ func TestCounterInc(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestRolloverCounter(t *testing.T) {
|
||||
func (s) TestRolloverCounter(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
value []byte
|
||||
|
|
|
@ -29,8 +29,17 @@ import (
|
|||
"testing"
|
||||
|
||||
core "google.golang.org/grpc/credentials/alts/internal"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
var (
|
||||
nextProtocols = []string{"ALTSRP_GCM_AES128"}
|
||||
altsRecordFuncs = map[string]ALTSRecordFunc{
|
||||
|
@ -118,7 +127,7 @@ func testPingPong(t *testing.T, np string) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPingPong(t *testing.T) {
|
||||
func (s) TestPingPong(t *testing.T) {
|
||||
for _, np := range nextProtocols {
|
||||
testPingPong(t, np)
|
||||
}
|
||||
|
@ -145,7 +154,7 @@ func testSmallReadBuffer(t *testing.T, np string) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSmallReadBuffer(t *testing.T) {
|
||||
func (s) TestSmallReadBuffer(t *testing.T) {
|
||||
for _, np := range nextProtocols {
|
||||
testSmallReadBuffer(t, np)
|
||||
}
|
||||
|
@ -169,7 +178,7 @@ func testLargeMsg(t *testing.T, np string) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLargeMsg(t *testing.T) {
|
||||
func (s) TestLargeMsg(t *testing.T) {
|
||||
for _, np := range nextProtocols {
|
||||
testLargeMsg(t, np)
|
||||
}
|
||||
|
@ -191,7 +200,7 @@ func testIncorrectMsgType(t *testing.T, np string) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIncorrectMsgType(t *testing.T) {
|
||||
func (s) TestIncorrectMsgType(t *testing.T) {
|
||||
for _, np := range nextProtocols {
|
||||
testIncorrectMsgType(t, np)
|
||||
}
|
||||
|
@ -224,7 +233,7 @@ func testFrameTooLarge(t *testing.T, np string) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFrameTooLarge(t *testing.T) {
|
||||
func (s) TestFrameTooLarge(t *testing.T) {
|
||||
for _, np := range nextProtocols {
|
||||
testFrameTooLarge(t, np)
|
||||
}
|
||||
|
@ -267,7 +276,7 @@ func testWriteLargeData(t *testing.T, np string) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWriteLargeData(t *testing.T) {
|
||||
func (s) TestWriteLargeData(t *testing.T) {
|
||||
for _, np := range nextProtocols {
|
||||
testWriteLargeData(t, np)
|
||||
}
|
||||
|
|
|
@ -28,8 +28,17 @@ import (
|
|||
core "google.golang.org/grpc/credentials/alts/internal"
|
||||
altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
"google.golang.org/grpc/credentials/alts/internal/testutil"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
var (
|
||||
testRecordProtocol = rekeyRecordProtocolName
|
||||
testKey = []byte{
|
||||
|
@ -114,7 +123,7 @@ func (t *testRPCStream) CloseSend() error {
|
|||
|
||||
var stat testutil.Stats
|
||||
|
||||
func TestClientHandshake(t *testing.T) {
|
||||
func (s) TestClientHandshake(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
delay time.Duration
|
||||
numberOfHandshakes int
|
||||
|
@ -169,7 +178,7 @@ func TestClientHandshake(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestServerHandshake(t *testing.T) {
|
||||
func (s) TestServerHandshake(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
delay time.Duration
|
||||
numberOfHandshakes int
|
||||
|
@ -238,7 +247,7 @@ func (t *testUnresponsiveRPCStream) CloseSend() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func TestPeerNotResponding(t *testing.T) {
|
||||
func (s) TestPeerNotResponding(t *testing.T) {
|
||||
stream := &testUnresponsiveRPCStream{}
|
||||
chs := &altsHandshaker{
|
||||
stream: stream,
|
||||
|
|
|
@ -65,7 +65,7 @@ func setupError(testOS string, err error) func() {
|
|||
return setupManufacturerReader(testOS, reader)
|
||||
}
|
||||
|
||||
func TestIsRunningOnGCP(t *testing.T) {
|
||||
func (s) TestIsRunningOnGCP(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
description string
|
||||
testOS string
|
||||
|
@ -90,7 +90,7 @@ func TestIsRunningOnGCP(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIsRunningOnGCPNoProductNameFile(t *testing.T) {
|
||||
func (s) TestIsRunningOnGCPNoProductNameFile(t *testing.T) {
|
||||
reverseFunc := setupError("linux", os.ErrNotExist)
|
||||
if isRunningOnGCP() {
|
||||
t.Errorf("ErrNotExist: isRunningOnGCP()=true, want false")
|
||||
|
@ -98,7 +98,7 @@ func TestIsRunningOnGCPNoProductNameFile(t *testing.T) {
|
|||
reverseFunc()
|
||||
}
|
||||
|
||||
func TestAuthInfoFromContext(t *testing.T) {
|
||||
func (s) TestAuthInfoFromContext(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
altsAuthInfo := &fakeALTSAuthInfo{}
|
||||
p := &peer.Peer{
|
||||
|
@ -127,7 +127,7 @@ func TestAuthInfoFromContext(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAuthInfoFromPeer(t *testing.T) {
|
||||
func (s) TestAuthInfoFromPeer(t *testing.T) {
|
||||
altsAuthInfo := &fakeALTSAuthInfo{}
|
||||
p := &peer.Peer{
|
||||
AuthInfo: altsAuthInfo,
|
||||
|
@ -155,7 +155,7 @@ func TestAuthInfoFromPeer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestClientAuthorizationCheck(t *testing.T) {
|
||||
func (s) TestClientAuthorizationCheck(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
altsAuthInfo := &fakeALTSAuthInfo{testServiceAccount1}
|
||||
p := &peer.Peer{
|
||||
|
|
|
@ -27,9 +27,18 @@ import (
|
|||
"testing"
|
||||
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
"google.golang.org/grpc/testdata"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
// A struct that implements AuthInfo interface but does not implement GetCommonAuthInfo() method.
|
||||
type testAuthInfoNoGetCommonAuthInfoMethod struct{}
|
||||
|
||||
|
@ -55,7 +64,7 @@ func createTestContext(s SecurityLevel) context.Context {
|
|||
return internal.NewRequestInfoContext.(func(context.Context, RequestInfo) context.Context)(context.Background(), ri)
|
||||
}
|
||||
|
||||
func TestCheckSecurityLevel(t *testing.T) {
|
||||
func (s) TestCheckSecurityLevel(t *testing.T) {
|
||||
testCases := []struct {
|
||||
authLevel SecurityLevel
|
||||
testLevel SecurityLevel
|
||||
|
@ -98,7 +107,7 @@ func TestCheckSecurityLevel(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCheckSecurityLevelNoGetCommonAuthInfoMethod(t *testing.T) {
|
||||
func (s) TestCheckSecurityLevelNoGetCommonAuthInfoMethod(t *testing.T) {
|
||||
auth := &testAuthInfoNoGetCommonAuthInfoMethod{}
|
||||
ri := RequestInfo{
|
||||
Method: "testInfo",
|
||||
|
@ -110,7 +119,7 @@ func TestCheckSecurityLevelNoGetCommonAuthInfoMethod(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTLSOverrideServerName(t *testing.T) {
|
||||
func (s) TestTLSOverrideServerName(t *testing.T) {
|
||||
expectedServerName := "server.name"
|
||||
c := NewTLS(nil)
|
||||
c.OverrideServerName(expectedServerName)
|
||||
|
@ -119,7 +128,7 @@ func TestTLSOverrideServerName(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTLSClone(t *testing.T) {
|
||||
func (s) TestTLSClone(t *testing.T) {
|
||||
expectedServerName := "server.name"
|
||||
c := NewTLS(nil)
|
||||
c.OverrideServerName(expectedServerName)
|
||||
|
@ -136,7 +145,7 @@ func TestTLSClone(t *testing.T) {
|
|||
|
||||
type serverHandshake func(net.Conn) (AuthInfo, error)
|
||||
|
||||
func TestClientHandshakeReturnsAuthInfo(t *testing.T) {
|
||||
func (s) TestClientHandshakeReturnsAuthInfo(t *testing.T) {
|
||||
tcs := []struct {
|
||||
name string
|
||||
address string
|
||||
|
@ -174,7 +183,7 @@ func TestClientHandshakeReturnsAuthInfo(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestServerHandshakeReturnsAuthInfo(t *testing.T) {
|
||||
func (s) TestServerHandshakeReturnsAuthInfo(t *testing.T) {
|
||||
done := make(chan AuthInfo, 1)
|
||||
lis := launchServer(t, gRPCServerHandshake, done)
|
||||
defer lis.Close()
|
||||
|
@ -189,7 +198,7 @@ func TestServerHandshakeReturnsAuthInfo(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestServerAndClientHandshake(t *testing.T) {
|
||||
func (s) TestServerAndClientHandshake(t *testing.T) {
|
||||
done := make(chan AuthInfo, 1)
|
||||
lis := launchServer(t, gRPCServerHandshake, done)
|
||||
defer lis.Close()
|
||||
|
@ -318,7 +327,7 @@ func tlsClientHandshake(conn net.Conn, _ string) (AuthInfo, error) {
|
|||
return TLSInfo{State: clientConn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{SecurityLevel: PrivacyAndIntegrity}}, nil
|
||||
}
|
||||
|
||||
func TestAppendH2ToNextProtos(t *testing.T) {
|
||||
func (s) TestAppendH2ToNextProtos(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ps []string
|
||||
|
|
|
@ -26,8 +26,17 @@ import (
|
|||
"testing"
|
||||
|
||||
"google.golang.org/grpc/credentials/internal"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
type syscallConn struct {
|
||||
net.Conn
|
||||
}
|
||||
|
@ -40,7 +49,7 @@ type nonSyscallConn struct {
|
|||
net.Conn
|
||||
}
|
||||
|
||||
func TestWrapSyscallConn(t *testing.T) {
|
||||
func (s) TestWrapSyscallConn(t *testing.T) {
|
||||
sc := &syscallConn{}
|
||||
nsc := &nonSyscallConn{}
|
||||
|
||||
|
@ -50,7 +59,7 @@ func TestWrapSyscallConn(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWrapSyscallConnNoWrap(t *testing.T) {
|
||||
func (s) TestWrapSyscallConnNoWrap(t *testing.T) {
|
||||
nscRaw := &nonSyscallConn{}
|
||||
nsc := &nonSyscallConn{}
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
"google.golang.org/grpc/test/codec_perf"
|
||||
)
|
||||
|
||||
|
@ -45,12 +46,20 @@ func marshalAndUnmarshal(t *testing.T, codec encoding.Codec, expectedBody []byte
|
|||
}
|
||||
}
|
||||
|
||||
func TestBasicProtoCodecMarshalAndUnmarshal(t *testing.T) {
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func (s) TestBasicProtoCodecMarshalAndUnmarshal(t *testing.T) {
|
||||
marshalAndUnmarshal(t, codec{}, []byte{1, 2, 3})
|
||||
}
|
||||
|
||||
// Try to catch possible race conditions around use of pools
|
||||
func TestConcurrentUsage(t *testing.T) {
|
||||
func (s) TestConcurrentUsage(t *testing.T) {
|
||||
const (
|
||||
numGoRoutines = 100
|
||||
numMarshUnmarsh = 1000
|
||||
|
@ -83,7 +92,7 @@ func TestConcurrentUsage(t *testing.T) {
|
|||
|
||||
// TestStaggeredMarshalAndUnmarshalUsingSamePool tries to catch potential errors in which slices get
|
||||
// stomped on during reuse of a proto.Buffer.
|
||||
func TestStaggeredMarshalAndUnmarshalUsingSamePool(t *testing.T) {
|
||||
func (s) TestStaggeredMarshalAndUnmarshalUsingSamePool(t *testing.T) {
|
||||
codec1 := codec{}
|
||||
codec2 := codec{}
|
||||
|
||||
|
|
|
@ -28,8 +28,17 @@ import (
|
|||
"github.com/golang/protobuf/proto"
|
||||
helloworld "google.golang.org/grpc/examples/helloworld/helloworld"
|
||||
hwmock "google.golang.org/grpc/examples/helloworld/mock_helloworld"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
// rpcMsg implements the gomock.Matcher interface
|
||||
type rpcMsg struct {
|
||||
msg proto.Message
|
||||
|
@ -47,7 +56,7 @@ func (r *rpcMsg) String() string {
|
|||
return fmt.Sprintf("is %s", r.msg)
|
||||
}
|
||||
|
||||
func TestSayHello(t *testing.T) {
|
||||
func (s) TestSayHello(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockGreeterClient := hwmock.NewMockGreeterClient(ctrl)
|
||||
|
|
|
@ -28,14 +28,23 @@ import (
|
|||
"github.com/golang/protobuf/proto"
|
||||
rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide"
|
||||
rgpb "google.golang.org/grpc/examples/route_guide/routeguide"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
var msg = &rgpb.RouteNote{
|
||||
Location: &rgpb.Point{Latitude: 17, Longitude: 29},
|
||||
Message: "Taxi-cab",
|
||||
}
|
||||
|
||||
func TestRouteChat(t *testing.T) {
|
||||
func (s) TestRouteChat(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
|
30
grpc_test.go
30
grpc_test.go
|
@ -19,39 +19,13 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
"google.golang.org/grpc/internal/grpctest/tlogger"
|
||||
"google.golang.org/grpc/internal/leakcheck"
|
||||
)
|
||||
|
||||
type s struct{}
|
||||
|
||||
var lcFailed uint32
|
||||
|
||||
type errorer struct {
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (e errorer) Errorf(format string, args ...interface{}) {
|
||||
atomic.StoreUint32(&lcFailed, 1)
|
||||
e.t.Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (s) Setup(t *testing.T) {
|
||||
tlogger.Update(t)
|
||||
}
|
||||
|
||||
func (s) Teardown(t *testing.T) {
|
||||
if atomic.LoadUint32(&lcFailed) == 1 {
|
||||
return
|
||||
}
|
||||
leakcheck.Check(errorer{t: t})
|
||||
if atomic.LoadUint32(&lcFailed) == 1 {
|
||||
t.Log("Leak check disabled for future tests")
|
||||
}
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
|
|
|
@ -28,7 +28,7 @@ import (
|
|||
"google.golang.org/grpc/connectivity"
|
||||
)
|
||||
|
||||
func TestClientHealthCheckBackoff(t *testing.T) {
|
||||
func (s) TestClientHealthCheckBackoff(t *testing.T) {
|
||||
const maxRetries = 5
|
||||
|
||||
var want []time.Duration
|
||||
|
|
|
@ -24,9 +24,18 @@ import (
|
|||
"time"
|
||||
|
||||
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
func TestShutdown(t *testing.T) {
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func (s) TestShutdown(t *testing.T) {
|
||||
const testService = "tteesstt"
|
||||
s := NewServer()
|
||||
s.SetServingStatus(testService, healthpb.HealthCheckResponse_SERVING)
|
||||
|
|
|
@ -24,10 +24,19 @@ import (
|
|||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/health"
|
||||
healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
// Make sure the service implementation complies with the proto definition.
|
||||
func TestRegister(t *testing.T) {
|
||||
func (s) TestRegister(t *testing.T) {
|
||||
s := grpc.NewServer()
|
||||
healthgrpc.RegisterHealthServer(s, health.NewServer())
|
||||
s.Stop()
|
||||
|
|
|
@ -34,11 +34,20 @@ import (
|
|||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/binarylog"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
"google.golang.org/grpc/metadata"
|
||||
testpb "google.golang.org/grpc/stats/grpc_testing"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Setting environment variable in tests doesn't work because of the init
|
||||
// orders. Set the loggers directly here.
|
||||
|
@ -882,61 +891,61 @@ func testClientBinaryLog(t *testing.T, c *rpcConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func TestClientBinaryLogUnaryRPC(t *testing.T) {
|
||||
func (s) TestClientBinaryLogUnaryRPC(t *testing.T) {
|
||||
if err := testClientBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogUnaryRPCError(t *testing.T) {
|
||||
func (s) TestClientBinaryLogUnaryRPCError(t *testing.T) {
|
||||
if err := testClientBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogClientStreamRPC(t *testing.T) {
|
||||
func (s) TestClientBinaryLogClientStreamRPC(t *testing.T) {
|
||||
count := 5
|
||||
if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogClientStreamRPCError(t *testing.T) {
|
||||
func (s) TestClientBinaryLogClientStreamRPCError(t *testing.T) {
|
||||
count := 1
|
||||
if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogServerStreamRPC(t *testing.T) {
|
||||
func (s) TestClientBinaryLogServerStreamRPC(t *testing.T) {
|
||||
count := 5
|
||||
if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogServerStreamRPCError(t *testing.T) {
|
||||
func (s) TestClientBinaryLogServerStreamRPCError(t *testing.T) {
|
||||
count := 5
|
||||
if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogFullDuplexRPC(t *testing.T) {
|
||||
func (s) TestClientBinaryLogFullDuplexRPC(t *testing.T) {
|
||||
count := 5
|
||||
if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogFullDuplexRPCError(t *testing.T) {
|
||||
func (s) TestClientBinaryLogFullDuplexRPCError(t *testing.T) {
|
||||
count := 5
|
||||
if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientBinaryLogCancel(t *testing.T) {
|
||||
func (s) TestClientBinaryLogCancel(t *testing.T) {
|
||||
count := 5
|
||||
if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: cancelRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -984,54 +993,54 @@ func testServerBinaryLog(t *testing.T, c *rpcConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func TestServerBinaryLogUnaryRPC(t *testing.T) {
|
||||
func (s) TestServerBinaryLogUnaryRPC(t *testing.T) {
|
||||
if err := testServerBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerBinaryLogUnaryRPCError(t *testing.T) {
|
||||
func (s) TestServerBinaryLogUnaryRPCError(t *testing.T) {
|
||||
if err := testServerBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerBinaryLogClientStreamRPC(t *testing.T) {
|
||||
func (s) TestServerBinaryLogClientStreamRPC(t *testing.T) {
|
||||
count := 5
|
||||
if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerBinaryLogClientStreamRPCError(t *testing.T) {
|
||||
func (s) TestServerBinaryLogClientStreamRPCError(t *testing.T) {
|
||||
count := 1
|
||||
if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerBinaryLogServerStreamRPC(t *testing.T) {
|
||||
func (s) TestServerBinaryLogServerStreamRPC(t *testing.T) {
|
||||
count := 5
|
||||
if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerBinaryLogServerStreamRPCError(t *testing.T) {
|
||||
func (s) TestServerBinaryLogServerStreamRPCError(t *testing.T) {
|
||||
count := 5
|
||||
if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerBinaryLogFullDuplex(t *testing.T) {
|
||||
func (s) TestServerBinaryLogFullDuplex(t *testing.T) {
|
||||
count := 5
|
||||
if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerBinaryLogFullDuplexError(t *testing.T) {
|
||||
func (s) TestServerBinaryLogFullDuplexError(t *testing.T) {
|
||||
count := 5
|
||||
if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil {
|
||||
t.Fatal(err)
|
||||
|
|
|
@ -20,10 +20,20 @@ package binarylog
|
|||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
// Test that get method logger returns the one with the most exact match.
|
||||
func TestGetMethodLogger(t *testing.T) {
|
||||
func (s) TestGetMethodLogger(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
method string
|
||||
|
@ -96,7 +106,7 @@ func TestGetMethodLogger(t *testing.T) {
|
|||
}
|
||||
|
||||
// expect method logger to be nil
|
||||
func TestGetMethodLoggerOff(t *testing.T) {
|
||||
func (s) TestGetMethodLoggerOff(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
method string
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
|
||||
// This tests that when multiple configs are specified, all methods loggers will
|
||||
// be set correctly. Correctness of each logger is covered by other unit tests.
|
||||
func TestNewLoggerFromConfigString(t *testing.T) {
|
||||
func (s) TestNewLoggerFromConfigString(t *testing.T) {
|
||||
const (
|
||||
s1 = "s1"
|
||||
m1 = "m1"
|
||||
|
@ -65,7 +65,7 @@ func TestNewLoggerFromConfigString(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNewLoggerFromConfigStringInvalid(t *testing.T) {
|
||||
func (s) TestNewLoggerFromConfigStringInvalid(t *testing.T) {
|
||||
testCases := []string{
|
||||
"",
|
||||
"*{}",
|
||||
|
@ -90,7 +90,7 @@ func TestNewLoggerFromConfigStringInvalid(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseMethodConfigAndSuffix(t *testing.T) {
|
||||
func (s) TestParseMethodConfigAndSuffix(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in, service, method, suffix string
|
||||
}{
|
||||
|
@ -152,7 +152,7 @@ func TestParseMethodConfigAndSuffix(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseMethodConfigAndSuffixInvalid(t *testing.T) {
|
||||
func (s) TestParseMethodConfigAndSuffixInvalid(t *testing.T) {
|
||||
testCases := []string{
|
||||
"*/m",
|
||||
"*/m{}",
|
||||
|
@ -165,7 +165,7 @@ func TestParseMethodConfigAndSuffixInvalid(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseHeaderMessageLengthConfig(t *testing.T) {
|
||||
func (s) TestParseHeaderMessageLengthConfig(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
hdr, msg uint64
|
||||
|
@ -222,7 +222,7 @@ func TestParseHeaderMessageLengthConfig(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
func TestParseHeaderMessageLengthConfigInvalid(t *testing.T) {
|
||||
func (s) TestParseHeaderMessageLengthConfigInvalid(t *testing.T) {
|
||||
testCases := []string{
|
||||
"{}",
|
||||
"{h;a}",
|
||||
|
@ -236,7 +236,7 @@ func TestParseHeaderMessageLengthConfigInvalid(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFillMethodLoggerWithConfigStringBlacklist(t *testing.T) {
|
||||
func (s) TestFillMethodLoggerWithConfigStringBlacklist(t *testing.T) {
|
||||
testCases := []string{
|
||||
"p.s/m",
|
||||
"service/method",
|
||||
|
@ -256,7 +256,7 @@ func TestFillMethodLoggerWithConfigStringBlacklist(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFillMethodLoggerWithConfigStringGlobal(t *testing.T) {
|
||||
func (s) TestFillMethodLoggerWithConfigStringGlobal(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
hdr, msg uint64
|
||||
|
@ -320,7 +320,7 @@ func TestFillMethodLoggerWithConfigStringGlobal(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFillMethodLoggerWithConfigStringPerService(t *testing.T) {
|
||||
func (s) TestFillMethodLoggerWithConfigStringPerService(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
hdr, msg uint64
|
||||
|
@ -386,7 +386,7 @@ func TestFillMethodLoggerWithConfigStringPerService(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFillMethodLoggerWithConfigStringPerMethod(t *testing.T) {
|
||||
func (s) TestFillMethodLoggerWithConfigStringPerMethod(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
hdr, msg uint64
|
||||
|
@ -456,7 +456,7 @@ func TestFillMethodLoggerWithConfigStringPerMethod(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFillMethodLoggerWithConfigStringInvalid(t *testing.T) {
|
||||
func (s) TestFillMethodLoggerWithConfigStringInvalid(t *testing.T) {
|
||||
testCases := []string{
|
||||
"",
|
||||
"{}",
|
||||
|
|
|
@ -32,7 +32,7 @@ import (
|
|||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func TestLog(t *testing.T) {
|
||||
func (s) TestLog(t *testing.T) {
|
||||
idGen.reset()
|
||||
ml := newMethodLogger(10, 10)
|
||||
// Set sink to testing buffer.
|
||||
|
@ -348,7 +348,7 @@ func TestLog(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTruncateMetadataNotTruncated(t *testing.T) {
|
||||
func (s) TestTruncateMetadataNotTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
mpPb *pb.Metadata
|
||||
|
@ -415,7 +415,7 @@ func TestTruncateMetadataNotTruncated(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTruncateMetadataTruncated(t *testing.T) {
|
||||
func (s) TestTruncateMetadataTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
mpPb *pb.Metadata
|
||||
|
@ -476,7 +476,7 @@ func TestTruncateMetadataTruncated(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTruncateMessageNotTruncated(t *testing.T) {
|
||||
func (s) TestTruncateMessageNotTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
msgPb *pb.Message
|
||||
|
@ -509,7 +509,7 @@ func TestTruncateMessageNotTruncated(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTruncateMessageTruncated(t *testing.T) {
|
||||
func (s) TestTruncateMessageTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
msgPb *pb.Message
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func TestLongMethodConfigRegexp(t *testing.T) {
|
||||
func (s) TestLongMethodConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
|
@ -87,7 +87,7 @@ func TestLongMethodConfigRegexp(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHeaderConfigRegexp(t *testing.T) {
|
||||
func (s) TestHeaderConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
|
@ -114,7 +114,7 @@ func TestHeaderConfigRegexp(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMessageConfigRegexp(t *testing.T) {
|
||||
func (s) TestMessageConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
|
@ -141,7 +141,7 @@ func TestMessageConfigRegexp(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHeaderMessageConfigRegexp(t *testing.T) {
|
||||
func (s) TestHeaderMessageConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
|
|
|
@ -20,7 +20,7 @@ package binarylog
|
|||
|
||||
import "testing"
|
||||
|
||||
func TestParseMethodName(t *testing.T) {
|
||||
func (s) TestParseMethodName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
methodName string
|
||||
service, method string
|
||||
|
@ -43,7 +43,7 @@ func TestParseMethodName(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseMethodNameInvalid(t *testing.T) {
|
||||
func (s) TestParseMethodNameInvalid(t *testing.T) {
|
||||
testCases := []string{
|
||||
"/",
|
||||
"/sm",
|
||||
|
|
|
@ -22,6 +22,8 @@ import (
|
|||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -29,6 +31,14 @@ const (
|
|||
numWrites = 10
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
// wantReads contains the set of values expected to be read by the reader
|
||||
// goroutine in the tests.
|
||||
var wantReads []int
|
||||
|
@ -43,7 +53,7 @@ func init() {
|
|||
|
||||
// TestSingleWriter starts one reader and one writer goroutine and makes sure
|
||||
// that the reader gets all the value added to the buffer by the writer.
|
||||
func TestSingleWriter(t *testing.T) {
|
||||
func (s) TestSingleWriter(t *testing.T) {
|
||||
ub := NewUnbounded()
|
||||
reads := []int{}
|
||||
|
||||
|
@ -77,7 +87,7 @@ func TestSingleWriter(t *testing.T) {
|
|||
|
||||
// TestMultipleWriters starts multiple writers and one reader goroutine and
|
||||
// makes sure that the reader gets all the data written by all writers.
|
||||
func TestMultipleWriters(t *testing.T) {
|
||||
func (s) TestMultipleWriters(t *testing.T) {
|
||||
ub := NewUnbounded()
|
||||
reads := []int{}
|
||||
|
||||
|
|
|
@ -22,12 +22,22 @@ import (
"sync"
"testing"
"time"
"google.golang.org/grpc/internal/grpctest"
)
const (
testCacheTimeout = 100 * time.Millisecond
)
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func (c *TimeoutCache) getForTesting(key interface{}) (*cacheEntry, bool) {
c.mu.Lock()
defer c.mu.Unlock()

@ -38,7 +48,7 @@ func (c *TimeoutCache) getForTesting(key interface{}) (*cacheEntry, bool) {
// TestCacheExpire attempts to add an entry to the cache and verifies that it
// was added successfully. It then makes sure that on timeout, it's removed and
// the associated callback is called.
func TestCacheExpire(t *testing.T) {
func (s) TestCacheExpire(t *testing.T) {
const k, v = 1, "1"
c := NewTimeoutCache(testCacheTimeout)

@ -63,7 +73,7 @@ func TestCacheExpire(t *testing.T) {
// TestCacheRemove attempts to remove an existing entry from the cache and
// verifies that the entry is removed and the associated callback is not
// invoked.
func TestCacheRemove(t *testing.T) {
func (s) TestCacheRemove(t *testing.T) {
const k, v = 1, "1"
c := NewTimeoutCache(testCacheTimeout)

@ -94,7 +104,7 @@ func TestCacheRemove(t *testing.T) {
// TestCacheClearWithoutCallback attempts to clear all entries from the cache
// and verifies that the associated callbacks are not invoked.
func TestCacheClearWithoutCallback(t *testing.T) {
func (s) TestCacheClearWithoutCallback(t *testing.T) {
var values []string
const itemCount = 3
for i := 0; i < itemCount; i++ {

@ -142,7 +152,7 @@ func TestCacheClearWithoutCallback(t *testing.T) {
// TestCacheClearWithCallback attempts to clear all entries from the cache and
// verifies that the associated callbacks are invoked.
func TestCacheClearWithCallback(t *testing.T) {
func (s) TestCacheClearWithCallback(t *testing.T) {
var values []string
const itemCount = 3
for i := 0; i < itemCount; i++ {

@ -198,7 +208,7 @@ func TestCacheClearWithCallback(t *testing.T) {
// TestCacheRetrieveTimeoutRace simulates the case where an entry's timer fires
// around the same time that Remove() is called for it. It verifies that there
// is no deadlock.
func TestCacheRetrieveTimeoutRace(t *testing.T) {
func (s) TestCacheRetrieveTimeoutRace(t *testing.T) {
c := NewTimeoutCache(time.Nanosecond)
done := make(chan struct{})

@ -32,9 +32,18 @@ import (
"golang.org/x/sys/unix"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpctest"
)
func TestGetSocketOpt(t *testing.T) {
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func (s) TestGetSocketOpt(t *testing.T) {
network, addr := "tcp", ":0"
ln, err := net.Listen(network, addr)
if err != nil {

@ -18,9 +18,21 @@
package grpcsync
import "testing"
import (
"testing"
func TestEventHasFired(t *testing.T) {
"google.golang.org/grpc/internal/grpctest"
)
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func (s) TestEventHasFired(t *testing.T) {
e := NewEvent()
if e.HasFired() {
t.Fatal("e.HasFired() = true; want false")

@ -33,7 +45,7 @@ func TestEventHasFired(t *testing.T) {
}
}
func TestEventDoneChannel(t *testing.T) {
func (s) TestEventDoneChannel(t *testing.T) {
e := NewEvent()
select {
case <-e.Done():

@ -50,7 +62,7 @@ func TestEventDoneChannel(t *testing.T) {
}
}
func TestEventMultipleFires(t *testing.T) {
func (s) TestEventMultipleFires(t *testing.T) {
e := NewEvent()
if e.HasFired() {
t.Fatal("e.HasFired() = true; want false")
@ -22,9 +22,46 @@ package grpctest
import (
"reflect"
"strings"
"sync/atomic"
"testing"
"google.golang.org/grpc/internal/leakcheck"
)
var lcFailed uint32
type errorer struct {
t *testing.T
}
func (e errorer) Errorf(format string, args ...interface{}) {
atomic.StoreUint32(&lcFailed, 1)
e.t.Errorf(format, args...)
}
// Tester is an implementation of the x interface parameter to
// grpctest.RunSubTests with default Setup and Teardown behavior. Setup updates
// the tlogger and Teardown performs a leak check. Embed in a struct with tests
// defined to use.
type Tester struct{}
// Setup updates the tlogger.
func (Tester) Setup(t *testing.T) {
TLogger.Update(t)
}
// Teardown performs a leak check.
func (Tester) Teardown(t *testing.T) {
if atomic.LoadUint32(&lcFailed) == 1 {
return
}
leakcheck.Check(errorer{t: t})
if atomic.LoadUint32(&lcFailed) == 1 {
t.Log("Leak check disabled for future tests")
}
TLogger.EndTest(t)
}
func getTestFunc(t *testing.T, xv reflect.Value, name string) func(*testing.T) {
if m := xv.MethodByName(name); m.IsValid() {
if f, ok := m.Interface().(func(*testing.T)); ok {
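The Tester doc comment above is the heart of this change; the sketch below shows what a converted test package looks like once it embeds Tester. It is illustrative only (the package and test names are invented), but it mirrors the pattern repeated throughout this diff.

// Illustrative sketch, not from the commit: wiring a package's tests into
// grpctest. The embedded Tester provides Setup (tlogger update) and
// Teardown (leak check plus EndTest); RunSubTests runs every (s) TestXxx method.
package foo

import (
	"testing"

	"google.golang.org/grpc/internal/grpctest"
)

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

// A former top-level TestSomething becomes a method on s so RunSubTests can
// discover it by reflection.
func (s) TestSomething(t *testing.T) {
	if got, want := 1+1, 2; got != want {
		t.Fatalf("got %d, want %d", got, want)
	}
}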
@ -0,0 +1,214 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpctest
import (
"errors"
"fmt"
"os"
"regexp"
"runtime/debug"
"strconv"
"strings"
"testing"
"google.golang.org/grpc/grpclog"
)
// TLogger serves as the grpclog logger and is the interface through which
// expected errors are declared in tests.
var TLogger *tLogger
const callingFrame = 4
type logType int
const (
logLog logType = iota
errorLog
fatalLog
)
type tLogger struct {
v int
t *testing.T
errors map[*regexp.Regexp]int
initialized bool
}
func init() {
TLogger = &tLogger{0, nil, map[*regexp.Regexp]int{}, false}
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
if vl, err := strconv.Atoi(vLevel); err == nil {
TLogger.v = vl
}
}
func getStackFrame(stack []byte, frame int) (string, error) {
s := strings.Split(string(stack), "\n")
if frame >= (len(s)-1)/2 {
return "", errors.New("frame request out-of-bounds")
}
split := strings.Split(strings.Fields(s[(frame*2)+2][1:])[0], "/")
return fmt.Sprintf("%v:", split[len(split)-1]), nil
}
func (g *tLogger) log(ltype logType, format string, args ...interface{}) {
s := debug.Stack()
prefix, err := getStackFrame(s, callingFrame)
args = append([]interface{}{prefix}, args...)
if err != nil {
g.t.Error(err)
return
}
if format == "" {
switch ltype {
case errorLog:
// fmt.Sprintln is used rather than fmt.Sprint because t.Log uses fmt.Sprintln behavior.
if g.expected(fmt.Sprintln(args...)) {
g.t.Log(args...)
} else {
g.t.Error(args...)
}
case fatalLog:
panic(fmt.Sprint(args...))
default:
g.t.Log(args...)
}
} else {
format = "%v " + format
switch ltype {
case errorLog:
if g.expected(fmt.Sprintf(format, args...)) {
g.t.Logf(format, args...)
} else {
g.t.Errorf(format, args...)
}
case fatalLog:
panic(fmt.Sprintf(format, args...))
default:
g.t.Logf(format, args...)
}
}
}
// Update updates the testing.T that the testing logger logs to. Should be done
// before every test. It also initializes the tLogger if it has not already.
func (g *tLogger) Update(t *testing.T) {
if !g.initialized {
grpclog.SetLoggerV2(TLogger)
g.initialized = true
}
g.t = t
g.errors = map[*regexp.Regexp]int{}
}
// ExpectError declares an error to be expected. For the next test, the first
// error log matching the expression (using FindString) will not cause the test
// to fail. "For the next test" includes all the time until the next call to
// Update(). Note that if an expected error is not encountered, this will cause
// the test to fail.
func (g *tLogger) ExpectError(expr string) {
g.ExpectErrorN(expr, 1)
}
// ExpectErrorN declares an error to be expected n times.
func (g *tLogger) ExpectErrorN(expr string, n int) {
re, err := regexp.Compile(expr)
if err != nil {
g.t.Error(err)
return
}
g.errors[re] += n
}
// EndTest checks if expected errors were not encountered.
func (g *tLogger) EndTest(t *testing.T) {
for re, count := range g.errors {
if count > 0 {
t.Errorf("Expected error '%v' not encountered", re.String())
}
}
g.errors = map[*regexp.Regexp]int{}
}
func (g *tLogger) expected(s string) bool {
for re, count := range g.errors {
if re.FindStringIndex(s) != nil {
g.errors[re]--
if count <= 1 {
delete(g.errors, re)
}
return true
}
}
return false
}
func (g *tLogger) Info(args ...interface{}) {
g.log(logLog, "", args...)
}
func (g *tLogger) Infoln(args ...interface{}) {
g.log(logLog, "", args...)
}
func (g *tLogger) Infof(format string, args ...interface{}) {
g.log(logLog, format, args...)
}
func (g *tLogger) Warning(args ...interface{}) {
g.log(logLog, "", args...)
}
func (g *tLogger) Warningln(args ...interface{}) {
g.log(logLog, "", args...)
}
func (g *tLogger) Warningf(format string, args ...interface{}) {
g.log(logLog, format, args...)
}
func (g *tLogger) Error(args ...interface{}) {
g.log(errorLog, "", args...)
}
func (g *tLogger) Errorln(args ...interface{}) {
g.log(errorLog, "", args...)
}
func (g *tLogger) Errorf(format string, args ...interface{}) {
g.log(errorLog, format, args...)
}
func (g *tLogger) Fatal(args ...interface{}) {
g.log(fatalLog, "", args...)
}
func (g *tLogger) Fatalln(args ...interface{}) {
g.log(fatalLog, "", args...)
}
func (g *tLogger) Fatalf(format string, args ...interface{}) {
g.log(fatalLog, format, args...)
}
func (g *tLogger) V(l int) bool {
return l <= g.v
}
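The per-test lifecycle implied by the code above: Update is called from Tester.Setup before each subtest and clears the expectation map, error logs during the test either match a declared expectation or fail the test, and EndTest (called from Tester.Teardown) fails the test if a declared expectation was never hit. A hedged sketch of a test that relies on this, using an invented helper:

// Illustrative sketch, not from the commit: a test that knowingly provokes an
// error log declares it up front so the tlogger logs it instead of failing.
// triggerInvalidConfig is a hypothetical helper assumed to end up calling
// grpclog.Errorf("invalid config: ..."); if that log never happens, EndTest
// reports "Expected error ... not encountered".
func (s) TestBadConfigLogsError(t *testing.T) {
	grpctest.TLogger.ExpectError("invalid config")
	triggerInvalidConfig()
}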
@ -1,140 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package tlogger initializes the testing logger on import which logs to the
// testing package's T struct.
package tlogger
import (
"errors"
"fmt"
"os"
"runtime/debug"
"strconv"
"strings"
"testing"
"google.golang.org/grpc/grpclog"
)
var logger = tLogger{v: 0}
const callingFrame = 4
type tLogger struct {
v int
t *testing.T
}
func init() {
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
if vl, err := strconv.Atoi(vLevel); err == nil {
logger.v = vl
}
grpclog.SetLoggerV2(&logger)
}
func getStackFrame(stack []byte, frame int) (string, error) {
s := strings.Split(string(stack), "\n")
if frame >= (len(s)-1)/2 {
return "", errors.New("frame request out-of-bounds")
}
split := strings.Split(strings.Fields(s[(frame*2)+2][1:])[0], "/")
return fmt.Sprintf("%v:", split[len(split)-1]), nil
}
func log(t *testing.T, format string, fatal bool, args ...interface{}) {
s := debug.Stack()
prefix, err := getStackFrame(s, callingFrame)
args = append([]interface{}{prefix}, args...)
if err != nil {
t.Error(err)
return
}
if format == "" {
if fatal {
panic(fmt.Sprint(args...))
} else {
t.Log(args...)
}
} else {
if fatal {
panic(fmt.Sprintf("%v "+format, args...))
} else {
t.Logf("%v "+format, args...)
}
}
}
// Update updates the testing.T that the testing logger logs to. Should be done
// before every test.
func Update(t *testing.T) {
logger.t = t
}
func (g *tLogger) Info(args ...interface{}) {
log(g.t, "", false, args...)
}
func (g *tLogger) Infoln(args ...interface{}) {
log(g.t, "", false, args...)
}
func (g *tLogger) Infof(format string, args ...interface{}) {
log(g.t, format, false, args...)
}
func (g *tLogger) Warning(args ...interface{}) {
log(g.t, "", false, args...)
}
func (g *tLogger) Warningln(args ...interface{}) {
log(g.t, "", false, args...)
}
func (g *tLogger) Warningf(format string, args ...interface{}) {
log(g.t, format, false, args...)
}
func (g *tLogger) Error(args ...interface{}) {
log(g.t, "", false, args...)
}
func (g *tLogger) Errorln(args ...interface{}) {
log(g.t, "", false, args...)
}
func (g *tLogger) Errorf(format string, args ...interface{}) {
log(g.t, format, false, args...)
}
func (g *tLogger) Fatal(args ...interface{}) {
log(g.t, "", true, args...)
}
func (g *tLogger) Fatalln(args ...interface{}) {
log(g.t, "", true, args...)
}
func (g *tLogger) Fatalf(format string, args ...interface{}) {
log(g.t, format, true, args...)
}
func (g *tLogger) V(l int) bool {
return l <= g.v
}
@ -16,51 +16,56 @@
*
*/
package tlogger
package grpctest
import (
"reflect"
"runtime"
"strings"
"testing"
"google.golang.org/grpc/grpclog"
)
func TestInfo(t *testing.T) {
Update(t)
type s struct {
Tester
}
func Test(t *testing.T) {
RunSubTests(t, s{})
}
func (s) TestInfo(t *testing.T) {
grpclog.Info("Info", "message.")
}
func TestInfoln(t *testing.T) {
Update(t)
func (s) TestInfoln(t *testing.T) {
grpclog.Infoln("Info", "message.")
}
func TestInfof(t *testing.T) {
Update(t)
func (s) TestInfof(t *testing.T) {
grpclog.Infof("%v %v.", "Info", "message")
}
func TestWarning(t *testing.T) {
Update(t)
func (s) TestWarning(t *testing.T) {
grpclog.Warning("Warning", "message.")
}
func TestWarningln(t *testing.T) {
Update(t)
func (s) TestWarningln(t *testing.T) {
grpclog.Warningln("Warning", "message.")
}
func TestWarningf(t *testing.T) {
Update(t)
func (s) TestWarningf(t *testing.T) {
grpclog.Warningf("%v %v.", "Warning", "message")
}
func TestSubTests(t *testing.T) {
testFuncs := [6]func(*testing.T){TestInfo, TestInfoln, TestInfof, TestWarning, TestWarningln, TestWarningf}
for _, testFunc := range testFuncs {
splitFuncName := strings.Split(runtime.FuncForPC(reflect.ValueOf(testFunc).Pointer()).Name(), ".")
t.Run(splitFuncName[len(splitFuncName)-1], testFunc)
func (s) TestError(t *testing.T) {
const numErrors = 10
TLogger.ExpectError("Expected error")
TLogger.ExpectError("Expected ln error")
TLogger.ExpectError("Expected formatted error")
TLogger.ExpectErrorN("Expected repeated error", numErrors)
grpclog.Error("Expected", "error")
grpclog.Errorln("Expected", "ln", "error")
grpclog.Errorf("%v %v %v", "Expected", "formatted", "error")
for i := 0; i < numErrors; i++ {
grpclog.Error("Expected repeated error")
}
}
@ -25,9 +25,19 @@ import (
"sync"
"testing"
"time"
"google.golang.org/grpc/internal/grpctest"
)
func TestCircularBufferSerial(t *testing.T) {
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func (s) TestCircularBufferSerial(t *testing.T) {
var size, i uint32
var result []interface{}

@ -68,7 +78,7 @@ func TestCircularBufferSerial(t *testing.T) {
}
}
func TestCircularBufferOverflow(t *testing.T) {
func (s) TestCircularBufferOverflow(t *testing.T) {
var size, i uint32
var result []interface{}

@ -95,7 +105,7 @@ func TestCircularBufferOverflow(t *testing.T) {
}
}
func TestCircularBufferConcurrent(t *testing.T) {
func (s) TestCircularBufferConcurrent(t *testing.T) {
for tn := 0; tn < 2; tn++ {
var size uint32
var result []interface{}
@ -27,10 +27,19 @@ import (
"testing"
"time"
"google.golang.org/grpc/internal/grpctest"
"google.golang.org/grpc/internal/profiling/buffer"
)
func TestProfiling(t *testing.T) {
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func (s) TestProfiling(t *testing.T) {
cb, err := buffer.NewCircularBuffer(128)
if err != nil {
t.Fatalf("error creating circular buffer: %v", err)

@ -84,7 +93,7 @@ func TestProfiling(t *testing.T) {
}
}
func TestProfilingRace(t *testing.T) {
func (s) TestProfilingRace(t *testing.T) {
stat := NewStat("foo")
var wg sync.WaitGroup
@ -24,11 +24,20 @@ import (
"github.com/golang/protobuf/jsonpb"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/grpc/internal/grpctest"
scpb "google.golang.org/grpc/internal/proto/grpc_service_config"
)
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
// TestXdsConfigMarshalToJSON is an example to print json format of xds_config.
func TestXdsConfigMarshalToJSON(t *testing.T) {
func (s) TestXdsConfigMarshalToJSON(t *testing.T) {
c := &scpb.XdsConfig{
ChildPolicy: []*scpb.LoadBalancingConfig{
{Policy: &scpb.LoadBalancingConfig_Grpclb{
@ -22,10 +22,19 @@ import (
"testing"
"time"
"google.golang.org/grpc/internal/grpctest"
"google.golang.org/grpc/internal/testutils"
)
func TestPipeListener(t *testing.T) {
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func (s) TestPipeListener(t *testing.T) {
pl := testutils.NewPipeListener()
recvdBytes := make(chan []byte, 1)
const want = "hello world"

@ -66,7 +75,7 @@ func TestPipeListener(t *testing.T) {
}
}
func TestUnblocking(t *testing.T) {
func (s) TestUnblocking(t *testing.T) {
for _, test := range []struct {
desc string
blockFuncShouldError bool
@ -24,9 +24,18 @@ import (
anypb "github.com/golang/protobuf/ptypes/any"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal/grpctest"
"google.golang.org/grpc/status"
)
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
var statusErr = status.ErrorProto(&spb.Status{
Code: int32(codes.DataLoss),
Message: "error for testing",

@ -36,7 +45,7 @@ var statusErr = status.ErrorProto(&spb.Status{
}},
})
func TestStatusErrEqual(t *testing.T) {
func (s) TestStatusErrEqual(t *testing.T) {
tests := []struct {
name string
err1 error
@ -39,7 +39,7 @@ import (
"google.golang.org/grpc/status"
)
func TestHandlerTransport_NewServerHandlerTransport(t *testing.T) {
func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) {
type testCase struct {
name string
req *http.Request

@ -263,7 +263,7 @@ func newHandleStreamTest(t *testing.T) *handleStreamTest {
}
}
func TestHandlerTransport_HandleStreams(t *testing.T) {
func (s) TestHandlerTransport_HandleStreams(t *testing.T) {
st := newHandleStreamTest(t)
handleStream := func(s *Stream) {
if want := "/service/foo.bar"; s.method != want {

@ -288,12 +288,12 @@ func TestHandlerTransport_HandleStreams(t *testing.T) {
}
// Tests that codes.Unimplemented will close the body, per comment in handler_server.go.
func TestHandlerTransport_HandleStreams_Unimplemented(t *testing.T) {
func (s) TestHandlerTransport_HandleStreams_Unimplemented(t *testing.T) {
handleStreamCloseBodyTest(t, codes.Unimplemented, "thingy is unimplemented")
}
// Tests that codes.InvalidArgument will close the body, per comment in handler_server.go.
func TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) {
func (s) TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) {
handleStreamCloseBodyTest(t, codes.InvalidArgument, "bad arg")
}

@ -320,7 +320,7 @@ func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string)
}
}
func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) {
func (s) TestHandlerTransport_HandleStreams_Timeout(t *testing.T) {
bodyr, bodyw := io.Pipe()
req := &http.Request{
ProtoMajor: 2,

@ -373,7 +373,7 @@ func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) {
// TestHandlerTransport_HandleStreams_MultiWriteStatus ensures that
// concurrent "WriteStatus"s do not panic writing to closed "writes" channel.
func TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) {
func (s) TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) {
testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) {
if want := "/service/foo.bar"; s.method != want {
t.Errorf("stream method = %q; want %q", s.method, want)

@ -394,7 +394,7 @@ func TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) {
// TestHandlerTransport_HandleStreams_WriteStatusWrite ensures that "Write"
// following "WriteStatus" does not panic writing to closed "writes" channel.
func TestHandlerTransport_HandleStreams_WriteStatusWrite(t *testing.T) {
func (s) TestHandlerTransport_HandleStreams_WriteStatusWrite(t *testing.T) {
testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) {
if want := "/service/foo.bar"; s.method != want {
t.Errorf("stream method = %q; want %q", s.method, want)

@ -414,7 +414,7 @@ func testHandlerTransportHandleStreams(t *testing.T, handleStream func(st *handl
)
}
func TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) {
func (s) TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) {
errDetails := []proto.Message{
&epb.RetryInfo{
RetryDelay: &dpb.Duration{Seconds: 60},
@ -25,7 +25,7 @@ import (
"time"
)
func TestTimeoutEncode(t *testing.T) {
func (s) TestTimeoutEncode(t *testing.T) {
for _, test := range []struct {
in string
out string

@ -52,7 +52,7 @@ func TestTimeoutEncode(t *testing.T) {
}
}
func TestTimeoutDecode(t *testing.T) {
func (s) TestTimeoutDecode(t *testing.T) {
for _, test := range []struct {
// input
s string

@ -72,7 +72,7 @@ func TestTimeoutDecode(t *testing.T) {
}
}
func TestContentSubtype(t *testing.T) {
func (s) TestContentSubtype(t *testing.T) {
tests := []struct {
contentType string
want string

@ -95,7 +95,7 @@ func TestContentSubtype(t *testing.T) {
}
}
func TestEncodeGrpcMessage(t *testing.T) {
func (s) TestEncodeGrpcMessage(t *testing.T) {
for _, tt := range []struct {
input string
expected string

@ -131,7 +131,7 @@ func TestEncodeGrpcMessage(t *testing.T) {
}
}
func TestDecodeGrpcMessage(t *testing.T) {
func (s) TestDecodeGrpcMessage(t *testing.T) {
for _, tt := range []struct {
input string
expected string

@ -169,7 +169,7 @@ func TestDecodeGrpcMessage(t *testing.T) {
// Decode an encoded string should get the same thing back, except for invalid
// utf8 chars.
func TestDecodeEncodeGrpcMessage(t *testing.T) {
func (s) TestDecodeEncodeGrpcMessage(t *testing.T) {
testCases := []struct {
orig string
want string

@ -194,7 +194,7 @@ func TestDecodeEncodeGrpcMessage(t *testing.T) {
const binaryValue = string(128)
func TestEncodeMetadataHeader(t *testing.T) {
func (s) TestEncodeMetadataHeader(t *testing.T) {
for _, test := range []struct {
// input
kin string

@ -214,7 +214,7 @@ func TestEncodeMetadataHeader(t *testing.T) {
}
}
func TestDecodeMetadataHeader(t *testing.T) {
func (s) TestDecodeMetadataHeader(t *testing.T) {
for _, test := range []struct {
// input
kin string
@ -37,7 +37,7 @@ import (
// TestMaxConnectionIdle tests that a server will send GoAway to an idle
// client. An idle client is one who doesn't make any RPC calls for a duration
// of MaxConnectionIdle time.
func TestMaxConnectionIdle(t *testing.T) {
func (s) TestMaxConnectionIdle(t *testing.T) {
serverConfig := &ServerConfig{
KeepaliveParams: keepalive.ServerParameters{
MaxConnectionIdle: 2 * time.Second,

@ -74,7 +74,7 @@ func TestMaxConnectionIdle(t *testing.T) {
// TestMaxConenctionIdleBusyClient tests that a server will not send GoAway to
// a busy client.
func TestMaxConnectionIdleBusyClient(t *testing.T) {
func (s) TestMaxConnectionIdleBusyClient(t *testing.T) {
serverConfig := &ServerConfig{
KeepaliveParams: keepalive.ServerParameters{
MaxConnectionIdle: 2 * time.Second,

@ -107,7 +107,7 @@ func TestMaxConnectionIdleBusyClient(t *testing.T) {
// TestMaxConnectionAge tests that a server will send GoAway after a duration
// of MaxConnectionAge.
func TestMaxConnectionAge(t *testing.T) {
func (s) TestMaxConnectionAge(t *testing.T) {
serverConfig := &ServerConfig{
KeepaliveParams: keepalive.ServerParameters{
MaxConnectionAge: 1 * time.Second,

@ -152,7 +152,7 @@ const (
//
// This test creates a regular net.Conn connection to the server and sends the
// clientPreface and the initial Settings frame, and then remains unresponsive.
func TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) {
func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) {
serverConfig := &ServerConfig{
KeepaliveParams: keepalive.ServerParameters{
Time: 1 * time.Second,

@ -211,7 +211,7 @@ func TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) {
// TestKeepaliveServerWithResponsiveClient tests that a server doesn't close
// the connection with a client that responds to keepalive pings.
func TestKeepaliveServerWithResponsiveClient(t *testing.T) {
func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) {
serverConfig := &ServerConfig{
KeepaliveParams: keepalive.ServerParameters{
Time: 1 * time.Second,

@ -239,7 +239,7 @@ func TestKeepaliveServerWithResponsiveClient(t *testing.T) {
// transport once the keepalive logic kicks in. Here, we set the
// `PermitWithoutStream` parameter to true which ensures that the keepalive
// logic is running even without any active streams.
func TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) {
func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) {
connCh := make(chan net.Conn, 1)
client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
Time: 1 * time.Second,

@ -269,7 +269,7 @@ func TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) {
// close the transport. Here, we do not set the `PermitWithoutStream` parameter
// to true which ensures that the keepalive logic is turned off without any
// active streams, and therefore the transport stays open.
func TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) {
func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) {
connCh := make(chan net.Conn, 1)
client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
Time: 1 * time.Second,

@ -296,7 +296,7 @@ func TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) {
// TestKeepaliveClientClosesWithActiveStreams creates a server which does not
// respond to keepalive pings, and makes sure that the client closes the
// transport even when there is an active stream.
func TestKeepaliveClientClosesWithActiveStreams(t *testing.T) {
func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) {
connCh := make(chan net.Conn, 1)
client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
Time: 1 * time.Second,

@ -328,7 +328,7 @@ func TestKeepaliveClientClosesWithActiveStreams(t *testing.T) {
// TestKeepaliveClientStaysHealthyWithResponsiveServer creates a server which
// responds to keepalive pings, and makes sure than a client transport stays
// healthy without any active streams.
func TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) {
func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) {
server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, normal, ConnectOptions{
KeepaliveParams: keepalive.ClientParameters{
Time: 1 * time.Second,

@ -357,7 +357,7 @@ func TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) {
// ping every [Time+Timeout] instead of every [Time] period, and this test
// explicitly makes sure the fix works and the client sends a ping every [Time]
// period.
func TestKeepaliveClientFrequency(t *testing.T) {
func (s) TestKeepaliveClientFrequency(t *testing.T) {
serverConfig := &ServerConfig{
KeepalivePolicy: keepalive.EnforcementPolicy{
MinTime: 1200 * time.Millisecond, // 1.2 seconds

@ -401,7 +401,7 @@ func TestKeepaliveClientFrequency(t *testing.T) {
// server closes a client transport when it sends too many keepalive pings
// (when there are no active streams), based on the configured
// EnforcementPolicy.
func TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) {
func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) {
serverConfig := &ServerConfig{
KeepalivePolicy: keepalive.EnforcementPolicy{
MinTime: 2 * time.Second,

@ -444,7 +444,7 @@ func TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) {
// server closes a client transport when it sends too many keepalive pings
// (even when there is an active stream), based on the configured
// EnforcementPolicy.
func TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) {
func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) {
serverConfig := &ServerConfig{
KeepalivePolicy: keepalive.EnforcementPolicy{
MinTime: 2 * time.Second,

@ -490,7 +490,7 @@ func TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) {
// server does not close a client transport (with no active streams) which
// sends keepalive pings in accordance to the configured keepalive
// EnforcementPolicy.
func TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) {
func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) {
serverConfig := &ServerConfig{
KeepalivePolicy: keepalive.EnforcementPolicy{
MinTime: 100 * time.Millisecond,

@ -524,7 +524,7 @@ func TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) {
// server does not close a client transport (with active streams) which
// sends keepalive pings in accordance to the configured keepalive
// EnforcementPolicy.
func TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) {
func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) {
serverConfig := &ServerConfig{
KeepalivePolicy: keepalive.EnforcementPolicy{
MinTime: 100 * time.Millisecond,

@ -562,7 +562,7 @@ func TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) {
// transport does not have any active streams and `PermitWithoutStream` is set
// to false. This should ensure that the keepalive functionality on the client
// side enters a dormant state.
func TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) {
func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) {
serverConfig := &ServerConfig{
KeepalivePolicy: keepalive.EnforcementPolicy{
MinTime: 2 * time.Second,

@ -592,7 +592,7 @@ func TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) {
// TestTCPUserTimeout tests that the TCP_USER_TIMEOUT socket option is set to
// the keepalive timeout, as detailed in proposal A18.
func TestTCPUserTimeout(t *testing.T) {
func (s) TestTCPUserTimeout(t *testing.T) {
tests := []struct {
time time.Duration
timeout time.Duration
@ -37,11 +37,20 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal/grpctest"
"google.golang.org/grpc/internal/leakcheck"
"google.golang.org/grpc/internal/testutils"
"google.golang.org/grpc/status"
)
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
type server struct {
lis net.Listener
port string

@ -466,7 +475,7 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.C
// TestInflightStreamClosing ensures that closing in-flight stream
// sends status error to concurrent stream reader.
func TestInflightStreamClosing(t *testing.T) {
func (s) TestInflightStreamClosing(t *testing.T) {
serverConfig := &ServerConfig{}
server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
defer cancel()

@ -502,7 +511,7 @@ func TestInflightStreamClosing(t *testing.T) {
}
}
func TestClientSendAndReceive(t *testing.T) {
func (s) TestClientSendAndReceive(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
defer cancel()
callHdr := &CallHdr{

@ -540,7 +549,7 @@ func TestClientSendAndReceive(t *testing.T) {
server.stop()
}
func TestClientErrorNotify(t *testing.T) {
func (s) TestClientErrorNotify(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
defer cancel()
go server.stop()

@ -572,7 +581,7 @@ func performOneRPC(ct ClientTransport) {
}
}
func TestClientMix(t *testing.T) {
func (s) TestClientMix(t *testing.T) {
s, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
defer cancel()
go func(s *server) {

@ -589,7 +598,7 @@ func TestClientMix(t *testing.T) {
}
}
func TestLargeMessage(t *testing.T) {
func (s) TestLargeMessage(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
defer cancel()
callHdr := &CallHdr{

@ -622,7 +631,7 @@ func TestLargeMessage(t *testing.T) {
server.stop()
}
func TestLargeMessageWithDelayRead(t *testing.T) {
func (s) TestLargeMessageWithDelayRead(t *testing.T) {
// Disable dynamic flow control.
sc := &ServerConfig{
InitialWindowSize: defaultWindowSize,

@ -719,7 +728,7 @@ func TestLargeMessageWithDelayRead(t *testing.T) {
}
}
func TestGracefulClose(t *testing.T) {
func (s) TestGracefulClose(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, pingpong)
defer cancel()
defer func() {

@ -782,7 +791,7 @@ func TestGracefulClose(t *testing.T) {
wg.Wait()
}
func TestLargeMessageSuspension(t *testing.T) {
func (s) TestLargeMessageSuspension(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, suspended)
defer cancel()
callHdr := &CallHdr{

@ -817,7 +826,7 @@ func TestLargeMessageSuspension(t *testing.T) {
server.stop()
}
func TestMaxStreams(t *testing.T) {
func (s) TestMaxStreams(t *testing.T) {
serverConfig := &ServerConfig{
MaxStreams: 1,
}

@ -888,7 +897,7 @@ func TestMaxStreams(t *testing.T) {
}
}
func TestServerContextCanceledOnClosedConnection(t *testing.T) {
func (s) TestServerContextCanceledOnClosedConnection(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, suspended)
defer cancel()
callHdr := &CallHdr{

@ -950,7 +959,7 @@ func TestServerContextCanceledOnClosedConnection(t *testing.T) {
server.stop()
}
func TestClientConnDecoupledFromApplicationRead(t *testing.T) {
func (s) TestClientConnDecoupledFromApplicationRead(t *testing.T) {
connectOptions := ConnectOptions{
InitialWindowSize: defaultWindowSize,
InitialConnWindowSize: defaultWindowSize,

@ -1037,7 +1046,7 @@ func TestClientConnDecoupledFromApplicationRead(t *testing.T) {
}
}
func TestServerConnDecoupledFromApplicationRead(t *testing.T) {
func (s) TestServerConnDecoupledFromApplicationRead(t *testing.T) {
serverConfig := &ServerConfig{
InitialWindowSize: defaultWindowSize,
InitialConnWindowSize: defaultWindowSize,

@ -1106,7 +1115,7 @@ func TestServerConnDecoupledFromApplicationRead(t *testing.T) {
}
func TestServerWithMisbehavedClient(t *testing.T) {
func (s) TestServerWithMisbehavedClient(t *testing.T) {
server := setUpServerOnly(t, 0, &ServerConfig{}, suspended)
defer server.stop()
// Create a client that can override server stream quota.

@ -1206,7 +1215,7 @@ func TestServerWithMisbehavedClient(t *testing.T) {
}
}
func TestClientWithMisbehavedServer(t *testing.T) {
func (s) TestClientWithMisbehavedServer(t *testing.T) {
// Create a misbehaving server.
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {

@ -1295,7 +1304,7 @@ func TestClientWithMisbehavedServer(t *testing.T) {
var encodingTestStatus = status.New(codes.Internal, "\n")
func TestEncodingRequiredStatus(t *testing.T) {
func (s) TestEncodingRequiredStatus(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, encodingRequiredStatus)
defer cancel()
callHdr := &CallHdr{

@ -1321,7 +1330,7 @@ func TestEncodingRequiredStatus(t *testing.T) {
server.stop()
}
func TestInvalidHeaderField(t *testing.T) {
func (s) TestInvalidHeaderField(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, invalidHeaderField)
defer cancel()
callHdr := &CallHdr{

@ -1341,7 +1350,7 @@ func TestInvalidHeaderField(t *testing.T) {
server.stop()
}
func TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) {
func (s) TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) {
server, ct, cancel := setUp(t, 0, math.MaxUint32, invalidHeaderField)
defer cancel()
defer server.stop()

@ -1359,7 +1368,7 @@ func TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) {
}
}
func TestIsReservedHeader(t *testing.T) {
func (s) TestIsReservedHeader(t *testing.T) {
tests := []struct {
h string
want bool

@ -1384,7 +1393,7 @@ func TestIsReservedHeader(t *testing.T) {
}
}
func TestContextErr(t *testing.T) {
func (s) TestContextErr(t *testing.T) {
for _, test := range []struct {
// input
errIn error

@ -1408,7 +1417,7 @@ type windowSizeConfig struct {
clientConn int32
}
func TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) {
func (s) TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) {
wc := windowSizeConfig{
serverStream: 10 * 1024 * 1024,
serverConn: 12 * 1024 * 1024,

@ -1418,7 +1427,7 @@ func TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) {
testFlowControlAccountCheck(t, 1024*1024, wc)
}
func TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) {
func (s) TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) {
wc := windowSizeConfig{
serverStream: defaultWindowSize,
// Note this is smaller than initialConnWindowSize which is the current default.

@ -1429,11 +1438,11 @@ func TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) {
testFlowControlAccountCheck(t, 1024*1024, wc)
}
func TestAccountCheckDynamicWindowSmallMessage(t *testing.T) {
func (s) TestAccountCheckDynamicWindowSmallMessage(t *testing.T) {
testFlowControlAccountCheck(t, 1024, windowSizeConfig{})
}
func TestAccountCheckDynamicWindowLargeMessage(t *testing.T) {
func (s) TestAccountCheckDynamicWindowLargeMessage(t *testing.T) {
testFlowControlAccountCheck(t, 1024*1024, windowSizeConfig{})
}

@ -1583,7 +1592,7 @@ func waitWhileTrue(t *testing.T, condition func() (bool, error)) {
// If any error occurs on a call to Stream.Read, future calls
// should continue to return that same error.
func TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) {
func (s) TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) {
testRecvBuffer := newRecvBuffer()
s := &Stream{
ctx: context.Background(),

@ -1629,19 +1638,19 @@ func TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) {
}
}
func TestPingPong1B(t *testing.T) {
func (s) TestPingPong1B(t *testing.T) {
runPingPongTest(t, 1)
}
func TestPingPong1KB(t *testing.T) {
func (s) TestPingPong1KB(t *testing.T) {
runPingPongTest(t, 1024)
}
func TestPingPong64KB(t *testing.T) {
func (s) TestPingPong64KB(t *testing.T) {
runPingPongTest(t, 65536)
}
func TestPingPong1MB(t *testing.T) {
func (s) TestPingPong1MB(t *testing.T) {
runPingPongTest(t, 1048576)
}

@ -1722,7 +1731,7 @@ func (t *tableSizeLimit) getIndex(i int) uint32 {
return t.limits[i]
}
func TestHeaderTblSize(t *testing.T) {
func (s) TestHeaderTblSize(t *testing.T) {
limits := &tableSizeLimit{}
updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
e.SetMaxDynamicTableSizeLimit(v)
@ -24,8 +24,17 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/internal/grpctest"
)
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
const iterCount = 10000
func equalApproximate(a, b float64) error {

@ -95,11 +104,11 @@ func testWRRNext(t *testing.T, newWRR func() WRR) {
}
}
func TestRandomWRRNext(t *testing.T) {
func (s) TestRandomWRRNext(t *testing.T) {
testWRRNext(t, NewRandom)
}
func TestEdfWrrNext(t *testing.T) {
func (s) TestEdfWrrNext(t *testing.T) {
testWRRNext(t, NewEDF)
}
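For context on what testWRRNext exercises: the WRR implementations hand items back in proportion to their weights. A rough sketch of driving one directly is shown below; it assumes the package's Add(item, weight)/Next() interface that the tests use, and the names and counts are illustrative only.

// Illustrative sketch, not from the commit: items added with weights 1 and 3
// should come back from Next roughly in a 1:3 ratio over many picks.
func pickCounts() map[interface{}]int {
	w := NewEDF() // NewRandom() behaves the same in aggregate
	w.Add("a", 1)
	w.Add("b", 3)
	counts := make(map[interface{}]int)
	for i := 0; i < 4000; i++ {
		counts[w.Next()]++
	}
	return counts
}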
@ -23,9 +23,19 @@ import (
"reflect"
"strconv"
"testing"
"google.golang.org/grpc/internal/grpctest"
)
func TestPairsMD(t *testing.T) {
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func (s) TestPairsMD(t *testing.T) {
for _, test := range []struct {
// input
kv []string

@ -42,7 +52,7 @@ func TestPairsMD(t *testing.T) {
}
}
func TestCopy(t *testing.T) {
func (s) TestCopy(t *testing.T) {
const key, val = "key", "val"
orig := Pairs(key, val)
cpy := orig.Copy()

@ -55,7 +65,7 @@ func TestCopy(t *testing.T) {
}
}
func TestJoin(t *testing.T) {
func (s) TestJoin(t *testing.T) {
for _, test := range []struct {
mds []MD
want MD

@ -72,7 +82,7 @@ func TestJoin(t *testing.T) {
}
}
func TestGet(t *testing.T) {
func (s) TestGet(t *testing.T) {
for _, test := range []struct {
md MD
key string

@ -89,7 +99,7 @@ func TestGet(t *testing.T) {
}
}
func TestSet(t *testing.T) {
func (s) TestSet(t *testing.T) {
for _, test := range []struct {
md MD
setKey string

@ -122,7 +132,7 @@ func TestSet(t *testing.T) {
}
}
func TestAppend(t *testing.T) {
func (s) TestAppend(t *testing.T) {
for _, test := range []struct {
md MD
appendKey string

@ -156,7 +166,7 @@ func TestAppend(t *testing.T) {
}
}
func TestAppendToOutgoingContext(t *testing.T) {
func (s) TestAppendToOutgoingContext(t *testing.T) {
// Pre-existing metadata
ctx := NewOutgoingContext(context.Background(), Pairs("k1", "v1", "k2", "v2"))
ctx = AppendToOutgoingContext(ctx, "k1", "v3")

@ -182,7 +192,7 @@ func TestAppendToOutgoingContext(t *testing.T) {
}
}
func TestAppendToOutgoingContext_Repeated(t *testing.T) {
func (s) TestAppendToOutgoingContext_Repeated(t *testing.T) {
ctx := context.Background()
for i := 0; i < 100; i = i + 2 {

@ -200,7 +210,7 @@ func TestAppendToOutgoingContext_Repeated(t *testing.T) {
}
}
func TestAppendToOutgoingContext_FromKVSlice(t *testing.T) {
func (s) TestAppendToOutgoingContext_FromKVSlice(t *testing.T) {
const k, v = "a", "b"
kv := []string{k, v}
ctx := AppendToOutgoingContext(context.Background(), kv...)
@ -26,8 +26,18 @@ import (
"sync"
"testing"
"time"
"google.golang.org/grpc/internal/grpctest"
)
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
func newUpdateWithMD(op Operation, addr, lb string) *Update {
return &Update{
Op: op,

@ -44,7 +54,7 @@ func toMap(u []*Update) map[string]*Update {
return m
}
func TestCompileUpdate(t *testing.T) {
func (s) TestCompileUpdate(t *testing.T) {
tests := []struct {
oldAddrs []string
newAddrs []string

@ -109,7 +119,7 @@ func TestCompileUpdate(t *testing.T) {
}
}
func TestResolveFunc(t *testing.T) {
func (s) TestResolveFunc(t *testing.T) {
tests := []struct {
addr string
want error

@ -269,14 +279,14 @@ func replaceNetFunc() func() {
}
}
func TestResolve(t *testing.T) {
func (s) TestResolve(t *testing.T) {
defer replaceNetFunc()()
testResolver(t, time.Millisecond*5, time.Millisecond*10)
}
const colonDefaultPort = ":" + defaultPort
func TestIPWatcher(t *testing.T) {
func (s) TestIPWatcher(t *testing.T) {
tests := []struct {
target string
want []*Update
@ -34,6 +34,7 @@ import (
"github.com/golang/protobuf/proto"
dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"google.golang.org/grpc"
"google.golang.org/grpc/internal/grpctest"
rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
pb "google.golang.org/grpc/reflection/grpc_testing"
pbv3 "google.golang.org/grpc/reflection/grpc_testingv3"

@ -55,6 +56,14 @@ var (
fdProto2Ext2Byte []byte
)
type x struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, x{})
}
func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) {
enc := proto.FileDescriptor(filename)
if enc == nil {

@ -79,7 +88,7 @@ func init() {
fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("proto2_ext2.proto")
}
func TestFileDescForType(t *testing.T) {
func (x) TestFileDescForType(t *testing.T) {
for _, test := range []struct {
st reflect.Type
wantFd *dpb.FileDescriptorProto

@ -94,7 +103,7 @@ func TestFileDescForType(t *testing.T) {
}
}
func TestTypeForName(t *testing.T) {
func (x) TestTypeForName(t *testing.T) {
for _, test := range []struct {
name string
want reflect.Type

@ -108,7 +117,7 @@ func TestTypeForName(t *testing.T) {
}
}
func TestTypeForNameNotFound(t *testing.T) {
func (x) TestTypeForNameNotFound(t *testing.T) {
for _, test := range []string{
"grpc.testing.not_exiting",
} {

@ -119,7 +128,7 @@ func TestTypeForNameNotFound(t *testing.T) {
}
}
func TestFileDescContainingExtension(t *testing.T) {
func (x) TestFileDescContainingExtension(t *testing.T) {
for _, test := range []struct {
st reflect.Type
extNum int32

@ -145,7 +154,7 @@ func (s intArray) Len() int { return len(s) }
func (s intArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s intArray) Less(i, j int) bool { return s[i] < s[j] }
func TestAllExtensionNumbersForType(t *testing.T) {
func (x) TestAllExtensionNumbersForType(t *testing.T) {
for _, test := range []struct {
st reflect.Type
want []int32

@ -184,7 +193,7 @@ func (s *serverV3) StreamingSearch(stream pbv3.SearchServiceV3_StreamingSearchSe
return nil
}
func TestReflectionEnd2end(t *testing.T) {
func (x) TestReflectionEnd2end(t *testing.T) {
// Start server.
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
@ -30,12 +30,21 @@ import (
|
|||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/stats"
|
||||
testpb "google.golang.org/grpc/stats/grpc_testing"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func init() {
|
||||
grpc.EnableTracing = false
|
||||
}
@@ -882,7 +891,7 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f
	checkServerStats(t, h.gotRPC, expect, checkFuncs)
}

-func TestServerStatsUnaryRPC(t *testing.T) {
+func (s) TestServerStatsUnaryRPC(t *testing.T) {
	testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
		checkBegin,
@@ -894,7 +903,7 @@ func TestServerStatsUnaryRPC(t *testing.T) {
	})
}

-func TestServerStatsUnaryRPCError(t *testing.T) {
+func (s) TestServerStatsUnaryRPCError(t *testing.T) {
	testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
		checkBegin,
@@ -905,7 +914,7 @@ func TestServerStatsUnaryRPCError(t *testing.T) {
	})
}

-func TestServerStatsClientStreamRPC(t *testing.T) {
+func (s) TestServerStatsClientStreamRPC(t *testing.T) {
	count := 5
	checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
@@ -926,7 +935,7 @@ func TestServerStatsClientStreamRPC(t *testing.T) {
	testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: clientStreamRPC}, checkFuncs)
}

-func TestServerStatsClientStreamRPCError(t *testing.T) {
+func (s) TestServerStatsClientStreamRPCError(t *testing.T) {
	count := 1
	testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: clientStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
@@ -938,7 +947,7 @@ func TestServerStatsClientStreamRPCError(t *testing.T) {
	})
}

-func TestServerStatsServerStreamRPC(t *testing.T) {
+func (s) TestServerStatsServerStreamRPC(t *testing.T) {
	count := 5
	checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
@@ -959,7 +968,7 @@ func TestServerStatsServerStreamRPC(t *testing.T) {
	testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: serverStreamRPC}, checkFuncs)
}

-func TestServerStatsServerStreamRPCError(t *testing.T) {
+func (s) TestServerStatsServerStreamRPCError(t *testing.T) {
	count := 5
	testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: serverStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
@@ -971,7 +980,7 @@ func TestServerStatsServerStreamRPCError(t *testing.T) {
	})
}

-func TestServerStatsFullDuplexRPC(t *testing.T) {
+func (s) TestServerStatsFullDuplexRPC(t *testing.T) {
	count := 5
	checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
@@ -992,7 +1001,7 @@ func TestServerStatsFullDuplexRPC(t *testing.T) {
	testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}, checkFuncs)
}

-func TestServerStatsFullDuplexRPCError(t *testing.T) {
+func (s) TestServerStatsFullDuplexRPCError(t *testing.T) {
	count := 5
	testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){
		checkInHeader,
@@ -1177,7 +1186,7 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map
	checkClientStats(t, h.gotRPC, expect, checkFuncs)
}

-func TestClientStatsUnaryRPC(t *testing.T) {
+func (s) TestClientStatsUnaryRPC(t *testing.T) {
	testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{
		begin:     {checkBegin, 1},
		outHeader: {checkOutHeader, 1},
@@ -1189,7 +1198,7 @@ func TestClientStatsUnaryRPC(t *testing.T) {
	})
}

-func TestClientStatsUnaryRPCError(t *testing.T) {
+func (s) TestClientStatsUnaryRPCError(t *testing.T) {
	testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{
		begin:     {checkBegin, 1},
		outHeader: {checkOutHeader, 1},
@@ -1200,7 +1209,7 @@ func TestClientStatsUnaryRPCError(t *testing.T) {
	})
}

-func TestClientStatsClientStreamRPC(t *testing.T) {
+func (s) TestClientStatsClientStreamRPC(t *testing.T) {
	count := 5
	testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: clientStreamRPC}, map[int]*checkFuncWithCount{
		begin: {checkBegin, 1},
@@ -1213,7 +1222,7 @@ func TestClientStatsClientStreamRPC(t *testing.T) {
	})
}

-func TestClientStatsClientStreamRPCError(t *testing.T) {
+func (s) TestClientStatsClientStreamRPCError(t *testing.T) {
	count := 1
	testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: clientStreamRPC}, map[int]*checkFuncWithCount{
		begin: {checkBegin, 1},
@@ -1225,7 +1234,7 @@ func TestClientStatsClientStreamRPCError(t *testing.T) {
	})
}

-func TestClientStatsServerStreamRPC(t *testing.T) {
+func (s) TestClientStatsServerStreamRPC(t *testing.T) {
	count := 5
	testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{
		begin: {checkBegin, 1},
@@ -1238,7 +1247,7 @@ func TestClientStatsServerStreamRPC(t *testing.T) {
	})
}

-func TestClientStatsServerStreamRPCError(t *testing.T) {
+func (s) TestClientStatsServerStreamRPCError(t *testing.T) {
	count := 5
	testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{
		begin: {checkBegin, 1},
@@ -1250,7 +1259,7 @@ func TestClientStatsServerStreamRPCError(t *testing.T) {
	})
}

-func TestClientStatsFullDuplexRPC(t *testing.T) {
+func (s) TestClientStatsFullDuplexRPC(t *testing.T) {
	count := 5
	testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{
		begin: {checkBegin, 1},
@@ -1263,7 +1272,7 @@ func TestClientStatsFullDuplexRPC(t *testing.T) {
	})
}

-func TestClientStatsFullDuplexRPCError(t *testing.T) {
+func (s) TestClientStatsFullDuplexRPCError(t *testing.T) {
	count := 5
	testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{
		begin: {checkBegin, 1},
@@ -1275,7 +1284,7 @@ func TestClientStatsFullDuplexRPCError(t *testing.T) {
	})
}

-func TestTags(t *testing.T) {
+func (s) TestTags(t *testing.T) {
	b := []byte{5, 2, 4, 3, 1}
	ctx := stats.SetTags(context.Background(), b)
	if tg := stats.OutgoingTags(ctx); !reflect.DeepEqual(tg, b) {
@@ -1294,7 +1303,7 @@ func TestTags(t *testing.T) {
	}
}

-func TestTrace(t *testing.T) {
+func (s) TestTrace(t *testing.T) {
	b := []byte{5, 2, 4, 3, 1}
	ctx := stats.SetTrace(context.Background(), b)
	if tr := stats.OutgoingTrace(ctx); !reflect.DeepEqual(tr, b) {
@@ -24,10 +24,19 @@ import (

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/test/grpc_testing"
)

+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
func errWithDetails(t *testing.T, s *status.Status, details ...proto.Message) error {
	t.Helper()
	res, err := s.WithDetails(details...)
@@ -37,7 +46,7 @@ func errWithDetails(t *testing.T, s *status.Status, details ...proto.Message) er
	return res.Err()
}

-func TestErrorIs(t *testing.T) {
+func (s) TestErrorIs(t *testing.T) {
	// Test errors.
	testErr := status.Error(codes.Internal, "internal server error")
	testErrWithDetails := errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{})
@ -33,8 +33,17 @@ import (
|
|||
epb "google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
// errEqual is essentially a copy of testutils.StatusErrEqual(), to avoid a
|
||||
// cyclic dependency.
|
||||
func errEqual(err1, err2 error) bool {
|
||||
|
@ -49,7 +58,7 @@ func errEqual(err1, err2 error) bool {
|
|||
return proto.Equal(status1.Proto(), status2.Proto())
|
||||
}
|
||||
|
||||
func TestErrorsWithSameParameters(t *testing.T) {
|
||||
func (s) TestErrorsWithSameParameters(t *testing.T) {
|
||||
const description = "some description"
|
||||
e1 := Errorf(codes.AlreadyExists, description)
|
||||
e2 := Errorf(codes.AlreadyExists, description)
|
||||
|
@ -58,7 +67,7 @@ func TestErrorsWithSameParameters(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFromToProto(t *testing.T) {
|
||||
func (s) TestFromToProto(t *testing.T) {
|
||||
s := &spb.Status{
|
||||
Code: int32(codes.Internal),
|
||||
Message: "test test test",
|
||||
|
@ -71,7 +80,7 @@ func TestFromToProto(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFromNilProto(t *testing.T) {
|
||||
func (s) TestFromNilProto(t *testing.T) {
|
||||
tests := []*Status{nil, FromProto(nil)}
|
||||
for _, s := range tests {
|
||||
if c := s.Code(); c != codes.OK {
|
||||
|
@ -89,7 +98,7 @@ func TestFromNilProto(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestError(t *testing.T) {
|
||||
func (s) TestError(t *testing.T) {
|
||||
err := Error(codes.Internal, "test description")
|
||||
if got, want := err.Error(), "rpc error: code = Internal desc = test description"; got != want {
|
||||
t.Fatalf("err.Error() = %q; want %q", got, want)
|
||||
|
@ -103,21 +112,21 @@ func TestError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestErrorOK(t *testing.T) {
|
||||
func (s) TestErrorOK(t *testing.T) {
|
||||
err := Error(codes.OK, "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("Error(codes.OK, _) = %p; want nil", err.(*statusError))
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorProtoOK(t *testing.T) {
|
||||
func (s) TestErrorProtoOK(t *testing.T) {
|
||||
s := &spb.Status{Code: int32(codes.OK)}
|
||||
if got := ErrorProto(s); got != nil {
|
||||
t.Fatalf("ErrorProto(%v) = %v; want nil", s, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromError(t *testing.T) {
|
||||
func (s) TestFromError(t *testing.T) {
|
||||
code, message := codes.Internal, "test description"
|
||||
err := Error(code, message)
|
||||
s, ok := FromError(err)
|
||||
|
@ -126,7 +135,7 @@ func TestFromError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFromErrorOK(t *testing.T) {
|
||||
func (s) TestFromErrorOK(t *testing.T) {
|
||||
code, message := codes.OK, ""
|
||||
s, ok := FromError(nil)
|
||||
if !ok || s.Code() != code || s.Message() != message || s.Err() != nil {
|
||||
|
@ -154,7 +163,7 @@ func (c customError) GRPCStatus() *Status {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFromErrorImplementsInterface(t *testing.T) {
|
||||
func (s) TestFromErrorImplementsInterface(t *testing.T) {
|
||||
code, message := codes.Internal, "test description"
|
||||
details := []*apb.Any{{
|
||||
TypeUrl: "testUrl",
|
||||
|
@ -175,7 +184,7 @@ func TestFromErrorImplementsInterface(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFromErrorUnknownError(t *testing.T) {
|
||||
func (s) TestFromErrorUnknownError(t *testing.T) {
|
||||
code, message := codes.Unknown, "unknown error"
|
||||
err := errors.New("unknown error")
|
||||
s, ok := FromError(err)
|
||||
|
@ -184,7 +193,7 @@ func TestFromErrorUnknownError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConvertKnownError(t *testing.T) {
|
||||
func (s) TestConvertKnownError(t *testing.T) {
|
||||
code, message := codes.Internal, "test description"
|
||||
err := Error(code, message)
|
||||
s := Convert(err)
|
||||
|
@ -193,7 +202,7 @@ func TestConvertKnownError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConvertUnknownError(t *testing.T) {
|
||||
func (s) TestConvertUnknownError(t *testing.T) {
|
||||
code, message := codes.Unknown, "unknown error"
|
||||
err := errors.New("unknown error")
|
||||
s := Convert(err)
|
||||
|
@ -202,7 +211,7 @@ func TestConvertUnknownError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestStatus_ErrorDetails(t *testing.T) {
|
||||
func (s) TestStatus_ErrorDetails(t *testing.T) {
|
||||
tests := []struct {
|
||||
code codes.Code
|
||||
details []proto.Message
|
||||
|
@ -261,7 +270,7 @@ func TestStatus_ErrorDetails(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestStatus_WithDetails_Fail(t *testing.T) {
|
||||
func (s) TestStatus_WithDetails_Fail(t *testing.T) {
|
||||
tests := []*Status{
|
||||
nil,
|
||||
FromProto(nil),
|
||||
|
@ -274,7 +283,7 @@ func TestStatus_WithDetails_Fail(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestStatus_ErrorDetails_Fail(t *testing.T) {
|
||||
func (s) TestStatus_ErrorDetails_Fail(t *testing.T) {
|
||||
tests := []struct {
|
||||
s *Status
|
||||
i []interface{}
|
||||
|
@ -347,7 +356,7 @@ func mustMarshalAny(msg proto.Message) *apb.Any {
|
|||
return any
|
||||
}
|
||||
|
||||
func TestFromContextError(t *testing.T) {
|
||||
func (s) TestFromContextError(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in error
|
||||
want *Status
|
||||
|
|
|
@ -25,8 +25,18 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/internal/grpctest"
|
||||
)
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
grpctest.RunSubTests(t, s{})
|
||||
}
|
||||
|
||||
func testRW(r io.Reader, w io.Writer) error {
|
||||
for i := 0; i < 20; i++ {
|
||||
d := make([]byte, i)
|
||||
|
@ -64,14 +74,14 @@ func testRW(r io.Reader, w io.Writer) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func TestPipe(t *testing.T) {
|
||||
func (s) TestPipe(t *testing.T) {
|
||||
p := newPipe(10)
|
||||
if err := testRW(p, p); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeClose(t *testing.T) {
|
||||
func (s) TestPipeClose(t *testing.T) {
|
||||
p := newPipe(10)
|
||||
p.Close()
|
||||
if _, err := p.Write(nil); err != io.ErrClosedPipe {
|
||||
|
@ -82,7 +92,7 @@ func TestPipeClose(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConn(t *testing.T) {
|
||||
func (s) TestConn(t *testing.T) {
|
||||
p1, p2 := newPipe(10), newPipe(10)
|
||||
c1, c2 := &conn{p1, p2}, &conn{p2, p1}
|
||||
|
||||
|
@ -94,7 +104,7 @@ func TestConn(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConnCloseWithData(t *testing.T) {
|
||||
func (s) TestConnCloseWithData(t *testing.T) {
|
||||
lis := Listen(7)
|
||||
errChan := make(chan error, 1)
|
||||
var lisConn net.Conn
|
||||
|
@ -144,7 +154,7 @@ func TestConnCloseWithData(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestListener(t *testing.T) {
|
||||
func (s) TestListener(t *testing.T) {
|
||||
l := Listen(7)
|
||||
var s net.Conn
|
||||
var serr error
|
||||
|
@ -166,7 +176,7 @@ func TestListener(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCloseWhileDialing(t *testing.T) {
|
||||
func (s) TestCloseWhileDialing(t *testing.T) {
|
||||
l := Listen(7)
|
||||
var c net.Conn
|
||||
var err error
|
||||
|
@ -182,7 +192,7 @@ func TestCloseWhileDialing(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCloseWhileAccepting(t *testing.T) {
|
||||
func (s) TestCloseWhileAccepting(t *testing.T) {
|
||||
l := Listen(7)
|
||||
var c net.Conn
|
||||
var err error
|
||||
|
@ -198,7 +208,7 @@ func TestCloseWhileAccepting(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDeadline(t *testing.T) {
|
||||
func (s) TestDeadline(t *testing.T) {
|
||||
sig := make(chan error, 2)
|
||||
blockingWrite := func(conn net.Conn) {
|
||||
_, err := conn.Write([]byte("0123456789"))
|
||||
|
|
|
@@ -62,8 +62,6 @@ import (
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/internal/grpctest"
-	"google.golang.org/grpc/internal/grpctest/tlogger"
-	"google.golang.org/grpc/internal/leakcheck"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/transport"
	"google.golang.org/grpc/metadata"
@@ -84,31 +82,8 @@ func init() {
	channelz.TurnOn()
}

-type s struct{}
-
-var lcFailed uint32
-
-type errorer struct {
-	t *testing.T
-}
-
-func (e errorer) Errorf(format string, args ...interface{}) {
-	atomic.StoreUint32(&lcFailed, 1)
-	e.t.Errorf(format, args...)
-}
-
-func (s) Setup(t *testing.T) {
-	tlogger.Update(t)
-}
-
-func (s) Teardown(t *testing.T) {
-	if atomic.LoadUint32(&lcFailed) == 1 {
-		return
-	}
-	leakcheck.Check(errorer{t: t})
-	if atomic.LoadUint32(&lcFailed) == 1 {
-		t.Log("Leak check disabled for future tests")
-	}
-}
+type s struct {
+	grpctest.Tester
+}

func Test(t *testing.T) {
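The errorer/lcFailed/Teardown plumbing deleted above does not disappear; it is what the embedded grpctest.Tester is now expected to provide. A hand-written approximation of an equivalent teardown hook, for orientation only (names mirror the deleted code; the real grpctest implementation may differ):

package sketch

import (
	"sync/atomic"
	"testing"

	"google.golang.org/grpc/internal/leakcheck"
)

// errorer records that the leak check reported something before forwarding
// the failure to the real *testing.T, just like the deleted type did.
type errorer struct {
	t      *testing.T
	failed *uint32
}

func (e errorer) Errorf(format string, args ...interface{}) {
	atomic.StoreUint32(e.failed, 1)
	e.t.Errorf(format, args...)
}

// leakTester sketches the Teardown behavior assumed to live in grpctest.Tester.
type leakTester struct {
	lcFailed uint32 // latched to 1 once the leak check has ever failed
}

func (l *leakTester) Teardown(t *testing.T) {
	if atomic.LoadUint32(&l.lcFailed) == 1 {
		return // once the check has failed, skip it for later tests
	}
	leakcheck.Check(errorer{t: t, failed: &l.lcFailed})
	if atomic.LoadUint32(&l.lcFailed) == 1 {
		t.Log("Leak check disabled for future tests")
	}
}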
@@ -6556,7 +6531,6 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) {

// Service handler returns status with invalid utf8 message.
func (s) TestStatusInvalidUTF8Message(t *testing.T) {
-
	var (
		origMsg = string([]byte{0xff, 0xfe, 0xfd})
		wantMsg = "���"
@@ -6584,6 +6558,7 @@ func (s) TestStatusInvalidUTF8Message(t *testing.T) {
// will fail to marshal the status because of the invalid utf8 message. Details
// will be dropped when sending.
func (s) TestStatusInvalidUTF8Details(t *testing.T) {
+	grpctest.TLogger.ExpectError("transport: failed to marshal rpc status")

	var (
		origMsg = string([]byte{0xff, 0xfe, 0xfd})
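The ExpectError call added above (and the one in the health-check hunk below) is the heart of the tlogger change: a test that deliberately triggers a logged gRPC error registers the expectation first, otherwise the logged error line itself would fail the test. A minimal usage sketch (the matched string is copied from the hunk above; whether matching is by substring or regular expression is not specified here):

func (s) TestSomethingThatLogsAnError(t *testing.T) {
	// Tell the shared test logger this error line is anticipated.
	grpctest.TLogger.ExpectError("transport: failed to marshal rpc status")

	// ... run the RPC that makes gRPC log the line above ...
}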
@@ -35,6 +35,7 @@ import (
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
	"google.golang.org/grpc/status"
@@ -247,6 +248,7 @@ func (s) TestHealthCheckWatchStateChange(t *testing.T) {

// If Watch returns Unimplemented, then the ClientConn should go into READY state.
func (s) TestHealthCheckHealthServerNotRegistered(t *testing.T) {
+	grpctest.TLogger.ExpectError("Subchannel health check is unimplemented at server side, thus health check is disabled")
	s := grpc.NewServer()
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
@@ -28,6 +28,7 @@ import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/serviceconfig"
	xdsinternal "google.golang.org/grpc/xds/internal"
@@ -43,6 +44,14 @@ const (
	defaultTestTimeout = 2 * time.Second
)

+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
type testClientConn struct {
	balancer.ClientConn
}
@ -245,7 +254,7 @@ func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBal
|
|||
// TestUpdateClientConnState invokes the UpdateClientConnState method on the
|
||||
// cdsBalancer with different inputs and verifies that the CDS watch API on the
|
||||
// provided xdsClient is invoked appropriately.
|
||||
func TestUpdateClientConnState(t *testing.T) {
|
||||
func (s) TestUpdateClientConnState(t *testing.T) {
|
||||
xdsC := fakeclient.NewClient()
|
||||
|
||||
tests := []struct {
|
||||
|
@ -319,7 +328,7 @@ func TestUpdateClientConnState(t *testing.T) {
|
|||
|
||||
// TestUpdateClientConnStateAfterClose invokes the UpdateClientConnState method
|
||||
// on the cdsBalancer after close and verifies that it returns an error.
|
||||
func TestUpdateClientConnStateAfterClose(t *testing.T) {
|
||||
func (s) TestUpdateClientConnStateAfterClose(t *testing.T) {
|
||||
cdsB, _, cancel := setup()
|
||||
defer cancel()
|
||||
cdsB.Close()
|
||||
|
@ -332,7 +341,7 @@ func TestUpdateClientConnStateAfterClose(t *testing.T) {
|
|||
// TestUpdateClientConnStateWithSameState verifies that a ClientConnState
|
||||
// update with the same cluster and xdsClient does not cause the cdsBalancer to
|
||||
// create a new watch.
|
||||
func TestUpdateClientConnStateWithSameState(t *testing.T) {
|
||||
func (s) TestUpdateClientConnStateWithSameState(t *testing.T) {
|
||||
xdsC, cdsB, _, cancel := setupWithWatch(t)
|
||||
defer func() {
|
||||
cancel()
|
||||
|
@ -350,7 +359,7 @@ func TestUpdateClientConnStateWithSameState(t *testing.T) {
|
|||
// TestHandleClusterUpdate invokes the registered CDS watch callback with
|
||||
// different updates and verifies that the expect ClientConnState is propagated
|
||||
// to the edsBalancer.
|
||||
func TestHandleClusterUpdate(t *testing.T) {
|
||||
func (s) TestHandleClusterUpdate(t *testing.T) {
|
||||
xdsC, cdsB, edsB, cancel := setupWithWatch(t)
|
||||
defer func() {
|
||||
cancel()
|
||||
|
@ -391,7 +400,7 @@ func TestHandleClusterUpdate(t *testing.T) {
|
|||
// TestResolverError verifies that an existing watch is cancelled when a
|
||||
// resolver error is received by the cdsBalancer, and also that the same error
|
||||
// is propagated to the edsBalancer.
|
||||
func TestResolverError(t *testing.T) {
|
||||
func (s) TestResolverError(t *testing.T) {
|
||||
xdsC, cdsB, edsB, cancel := setupWithWatch(t)
|
||||
defer func() {
|
||||
cancel()
|
||||
|
@ -416,7 +425,7 @@ func TestResolverError(t *testing.T) {
|
|||
|
||||
// TestUpdateSubConnState pushes a SubConn update to the cdsBalancer and
|
||||
// verifies that the update is propagated to the edsBalancer.
|
||||
func TestUpdateSubConnState(t *testing.T) {
|
||||
func (s) TestUpdateSubConnState(t *testing.T) {
|
||||
xdsC, cdsB, edsB, cancel := setupWithWatch(t)
|
||||
defer func() {
|
||||
cancel()
|
||||
|
@ -439,7 +448,7 @@ func TestUpdateSubConnState(t *testing.T) {
|
|||
|
||||
// TestClose calls Close() on the cdsBalancer, and verifies that the underlying
|
||||
// edsBalancer is also closed.
|
||||
func TestClose(t *testing.T) {
|
||||
func (s) TestClose(t *testing.T) {
|
||||
xdsC, cdsB, edsB, cancel := setupWithWatch(t)
|
||||
defer cancel()
|
||||
|
||||
|
@ -460,7 +469,7 @@ func TestClose(t *testing.T) {
|
|||
|
||||
// TestParseConfig exercises the config parsing functionality in the cds
|
||||
// balancer builder.
|
||||
func TestParseConfig(t *testing.T) {
|
||||
func (s) TestParseConfig(t *testing.T) {
|
||||
bb := cdsBB{}
|
||||
if gotName := bb.Name(); gotName != cdsName {
|
||||
t.Fatalf("cdsBB.Name() = %v, want %v", gotName, cdsName)
|
||||
|
|
|
@ -56,7 +56,7 @@ func subConnFromPicker(p balancer.V2Picker) func() balancer.SubConn {
|
|||
}
|
||||
|
||||
// 1 balancer, 1 backend -> 2 backends -> 1 backend.
|
||||
func TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) {
|
||||
func (s) TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
bg.start()
|
||||
|
@ -113,7 +113,7 @@ func TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) {
|
|||
}
|
||||
|
||||
// 2 balancers, each with 1 backend.
|
||||
func TestBalancerGroup_TwoRR_OneBackend(t *testing.T) {
|
||||
func (s) TestBalancerGroup_TwoRR_OneBackend(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
bg.start()
|
||||
|
@ -143,7 +143,7 @@ func TestBalancerGroup_TwoRR_OneBackend(t *testing.T) {
|
|||
}
|
||||
|
||||
// 2 balancers, each with more than 1 backends.
|
||||
func TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) {
|
||||
func (s) TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
bg.start()
|
||||
|
@ -228,7 +228,7 @@ func TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) {
|
|||
}
|
||||
|
||||
// 2 balancers with different weights.
|
||||
func TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) {
|
||||
func (s) TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
bg.start()
|
||||
|
@ -264,7 +264,7 @@ func TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) {
|
|||
}
|
||||
|
||||
// totally 3 balancers, add/remove balancer.
|
||||
func TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) {
|
||||
func (s) TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
bg.start()
|
||||
|
@ -326,7 +326,7 @@ func TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) {
|
|||
}
|
||||
|
||||
// 2 balancers, change balancer weight.
|
||||
func TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) {
|
||||
func (s) TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
bg.start()
|
||||
|
@ -370,7 +370,7 @@ func TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBalancerGroup_LoadReport(t *testing.T) {
|
||||
func (s) TestBalancerGroup_LoadReport(t *testing.T) {
|
||||
testLoadStore := newTestLoadStore()
|
||||
|
||||
cc := newTestClientConn(t)
|
||||
|
@ -454,7 +454,7 @@ func TestBalancerGroup_LoadReport(t *testing.T) {
|
|||
// - b2, weight 3, backends [0,3]
|
||||
// - b3, weight 1, backends [1,2]
|
||||
// Start the balancer group again and check for behavior.
|
||||
func TestBalancerGroup_start_close(t *testing.T) {
|
||||
func (s) TestBalancerGroup_start_close(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
|
||||
|
@ -538,7 +538,7 @@ func TestBalancerGroup_start_close(t *testing.T) {
|
|||
// This test starts the balancer group with a test balancer, will updates picker
|
||||
// whenever it gets an address update. It's expected that start() doesn't block
|
||||
// because of deadlock.
|
||||
func TestBalancerGroup_start_close_deadlock(t *testing.T) {
|
||||
func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
bg := newBalancerGroup(cc, nil)
|
||||
|
||||
|
@ -620,7 +620,7 @@ func initBalancerGroupForCachingTest(t *testing.T) (*balancerGroup, *testClientC
|
|||
|
||||
// Test that if a sub-balancer is removed, and re-added within close timeout,
|
||||
// the subConns won't be re-created.
|
||||
func TestBalancerGroup_locality_caching(t *testing.T) {
|
||||
func (s) TestBalancerGroup_locality_caching(t *testing.T) {
|
||||
defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
|
||||
bg, cc, addrToSC := initBalancerGroupForCachingTest(t)
|
||||
|
||||
|
@ -668,7 +668,7 @@ func TestBalancerGroup_locality_caching(t *testing.T) {
|
|||
// Sub-balancers are put in cache when they are removed. If balancer group is
|
||||
// closed within close timeout, all subconns should still be rmeoved
|
||||
// immediately.
|
||||
func TestBalancerGroup_locality_caching_close_group(t *testing.T) {
|
||||
func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {
|
||||
defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
|
||||
bg, cc, addrToSC := initBalancerGroupForCachingTest(t)
|
||||
|
||||
|
@ -697,7 +697,7 @@ func TestBalancerGroup_locality_caching_close_group(t *testing.T) {
|
|||
|
||||
// Sub-balancers in cache will be closed if not re-added within timeout, and
|
||||
// subConns will be removed.
|
||||
func TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) {
|
||||
func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) {
|
||||
defer replaceDefaultSubBalancerCloseTimeout(time.Second)()
|
||||
_, cc, addrToSC := initBalancerGroupForCachingTest(t)
|
||||
|
||||
|
@ -729,7 +729,7 @@ type noopBalancerBuilderWrapper struct {
|
|||
|
||||
// After removing a sub-balancer, re-add with same ID, but different balancer
|
||||
// builder. Old subconns should be removed, and new subconns should be created.
|
||||
func TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) {
|
||||
func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) {
|
||||
defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
|
||||
bg, cc, addrToSC := initBalancerGroupForCachingTest(t)
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ import (
|
|||
// changes.
|
||||
//
|
||||
// Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0.
|
||||
func TestEDSPriority_HighPriorityReady(t *testing.T) {
|
||||
func (s) TestEDSPriority_HighPriorityReady(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -97,7 +97,7 @@ func TestEDSPriority_HighPriorityReady(t *testing.T) {
|
|||
//
|
||||
// Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is
|
||||
// down, use 2; remove 2, use 1.
|
||||
func TestEDSPriority_SwitchPriority(t *testing.T) {
|
||||
func (s) TestEDSPriority_SwitchPriority(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -204,7 +204,7 @@ func TestEDSPriority_SwitchPriority(t *testing.T) {
|
|||
// Add a lower priority while the higher priority is down.
|
||||
//
|
||||
// Init 0 and 1; 0 and 1 both down; add 2, use 2.
|
||||
func TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) {
|
||||
func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -266,7 +266,7 @@ func TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) {
|
|||
// When a higher priority becomes available, all lower priorities are closed.
|
||||
//
|
||||
// Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2.
|
||||
func TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) {
|
||||
func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) {
|
||||
defer time.Sleep(10 * time.Millisecond)
|
||||
|
||||
cc := newTestClientConn(t)
|
||||
|
@ -338,7 +338,7 @@ func TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) {
|
|||
// doesn't get ready.
|
||||
//
|
||||
// Init 0,1; 0 is not ready (in connecting), after timeout, use 1.
|
||||
func TestEDSPriority_InitTimeout(t *testing.T) {
|
||||
func (s) TestEDSPriority_InitTimeout(t *testing.T) {
|
||||
const testPriorityInitTimeout = time.Second
|
||||
defer func() func() {
|
||||
old := defaultPriorityInitTimeout
|
||||
|
@ -396,7 +396,7 @@ func TestEDSPriority_InitTimeout(t *testing.T) {
|
|||
//
|
||||
// - start with 2 locality with p0 and p1
|
||||
// - add localities to existing p0 and p1
|
||||
func TestEDSPriority_MultipleLocalities(t *testing.T) {
|
||||
func (s) TestEDSPriority_MultipleLocalities(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -499,7 +499,7 @@ func TestEDSPriority_MultipleLocalities(t *testing.T) {
|
|||
}
|
||||
|
||||
// EDS removes all localities, and re-adds them.
|
||||
func TestEDSPriority_RemovesAllLocalities(t *testing.T) {
|
||||
func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) {
|
||||
const testPriorityInitTimeout = time.Second
|
||||
defer func() func() {
|
||||
old := defaultPriorityInitTimeout
|
||||
|
@ -625,7 +625,7 @@ func TestEDSPriority_RemovesAllLocalities(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPriorityType(t *testing.T) {
|
||||
func (s) TestPriorityType(t *testing.T) {
|
||||
p0 := newPriorityType(0)
|
||||
p1 := newPriorityType(1)
|
||||
p2 := newPriorityType(2)
|
||||
|
|
|
@ -50,7 +50,7 @@ func init() {
|
|||
// - remove backend
|
||||
// - replace backend
|
||||
// - change drop rate
|
||||
func TestEDS_OneLocality(t *testing.T) {
|
||||
func (s) TestEDS_OneLocality(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -156,7 +156,7 @@ func TestEDS_OneLocality(t *testing.T) {
|
|||
// - remove locality
|
||||
// - address change for the <not-the-first> locality
|
||||
// - update locality weight
|
||||
func TestEDS_TwoLocalities(t *testing.T) {
|
||||
func (s) TestEDS_TwoLocalities(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -286,7 +286,7 @@ func TestEDS_TwoLocalities(t *testing.T) {
|
|||
|
||||
// The EDS balancer gets EDS resp with unhealthy endpoints. Test that only
|
||||
// healthy ones are used.
|
||||
func TestEDS_EndpointsHealth(t *testing.T) {
|
||||
func (s) TestEDS_EndpointsHealth(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -358,7 +358,7 @@ func TestEDS_EndpointsHealth(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestClose(t *testing.T) {
|
||||
func (s) TestClose(t *testing.T) {
|
||||
edsb := newEDSBalancerImpl(nil, nil)
|
||||
// This is what could happen when switching between fallback and eds. This
|
||||
// make sure it doesn't panic.
|
||||
|
@ -414,7 +414,7 @@ func (tcp *testConstPicker) Pick(info balancer.PickInfo) (balancer.PickResult, e
|
|||
// Create XDS balancer, and update sub-balancer before handling eds responses.
|
||||
// Then switch between round-robin and test-const-balancer after handling first
|
||||
// eds response.
|
||||
func TestEDS_UpdateSubBalancerName(t *testing.T) {
|
||||
func (s) TestEDS_UpdateSubBalancerName(t *testing.T) {
|
||||
cc := newTestClientConn(t)
|
||||
edsb := newEDSBalancerImpl(cc, nil)
|
||||
|
||||
|
@ -506,7 +506,7 @@ func TestEDS_UpdateSubBalancerName(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDropPicker(t *testing.T) {
|
||||
func (s) TestDropPicker(t *testing.T) {
|
||||
const pickCount = 12
|
||||
var constPicker = &testConstPicker{
|
||||
sc: testSubConns[0],
|
||||
|
@ -571,7 +571,7 @@ func TestDropPicker(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestEDS_LoadReport(t *testing.T) {
|
||||
func (s) TestEDS_LoadReport(t *testing.T) {
|
||||
testLoadStore := newTestLoadStore()
|
||||
|
||||
cc := newTestClientConn(t)
|
||||
|
|
|
@@ -33,7 +33,6 @@ import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/grpctest"
-	"google.golang.org/grpc/internal/leakcheck"
	scpb "google.golang.org/grpc/internal/proto/grpc_service_config"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/serviceconfig"
@@ -56,10 +55,8 @@ func init() {
	}
}

-type s struct{}
-
-func (s) Teardown(t *testing.T) {
-	leakcheck.Check(t)
-}
+type s struct {
+	grpctest.Tester
+}

func Test(t *testing.T) {
@@ -388,7 +385,7 @@ func (s) TestXDSSubConnStateChange(t *testing.T) {
	edsLB.waitForSubConnStateChange(&scStateChange{sc: fsc, state: state})
}

-func TestXDSBalancerConfigParsing(t *testing.T) {
+func (s) TestXDSBalancerConfigParsing(t *testing.T) {
	const testEDSName = "eds.service"
	var testLRSName = "lrs.server"
	b := bytes.NewBuffer(nil)
@@ -499,7 +496,7 @@ func TestXDSBalancerConfigParsing(t *testing.T) {
		})
	}
}
-func TestLoadbalancingConfigParsing(t *testing.T) {
+func (s) TestLoadbalancingConfigParsing(t *testing.T) {
	tests := []struct {
		name string
		s    string
@@ -541,7 +538,7 @@ func TestLoadbalancingConfigParsing(t *testing.T) {
	}
}

-func TestEqualStringPointers(t *testing.T) {
+func (s) TestEqualStringPointers(t *testing.T) {
	var (
		ta1 = "test-a"
		ta2 = "test-a"
@@ -240,7 +240,7 @@ func (tc *testClosure) next() balancer.SubConn {
	return ret
}

-func TestIsRoundRobin(t *testing.T) {
+func (s) TestIsRoundRobin(t *testing.T) {
	var (
		sc1 = testSubConns[0]
		sc2 = testSubConns[1]

@@ -69,7 +69,7 @@ func init() {
	newRandomWRR = newTestWRR
}

-func TestDropper(t *testing.T) {
+func (s) TestDropper(t *testing.T) {
	const repeat = 2

	type args struct {
@@ -22,6 +22,7 @@ import (
	"testing"

	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/metadata"
	orcapb "google.golang.org/grpc/xds/internal/proto/udpa/data/orca/v1"
)
@@ -36,7 +37,15 @@ var (
	testBytes, _ = proto.Marshal(testMessage)
)

-func TestToMetadata(t *testing.T) {
+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
+func (s) TestToMetadata(t *testing.T) {
	tests := []struct {
		name string
		r    *orcapb.OrcaLoadReport
@@ -61,7 +70,7 @@ func TestToMetadata(t *testing.T) {
	}
}

-func TestFromMetadata(t *testing.T) {
+func (s) TestFromMetadata(t *testing.T) {
	tests := []struct {
		name string
		md   metadata.MD
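For orientation, the two tests renamed above exercise what their names suggest: converting an ORCA load report to and from gRPC metadata. A round-trip sketch, under the assumption that the package exposes ToMetadata(*orcapb.OrcaLoadReport) metadata.MD and FromMetadata(metadata.MD) *orcapb.OrcaLoadReport with those signatures:

func (s) TestToFromMetadataRoundTrip(t *testing.T) {
	// Assumed field name from the udpa ORCA proto; value is arbitrary.
	in := &orcapb.OrcaLoadReport{CpuUtilization: 0.25}
	out := FromMetadata(ToMetadata(in))
	if !proto.Equal(out, in) {
		t.Fatalf("FromMetadata(ToMetadata(%+v)) = %+v, want the input back", in, out)
	}
}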
@@ -25,6 +25,7 @@ import (
	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/google"
+	"google.golang.org/grpc/internal/grpctest"

	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	structpb "github.com/golang/protobuf/ptypes/struct"
@@ -54,11 +55,19 @@ var (
	}
)

+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
// TestNewConfig exercises the functionality in NewConfig with different
// bootstrap file contents. It overrides the fileReadFunc by returning
// bootstrap file contents defined in this test, instead of reading from a
// file.
-func TestNewConfig(t *testing.T) {
+func (s) TestNewConfig(t *testing.T) {
	bootstrapFileMap := map[string]string{
		"empty":   "",
		"badJSON": `["test": 123]`,
@@ -266,7 +275,7 @@ func TestNewConfig(t *testing.T) {
	}
}

-func TestNewConfigEnvNotSet(t *testing.T) {
+func (s) TestNewConfigEnvNotSet(t *testing.T) {
	os.Unsetenv(fileEnv)
	config, err := NewConfig()
	if err == nil {
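The comment on TestNewConfig above explains the trick these bootstrap tests rely on: swapping the package-level file reader instead of touching the filesystem. A sketch of that override, assuming fileReadFunc has the signature func(string) ([]byte, error) and that NewConfig rejects empty bootstrap contents (both are inferences from the surrounding test code, not confirmed here):

func (s) TestNewConfigEmptyFileContents(t *testing.T) {
	// Serve the "file" contents from memory and restore the reader afterwards.
	origReader := fileReadFunc
	fileReadFunc = func(name string) ([]byte, error) { return []byte(""), nil }
	defer func() { fileReadFunc = origReader }()

	os.Setenv(fileEnv, "in-memory-empty-file")
	defer os.Unsetenv(fileEnv)

	if cfg, err := NewConfig(); err == nil {
		t.Fatalf("NewConfig() = %v, want an error for empty bootstrap contents", cfg)
	}
}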
@ -52,7 +52,7 @@ func (v2c *v2Client) cloneCDSCacheForTesting() map[string]CDSUpdate {
|
|||
return cloneCache
|
||||
}
|
||||
|
||||
func TestValidateCluster(t *testing.T) {
|
||||
func (s) TestValidateCluster(t *testing.T) {
|
||||
emptyUpdate := CDSUpdate{ServiceName: "", EnableLRS: false}
|
||||
tests := []struct {
|
||||
name string
|
||||
|
@ -165,7 +165,7 @@ func TestValidateCluster(t *testing.T) {
|
|||
// TestCDSHandleResponse starts a fake xDS server, makes a ClientConn to it,
|
||||
// and creates a v2Client using it. Then, it registers a CDS watcher and tests
|
||||
// different CDS responses.
|
||||
func TestCDSHandleResponse(t *testing.T) {
|
||||
func (s) TestCDSHandleResponse(t *testing.T) {
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -238,7 +238,7 @@ func TestCDSHandleResponse(t *testing.T) {
|
|||
|
||||
// TestCDSHandleResponseWithoutWatch tests the case where the v2Client receives
|
||||
// a CDS response without a registered watcher.
|
||||
func TestCDSHandleResponseWithoutWatch(t *testing.T) {
|
||||
func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) {
|
||||
_, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -327,7 +327,7 @@ func testCDSCaching(t *testing.T, cdsTestOps []cdsTestOp, errCh *testutils.Chann
|
|||
|
||||
// TestCDSCaching tests some end-to-end CDS flows using a fake xDS server, and
|
||||
// verifies the CDS data cached at the v2Client.
|
||||
func TestCDSCaching(t *testing.T) {
|
||||
func (s) TestCDSCaching(t *testing.T) {
|
||||
ops := []cdsTestOp{
|
||||
// Add an CDS watch for a cluster name (clusterName1), which returns one
|
||||
// matching resource in the response.
|
||||
|
@ -376,7 +376,7 @@ func TestCDSCaching(t *testing.T) {
|
|||
// TestCDSWatchExpiryTimer tests the case where the client does not receive an
|
||||
// CDS response for the request that it sends out. We want the watch callback
|
||||
// to be invoked with an error once the watchExpiryTimer fires.
|
||||
func TestCDSWatchExpiryTimer(t *testing.T) {
|
||||
func (s) TestCDSWatchExpiryTimer(t *testing.T) {
|
||||
oldWatchExpiryTimeout := defaultWatchExpiryTimeout
|
||||
defaultWatchExpiryTimeout = 500 * time.Millisecond
|
||||
defer func() {
|
||||
|
|
|
@@ -25,6 +25,7 @@ import (
	"time"

	"google.golang.org/grpc"
+	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/xds/internal/client/bootstrap"
	"google.golang.org/grpc/xds/internal/testutils"
	"google.golang.org/grpc/xds/internal/testutils/fakeserver"
@@ -32,6 +33,14 @@ import (
	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
)

+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
func clientOpts(balancerName string) Options {
	return Options{
		Config: bootstrap.Config{
@ -45,7 +54,7 @@ func clientOpts(balancerName string) Options {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
func (s) TestNew(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
|
@ -110,7 +119,7 @@ func TestNew(t *testing.T) {
|
|||
|
||||
// TestWatchService tests the happy case of registering a watcher for
|
||||
// service updates and receiving a good update.
|
||||
func TestWatchService(t *testing.T) {
|
||||
func (s) TestWatchService(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
|
@ -157,7 +166,7 @@ func TestWatchService(t *testing.T) {
|
|||
// xDS server does not respond to the requests being sent out as part of
|
||||
// registering a service update watcher. The underlying v2Client will timeout
|
||||
// and will send us an error.
|
||||
func TestWatchServiceWithNoResponseFromServer(t *testing.T) {
|
||||
func (s) TestWatchServiceWithNoResponseFromServer(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
|
@ -201,7 +210,7 @@ func TestWatchServiceWithNoResponseFromServer(t *testing.T) {
|
|||
|
||||
// TestWatchServiceEmptyRDS tests the case where the underlying
|
||||
// v2Client receives an empty RDS response.
|
||||
func TestWatchServiceEmptyRDS(t *testing.T) {
|
||||
func (s) TestWatchServiceEmptyRDS(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
|
@ -253,7 +262,7 @@ func TestWatchServiceEmptyRDS(t *testing.T) {
|
|||
// TestWatchServiceWithClientClose tests the case where xDS responses are
|
||||
// received after the client is closed, and we make sure that the registered
|
||||
// watcher callback is not invoked.
|
||||
func TestWatchServiceWithClientClose(t *testing.T) {
|
||||
func (s) TestWatchServiceWithClientClose(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
|
|
|
@@ -32,7 +32,7 @@ import (
	"google.golang.org/grpc/xds/internal/testutils"
)

-func TestEDSParseRespProto(t *testing.T) {
+func (s) TestEDSParseRespProto(t *testing.T) {
	tests := []struct {
		name string
		m    *xdspb.ClusterLoadAssignment
@@ -161,7 +161,7 @@ var (
	}
)

-func TestEDSHandleResponse(t *testing.T) {
+func (s) TestEDSHandleResponse(t *testing.T) {
	fakeServer, cc, cleanup := startServerAndGetCC(t)
	defer cleanup()

@@ -241,7 +241,7 @@ func TestEDSHandleResponse(t *testing.T) {

// TestEDSHandleResponseWithoutWatch tests the case where the v2Client
// receives an EDS response without a registered EDS watcher.
-func TestEDSHandleResponseWithoutWatch(t *testing.T) {
+func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) {
	_, cc, cleanup := startServerAndGetCC(t)
	defer cleanup()

@@ -253,7 +253,7 @@ func TestEDSHandleResponseWithoutWatch(t *testing.T) {
	}
}

-func TestEDSWatchExpiryTimer(t *testing.T) {
+func (s) TestEDSWatchExpiryTimer(t *testing.T) {
	oldWatchExpiryTimeout := defaultWatchExpiryTimeout
	defaultWatchExpiryTimeout = 500 * time.Millisecond
	defer func() {
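The expiry-timer tests above (and their CDS/LDS/RDS counterparts elsewhere in this diff) all shorten the package-level defaultWatchExpiryTimeout and restore it on exit. That save/override/restore dance can be captured in a tiny helper; the helper name is invented here and the 500ms value simply mirrors the tests:

func overrideWatchExpiryTimeout(d time.Duration) (restore func()) {
	old := defaultWatchExpiryTimeout
	defaultWatchExpiryTimeout = d
	return func() { defaultWatchExpiryTimeout = old }
}

// Usage inside a test body:
//	defer overrideWatchExpiryTimeout(500 * time.Millisecond)()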
|
@ -28,7 +28,7 @@ import (
|
|||
"google.golang.org/grpc/xds/internal/testutils"
|
||||
)
|
||||
|
||||
func TestLDSGetRouteConfig(t *testing.T) {
|
||||
func (s) TestLDSGetRouteConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
lis *xdspb.Listener
|
||||
|
@ -89,7 +89,7 @@ func TestLDSGetRouteConfig(t *testing.T) {
|
|||
// TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it,
|
||||
// and creates a v2Client using it. Then, it registers a watchLDS and tests
|
||||
// different LDS responses.
|
||||
func TestLDSHandleResponse(t *testing.T) {
|
||||
func (s) TestLDSHandleResponse(t *testing.T) {
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -192,7 +192,7 @@ func TestLDSHandleResponse(t *testing.T) {
|
|||
|
||||
// TestLDSHandleResponseWithoutWatch tests the case where the v2Client receives
|
||||
// an LDS response without a registered watcher.
|
||||
func TestLDSHandleResponseWithoutWatch(t *testing.T) {
|
||||
func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) {
|
||||
_, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -207,7 +207,7 @@ func TestLDSHandleResponseWithoutWatch(t *testing.T) {
|
|||
// TestLDSWatchExpiryTimer tests the case where the client does not receive an
|
||||
// LDS response for the request that it sends out. We want the watch callback
|
||||
// to be invoked with an error once the watchExpiryTimer fires.
|
||||
func TestLDSWatchExpiryTimer(t *testing.T) {
|
||||
func (s) TestLDSWatchExpiryTimer(t *testing.T) {
|
||||
oldWatchExpiryTimeout := defaultWatchExpiryTimeout
|
||||
defaultWatchExpiryTimeout = 500 * time.Millisecond
|
||||
defer func() {
|
||||
|
|
|
@ -43,7 +43,7 @@ func (v2c *v2Client) cloneRDSCacheForTesting() map[string]string {
|
|||
return cloneCache
|
||||
}
|
||||
|
||||
func TestRDSGetClusterFromRouteConfiguration(t *testing.T) {
|
||||
func (s) TestRDSGetClusterFromRouteConfiguration(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
rc *xdspb.RouteConfiguration
|
||||
|
@ -192,7 +192,7 @@ func doLDS(t *testing.T, v2c *v2Client, fakeServer *fakeserver.Server) {
|
|||
// TestRDSHandleResponse starts a fake xDS server, makes a ClientConn to it,
|
||||
// and creates a v2Client using it. Then, it registers an LDS and RDS watcher
|
||||
// and tests different RDS responses.
|
||||
func TestRDSHandleResponse(t *testing.T) {
|
||||
func (s) TestRDSHandleResponse(t *testing.T) {
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -269,7 +269,7 @@ func TestRDSHandleResponse(t *testing.T) {
|
|||
|
||||
// TestRDSHandleResponseWithoutLDSWatch tests the case where the v2Client
|
||||
// receives an RDS response without a registered LDS watcher.
|
||||
func TestRDSHandleResponseWithoutLDSWatch(t *testing.T) {
|
||||
func (s) TestRDSHandleResponseWithoutLDSWatch(t *testing.T) {
|
||||
_, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -283,7 +283,7 @@ func TestRDSHandleResponseWithoutLDSWatch(t *testing.T) {
|
|||
|
||||
// TestRDSHandleResponseWithoutRDSWatch tests the case where the v2Client
|
||||
// receives an RDS response without a registered RDS watcher.
|
||||
func TestRDSHandleResponseWithoutRDSWatch(t *testing.T) {
|
||||
func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) {
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -374,7 +374,7 @@ func testRDSCaching(t *testing.T, rdsTestOps []rdsTestOp, errCh *testutils.Chann
|
|||
|
||||
// TestRDSCaching tests some end-to-end RDS flows using a fake xDS server, and
|
||||
// verifies the RDS data cached at the v2Client.
|
||||
func TestRDSCaching(t *testing.T) {
|
||||
func (s) TestRDSCaching(t *testing.T) {
|
||||
ops := []rdsTestOp{
|
||||
// Add an RDS watch for a resource name (goodRouteName1), which returns one
|
||||
// matching resource in the response.
|
||||
|
@ -426,7 +426,7 @@ func TestRDSCaching(t *testing.T) {
|
|||
// TestRDSWatchExpiryTimer tests the case where the client does not receive an
|
||||
// RDS response for the request that it sends out. We want the watch callback
|
||||
// to be invoked with an error once the watchExpiryTimer fires.
|
||||
func TestRDSWatchExpiryTimer(t *testing.T) {
|
||||
func (s) TestRDSWatchExpiryTimer(t *testing.T) {
|
||||
oldWatchExpiryTimeout := defaultWatchExpiryTimeout
|
||||
defaultWatchExpiryTimeout = 500 * time.Millisecond
|
||||
defer func() {
|
||||
|
@ -461,7 +461,7 @@ func TestRDSWatchExpiryTimer(t *testing.T) {
|
|||
waitForNilErr(t, callbackCh)
|
||||
}
|
||||
|
||||
func TestHostFromTarget(t *testing.T) {
|
||||
func (s) TestHostFromTarget(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
target string
|
||||
|
|
|
@ -144,7 +144,7 @@ func sendBadResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, ve
|
|||
// are nacked.
|
||||
//
|
||||
// This test also verifies the version for different types are independent.
|
||||
func TestV2ClientAck(t *testing.T) {
|
||||
func (s) TestV2ClientAck(t *testing.T) {
|
||||
var (
|
||||
versionLDS = 1000
|
||||
versionRDS = 2000
|
||||
|
@ -195,7 +195,7 @@ func TestV2ClientAck(t *testing.T) {
|
|||
|
||||
// Test when the first response is invalid, and is nacked, the nack requests
|
||||
// should have an empty version string.
|
||||
func TestV2ClientAckFirstIsNack(t *testing.T) {
|
||||
func (s) TestV2ClientAckFirstIsNack(t *testing.T) {
|
||||
var versionLDS = 1000
|
||||
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
|
@ -227,7 +227,7 @@ func TestV2ClientAckFirstIsNack(t *testing.T) {
|
|||
|
||||
// Test when a nack is sent after a new watch, we nack with the previous acked
|
||||
// version (instead of resetting to empty string).
|
||||
func TestV2ClientAckNackAfterNewWatch(t *testing.T) {
|
||||
func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) {
|
||||
var versionLDS = 1000
|
||||
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
|
|
|
@ -390,7 +390,7 @@ var (
|
|||
|
||||
// TestV2ClientBackoffAfterRecvError verifies if the v2Client backoffs when it
|
||||
// encounters a Recv error while receiving an LDS response.
|
||||
func TestV2ClientBackoffAfterRecvError(t *testing.T) {
|
||||
func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) {
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -433,7 +433,7 @@ func TestV2ClientBackoffAfterRecvError(t *testing.T) {
|
|||
// TestV2ClientRetriesAfterBrokenStream verifies the case where a stream
|
||||
// encountered a Recv() error, and is expected to send out xDS requests for
|
||||
// registered watchers once it comes back up again.
|
||||
func TestV2ClientRetriesAfterBrokenStream(t *testing.T) {
|
||||
func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) {
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -478,7 +478,7 @@ func TestV2ClientRetriesAfterBrokenStream(t *testing.T) {
|
|||
|
||||
// TestV2ClientCancelWatch verifies that the registered watch callback is not
|
||||
// invoked if a response is received after the watcher is cancelled.
|
||||
func TestV2ClientCancelWatch(t *testing.T) {
|
||||
func (s) TestV2ClientCancelWatch(t *testing.T) {
|
||||
fakeServer, cc, cleanup := startServerAndGetCC(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
|
@@ -25,10 +25,19 @@ import (

	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	"github.com/google/go-cmp/cmp"
+	"google.golang.org/grpc/internal/grpctest"
)

const ignorePrefix = "XXX_"

+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
func ignore(name string) bool {
	if !unicode.IsUpper([]rune(name)[0]) {
		return true
@@ -38,7 +47,7 @@ func ignore(name string) bool {

// A reflection based test to make sure internal.Locality contains all the
// fields (expect for XXX_) from the proto message.
-func TestLocalityMatchProtoMessage(t *testing.T) {
+func (s) TestLocalityMatchProtoMessage(t *testing.T) {
	want1 := make(map[string]string)
	for ty, i := reflect.TypeOf(Locality{}), 0; i < ty.NumField(); i++ {
		f := ty.Field(i)