xds: Client refactor in preparation for xDS v3 support (#3743)
This commit is contained in:
Parent: d6c4e49aab
Commit: 97c30a1419
@@ -25,7 +25,6 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
-	xdsclient "google.golang.org/grpc/xds/internal/client"
 	"google.golang.org/grpc/xds/internal/testutils"
 )
@@ -39,10 +38,10 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with priorities [0, 1], each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs1 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want {
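
The test changes in this file all repeat one mechanical pattern: the ClusterLoadAssignment builder moves from the xdsclient package to testutils, and responses are parsed with the package-local parseEDSRespProtoForTesting helper instead of the exported xdsclient.ParseEDSRespProtoForTesting. A minimal sketch of the post-refactor pattern (testClusterNames, testSubZones, testEndpointAddrs and edsb are fixtures from the test harness; AddLocality takes the sub-zone, locality weight, priority, addresses, and options):

	clab := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
	clab.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) // weight 1, priority 0
	clab.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) // weight 1, priority 1
	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab.Build()))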
@@ -62,11 +61,11 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) {
 	}
 
 	// Add p2, it shouldn't cause any udpates.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
 	clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	select {
 	case <-cc.NewPickerCh:
@@ -79,10 +78,10 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) {
 	}
 
 	// Remove p2, no updates.
-	clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build()))
 
 	select {
 	case <-cc.NewPickerCh:
@@ -105,10 +104,10 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with priorities [0, 1], each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs0 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
@@ -147,11 +146,11 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) {
 	}
 
 	// Add p2, it shouldn't cause any udpates.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
 	clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	select {
 	case <-cc.NewPickerCh:
@@ -183,10 +182,10 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) {
 	}
 
 	// Remove 2, use 1.
-	clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build()))
 
 	// p2 SubConns are removed.
 	scToRemove := <-cc.RemoveSubConnCh
@@ -212,10 +211,10 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with different priorities, each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs0 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
@@ -242,11 +241,11 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) {
 	}
 
 	// Add p2, it should create a new SubConn.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
 	clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	addrs2 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want {
@@ -277,11 +276,11 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with priorities [0,1,2], each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
 	clab1.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs0 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
@@ -359,10 +358,10 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with different priorities, each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs0 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
@@ -409,10 +408,10 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with different priorities, each with one backend.
-	clab0 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab0.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab0.Build()))
 
 	addrs0 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
@@ -463,12 +462,12 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) {
 	}
 
 	// Add two localities, with two priorities, with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
 	clab1.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil)
 	clab1.AddLocality(testSubZones[3], 1, 1, testEndpointAddrs[3:4], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs2 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want {
@@ -520,10 +519,10 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with different priorities, each with one backend.
-	clab0 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab0.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab0.Build()))
 
 	addrs0 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
@@ -541,8 +540,8 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) {
 	}
 
 	// Remove all priorities.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	// p0 subconn should be removed.
 	scToRemove := <-cc.RemoveSubConnCh
@@ -559,10 +558,10 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) {
 	}
 
 	// Re-add two localities, with previous priorities, but different backends.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil)
 	clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[3:4], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	addrs01 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs01[0].Addr, testEndpointAddrs[2]; got != want {
@@ -591,9 +590,9 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) {
 	}
 
 	// Remove p1 from EDS, to fallback to p0.
-	clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build()))
 
 	// p1 subconn should be removed.
 	scToRemove1 := <-cc.RemoveSubConnCh
@@ -664,10 +663,10 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with priorities [0, 1], each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs1 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want {
@@ -687,10 +686,10 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) {
 	}
 
 	// Remove addresses from priority 0, should use p1.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab2.AddLocality(testSubZones[0], 1, 0, nil, nil)
 	clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	// p0 will remove the subconn, and ClientConn will send a sc update to
 	// shutdown.
@@ -723,10 +722,10 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, with priorities [0, 1], each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	addrs1 := <-cc.NewSubConnAddrsCh
 	if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want {
@@ -746,12 +745,12 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) {
 	}
 
 	// Set priority 0 endpoints to all unhealthy, should use p1.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
-	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], &xdsclient.AddLocalityOptions{
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], &testutils.AddLocalityOptions{
 		Health: []corepb.HealthStatus{corepb.HealthStatus_UNHEALTHY},
 	})
 	clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	// p0 will remove the subconn, and ClientConn will send a sc update to
 	// transient failure.

@@ -60,9 +60,9 @@ func (s) TestEDS_OneLocality(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// One locality with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	sc1 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc1, connectivity.Connecting)
@@ -78,9 +78,9 @@ func (s) TestEDS_OneLocality(t *testing.T) {
 	}
 
 	// The same locality, add one more backend.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	sc2 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc2, connectivity.Connecting)
@@ -94,9 +94,9 @@ func (s) TestEDS_OneLocality(t *testing.T) {
 	}
 
 	// The same locality, delete first backend.
-	clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build()))
 
 	scToRemove := <-cc.RemoveSubConnCh
 	if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
@@ -114,9 +114,9 @@ func (s) TestEDS_OneLocality(t *testing.T) {
 	}
 
 	// The same locality, replace backend.
-	clab4 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab4.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab4.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab4.Build()))
 
 	sc3 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc3, connectivity.Connecting)
@@ -137,9 +137,9 @@ func (s) TestEDS_OneLocality(t *testing.T) {
 	}
 
 	// The same locality, different drop rate, dropping 50%.
-	clab5 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], []uint32{50})
+	clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], []uint32{50})
 	clab5.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab5.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab5.Build()))
 
 	// Picks with drops.
 	p5 := <-cc.NewPickerCh
@@ -155,9 +155,9 @@ func (s) TestEDS_OneLocality(t *testing.T) {
 	}
 
 	// The same locality, remove drops.
-	clab6 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab6.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab6.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab6.Build()))
 
 	// Pick without drops.
 	p6 := <-cc.NewPickerCh
@@ -181,9 +181,9 @@ func (s) TestEDS_TwoLocalities(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 	sc1 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc1, connectivity.Connecting)
 	edsb.handleSubConnStateChange(sc1, connectivity.Ready)
@@ -192,7 +192,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) {
 	// locality. Otherwise the test is flaky because of a map is used in EDS to
 	// keep localities.
 	clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 	sc2 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc2, connectivity.Connecting)
 	edsb.handleSubConnStateChange(sc2, connectivity.Ready)
@@ -205,11 +205,11 @@ func (s) TestEDS_TwoLocalities(t *testing.T) {
 	}
 
 	// Add another locality, with one backend.
-	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil)
 	clab2.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build()))
 
 	sc3 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc3, connectivity.Connecting)
@@ -223,10 +223,10 @@ func (s) TestEDS_TwoLocalities(t *testing.T) {
 	}
 
 	// Remove first locality.
-	clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab3.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil)
 	clab3.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build()))
 
 	scToRemove := <-cc.RemoveSubConnCh
 	if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
@@ -242,10 +242,10 @@ func (s) TestEDS_TwoLocalities(t *testing.T) {
 	}
 
 	// Add a backend to the last locality.
-	clab4 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab4.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil)
 	clab4.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab4.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab4.Build()))
 
 	sc4 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc4, connectivity.Connecting)
@@ -262,10 +262,10 @@ func (s) TestEDS_TwoLocalities(t *testing.T) {
 	}
 
 	// Change weight of the locality[1].
-	clab5 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab5.AddLocality(testSubZones[1], 2, 0, testEndpointAddrs[1:2], nil)
 	clab5.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab5.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab5.Build()))
 
 	// Test pick with two subconns different locality weight.
 	p5 := <-cc.NewPickerCh
@@ -278,10 +278,10 @@ func (s) TestEDS_TwoLocalities(t *testing.T) {
 	}
 
 	// Change weight of the locality[1] to 0, it should never be picked.
-	clab6 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab6.AddLocality(testSubZones[1], 0, 0, testEndpointAddrs[1:2], nil)
 	clab6.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab6.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab6.Build()))
 
 	// Changing weight of locality[1] to 0 caused it to be removed. It's subconn
 	// should also be removed.
@@ -312,8 +312,8 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) {
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
 
 	// Two localities, each 3 backend, one Healthy, one Unhealthy, one Unknown.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
-	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:6], &xdsclient.AddLocalityOptions{
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:6], &testutils.AddLocalityOptions{
 		Health: []corepb.HealthStatus{
 			corepb.HealthStatus_HEALTHY,
 			corepb.HealthStatus_UNHEALTHY,
@@ -323,7 +323,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) {
 			corepb.HealthStatus_DEGRADED,
 		},
 	})
-	clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[6:12], &xdsclient.AddLocalityOptions{
+	clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[6:12], &testutils.AddLocalityOptions{
 		Health: []corepb.HealthStatus{
 			corepb.HealthStatus_HEALTHY,
 			corepb.HealthStatus_UNHEALTHY,
@@ -333,7 +333,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) {
 			corepb.HealthStatus_DEGRADED,
 		},
 	})
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	var (
 		readySCs []balancer.SubConn
@@ -406,9 +406,9 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) {
 	}
 
 	// One locality with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	sc1 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc1, connectivity.Connecting)
@@ -434,7 +434,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) {
 	}
 
 	// Handle another update with priorities and localities.
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	sc2 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc2, connectivity.Connecting)
@@ -462,10 +462,10 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) {
 	edsb.handleChildPolicy("test-const-balancer", nil)
 
 	// Two localities, each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
 	clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	for i := 0; i < 2; i++ {
 		sc := <-cc.NewSubConnCh
@@ -601,9 +601,9 @@ func (s) TestEDS_ChildPolicyUpdatePickerInline(t *testing.T) {
 
 	edsb.handleChildPolicy("test-inline-update-balancer", nil)
 
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 
 	p0 := <-cc.NewPickerCh
 	for i := 0; i < 5; i++ {
@@ -689,9 +689,9 @@ func (s) TestEDS_LoadReport(t *testing.T) {
 	backendToBalancerID := make(map[balancer.SubConn]internal.LocalityID)
 
 	// Two localities, each with one backend.
-	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
+	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
 	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 	sc1 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc1, connectivity.Connecting)
 	edsb.handleSubConnStateChange(sc1, connectivity.Ready)
@@ -703,7 +703,7 @@ func (s) TestEDS_LoadReport(t *testing.T) {
 	// locality. Otherwise the test is flaky because of a map is used in EDS to
 	// keep localities.
 	clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil)
-	edsb.handleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))
+	edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build()))
 	sc2 := <-cc.NewSubConnCh
 	edsb.handleSubConnStateChange(sc2, connectivity.Connecting)
 	edsb.handleSubConnStateChange(sc2, connectivity.Ready)

@@ -41,6 +41,8 @@ import (
 	"google.golang.org/grpc/xds/internal/client/bootstrap"
 	"google.golang.org/grpc/xds/internal/testutils"
 	"google.golang.org/grpc/xds/internal/testutils/fakeclient"
+
+	_ "google.golang.org/grpc/xds/internal/client/v2" // V2 client registration.
 )
 
 func init() {
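
The blank import of the client/v2 package is what activates the new builder registry introduced later in this commit: importing it for side effects runs its init(), which registers a v2 builder with the client package. A hedged sketch of what that registration could look like inside the v2 package (the builder type and newClient constructor below are assumed names, not shown in this excerpt):

	package v2

	import (
		"google.golang.org/grpc"

		xdsclient "google.golang.org/grpc/xds/internal/client"
		"google.golang.org/grpc/xds/internal/version"
	)

	type clientBuilder struct{} // assumed name

	func (clientBuilder) Build(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) {
		return newClient(cc, opts) // assumed constructor for the v2 transport client
	}

	func (clientBuilder) Version() version.TransportAPI {
		return version.TransportV2
	}

	func init() {
		xdsclient.RegisterAPIClientBuilder(clientBuilder{})
	}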

@@ -1,6 +1,5 @@
 /*
- *
- * Copyright 2019 gRPC authors.
+ * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +14,7 @@
 * limitations under the License.
 */
 
-package client
+package edsbalancer
 
 import (
 	"fmt"
@@ -26,15 +25,61 @@ import (
 	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
 	endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
 	typepb "github.com/envoyproxy/go-control-plane/envoy/type"
-	"github.com/golang/protobuf/ptypes"
 	"google.golang.org/grpc/xds/internal"
+	xdsclient "google.golang.org/grpc/xds/internal/client"
 )
 
+// parseEDSRespProtoForTesting parses EDS response, and panic if parsing fails.
+//
+// TODO: delete this. The EDS balancer tests should build an EndpointsUpdate
+// directly, instead of building and parsing a proto message.
+func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsclient.EndpointsUpdate {
+	u, err := parseEDSRespProto(m)
+	if err != nil {
+		panic(err.Error())
+	}
+	return u
+}
+
+// parseEDSRespProto turns EDS response proto message to EndpointsUpdate.
+func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdate, error) {
+	ret := xdsclient.EndpointsUpdate{}
+	for _, dropPolicy := range m.GetPolicy().GetDropOverloads() {
+		ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy))
+	}
+	priorities := make(map[uint32]struct{})
+	for _, locality := range m.Endpoints {
+		l := locality.GetLocality()
+		if l == nil {
+			return xdsclient.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality)
+		}
+		lid := internal.LocalityID{
+			Region:  l.Region,
+			Zone:    l.Zone,
+			SubZone: l.SubZone,
+		}
+		priority := locality.GetPriority()
+		priorities[priority] = struct{}{}
+		ret.Localities = append(ret.Localities, xdsclient.Locality{
+			ID:        lid,
+			Endpoints: parseEndpoints(locality.GetLbEndpoints()),
+			Weight:    locality.GetLoadBalancingWeight().GetValue(),
+			Priority:  priority,
+		})
+	}
+	for i := 0; i < len(priorities); i++ {
+		if _, ok := priorities[uint32(i)]; !ok {
+			return xdsclient.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities)
+		}
+	}
+	return ret, nil
+}
+
 func parseAddress(socketAddress *corepb.SocketAddress) string {
 	return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue())))
 }
 
-func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig {
+func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) xdsclient.OverloadDropConfig {
 	percentage := dropPolicy.GetDropPercentage()
 	var (
 		numerator = percentage.GetNumerator()
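
A short sketch of how the balancer tests combine the testutils builder with this helper; because parseEDSRespProtoForTesting panics on a malformed ClusterLoadAssignment, a bad fixture aborts the test immediately instead of yielding a half-parsed update (the cluster name and address below are illustrative values):

	clab := testutils.NewClusterLoadAssignmentBuilder("test-cluster", nil)
	clab.AddLocality("sub-zone-0", 1, 0, []string{"1.2.3.4:8080"}, nil)
	// update is an xdsclient.EndpointsUpdate with a single priority-0 locality.
	update := parseEDSRespProtoForTesting(clab.Build())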
@@ -48,96 +93,21 @@ func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload
 	case typepb.FractionalPercent_MILLION:
 		denominator = 1000000
 	}
-	return OverloadDropConfig{
+	return xdsclient.OverloadDropConfig{
 		Category:    dropPolicy.GetCategory(),
 		Numerator:   numerator,
 		Denominator: denominator,
 	}
 }
 
-func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []Endpoint {
-	endpoints := make([]Endpoint, 0, len(lbEndpoints))
+func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsclient.Endpoint {
+	endpoints := make([]xdsclient.Endpoint, 0, len(lbEndpoints))
 	for _, lbEndpoint := range lbEndpoints {
-		endpoints = append(endpoints, Endpoint{
-			HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()),
+		endpoints = append(endpoints, xdsclient.Endpoint{
+			HealthStatus: xdsclient.EndpointHealthStatus(lbEndpoint.GetHealthStatus()),
 			Address:      parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()),
 			Weight:       lbEndpoint.GetLoadBalancingWeight().GetValue(),
 		})
 	}
 	return endpoints
 }
-
-// ParseEDSRespProto turns EDS response proto message to EndpointsUpdate.
-//
-// This is temporarily exported to be used in eds balancer, before it switches
-// to use xds client. TODO: unexport.
-func ParseEDSRespProto(m *xdspb.ClusterLoadAssignment) (EndpointsUpdate, error) {
-	ret := EndpointsUpdate{}
-	for _, dropPolicy := range m.GetPolicy().GetDropOverloads() {
-		ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy))
-	}
-	priorities := make(map[uint32]struct{})
-	for _, locality := range m.Endpoints {
-		l := locality.GetLocality()
-		if l == nil {
-			return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality)
-		}
-		lid := internal.LocalityID{
-			Region:  l.Region,
-			Zone:    l.Zone,
-			SubZone: l.SubZone,
-		}
-		priority := locality.GetPriority()
-		priorities[priority] = struct{}{}
-		ret.Localities = append(ret.Localities, Locality{
-			ID:        lid,
-			Endpoints: parseEndpoints(locality.GetLbEndpoints()),
-			Weight:    locality.GetLoadBalancingWeight().GetValue(),
-			Priority:  priority,
-		})
-	}
-	for i := 0; i < len(priorities); i++ {
-		if _, ok := priorities[uint32(i)]; !ok {
-			return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities)
-		}
-	}
-	return ret, nil
-}
-
-// ParseEDSRespProtoForTesting parses EDS response, and panic if parsing fails.
-// This is used by EDS balancer tests.
-//
-// TODO: delete this. The EDS balancer tests should build an EndpointsUpdate directly,
-// instead of building and parsing a proto message.
-func ParseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) EndpointsUpdate {
-	u, err := ParseEDSRespProto(m)
-	if err != nil {
-		panic(err.Error())
-	}
-	return u
-}
-
-func (v2c *v2Client) handleEDSResponse(resp *xdspb.DiscoveryResponse) error {
-	returnUpdate := make(map[string]EndpointsUpdate)
-	for _, r := range resp.GetResources() {
-		var resource ptypes.DynamicAny
-		if err := ptypes.UnmarshalAny(r, &resource); err != nil {
-			return fmt.Errorf("xds: failed to unmarshal resource in EDS response: %v", err)
-		}
-		cla, ok := resource.Message.(*xdspb.ClusterLoadAssignment)
-		if !ok {
-			return fmt.Errorf("xds: unexpected resource type: %T in EDS response", resource.Message)
-		}
-		v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, cla)
-
-		u, err := ParseEDSRespProto(cla)
-		if err != nil {
-			return err
-		}
-
-		returnUpdate[cla.GetClusterName()] = u
-	}
-
-	v2c.parent.newEDSUpdate(returnUpdate)
-	return nil
-}

@@ -36,10 +36,7 @@ import (
 	"google.golang.org/grpc/xds/internal/testutils"
 	"google.golang.org/grpc/xds/internal/testutils/fakeclient"
 	"google.golang.org/grpc/xds/internal/testutils/fakeserver"
-)
-
-const (
-	edsType = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"
+	"google.golang.org/grpc/xds/internal/version"
 )
 
 var (
@@ -135,7 +132,7 @@ func (s) TestClientWrapperWatchEDS(t *testing.T) {
 	}
 
 	wantReq := &xdspb.DiscoveryRequest{
-		TypeUrl:       edsType,
+		TypeUrl:       version.V2EndpointsURL,
 		ResourceNames: []string{test.wantResourceName},
 		Node:          testutils.EmptyNodeProtoV2,
 	}
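
The hand-rolled edsType constant gives way to version.V2EndpointsURL; for the request assertion above to keep passing, the new constant must carry the same type URL string the test previously hard-coded. A hedged sanity check (the constant's exact value is implied by this excerpt, not shown):

	if got, want := version.V2EndpointsURL, "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"; got != want {
		t.Fatalf("version.V2EndpointsURL = %q, want %q", got, want)
	}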
@@ -26,17 +26,204 @@ import (
 	"sync"
 	"time"
 
-	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
+	v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
+	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+	"github.com/golang/protobuf/proto"
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/buffer"
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/xds/internal"
 	"google.golang.org/grpc/xds/internal/client/bootstrap"
+	"google.golang.org/grpc/xds/internal/version"
 )
 
+var (
+	m = make(map[version.TransportAPI]APIClientBuilder)
+)
+
+// RegisterAPIClientBuilder registers a client builder for xDS transport protocol
+// version specified by b.Version().
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple builders are
+// registered for the same version, the one registered last will take effect.
+func RegisterAPIClientBuilder(b APIClientBuilder) {
+	m[b.Version()] = b
+}
+
+// getAPIClientBuilder returns the client builder registered for the provided
+// xDS transport API version.
+func getAPIClientBuilder(version version.TransportAPI) APIClientBuilder {
+	if b, ok := m[version]; ok {
+		return b
+	}
+	return nil
+}
+
+// BuildOptions contains options to be passed to client builders.
+type BuildOptions struct {
+	// Parent is a top-level xDS client or server which has the intelligence to
+	// take appropriate action based on xDS responses received from the
+	// management server.
+	Parent UpdateHandler
+	// NodeProto contains the Node proto to be used in xDS requests. The actual
+	// type depends on the transport protocol version used.
+	NodeProto proto.Message
+	// Backoff returns the amount of time to backoff before retrying broken
+	// streams.
+	Backoff func(int) time.Duration
+	// Logger provides enhanced logging capabilities.
+	Logger *grpclog.PrefixLogger
+}
+
+// APIClientBuilder creates an xDS client for a specific xDS transport protocol
+// version.
+type APIClientBuilder interface {
+	// Build builds a transport protocol specific implementation of the xDS
+	// client based on the provided clientConn to the management server and the
+	// provided options.
+	Build(*grpc.ClientConn, BuildOptions) (APIClient, error)
+	// Version returns the xDS transport protocol version used by clients build
+	// using this builder.
+	Version() version.TransportAPI
+}
+
+// APIClient represents the functionality provided by transport protocol
+// version specific implementations of the xDS client.
+type APIClient interface {
+	// AddWatch adds a watch for an xDS resource given its type and name.
+	AddWatch(resourceType, resourceName string)
+	// RemoveWatch cancels an already registered watch for an xDS resource
+	// given its type and name.
+	RemoveWatch(resourceType, resourceName string)
+	// Close cleans up resources allocated by the API client.
+	Close()
+}
+
+// UpdateHandler receives and processes (by taking appropriate actions) xDS
+// resource updates from an APIClient for a specific version.
+type UpdateHandler interface {
+	// NewListeners handles updates to xDS listener resources.
+	NewListeners(map[string]ListenerUpdate)
+	// NewRouteConfigs handles updates to xDS RouteConfiguration resources.
+	NewRouteConfigs(map[string]RouteConfigUpdate)
+	// NewClusters handles updates to xDS Cluster resources.
+	NewClusters(map[string]ClusterUpdate)
+	// NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely
+	// referred to as Endpoints) resources.
+	NewEndpoints(map[string]EndpointsUpdate)
+}
+
+// ListenerUpdate contains information received in an LDS response, which is of
+// interest to the registered LDS watcher.
+type ListenerUpdate struct {
+	// RouteConfigName is the route configuration name corresponding to the
+	// target which is being watched through LDS.
+	RouteConfigName string
+}
+
+// RouteConfigUpdate contains information received in an RDS response, which is
+// of interest to the registered RDS watcher.
+type RouteConfigUpdate struct {
+	// Routes contains a list of routes, each containing matchers and
+	// corresponding action.
+	Routes []*Route
+}
+
+// Route is both a specification of how to match a request as well as an
+// indication of the action to take upon match.
+type Route struct {
+	Path, Prefix, Regex *string
+	Headers             []*HeaderMatcher
+	Fraction            *uint32
+	Action              map[string]uint32 // action is weighted clusters.
+}
+
+// HeaderMatcher represents header matchers.
+type HeaderMatcher struct {
+	Name         string      `json:"name"`
+	InvertMatch  *bool       `json:"invertMatch,omitempty"`
+	ExactMatch   *string     `json:"exactMatch,omitempty"`
+	RegexMatch   *string     `json:"regexMatch,omitempty"`
+	PrefixMatch  *string     `json:"prefixMatch,omitempty"`
+	SuffixMatch  *string     `json:"suffixMatch,omitempty"`
+	RangeMatch   *Int64Range `json:"rangeMatch,omitempty"`
+	PresentMatch *bool       `json:"presentMatch,omitempty"`
+}
+
+// Int64Range is a range for header range match.
+type Int64Range struct {
+	Start int64 `json:"start"`
+	End   int64 `json:"end"`
+}
+
+// ServiceUpdate contains information received from LDS and RDS responses,
+// which is of interest to the registered service watcher.
+type ServiceUpdate struct {
+	// Routes contain matchers+actions to route RPCs.
+	Routes []*Route
+}
+
+// ClusterUpdate contains information from a received CDS response, which is of
+// interest to the registered CDS watcher.
+type ClusterUpdate struct {
+	// ServiceName is the service name corresponding to the clusterName which
+	// is being watched for through CDS.
+	ServiceName string
+	// EnableLRS indicates whether or not load should be reported through LRS.
+	EnableLRS bool
+}
+
+// OverloadDropConfig contains the config to drop overloads.
+type OverloadDropConfig struct {
+	Category    string
+	Numerator   uint32
+	Denominator uint32
+}
+
+// EndpointHealthStatus represents the health status of an endpoint.
+type EndpointHealthStatus int32
+
+const (
+	// EndpointHealthStatusUnknown represents HealthStatus UNKNOWN.
+	EndpointHealthStatusUnknown EndpointHealthStatus = iota
+	// EndpointHealthStatusHealthy represents HealthStatus HEALTHY.
+	EndpointHealthStatusHealthy
+	// EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY.
+	EndpointHealthStatusUnhealthy
+	// EndpointHealthStatusDraining represents HealthStatus DRAINING.
+	EndpointHealthStatusDraining
+	// EndpointHealthStatusTimeout represents HealthStatus TIMEOUT.
+	EndpointHealthStatusTimeout
+	// EndpointHealthStatusDegraded represents HealthStatus DEGRADED.
+	EndpointHealthStatusDegraded
+)
+
+// Endpoint contains information of an endpoint.
+type Endpoint struct {
+	Address      string
+	HealthStatus EndpointHealthStatus
+	Weight       uint32
+}
+
+// Locality contains information of a locality.
+type Locality struct {
+	Endpoints []Endpoint
+	ID        internal.LocalityID
+	Priority  uint32
+	Weight    uint32
+}
+
+// EndpointsUpdate contains an EDS update.
+type EndpointsUpdate struct {
+	Drops      []OverloadDropConfig
+	Localities []Locality
+}
+
 // Options provides all parameters required for the creation of an xDS client.
 type Options struct {
 	// Config contains a fully populated bootstrap config. It is the
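
Among the newly exported types, Route models both the matchers and the action: the Action map is keyed by cluster name, with the weights as values. A small sketch of a RouteConfigUpdate describing a 90/10 weighted split for one prefix (cluster names and weights are illustrative):

	prefix := "/service/"
	update := RouteConfigUpdate{
		Routes: []*Route{{
			Prefix: &prefix,
			Action: map[string]uint32{"cluster-a": 90, "cluster-b": 10},
		}},
	}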
@@ -49,16 +236,13 @@ type Options struct {
 	TargetName string
 }
 
-// Interface to be overridden in tests.
-type xdsv2Client interface {
-	addWatch(resourceType, resourceName string)
-	removeWatch(resourceType, resourceName string)
-	close()
-}
-
 // Function to be overridden in tests.
-var newXDSV2Client = func(parent *Client, cc *grpc.ClientConn, nodeProto *corepb.Node, backoff func(int) time.Duration, logger *grpclog.PrefixLogger) xdsv2Client {
-	return newV2Client(parent, cc, nodeProto, backoff, logger)
+var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) {
+	cb := getAPIClientBuilder(apiVersion)
+	if cb == nil {
+		return nil, fmt.Errorf("no client builder for xDS API version: %v", apiVersion)
+	}
+	return cb.Build(cc, opts)
 }
 
 // Client is a full fledged gRPC client which queries a set of discovery APIs
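
newAPIClient stays a package-level variable ("Function to be overridden in tests"), so tests can swap in a transport double without dialing a management server. A sketch of such an override, under the assumption that fake is any test double satisfying APIClient:

	func overrideNewAPIClient(fake APIClient) (restore func()) {
		orig := newAPIClient
		newAPIClient = func(version.TransportAPI, *grpc.ClientConn, BuildOptions) (APIClient, error) {
			return fake, nil
		}
		return func() { newAPIClient = orig }
	}

	// In a test: defer overrideNewAPIClient(fakeClient)()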
@@ -68,20 +252,25 @@ var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) {
 // A single client object will be shared by the xds resolver and balancer
 // implementations. But the same client can only be shared by the same parent
 // ClientConn.
+//
+// Implements UpdateHandler interface.
+// TODO(easwars): Make a wrapper struct which implements this interface in the
+// style of ccBalancerWrapper so that the Client type does not implement these
+// exported methods.
 type Client struct {
-	done *grpcsync.Event
-	opts Options
-	cc   *grpc.ClientConn // Connection to the xDS server
-	v2c  xdsv2Client      // Actual xDS client implementation using the v2 API
+	done      *grpcsync.Event
+	opts      Options
+	cc        *grpc.ClientConn // Connection to the xDS server
+	apiClient APIClient
 
 	logger *grpclog.PrefixLogger
 
 	updateCh    *buffer.Unbounded // chan *watcherInfoWithUpdate
 	mu          sync.Mutex
 	ldsWatchers map[string]map[*watchInfo]bool
-	ldsCache    map[string]ldsUpdate
+	ldsCache    map[string]ListenerUpdate
 	rdsWatchers map[string]map[*watchInfo]bool
-	rdsCache    map[string]rdsUpdate
+	rdsCache    map[string]RouteConfigUpdate
 	cdsWatchers map[string]map[*watchInfo]bool
 	cdsCache    map[string]ClusterUpdate
 	edsWatchers map[string]map[*watchInfo]bool
@@ -99,6 +288,17 @@ func New(opts Options) (*Client, error) {
 		return nil, errors.New("xds: no node_proto provided in options")
 	}
 
+	switch opts.Config.TransportAPI {
+	case version.TransportV2:
+		if _, ok := opts.Config.NodeProto.(*v2corepb.Node); !ok {
+			return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", opts.Config.NodeProto, opts.Config.TransportAPI)
+		}
+	case version.TransportV3:
+		if _, ok := opts.Config.NodeProto.(*v3corepb.Node); !ok {
+			return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", opts.Config.NodeProto, opts.Config.TransportAPI)
+		}
+	}
+
 	dopts := []grpc.DialOption{
 		opts.Config.Creds,
 		grpc.WithKeepaliveParams(keepalive.ClientParameters{
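
New now fails fast when the bootstrap NodeProto type disagrees with the configured transport version: a v2 core.Node must accompany TransportV2, and a v3 Node TransportV3. A sketch of options that pass this check (assuming a populated bootstrap config; field names as used in this diff):

	opts := Options{Config: bootstrap.Config{
		TransportAPI: version.TransportV2,
		NodeProto:    &v2corepb.Node{Id: "grpc-node"}, // must be *v2corepb.Node for TransportV2
		// BalancerName, Creds, etc. omitted.
	}}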
@@ -114,9 +314,9 @@ func New(opts Options) (*Client, error) {
 
 		updateCh:    buffer.NewUnbounded(),
 		ldsWatchers: make(map[string]map[*watchInfo]bool),
-		ldsCache:    make(map[string]ldsUpdate),
+		ldsCache:    make(map[string]ListenerUpdate),
 		rdsWatchers: make(map[string]map[*watchInfo]bool),
-		rdsCache:    make(map[string]rdsUpdate),
+		rdsCache:    make(map[string]RouteConfigUpdate),
 		cdsWatchers: make(map[string]map[*watchInfo]bool),
 		cdsCache:    make(map[string]ClusterUpdate),
 		edsWatchers: make(map[string]map[*watchInfo]bool),
@@ -132,13 +332,16 @@ func New(opts Options) (*Client, error) {
 	c.logger = prefixLogger((c))
 	c.logger.Infof("Created ClientConn to xDS server: %s", opts.Config.BalancerName)
 
-	if opts.Config.TransportAPI == version.TransportV2 {
-		c.v2c = newXDSV2Client(c, cc, opts.Config.NodeProto.(*corepb.Node), backoff.DefaultExponential.Backoff, c.logger)
-	} else {
-		// TODO(easwars): Remove this once v3Client is ready.
-		return nil, errors.New("xds v3 client is not yet supported")
+	apiClient, err := newAPIClient(opts.Config.TransportAPI, cc, BuildOptions{
+		Parent:    c,
+		NodeProto: opts.Config.NodeProto,
+		Backoff:   backoff.DefaultExponential.Backoff,
+		Logger:    c.logger,
+	})
+	if err != nil {
+		return nil, err
 	}
 
+	c.apiClient = apiClient
 	c.logger.Infof("Created")
 	go c.run()
 	return c, nil
@@ -173,7 +376,7 @@ func (c *Client) Close() {
 	c.done.Fire()
 	// TODO: Should we invoke the registered callbacks here with an error that
 	// the client is closed?
-	c.v2c.close()
+	c.apiClient.Close()
 	c.cc.Close()
 	c.logger.Infof("Shutdown")
 }

@@ -18,6 +18,8 @@
 
 package client
 
+import "google.golang.org/grpc/xds/internal/version"
+
 type watcherInfoWithUpdate struct {
 	wi     *watchInfo
 	update interface{}
@@ -45,19 +47,19 @@ func (c *Client) callCallback(wiu *watcherInfoWithUpdate) {
 	// canceled, and the user needs to take care of it.
 	var ccb func()
 	switch wiu.wi.typeURL {
-	case ldsURL:
+	case version.V2ListenerURL:
 		if s, ok := c.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
-			ccb = func() { wiu.wi.ldsCallback(wiu.update.(ldsUpdate), wiu.err) }
+			ccb = func() { wiu.wi.ldsCallback(wiu.update.(ListenerUpdate), wiu.err) }
 		}
-	case rdsURL:
+	case version.V2RouteConfigURL:
 		if s, ok := c.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
-			ccb = func() { wiu.wi.rdsCallback(wiu.update.(rdsUpdate), wiu.err) }
+			ccb = func() { wiu.wi.rdsCallback(wiu.update.(RouteConfigUpdate), wiu.err) }
 		}
-	case cdsURL:
+	case version.V2ClusterURL:
 		if s, ok := c.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
 			ccb = func() { wiu.wi.cdsCallback(wiu.update.(ClusterUpdate), wiu.err) }
 		}
-	case edsURL:
+	case version.V2EndpointsURL:
 		if s, ok := c.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
 			ccb = func() { wiu.wi.edsCallback(wiu.update.(EndpointsUpdate), wiu.err) }
 		}
@@ -69,12 +71,12 @@ func (c *Client) callCallback(wiu *watcherInfoWithUpdate) {
 	}
 }
 
-// newLDSUpdate is called by the underlying xdsv2Client when it receives an xDS
-// response.
+// NewListeners is called by the underlying xdsAPIClient when it receives an
+// xDS response.
 //
 // A response can contain multiple resources. They will be parsed and put in a
 // map from resource name to the resource content.
-func (c *Client) newLDSUpdate(updates map[string]ldsUpdate) {
+func (c *Client) NewListeners(updates map[string]ListenerUpdate) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
@@ -104,12 +106,12 @@ func (c *Client) NewListeners(updates map[string]ListenerUpdate) {
 	// last watch is canceled.
 }
 
-// newRDSUpdate is called by the underlying xdsv2Client when it receives an xDS
-// response.
+// NewRouteConfigs is called by the underlying xdsAPIClient when it receives an
+// xDS response.
 //
 // A response can contain multiple resources. They will be parsed and put in a
 // map from resource name to the resource content.
-func (c *Client) newRDSUpdate(updates map[string]rdsUpdate) {
+func (c *Client) NewRouteConfigs(updates map[string]RouteConfigUpdate) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
@@ -125,12 +127,12 @@ func (c *Client) NewRouteConfigs(updates map[string]RouteConfigUpdate) {
 	}
 }
 
-// newCDSUpdate is called by the underlying xdsv2Client when it receives an xDS
+// NewClusters is called by the underlying xdsAPIClient when it receives an xDS
 // response.
 //
 // A response can contain multiple resources. They will be parsed and put in a
 // map from resource name to the resource content.
-func (c *Client) newCDSUpdate(updates map[string]ClusterUpdate) {
+func (c *Client) NewClusters(updates map[string]ClusterUpdate) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
@@ -160,12 +162,12 @@ func (c *Client) NewClusters(updates map[string]ClusterUpdate) {
 	// last watch is canceled.
 }
 
-// newEDSUpdate is called by the underlying xdsv2Client when it receives an xDS
-// response.
+// NewEndpoints is called by the underlying xdsAPIClient when it receives an
+// xDS response.
 //
 // A response can contain multiple resources. They will be parsed and put in a
 // map from resource name to the resource content.
-func (c *Client) newEDSUpdate(updates map[string]EndpointsUpdate) {
+func (c *Client) NewEndpoints(updates map[string]EndpointsUpdate) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
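
The four exported New* methods above make Client itself the UpdateHandler that transport clients report into. Any other implementation works the same way; a trivial hedged sketch, for illustration only (the real parent is the Client; imports "log"):

	type loggingHandler struct{}

	func (loggingHandler) NewListeners(u map[string]ListenerUpdate)       { log.Printf("LDS: %v", u) }
	func (loggingHandler) NewRouteConfigs(u map[string]RouteConfigUpdate) { log.Printf("RDS: %v", u) }
	func (loggingHandler) NewClusters(u map[string]ClusterUpdate)         { log.Printf("CDS: %v", u) }
	func (loggingHandler) NewEndpoints(u map[string]EndpointsUpdate)      { log.Printf("EDS: %v", u) }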
@ -0,0 +1,312 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package client

import (
"testing"

v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"github.com/golang/protobuf/proto"
anypb "github.com/golang/protobuf/ptypes/any"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/grpc/xds/internal/version"
)

func (s) TestValidateCluster(t *testing.T) {
const (
clusterName = "clusterName"
serviceName = "service"
)
var (
emptyUpdate = ClusterUpdate{ServiceName: "", EnableLRS: false}
)

tests := []struct {
name string
cluster *v3clusterpb.Cluster
wantUpdate ClusterUpdate
wantErr bool
}{
{
name: "non-eds-cluster-type",
cluster: &v3clusterpb.Cluster{
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC},
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
EdsConfig: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
Ads: &v3corepb.AggregatedConfigSource{},
},
},
},
LbPolicy: v3clusterpb.Cluster_LEAST_REQUEST,
},
wantUpdate: emptyUpdate,
wantErr: true,
},
{
name: "no-eds-config",
cluster: &v3clusterpb.Cluster{
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
},
wantUpdate: emptyUpdate,
wantErr: true,
},
{
name: "no-ads-config-source",
cluster: &v3clusterpb.Cluster{
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{},
LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
},
wantUpdate: emptyUpdate,
wantErr: true,
},
{
name: "non-round-robin-lb-policy",
cluster: &v3clusterpb.Cluster{
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
EdsConfig: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
Ads: &v3corepb.AggregatedConfigSource{},
},
},
},
LbPolicy: v3clusterpb.Cluster_LEAST_REQUEST,
},
wantUpdate: emptyUpdate,
wantErr: true,
},
{
name: "happy-case-no-service-name-no-lrs",
cluster: &v3clusterpb.Cluster{
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
EdsConfig: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
Ads: &v3corepb.AggregatedConfigSource{},
},
},
},
LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
},
wantUpdate: emptyUpdate,
},
{
name: "happy-case-no-lrs",
cluster: &v3clusterpb.Cluster{
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
EdsConfig: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
Ads: &v3corepb.AggregatedConfigSource{},
},
},
ServiceName: serviceName,
},
LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
},
wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: false},
},
{
name: "happiest-case",
cluster: &v3clusterpb.Cluster{
Name: clusterName,
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
EdsConfig: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
Ads: &v3corepb.AggregatedConfigSource{},
},
},
ServiceName: serviceName,
},
LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
LrsServer: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{
Self: &v3corepb.SelfConfigSource{},
},
},
},
wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
update, err := validateCluster(test.cluster)
if ((err != nil) != test.wantErr) || !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) {
t.Errorf("validateCluster(%+v) = (%v, %v), wantErr: (%v, %v)", test.cluster, update, err, test.wantUpdate, test.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
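
The table above pins down the validation rules: the cluster must be of type EDS, its EDS config source must be ADS, and the LB policy must be ROUND_ROBIN. A hedged sketch of the checks these cases imply (not the actual implementation; error wording and ordering are assumptions):

// validateClusterSketch mirrors the expectations encoded in the test table.
// Assumes the go-control-plane getters for v3clusterpb and the ClusterUpdate
// type from this diff; "fmt" is assumed imported.
func validateClusterSketch(cl *v3clusterpb.Cluster) (ClusterUpdate, error) {
	switch {
	case cl.GetType() != v3clusterpb.Cluster_EDS:
		return ClusterUpdate{}, fmt.Errorf("unexpected cluster type %v", cl.GetType())
	case cl.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil:
		return ClusterUpdate{}, fmt.Errorf("EDS config source is not ADS")
	case cl.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN:
		return ClusterUpdate{}, fmt.Errorf("unexpected LB policy %v", cl.GetLbPolicy())
	}
	return ClusterUpdate{
		ServiceName: cl.GetEdsClusterConfig().GetServiceName(),
		EnableLRS:   cl.GetLrsServer().GetSelf() != nil, // "happiest-case" sets LrsServer to Self
	}, nil
}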

func (s) TestUnmarshalCluster(t *testing.T) {
const (
v2ClusterName = "v2clusterName"
v3ClusterName = "v3clusterName"
v2Service = "v2Service"
v3Service = "v2Service"
|
||||
)
var (
v2Cluster = &v2xdspb.Cluster{
Name: v2ClusterName,
ClusterDiscoveryType: &v2xdspb.Cluster_Type{Type: v2xdspb.Cluster_EDS},
EdsClusterConfig: &v2xdspb.Cluster_EdsClusterConfig{
EdsConfig: &v2corepb.ConfigSource{
ConfigSourceSpecifier: &v2corepb.ConfigSource_Ads{
Ads: &v2corepb.AggregatedConfigSource{},
},
},
ServiceName: v2Service,
},
LbPolicy: v2xdspb.Cluster_ROUND_ROBIN,
LrsServer: &v2corepb.ConfigSource{
ConfigSourceSpecifier: &v2corepb.ConfigSource_Self{
Self: &v2corepb.SelfConfigSource{},
},
},
}

v3Cluster = &v3clusterpb.Cluster{
Name: v3ClusterName,
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
EdsConfig: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
Ads: &v3corepb.AggregatedConfigSource{},
},
},
ServiceName: v3Service,
},
LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
LrsServer: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{
Self: &v3corepb.SelfConfigSource{},
},
},
}
)

tests := []struct {
name string
resources []*anypb.Any
wantUpdate map[string]ClusterUpdate
wantErr bool
}{
{
name: "non-cluster resource type",
resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}},
wantErr: true,
},
{
name: "badly marshaled cluster resource",
resources: []*anypb.Any{
{
TypeUrl: version.V3ClusterURL,
Value: []byte{1, 2, 3, 4},
},
},
wantErr: true,
},
{
name: "bad cluster resource",
resources: []*anypb.Any{
{
TypeUrl: version.V3ClusterURL,
Value: func() []byte {
cl := &v3clusterpb.Cluster{
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC},
}
mcl, _ := proto.Marshal(cl)
return mcl
}(),
},
},
wantErr: true,
},
{
name: "v2 cluster",
resources: []*anypb.Any{
{
TypeUrl: version.V3ClusterURL,
Value: func() []byte {
mcl, _ := proto.Marshal(v2Cluster)
return mcl
}(),
},
},
wantUpdate: map[string]ClusterUpdate{
v2ClusterName: {ServiceName: v2Service, EnableLRS: true},
},
},
{
name: "v3 cluster",
resources: []*anypb.Any{
{
TypeUrl: version.V3ClusterURL,
Value: func() []byte {
mcl, _ := proto.Marshal(v3Cluster)
return mcl
}(),
},
},
wantUpdate: map[string]ClusterUpdate{
v3ClusterName: {ServiceName: v3Service, EnableLRS: true},
},
},
{
name: "multiple clusters",
resources: []*anypb.Any{
{
TypeUrl: version.V3ClusterURL,
Value: func() []byte {
mcl, _ := proto.Marshal(v2Cluster)
return mcl
}(),
},
{
TypeUrl: version.V3ClusterURL,
Value: func() []byte {
mcl, _ := proto.Marshal(v3Cluster)
return mcl
}(),
},
},
wantUpdate: map[string]ClusterUpdate{
v2ClusterName: {ServiceName: v2Service, EnableLRS: true},
v3ClusterName: {ServiceName: v3Service, EnableLRS: true},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
update, err := UnmarshalCluster(test.resources, nil)
if ((err != nil) != test.wantErr) || !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) {
t.Errorf("UnmarshalCluster(%v) = (%+v, %v) want (%+v, %v)", test.resources, update, err, test.wantUpdate, test.wantErr)
}
})
}
}
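
These cases also fix the shape of the unmarshal loop: reject non-cluster type URLs, reject bytes that fail to unmarshal, validate each cluster, and collect results keyed by cluster name. A rough sketch under those assumptions (the real UnmarshalCluster also takes a logger and may handle the v2/v3 URLs differently):

// unmarshalClusterSketch is illustrative only; "fmt" is assumed imported.
func unmarshalClusterSketch(resources []*anypb.Any) (map[string]ClusterUpdate, error) {
	update := make(map[string]ClusterUpdate)
	for _, r := range resources {
		if r.GetTypeUrl() != version.V2ClusterURL && r.GetTypeUrl() != version.V3ClusterURL {
			return nil, fmt.Errorf("unexpected resource type %q", r.GetTypeUrl())
		}
		cluster := &v3clusterpb.Cluster{}
		if err := proto.Unmarshal(r.GetValue(), cluster); err != nil {
			return nil, err // covers the "badly marshaled" case
		}
		cu, err := validateCluster(cluster) // fails for e.g. non-EDS clusters
		if err != nil {
			return nil, err
		}
		update[cluster.GetName()] = cu
	}
	return update, nil
}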

@ -0,0 +1,312 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package client

import (
"fmt"
"net"
"strconv"
"testing"

v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/golang/protobuf/proto"
anypb "github.com/golang/protobuf/ptypes/any"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/grpc/xds/internal"
"google.golang.org/grpc/xds/internal/version"
)

func (s) TestEDSParseRespProto(t *testing.T) {
tests := []struct {
name string
m *v3endpointpb.ClusterLoadAssignment
want EndpointsUpdate
wantErr bool
}{
{
name: "missing-priority",
m: func() *v3endpointpb.ClusterLoadAssignment {
clab0 := newClaBuilder("test", nil)
clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil)
clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil)
return clab0.Build()
}(),
want: EndpointsUpdate{},
wantErr: true,
},
{
name: "missing-locality-ID",
m: func() *v3endpointpb.ClusterLoadAssignment {
clab0 := newClaBuilder("test", nil)
clab0.addLocality("", 1, 0, []string{"addr1:314"}, nil)
return clab0.Build()
}(),
want: EndpointsUpdate{},
wantErr: true,
},
{
name: "good",
m: func() *v3endpointpb.ClusterLoadAssignment {
clab0 := newClaBuilder("test", nil)
clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{
Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY},
Weight: []uint32{271},
})
clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{
Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING},
Weight: []uint32{828},
})
return clab0.Build()
}(),
want: EndpointsUpdate{
Drops: nil,
Localities: []Locality{
{
Endpoints: []Endpoint{{
Address: "addr1:314",
HealthStatus: EndpointHealthStatusUnhealthy,
Weight: 271,
}},
ID: internal.LocalityID{SubZone: "locality-1"},
Priority: 1,
Weight: 1,
},
{
Endpoints: []Endpoint{{
Address: "addr2:159",
HealthStatus: EndpointHealthStatusDraining,
Weight: 828,
}},
ID: internal.LocalityID{SubZone: "locality-2"},
Priority: 0,
Weight: 1,
},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseEDSRespProto(tt.m)
if (err != nil) != tt.wantErr {
t.Errorf("parseEDSRespProto() error = %v, wantErr %v", err, tt.wantErr)
return
}
if d := cmp.Diff(got, tt.want); d != "" {
t.Errorf("parseEDSRespProto() got = %v, want %v, diff: %v", got, tt.want, d)
}
})
}
}

func (s) TestUnmarshalEndpoints(t *testing.T) {
tests := []struct {
name string
resources []*anypb.Any
wantUpdate map[string]EndpointsUpdate
wantErr bool
}{
{
name: "non-clusterLoadAssignment resource type",
resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}},
wantErr: true,
},
{
name: "badly marshaled clusterLoadAssignment resource",
resources: []*anypb.Any{
{
TypeUrl: version.V3EndpointsURL,
Value: []byte{1, 2, 3, 4},
},
},
wantErr: true,
},
{
name: "bad endpoints resource",
resources: []*anypb.Any{
{
TypeUrl: version.V3EndpointsURL,
Value: func() []byte {
clab0 := newClaBuilder("test", nil)
clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil)
clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil)
e := clab0.Build()
me, _ := proto.Marshal(e)
return me
}(),
},
},
wantErr: true,
},
{
name: "v3 endpoints",
resources: []*anypb.Any{
{
TypeUrl: version.V3EndpointsURL,
Value: func() []byte {
clab0 := newClaBuilder("test", nil)
clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{
Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY},
Weight: []uint32{271},
})
clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{
Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING},
Weight: []uint32{828},
})
e := clab0.Build()
me, _ := proto.Marshal(e)
return me
}(),
},
},
wantUpdate: map[string]EndpointsUpdate{
"test": {
Drops: nil,
Localities: []Locality{
{
Endpoints: []Endpoint{{
Address: "addr1:314",
HealthStatus: EndpointHealthStatusUnhealthy,
Weight: 271,
}},
ID: internal.LocalityID{SubZone: "locality-1"},
Priority: 1,
Weight: 1,
},
{
Endpoints: []Endpoint{{
Address: "addr2:159",
HealthStatus: EndpointHealthStatusDraining,
Weight: 828,
}},
ID: internal.LocalityID{SubZone: "locality-2"},
Priority: 0,
Weight: 1,
},
},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
update, err := UnmarshalEndpoints(test.resources, nil)
if ((err != nil) != test.wantErr) || !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) {
t.Errorf("UnmarshalEndpoints(%v) = (%+v, %v) want (%+v, %v)", test.resources, update, err, test.wantUpdate, test.wantErr)
}
})
}
}

// claBuilder builds a ClusterLoadAssignment, aka EDS
// response.
type claBuilder struct {
v *v3endpointpb.ClusterLoadAssignment
}

// newClaBuilder creates a claBuilder.
func newClaBuilder(clusterName string, dropPercents []uint32) *claBuilder {
var drops []*v3endpointpb.ClusterLoadAssignment_Policy_DropOverload
for i, d := range dropPercents {
drops = append(drops, &v3endpointpb.ClusterLoadAssignment_Policy_DropOverload{
Category: fmt.Sprintf("test-drop-%d", i),
DropPercentage: &v3typepb.FractionalPercent{
Numerator: d,
Denominator: v3typepb.FractionalPercent_HUNDRED,
},
})
}

return &claBuilder{
v: &v3endpointpb.ClusterLoadAssignment{
ClusterName: clusterName,
Policy: &v3endpointpb.ClusterLoadAssignment_Policy{
DropOverloads: drops,
},
},
}
}

// addLocalityOptions contains options when adding locality to the builder.
type addLocalityOptions struct {
Health []v3corepb.HealthStatus
Weight []uint32
}

// addLocality adds a locality to the builder.
func (clab *claBuilder) addLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *addLocalityOptions) {
var lbEndPoints []*v3endpointpb.LbEndpoint
for i, a := range addrsWithPort {
host, portStr, err := net.SplitHostPort(a)
if err != nil {
panic("failed to split " + a)
}
port, err := strconv.Atoi(portStr)
if err != nil {
panic("failed to atoi " + portStr)
}

lbe := &v3endpointpb.LbEndpoint{
HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{
Endpoint: &v3endpointpb.Endpoint{
Address: &v3corepb.Address{
Address: &v3corepb.Address_SocketAddress{
SocketAddress: &v3corepb.SocketAddress{
Protocol: v3corepb.SocketAddress_TCP,
Address: host,
PortSpecifier: &v3corepb.SocketAddress_PortValue{
PortValue: uint32(port)}}}}}},
}
if opts != nil {
if i < len(opts.Health) {
lbe.HealthStatus = opts.Health[i]
}
if i < len(opts.Weight) {
lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]}
}
}
lbEndPoints = append(lbEndPoints, lbe)
}

var localityID *v3corepb.Locality
if subzone != "" {
localityID = &v3corepb.Locality{
Region: "",
Zone: "",
SubZone: subzone,
}
}

clab.v.Endpoints = append(clab.v.Endpoints, &v3endpointpb.LocalityLbEndpoints{
Locality: localityID,
LbEndpoints: lbEndPoints,
LoadBalancingWeight: &wrapperspb.UInt32Value{Value: weight},
Priority: priority,
})
}

// Build builds ClusterLoadAssignment.
func (clab *claBuilder) Build() *v3endpointpb.ClusterLoadAssignment {
return clab.v
}
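
For example, the builder can assemble an assignment with one 10% drop category and two localities at different priorities (the cluster name and addresses here are arbitrary):

// Usage sketch for the builder above.
clab := newClaBuilder("example-cluster", []uint32{10})
clab.addLocality("sub-zone-a", 1, 0, []string{"10.0.0.1:8080"}, nil)
clab.addLocality("sub-zone-b", 1, 1, []string{"10.0.0.2:8080"}, &addLocalityOptions{
	Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_HEALTHY},
	Weight: []uint32{100},
})
cla := clab.Build() // *v3endpointpb.ClusterLoadAssignment, ready for proto.Marshal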

@ -0,0 +1,171 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package client

import (
"testing"

v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
v2httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
v2listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2"
v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"github.com/golang/protobuf/proto"
anypb "github.com/golang/protobuf/ptypes/any"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/grpc/xds/internal/version"
)

func (s) TestUnmarshalListener(t *testing.T) {
const (
v2LDSTarget = "lds.target.good:2222"
v3LDSTarget = "lds.target.good:3333"
v2RouteConfigName = "v2RouteConfig"
v3RouteConfigName = "v3RouteConfig"
)

var (
v2Lis = &anypb.Any{
TypeUrl: version.V2ListenerURL,
Value: func() []byte {
cm := &v2httppb.HttpConnectionManager{
RouteSpecifier: &v2httppb.HttpConnectionManager_Rds{
Rds: &v2httppb.Rds{
ConfigSource: &v2corepb.ConfigSource{
ConfigSourceSpecifier: &v2corepb.ConfigSource_Ads{Ads: &v2corepb.AggregatedConfigSource{}},
},
RouteConfigName: v2RouteConfigName,
},
},
}
mcm, _ := proto.Marshal(cm)
lis := &v2xdspb.Listener{
Name: v2LDSTarget,
ApiListener: &v2listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V2HTTPConnManagerURL,
Value: mcm,
},
},
}
mLis, _ := proto.Marshal(lis)
return mLis
}(),
}
v3Lis = &anypb.Any{
TypeUrl: version.V3ListenerURL,
Value: func() []byte {
cm := &v3httppb.HttpConnectionManager{
RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{
Rds: &v3httppb.Rds{
ConfigSource: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}},
},
RouteConfigName: v3RouteConfigName,
},
},
}
mcm, _ := proto.Marshal(cm)
lis := &v3listenerpb.Listener{
Name: v3LDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V3HTTPConnManagerURL,
Value: mcm,
},
},
}
mLis, _ := proto.Marshal(lis)
return mLis
}(),
}
)

tests := []struct {
name string
resources []*anypb.Any
wantUpdate map[string]ListenerUpdate
wantErr bool
}{
{
name: "non-listener resource",
resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}},
wantErr: true,
},
{
name: "badly marshaled listener resource",
resources: []*anypb.Any{
{
TypeUrl: version.V3ListenerURL,
Value: func() []byte {
lis := &v3listenerpb.Listener{
Name: v3LDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V3HTTPConnManagerURL,
Value: []byte{1, 2, 3, 4},
},
},
}
mLis, _ := proto.Marshal(lis)
return mLis
}(),
},
},
wantErr: true,
},
{
name: "empty resource list",
},
{
name: "v2 listener resource",
resources: []*anypb.Any{v2Lis},
wantUpdate: map[string]ListenerUpdate{
v2LDSTarget: {RouteConfigName: v2RouteConfigName},
},
},
{
name: "v3 listener resource",
resources: []*anypb.Any{v3Lis},
wantUpdate: map[string]ListenerUpdate{
v3LDSTarget: {RouteConfigName: v3RouteConfigName},
},
},
{
name: "multiple listener resources",
resources: []*anypb.Any{v2Lis, v3Lis},
wantUpdate: map[string]ListenerUpdate{
v2LDSTarget: {RouteConfigName: v2RouteConfigName},
v3LDSTarget: {RouteConfigName: v3RouteConfigName},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
update, err := UnmarshalListener(test.resources, nil)
if ((err != nil) != test.wantErr) || !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) {
t.Errorf("UnmarshalListener(%v) = (%v, %v) want (%v, %v)", test.resources, update, err, test.wantUpdate, test.wantErr)
}
})
}
}

@ -0,0 +1,854 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package client

import (
"testing"

v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
v2routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/golang/protobuf/proto"
anypb "github.com/golang/protobuf/ptypes/any"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/grpc/xds/internal/version"
)

func (s) TestGetRouteConfigFromListener(t *testing.T) {
const (
goodLDSTarget = "lds.target.good:1111"
goodRouteConfigName = "GoodRouteConfig"
)

tests := []struct {
name string
lis *v3listenerpb.Listener
wantRoute string
wantErr bool
}{
{
name: "no-apiListener-field",
lis: &v3listenerpb.Listener{},
wantRoute: "",
wantErr: true,
},
{
name: "badly-marshaled-apiListener",
lis: &v3listenerpb.Listener{
Name: goodLDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V3HTTPConnManagerURL,
Value: []byte{1, 2, 3, 4},
},
},
},
wantRoute: "",
wantErr: true,
},
{
name: "wrong-type-in-apiListener",
lis: &v3listenerpb.Listener{
Name: goodLDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V2ListenerURL,
Value: func() []byte {
cm := &v3httppb.HttpConnectionManager{
RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{
Rds: &v3httppb.Rds{
ConfigSource: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}},
},
RouteConfigName: goodRouteConfigName}}}
mcm, _ := proto.Marshal(cm)
return mcm
}()}}},
wantRoute: "",
wantErr: true,
},
{
name: "empty-httpConnMgr-in-apiListener",
lis: &v3listenerpb.Listener{
Name: goodLDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V3HTTPConnManagerURL,
Value: func() []byte {
cm := &v3httppb.HttpConnectionManager{
RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{
Rds: &v3httppb.Rds{},
},
}
mcm, _ := proto.Marshal(cm)
return mcm
}()}}},
wantRoute: "",
wantErr: true,
},
{
name: "scopedRoutes-routeConfig-in-apiListener",
lis: &v3listenerpb.Listener{
Name: goodLDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V3HTTPConnManagerURL,
Value: func() []byte {
cm := &v3httppb.HttpConnectionManager{
RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{},
}
mcm, _ := proto.Marshal(cm)
return mcm
}()}}},
wantRoute: "",
wantErr: true,
},
{
name: "rds.ConfigSource-in-apiListener-is-not-ADS",
lis: &v3listenerpb.Listener{
Name: goodLDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V3HTTPConnManagerURL,
Value: func() []byte {
cm := &v3httppb.HttpConnectionManager{
RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{
Rds: &v3httppb.Rds{
ConfigSource: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Path{
Path: "/some/path",
},
},
RouteConfigName: goodRouteConfigName}}}
mcm, _ := proto.Marshal(cm)
return mcm
}()}}},
wantRoute: "",
wantErr: true,
},
{
name: "goodListener",
lis: &v3listenerpb.Listener{
Name: goodLDSTarget,
ApiListener: &v3listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: version.V3HTTPConnManagerURL,
Value: func() []byte {
cm := &v3httppb.HttpConnectionManager{
RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{
Rds: &v3httppb.Rds{
ConfigSource: &v3corepb.ConfigSource{
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}},
},
RouteConfigName: goodRouteConfigName}}}
mcm, _ := proto.Marshal(cm)
return mcm
}()}}},
wantRoute: goodRouteConfigName,
wantErr: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
gotRoute, err := getRouteConfigNameFromListener(test.lis, nil)
if (err != nil) != test.wantErr || gotRoute != test.wantRoute {
t.Errorf("getRouteConfigNameFromListener(%+v) = (%s, %v), want (%s, %v)", test.lis, gotRoute, err, test.wantRoute, test.wantErr)
}
})
}
}

func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
const (
uninterestingDomain = "uninteresting.domain"
uninterestingClusterName = "uninterestingClusterName"
ldsTarget = "lds.target.good:1111"
routeName = "routeName"
clusterName = "clusterName"
)

tests := []struct {
name string
rc *v3routepb.RouteConfiguration
wantUpdate RouteConfigUpdate
wantError bool
}{
{
name: "no-virtual-hosts-in-rc",
rc: &v3routepb.RouteConfiguration{},
wantError: true,
},
{
name: "no-domains-in-rc",
rc: &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{{}},
},
wantError: true,
},
{
name: "non-matching-domain-in-rc",
rc: &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{
{Domains: []string{uninterestingDomain}},
},
},
wantError: true,
},
{
name: "no-routes-in-rc",
rc: &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{
{Domains: []string{ldsTarget}},
},
},
wantError: true,
},
{
name: "default-route-match-field-is-nil",
rc: &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{
{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{
{
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName},
},
},
},
},
},
},
},
wantError: true,
},
{
name: "default-route-match-field-is-non-nil",
rc: &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{
{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{},
Action: &v3routepb.Route_Route{},
},
},
},
},
},
wantError: true,
},
{
name: "default-route-routeaction-field-is-nil",
rc: &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{
{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{{}},
},
},
},
wantError: true,
},
{
name: "default-route-cluster-field-is-empty",
rc: &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{
{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{
{
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_ClusterHeader{},
},
},
},
},
},
},
},
wantError: true,
},
{
// default route's match sets case-sensitive to false.
name: "good-route-config-but-with-casesensitive-false",
rc: &v3routepb.RouteConfiguration{
Name: routeName,
VirtualHosts: []*v3routepb.VirtualHost{{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{{
Match: &v3routepb.RouteMatch{
PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"},
CaseSensitive: &wrapperspb.BoolValue{Value: false},
},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName},
}}}}}}},
wantError: true,
},
{
name: "good-route-config-with-empty-string-route",
rc: &v3routepb.RouteConfiguration{
Name: routeName,
VirtualHosts: []*v3routepb.VirtualHost{
{
Domains: []string{uninterestingDomain},
Routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: uninterestingClusterName},
},
},
},
},
},
{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName},
},
},
},
},
},
},
},
wantUpdate: RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{clusterName: 1}}}},
},
{
// default route's match is not empty string, but "/".
name: "good-route-config-with-slash-string-route",
rc: &v3routepb.RouteConfiguration{
Name: routeName,
VirtualHosts: []*v3routepb.VirtualHost{
{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName},
},
},
},
},
},
},
},
wantUpdate: RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP("/"), Action: map[string]uint32{clusterName: 1}}}},
},
{
// Weights do not add up to the total weight.
name: "route-config-with-weighted_clusters_weights_not_add_up",
|
||||
rc: &v3routepb.RouteConfiguration{
|
||||
Name: routeName,
|
||||
VirtualHosts: []*v3routepb.VirtualHost{
|
||||
{
|
||||
Domains: []string{ldsTarget},
|
||||
Routes: []*v3routepb.Route{
|
||||
{
|
||||
Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}},
|
||||
Action: &v3routepb.Route_Route{
|
||||
Route: &v3routepb.RouteAction{
|
||||
ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{
|
||||
WeightedClusters: &v3routepb.WeightedCluster{
|
||||
Clusters: []*v3routepb.WeightedCluster_ClusterWeight{
|
||||
{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}},
|
||||
{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}},
|
||||
{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}},
|
||||
},
|
||||
TotalWeight: &wrapperspb.UInt32Value{Value: 30},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantError: true,
|
||||
},
|
||||
{
|
||||
name: "good-route-config-with-weighted_clusters",
|
||||
rc: &v3routepb.RouteConfiguration{
|
||||
Name: routeName,
|
||||
VirtualHosts: []*v3routepb.VirtualHost{
|
||||
{
|
||||
Domains: []string{ldsTarget},
|
||||
Routes: []*v3routepb.Route{
|
||||
{
|
||||
Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}},
|
||||
Action: &v3routepb.Route_Route{
|
||||
Route: &v3routepb.RouteAction{
|
||||
ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{
|
||||
WeightedClusters: &v3routepb.WeightedCluster{
|
||||
Clusters: []*v3routepb.WeightedCluster_ClusterWeight{
|
||||
{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}},
|
||||
{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}},
|
||||
{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}},
|
||||
},
|
||||
TotalWeight: &wrapperspb.UInt32Value{Value: 10},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantUpdate: RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP("/"), Action: map[string]uint32{"a": 2, "b": 3, "c": 5}}}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, ldsTarget, nil)
|
||||
if (gotError != nil) != test.wantError || !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty()) {
|
||||
t.Errorf("generateRDSUpdateFromRouteConfiguration(%+v, %v) = %v, want %v", test.rc, ldsTarget, gotUpdate, test.wantUpdate)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
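
The two weighted-cluster cases encode one invariant: the per-cluster weights must sum exactly to TotalWeight ({2, 3, 5} against 30 fails, against 10 succeeds and becomes the action map). A minimal sketch of that check (an assumption, not the actual code; "fmt" assumed imported):

// weightedClustersSketch converts a WeightedCluster into the action map used
// in RouteConfigUpdate, rejecting configs whose weights don't sum to TotalWeight.
func weightedClustersSketch(wc *v3routepb.WeightedCluster) (map[string]uint32, error) {
	action := make(map[string]uint32)
	var sum uint32
	for _, c := range wc.GetClusters() {
		w := c.GetWeight().GetValue()
		action[c.GetName()] = w
		sum += w
	}
	if total := wc.GetTotalWeight().GetValue(); sum != total {
		return nil, fmt.Errorf("weights sum to %d, want total weight %d", sum, total)
	}
	return action, nil // e.g. {"a": 2, "b": 3, "c": 5} for a total weight of 10
}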

func (s) TestUnmarshalRouteConfig(t *testing.T) {
const (
ldsTarget = "lds.target.good:1111"
uninterestingDomain = "uninteresting.domain"
uninterestingClusterName = "uninterestingClusterName"
v2RouteConfigName = "v2RouteConfig"
v3RouteConfigName = "v3RouteConfig"
v2ClusterName = "v2Cluster"
v3ClusterName = "v3Cluster"
)

var (
v2VirtualHost = []*v2routepb.VirtualHost{
{
Domains: []string{uninterestingDomain},
Routes: []*v2routepb.Route{
{
Match: &v2routepb.RouteMatch{PathSpecifier: &v2routepb.RouteMatch_Prefix{Prefix: ""}},
Action: &v2routepb.Route_Route{
Route: &v2routepb.RouteAction{
ClusterSpecifier: &v2routepb.RouteAction_Cluster{Cluster: uninterestingClusterName},
},
},
},
},
},
{
Domains: []string{ldsTarget},
Routes: []*v2routepb.Route{
{
Match: &v2routepb.RouteMatch{PathSpecifier: &v2routepb.RouteMatch_Prefix{Prefix: ""}},
Action: &v2routepb.Route_Route{
Route: &v2routepb.RouteAction{
ClusterSpecifier: &v2routepb.RouteAction_Cluster{Cluster: v2ClusterName},
},
},
},
},
},
}
v2RouteConfig = &anypb.Any{
TypeUrl: version.V2RouteConfigURL,
Value: func() []byte {
rc := &v2xdspb.RouteConfiguration{
Name: v2RouteConfigName,
VirtualHosts: v2VirtualHost,
}
m, _ := proto.Marshal(rc)
return m
}(),
}
v3VirtualHost = []*v3routepb.VirtualHost{
{
Domains: []string{uninterestingDomain},
Routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: uninterestingClusterName},
},
},
},
},
},
{
Domains: []string{ldsTarget},
Routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: v3ClusterName},
},
},
},
},
},
}
v3RouteConfig = &anypb.Any{
TypeUrl: version.V3RouteConfigURL,
Value: func() []byte {
rc := &v3routepb.RouteConfiguration{
Name: v3RouteConfigName,
VirtualHosts: v3VirtualHost,
}
m, _ := proto.Marshal(rc)
return m
}(),
}
)

tests := []struct {
name string
resources []*anypb.Any
wantUpdate map[string]RouteConfigUpdate
wantErr bool
}{
{
name: "non-routeConfig resource type",
resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}},
wantErr: true,
},
{
name: "badly marshaled routeconfig resource",
resources: []*anypb.Any{
{
TypeUrl: version.V3RouteConfigURL,
Value: []byte{1, 2, 3, 4},
},
},
wantErr: true,
},
{
name: "bad routeConfig resource",
resources: []*anypb.Any{
{
TypeUrl: version.V3RouteConfigURL,
Value: func() []byte {
rc := &v3routepb.RouteConfiguration{
VirtualHosts: []*v3routepb.VirtualHost{
{Domains: []string{uninterestingDomain}},
},
}
m, _ := proto.Marshal(rc)
return m
}(),
},
},
wantErr: true,
},
{
name: "empty resource list",
},
{
name: "v2 routeConfig resource",
resources: []*anypb.Any{v2RouteConfig},
wantUpdate: map[string]RouteConfigUpdate{
v2RouteConfigName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{v2ClusterName: 1}}}},
},
},
{
name: "v3 routeConfig resource",
resources: []*anypb.Any{v3RouteConfig},
wantUpdate: map[string]RouteConfigUpdate{
v3RouteConfigName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{v3ClusterName: 1}}}},
},
},
{
name: "multiple routeConfig resources",
resources: []*anypb.Any{v2RouteConfig, v3RouteConfig},
wantUpdate: map[string]RouteConfigUpdate{
v3RouteConfigName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{v3ClusterName: 1}}}},
v2RouteConfigName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{v2ClusterName: 1}}}},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
update, err := UnmarshalRouteConfig(test.resources, ldsTarget, nil)
if ((err != nil) != test.wantErr) || !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) {
t.Errorf("UnmarshalRouteConfig(%v, %v) = (%v, %v) want (%v, %v)", test.resources, ldsTarget, update, err, test.wantUpdate, test.wantErr)
}
})
}
}

func (s) TestMatchTypeForDomain(t *testing.T) {
tests := []struct {
d string
want domainMatchType
}{
{d: "", want: domainMatchTypeInvalid},
{d: "*", want: domainMatchTypeUniversal},
{d: "bar.*", want: domainMatchTypePrefix},
{d: "*.abc.com", want: domainMatchTypeSuffix},
{d: "foo.bar.com", want: domainMatchTypeExact},
{d: "foo.*.com", want: domainMatchTypeInvalid},
}
for _, tt := range tests {
if got := matchTypeForDomain(tt.d); got != tt.want {
t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want)
}
}
}
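
The table fully determines the classifier; a sketch consistent with it (an assumed implementation, using the standard "strings" package):

// matchTypeForDomainSketch mirrors the expectations above: "" and interior
// wildcards are invalid, "*" is universal, "bar.*" is a prefix pattern,
// "*.abc.com" is a suffix pattern, and anything without a "*" is exact.
func matchTypeForDomainSketch(d string) domainMatchType {
	switch {
	case d == "":
		return domainMatchTypeInvalid
	case d == "*":
		return domainMatchTypeUniversal
	case strings.HasPrefix(d, "*.") && !strings.Contains(d[2:], "*"):
		return domainMatchTypeSuffix
	case strings.HasSuffix(d, ".*") && !strings.Contains(d[:len(d)-2], "*"):
		return domainMatchTypePrefix
	case strings.Contains(d, "*"):
		return domainMatchTypeInvalid
	default:
		return domainMatchTypeExact
	}
}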

func (s) TestMatch(t *testing.T) {
tests := []struct {
name string
domain string
host string
wantTyp domainMatchType
wantMatched bool
}{
{name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false},
{name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false},
{name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true},
{name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true},
{name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false},
{name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true},
{name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false},
{name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: true},
{name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if gotTyp, gotMatched := match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched {
t.Errorf("match() = %v, %v, want %v, %v", gotTyp, gotMatched, tt.wantTyp, tt.wantMatched)
}
})
}
}

func (s) TestFindBestMatchingVirtualHost(t *testing.T) {
var (
oneExactMatch = &v3routepb.VirtualHost{
Name: "one-exact-match",
Domains: []string{"foo.bar.com"},
}
oneSuffixMatch = &v3routepb.VirtualHost{
Name: "one-suffix-match",
Domains: []string{"*.bar.com"},
}
onePrefixMatch = &v3routepb.VirtualHost{
Name: "one-prefix-match",
Domains: []string{"foo.bar.*"},
}
oneUniversalMatch = &v3routepb.VirtualHost{
Name: "one-universal-match",
Domains: []string{"*"},
}
longExactMatch = &v3routepb.VirtualHost{
Name: "one-exact-match",
Domains: []string{"v2.foo.bar.com"},
}
multipleMatch = &v3routepb.VirtualHost{
Name: "multiple-match",
Domains: []string{"pi.foo.bar.com", "314.*", "*.159"},
}
vhs = []*v3routepb.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch}
)

tests := []struct {
name string
host string
vHosts []*v3routepb.VirtualHost
want *v3routepb.VirtualHost
}{
{name: "exact-match", host: "foo.bar.com", vHosts: vhs, want: oneExactMatch},
{name: "suffix-match", host: "123.bar.com", vHosts: vhs, want: oneSuffixMatch},
{name: "prefix-match", host: "foo.bar.org", vHosts: vhs, want: onePrefixMatch},
{name: "universal-match", host: "abc.123", vHosts: vhs, want: oneUniversalMatch},
{name: "long-exact-match", host: "v2.foo.bar.com", vHosts: vhs, want: longExactMatch},
// Matches suffix "*.bar.com" and exact "pi.foo.bar.com". Takes exact.
{name: "multiple-match-exact", host: "pi.foo.bar.com", vHosts: vhs, want: multipleMatch},
// Matches suffix "*.159" and prefix "foo.bar.*". Takes suffix.
{name: "multiple-match-suffix", host: "foo.bar.159", vHosts: vhs, want: multipleMatch},
// Matches suffix "*.bar.com" and prefix "314.*". Takes suffix.
{name: "multiple-match-prefix", host: "314.bar.com", vHosts: vhs, want: oneSuffixMatch},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := findBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) {
t.Errorf("findBestMatchingVirtualHost() = %v, want %v", got, tt.want)
}
})
}
}
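
Read together, the commented cases pin down the selection order among matching virtual hosts: exact beats suffix, suffix beats prefix, and prefix beats the universal "*". One compact way to state that assumption:

// Assumed precedence, lower index wins (inferred from the cases above, not
// taken from the implementation).
var matchPrecedenceSketch = []domainMatchType{
	domainMatchTypeExact,
	domainMatchTypeSuffix,
	domainMatchTypePrefix,
	domainMatchTypeUniversal,
}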

func (s) TestRoutesProtoToSlice(t *testing.T) {
tests := []struct {
name string
routes []*v3routepb.Route
wantRoutes []*Route
wantErr bool
}{
{
name: "no path",
routes: []*v3routepb.Route{{
Match: &v3routepb.RouteMatch{},
}},
wantErr: true,
},
{
name: "case_sensitive is false",
routes: []*v3routepb.Route{{
Match: &v3routepb.RouteMatch{
PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"},
CaseSensitive: &wrapperspb.BoolValue{Value: false},
},
}},
wantErr: true,
},
{
name: "good",
routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{
PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"},
Headers: []*v3routepb.HeaderMatcher{
{
Name: "th",
HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{
PrefixMatch: "tv",
},
InvertMatch: true,
},
},
RuntimeFraction: &v3corepb.RuntimeFractionalPercent{
DefaultValue: &v3typepb.FractionalPercent{
Numerator: 1,
Denominator: v3typepb.FractionalPercent_HUNDRED,
},
},
},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{
WeightedClusters: &v3routepb.WeightedCluster{
Clusters: []*v3routepb.WeightedCluster_ClusterWeight{
{Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}},
{Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}},
},
TotalWeight: &wrapperspb.UInt32Value{Value: 100},
}}}},
},
},
wantRoutes: []*Route{{
Prefix: newStringP("/a/"),
Headers: []*HeaderMatcher{
{
Name: "th",
InvertMatch: newBoolP(true),
PrefixMatch: newStringP("tv"),
},
},
Fraction: newUInt32P(10000),
Action: map[string]uint32{"A": 40, "B": 60},
}},
wantErr: false,
},
{
name: "query is ignored",
routes: []*v3routepb.Route{
{
Match: &v3routepb.RouteMatch{
PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"},
},
Action: &v3routepb.Route_Route{
Route: &v3routepb.RouteAction{
ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{
WeightedClusters: &v3routepb.WeightedCluster{
Clusters: []*v3routepb.WeightedCluster_ClusterWeight{
{Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}},
{Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}},
},
TotalWeight: &wrapperspb.UInt32Value{Value: 100},
}}}},
},
{
Name: "with_query",
Match: &v3routepb.RouteMatch{
PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/b/"},
QueryParameters: []*v3routepb.QueryParameterMatcher{{Name: "route_will_be_ignored"}},
},
},
},
// Only one route in the result, because the second one with query
// parameters is ignored.
wantRoutes: []*Route{{
Prefix: newStringP("/a/"),
Action: map[string]uint32{"A": 40, "B": 60},
}},
wantErr: false,
},
}

cmpOpts := []cmp.Option{
cmp.AllowUnexported(Route{}, HeaderMatcher{}, Int64Range{}),
cmpopts.EquateEmpty(),
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := routesProtoToSlice(tt.routes, nil)
if (err != nil) != tt.wantErr {
t.Errorf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(got, tt.wantRoutes, cmpOpts...) {
t.Errorf("routesProtoToSlice() got = %v, want %v, diff: %v", got, tt.wantRoutes, cmp.Diff(got, tt.wantRoutes, cmpOpts...))
}
})
}
}
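
The Fraction: newUInt32P(10000) expectation in the "good" case reflects a normalization: a RuntimeFractionalPercent is reduced to a numerator out of one million, so 1 per HUNDRED becomes 10000 per million. A sketch of that conversion (an assumption; the enum values are from envoy's v3 type package, "fmt" assumed imported):

// fractionToPerMillionSketch normalizes a FractionalPercent to
// parts-per-million, matching the 1/HUNDRED -> 10000 expectation above.
func fractionToPerMillionSketch(f *v3typepb.FractionalPercent) (uint32, error) {
	switch f.GetDenominator() {
	case v3typepb.FractionalPercent_HUNDRED:
		return f.GetNumerator() * 10000, nil
	case v3typepb.FractionalPercent_TEN_THOUSAND:
		return f.GetNumerator() * 100, nil
	case v3typepb.FractionalPercent_MILLION:
		return f.GetNumerator(), nil
	default:
		return 0, fmt.Errorf("unknown denominator %v", f.GetDenominator())
	}
}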

func newStringP(s string) *string {
return &s
}

func newUInt32P(i uint32) *uint32 {
return &i
}

func newBoolP(b bool) *bool {
return &b
}

@ -23,13 +23,10 @@ import (
"time"

"google.golang.org/grpc"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpctest"
"google.golang.org/grpc/xds/internal/client/bootstrap"
"google.golang.org/grpc/xds/internal/testutils"
"google.golang.org/grpc/xds/internal/testutils/fakeserver"

corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
"google.golang.org/grpc/xds/internal/version"
)

type s struct {
@ -60,119 +57,56 @@ func clientOpts(balancerName string) Options {
}
}

func (s) TestNew(t *testing.T) {
fakeServer, cleanup, err := fakeserver.StartServer()
if err != nil {
t.Fatalf("Failed to start fake xDS server: %v", err)
}
defer cleanup()

tests := []struct {
name string
opts Options
wantErr bool
}{
{name: "empty-opts", opts: Options{}, wantErr: true},
{
name: "empty-balancer-name",
opts: Options{
Config: bootstrap.Config{
Creds: grpc.WithInsecure(),
NodeProto: testutils.EmptyNodeProtoV2,
},
},
wantErr: true,
},
{
name: "empty-dial-creds",
opts: Options{
Config: bootstrap.Config{
BalancerName: "dummy",
NodeProto: testutils.EmptyNodeProtoV2,
},
},
wantErr: true,
},
{
name: "empty-node-proto",
opts: Options{
Config: bootstrap.Config{
BalancerName: "dummy",
Creds: grpc.WithInsecure(),
},
},
wantErr: true,
},
{
name: "happy-case",
opts: clientOpts(fakeServer.Address),
wantErr: false,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
c, err := New(test.opts)
if err == nil {
defer c.Close()
}
if (err != nil) != test.wantErr {
t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErr)
}
})
}
}

type testXDSV2Client struct {
r updateHandler
type testAPIClient struct {
r UpdateHandler

addWatches map[string]*testutils.Channel
removeWatches map[string]*testutils.Channel
}

func overrideNewXDSV2Client() (<-chan *testXDSV2Client, func()) {
oldNewXDSV2Client := newXDSV2Client
ch := make(chan *testXDSV2Client, 1)
newXDSV2Client = func(parent *Client, cc *grpc.ClientConn, nodeProto *corepb.Node, backoff func(int) time.Duration, logger *grpclog.PrefixLogger) xdsv2Client {
ret := newTestXDSV2Client(parent)
func overrideNewAPIClient() (<-chan *testAPIClient, func()) {
origNewAPIClient := newAPIClient
ch := make(chan *testAPIClient, 1)
newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) {
ret := newTestAPIClient(opts.Parent)
ch <- ret
return ret
return ret, nil
}
return ch, func() { newXDSV2Client = oldNewXDSV2Client }
return ch, func() { newAPIClient = origNewAPIClient }
}
|
||||
|
||||
func newTestXDSV2Client(r updateHandler) *testXDSV2Client {
|
||||
func newTestAPIClient(r UpdateHandler) *testAPIClient {
|
||||
addWatches := make(map[string]*testutils.Channel)
|
||||
addWatches[ldsURL] = testutils.NewChannel()
|
||||
addWatches[rdsURL] = testutils.NewChannel()
|
||||
addWatches[cdsURL] = testutils.NewChannel()
|
||||
addWatches[edsURL] = testutils.NewChannel()
|
||||
addWatches[version.V2ListenerURL] = testutils.NewChannel()
|
||||
addWatches[version.V2RouteConfigURL] = testutils.NewChannel()
|
||||
addWatches[version.V2ClusterURL] = testutils.NewChannel()
|
||||
addWatches[version.V2EndpointsURL] = testutils.NewChannel()
|
||||
removeWatches := make(map[string]*testutils.Channel)
|
||||
removeWatches[ldsURL] = testutils.NewChannel()
|
||||
removeWatches[rdsURL] = testutils.NewChannel()
|
||||
removeWatches[cdsURL] = testutils.NewChannel()
|
||||
removeWatches[edsURL] = testutils.NewChannel()
|
||||
return &testXDSV2Client{
|
||||
removeWatches[version.V2ListenerURL] = testutils.NewChannel()
|
||||
removeWatches[version.V2RouteConfigURL] = testutils.NewChannel()
|
||||
removeWatches[version.V2ClusterURL] = testutils.NewChannel()
|
||||
removeWatches[version.V2EndpointsURL] = testutils.NewChannel()
|
||||
return &testAPIClient{
|
||||
r: r,
|
||||
addWatches: addWatches,
|
||||
removeWatches: removeWatches,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *testXDSV2Client) addWatch(resourceType, resourceName string) {
|
||||
func (c *testAPIClient) AddWatch(resourceType, resourceName string) {
|
||||
c.addWatches[resourceType].Send(resourceName)
|
||||
}
|
||||
|
||||
func (c *testXDSV2Client) removeWatch(resourceType, resourceName string) {
|
||||
func (c *testAPIClient) RemoveWatch(resourceType, resourceName string) {
|
||||
c.removeWatches[resourceType].Send(resourceName)
|
||||
}
|
||||
|
||||
func (c *testXDSV2Client) close() {}
|
||||
func (c *testAPIClient) Close() {}
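
To orient readers of the tests below, this is how the override is meant to be driven; a minimal sketch using only names defined in this file ("my-cluster" is a placeholder):

// Sketch only: grab the fake API client installed by overrideNewAPIClient,
// then assert that starting a watch registers it with the fake.
func exampleFakeAPIClientUsage(t *testing.T) {
apiClientCh, cleanup := overrideNewAPIClient()
defer cleanup()

c, err := New(clientOpts(testXDSServer))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer c.Close()

// New constructs its API client through the overridden factory, so the
// fake instance arrives on the channel.
apiClient := <-apiClientCh

c.WatchCluster("my-cluster", func(ClusterUpdate, error) {})
if _, err := apiClient.addWatches[version.V2ClusterURL].Receive(); err != nil {
t.Fatalf("watch was not registered: %v", err)
}
}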

// TestWatchCallAnotherWatch covers the case where watch() is called inline by a
// callback. It makes sure it doesn't cause a deadlock.
func (s) TestWatchCallAnotherWatch(t *testing.T) {
v2ClientCh, cleanup := overrideNewXDSV2Client()
v2ClientCh, cleanup := overrideNewAPIClient()
defer cleanup()

c, err := New(clientOpts(testXDSServer))

@ -189,17 +123,17 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) {
clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
// Calls another watch inline, to ensure there's no deadlock.
c.WatchCluster("another-random-name", func(ClusterUpdate, error) {})
if _, err := v2Client.addWatches[cdsURL].Receive(); firstTime && err != nil {
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); firstTime && err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
firstTime = false
})
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}

wantUpdate := ClusterUpdate{ServiceName: testEDSName}
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
v2Client.r.NewClusters(map[string]ClusterUpdate{
testCDSName: wantUpdate,
})

@ -208,7 +142,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) {
}

wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"}
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
v2Client.r.NewClusters(map[string]ClusterUpdate{
testCDSName: wantUpdate2,
})

@ -22,19 +22,14 @@ import (
"fmt"
"sync"
"time"

"google.golang.org/grpc/xds/internal/version"
)

// The value chosen here is based on the default value of the
// initial_fetch_timeout field in corepb.ConfigSource proto.
var defaultWatchExpiryTimeout = 15 * time.Second

const (
ldsURL = "type.googleapis.com/envoy.api.v2.Listener"
rdsURL = "type.googleapis.com/envoy.api.v2.RouteConfiguration"
cdsURL = "type.googleapis.com/envoy.api.v2.Cluster"
edsURL = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"
)

type watchInfoState int

const (

@ -50,8 +45,8 @@ type watchInfo struct {
typeURL string
target string

ldsCallback ldsCallbackFunc
rdsCallback rdsCallbackFunc
ldsCallback func(ListenerUpdate, error)
rdsCallback func(RouteConfigUpdate, error)
cdsCallback func(ClusterUpdate, error)
edsCallback func(EndpointsUpdate, error)

@ -102,13 +97,13 @@ func (wi *watchInfo) sendErrorLocked(err error) {
u interface{}
)
switch wi.typeURL {
case ldsURL:
u = ldsUpdate{}
case rdsURL:
u = rdsUpdate{}
case cdsURL:
case version.V2ListenerURL:
u = ListenerUpdate{}
case version.V2RouteConfigURL:
u = RouteConfigUpdate{}
case version.V2ClusterURL:
u = ClusterUpdate{}
case edsURL:
case version.V2EndpointsURL:
u = EndpointsUpdate{}
}
wi.c.scheduleCallback(wi, u, err)

@ -130,13 +125,13 @@ func (c *Client) watch(wi *watchInfo) (cancel func()) {
c.logger.Debugf("new watch for type %v, resource name %v", wi.typeURL, wi.target)
var watchers map[string]map[*watchInfo]bool
switch wi.typeURL {
case ldsURL:
case version.V2ListenerURL:
watchers = c.ldsWatchers
case rdsURL:
case version.V2RouteConfigURL:
watchers = c.rdsWatchers
case cdsURL:
case version.V2ClusterURL:
watchers = c.cdsWatchers
case edsURL:
case version.V2EndpointsURL:
watchers = c.edsWatchers
}

@ -151,7 +146,7 @@ func (c *Client) watch(wi *watchInfo) (cancel func()) {
c.logger.Debugf("first watch for type %v, resource name %v, will send a new xDS request", wi.typeURL, wi.target)
s = make(map[*watchInfo]bool)
watchers[resourceName] = s
c.v2c.addWatch(wi.typeURL, resourceName)
c.apiClient.AddWatch(wi.typeURL, resourceName)
}
// No matter what, add the new watcher to the set, so its callback will be
// called for new responses.

@ -159,22 +154,22 @@ func (c *Client) watch(wi *watchInfo) (cancel func()) {

// If the resource is in cache, call the callback with the value.
switch wi.typeURL {
case ldsURL:
case version.V2ListenerURL:
if v, ok := c.ldsCache[resourceName]; ok {
c.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, v)
wi.newUpdate(v)
}
case rdsURL:
case version.V2RouteConfigURL:
if v, ok := c.rdsCache[resourceName]; ok {
c.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, v)
wi.newUpdate(v)
}
case cdsURL:
case version.V2ClusterURL:
if v, ok := c.cdsCache[resourceName]; ok {
c.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, v)
wi.newUpdate(v)
}
case edsURL:
case version.V2EndpointsURL:
if v, ok := c.edsCache[resourceName]; ok {
c.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, v)
wi.newUpdate(v)
@ -195,21 +190,207 @@ func (c *Client) watch(wi *watchInfo) (cancel func()) {
// If this was the last watcher, also tell the underlying API client
// to stop watching this resource.
delete(watchers, resourceName)
c.v2c.removeWatch(wi.typeURL, resourceName)
c.apiClient.RemoveWatch(wi.typeURL, resourceName)
// Remove the resource from cache. When a watch for this
// resource is added later, it will trigger an xDS request with
// resource names, and the client will receive new xDS responses.
switch wi.typeURL {
case ldsURL:
case version.V2ListenerURL:
delete(c.ldsCache, resourceName)
case rdsURL:
case version.V2RouteConfigURL:
delete(c.rdsCache, resourceName)
case cdsURL:
case version.V2ClusterURL:
delete(c.cdsCache, resourceName)
case edsURL:
case version.V2EndpointsURL:
delete(c.edsCache, resourceName)
}
}
}
}
}
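
The cancel path above completes a simple reference-counting contract; a sketch of the observable behavior (illustrative only; cbA and cbB are hypothetical callbacks):

// Sketch only: two watchers on one resource trigger a single AddWatch;
// the last cancel triggers RemoveWatch and cache eviction.
func exampleWatchRefcount(c *Client, cbA, cbB func(ClusterUpdate, error)) {
cancelA := c.WatchCluster("cluster-x", cbA) // first watcher: apiClient.AddWatch(version.V2ClusterURL, "cluster-x")
cancelB := c.WatchCluster("cluster-x", cbB) // second watcher: no new AddWatch; served from cdsCache if present
cancelA() // watcher set still non-empty, nothing is torn down
cancelB() // last watcher: apiClient.RemoveWatch(...), entry deleted from cdsCache
}
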
// watchLDS starts a listener watcher for the service.
//
// Note that during race (e.g. an xDS response is received while the user is
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *Client) watchLDS(serviceName string, cb func(ListenerUpdate, error)) (cancel func()) {
wi := &watchInfo{
c: c,
typeURL: version.V2ListenerURL,
target: serviceName,
ldsCallback: cb,
}

wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
wi.timeout()
})
return c.watch(wi)
}

// watchRDS starts a RouteConfiguration watcher for the given route name.
//
// Note that during race (e.g. an xDS response is received while the user is
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *Client) watchRDS(routeName string, cb func(RouteConfigUpdate, error)) (cancel func()) {
wi := &watchInfo{
c: c,
typeURL: version.V2RouteConfigURL,
target: routeName,
rdsCallback: cb,
}

wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
wi.timeout()
})
return c.watch(wi)
}

// WatchService uses LDS and RDS to discover information about the provided
// serviceName.
//
// WatchService can only be called once. The second call will not start a
// watcher and the callback will get an error. This is because an xDS client
// is expected to be used by only one ClientConn.
//
// Note that during race (e.g. an xDS response is received while the user is
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *Client) WatchService(serviceName string, cb func(ServiceUpdate, error)) (cancel func()) {
c.mu.Lock()
if len(c.ldsWatchers) != 0 {
go cb(ServiceUpdate{}, fmt.Errorf("unexpected WatchService when there's another service being watched"))
c.mu.Unlock()
return func() {}
}
c.mu.Unlock()

w := &serviceUpdateWatcher{c: c, serviceCb: cb}
w.ldsCancel = c.watchLDS(serviceName, w.handleLDSResp)

return w.close
}
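
A minimal caller-side sketch of the contract documented above (illustrative; only WatchService and ServiceUpdate are real names here, and the service name is a placeholder):

// Sketch only: a single service watch driving a callback.
func exampleWatchService(c *Client) func() {
cancel := c.WatchService("foo.example.com", func(su ServiceUpdate, err error) {
if err != nil {
// Also delivered when WatchService is called while another
// service is already being watched.
return
}
_ = su // route configuration resolved via the chained LDS + RDS watches
})
return cancel // cancels the LDS watch and, through close(), any RDS watch
}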

// serviceUpdateWatcher handles LDS and RDS responses, and calls the service
// callback at the right time.
type serviceUpdateWatcher struct {
c *Client
ldsCancel func()
serviceCb func(ServiceUpdate, error)

mu sync.Mutex
closed bool
rdsName string
rdsCancel func()
}

func (w *serviceUpdateWatcher) handleLDSResp(update ListenerUpdate, err error) {
w.c.logger.Infof("xds: client received LDS update: %+v, err: %v", update, err)
w.mu.Lock()
defer w.mu.Unlock()
if w.closed {
return
}
if err != nil {
// We check the error type and do different things. For now, the only
// type we check is ResourceNotFound, which indicates the LDS resource
// was removed, and besides sending the error to the callback, we also
// cancel the RDS watch.
if ErrType(err) == ErrorTypeResourceNotFound && w.rdsCancel != nil {
w.rdsCancel()
w.rdsName = ""
w.rdsCancel = nil
}
// The other error cases still return early without canceling the
// existing RDS watch.
w.serviceCb(ServiceUpdate{}, err)
return
}

if w.rdsName == update.RouteConfigName {
// If the new RouteConfigName is the same as the previous one, don't
// cancel and restart the RDS watch.
return
}
w.rdsName = update.RouteConfigName
if w.rdsCancel != nil {
w.rdsCancel()
}
w.rdsCancel = w.c.watchRDS(update.RouteConfigName, w.handleRDSResp)
}

func (w *serviceUpdateWatcher) handleRDSResp(update RouteConfigUpdate, err error) {
w.c.logger.Infof("xds: client received RDS update: %+v, err: %v", update, err)
w.mu.Lock()
defer w.mu.Unlock()
if w.closed {
return
}
if w.rdsCancel == nil {
// This means only the RDS watch was canceled, which can happen if the
// LDS resource is removed.
return
}
if err != nil {
w.serviceCb(ServiceUpdate{}, err)
return
}
w.serviceCb(ServiceUpdate(update), nil)
}

func (w *serviceUpdateWatcher) close() {
w.mu.Lock()
defer w.mu.Unlock()
w.closed = true
w.ldsCancel()
if w.rdsCancel != nil {
w.rdsCancel()
w.rdsCancel = nil
}
}

// WatchCluster uses CDS to discover information about the provided
// clusterName.
//
// WatchCluster can be called multiple times, with the same or different
// clusterNames. Each call will start an independent watcher for the resource.
//
// Note that during race (e.g. an xDS response is received while the user is
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *Client) WatchCluster(clusterName string, cb func(ClusterUpdate, error)) (cancel func()) {
wi := &watchInfo{
c: c,
typeURL: version.V2ClusterURL,
target: clusterName,
cdsCallback: cb,
}

wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
wi.timeout()
})
return c.watch(wi)
}

// WatchEndpoints uses EDS to discover endpoints in the provided clusterName.
//
// WatchEndpoints can be called multiple times, with the same or different
// clusterNames. Each call will start an independent watcher for the resource.
//
// Note that during race (e.g. an xDS response is received while the user is
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *Client) WatchEndpoints(clusterName string, cb func(EndpointsUpdate, error)) (cancel func()) {
wi := &watchInfo{
c: c,
typeURL: version.V2EndpointsURL,
target: clusterName,
edsCallback: cb,
}

wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
wi.timeout()
})
return c.watch(wi)
}
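
A sketch of consuming an EndpointsUpdate (illustrative; "cluster-a" is a placeholder, while the field names are those of the EndpointsUpdate, Locality, and Endpoint types in this package):

// Sketch only: walking the localities and endpoints of an EDS update.
func exampleWatchEndpoints(c *Client) func() {
return c.WatchEndpoints("cluster-a", func(u EndpointsUpdate, err error) {
if err != nil {
return // includes the watch-expiry error from the timer above
}
for _, loc := range u.Localities {
for _, ep := range loc.Endpoints {
_ = ep.Address // each endpoint carries Address, HealthStatus, Weight
}
}
})
}
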
@ -1,56 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// ClusterUpdate contains information from a received CDS response, which is of
|
||||
// interest to the registered CDS watcher.
|
||||
type ClusterUpdate struct {
|
||||
// ServiceName is the service name corresponding to the clusterName which
|
||||
// is being watched for through CDS.
|
||||
ServiceName string
|
||||
// EnableLRS indicates whether or not load should be reported through LRS.
|
||||
EnableLRS bool
|
||||
}
|
||||
|
||||
// WatchCluster uses CDS to discover information about the provided
|
||||
// clusterName.
|
||||
//
|
||||
// WatchCluster can be called multiple times, with same or different
|
||||
// clusterNames. Each call will start an independent watcher for the resource.
|
||||
//
|
||||
// Note that during race (e.g. an xDS response is received while the user is
|
||||
// calling cancel()), there's a small window where the callback can be called
|
||||
// after the watcher is canceled. The caller needs to handle this case.
|
||||
func (c *Client) WatchCluster(clusterName string, cb func(ClusterUpdate, error)) (cancel func()) {
|
||||
wi := &watchInfo{
|
||||
c: c,
|
||||
typeURL: cdsURL,
|
||||
target: clusterName,
|
||||
cdsCallback: cb,
|
||||
}
|
||||
|
||||
wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
|
||||
wi.timeout()
|
||||
})
|
||||
return c.watch(wi)
|
||||
}
@ -23,6 +23,7 @@ import (
|
|||
"time"
|
||||
|
||||
"google.golang.org/grpc/xds/internal/testutils"
|
||||
"google.golang.org/grpc/xds/internal/version"
|
||||
)
|
||||
|
||||
type clusterUpdateErr struct {
|
||||
|
@ -35,7 +36,7 @@ type clusterUpdateErr struct {
|
|||
// - an update for another resource name
|
||||
// - an update is received after cancel()
|
||||
func (s) TestClusterWatch(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -53,7 +54,7 @@ func (s) TestClusterWatch(t *testing.T) {
|
|||
cancelWatch := c.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
|
@ -64,7 +65,7 @@ func (s) TestClusterWatch(t *testing.T) {
|
|||
//
|
||||
// TODO: in a future cleanup, this (and the same thing in other tests) can
|
||||
// be changed to call Client directly.
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -73,7 +74,7 @@ func (s) TestClusterWatch(t *testing.T) {
|
|||
}
|
||||
|
||||
// Another update, with an extra resource for a different resource name.
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
"randomName": {},
|
||||
})
|
||||
|
@ -84,7 +85,7 @@ func (s) TestClusterWatch(t *testing.T) {
|
|||
|
||||
// Cancel watch, and send update again.
|
||||
cancelWatch()
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -96,7 +97,7 @@ func (s) TestClusterWatch(t *testing.T) {
|
|||
// TestClusterTwoWatchSameResourceName covers the case where an update is received
|
||||
// after two watch() for the same resource name.
|
||||
func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -118,13 +119,13 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
|
|||
cancelLastWatch = c.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wantUpdate := ClusterUpdate{ServiceName: testEDSName}
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -136,7 +137,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
|
|||
|
||||
// Cancel the last watch, and send update again.
|
||||
cancelLastWatch()
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -154,7 +155,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
|
|||
// TestClusterThreeWatchDifferentResourceName covers the case where an update is
|
||||
// received after three watch() for different resource names.
|
||||
func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -175,7 +176,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
|
|||
c.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
@ -185,13 +186,13 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
|
|||
c.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"}
|
||||
wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"}
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName + "1": wantUpdate1,
|
||||
testCDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
@ -210,7 +211,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
|
|||
// TestClusterWatchAfterCache covers the case where watch is called after the update
|
||||
// is in cache.
|
||||
func (s) TestClusterWatchAfterCache(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -225,12 +226,12 @@ func (s) TestClusterWatchAfterCache(t *testing.T) {
|
|||
c.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := ClusterUpdate{ServiceName: testEDSName}
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -243,7 +244,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) {
|
|||
c.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if n, err := v2Client.addWatches[cdsURL].Receive(); err == nil {
|
||||
if n, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err == nil {
|
||||
t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err)
|
||||
}
|
||||
|
||||
|
@ -268,7 +269,7 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) {
|
|||
defaultWatchExpiryTimeout = oldWatchExpiryTimeout
|
||||
}()
|
||||
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -283,7 +284,7 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) {
|
|||
c.WatchCluster(testCDSName, func(u ClusterUpdate, err error) {
|
||||
clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
|
@ -310,7 +311,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {
|
|||
defaultWatchExpiryTimeout = oldWatchExpiryTimeout
|
||||
}()
|
||||
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -325,12 +326,12 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {
|
|||
c.WatchCluster(testCDSName, func(u ClusterUpdate, err error) {
|
||||
clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := ClusterUpdate{ServiceName: testEDSName}
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -352,7 +353,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {
|
|||
// - one more update without the removed resource
|
||||
// - the callback (above) shouldn't receive any update
|
||||
func (s) TestClusterResourceRemoved(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -367,7 +368,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) {
|
|||
c.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh1.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
// Another watch for a different name.
|
||||
|
@ -375,13 +376,13 @@ func (s) TestClusterResourceRemoved(t *testing.T) {
|
|||
c.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) {
|
||||
clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[cdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ClusterURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"}
|
||||
wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"}
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName + "1": wantUpdate1,
|
||||
testCDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
@ -395,7 +396,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) {
|
|||
}
|
||||
|
||||
// Send another update to remove resource 1.
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
||||
|
@ -410,7 +411,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) {
|
|||
}
|
||||
|
||||
// Send one more update without resource 1.
|
||||
v2Client.r.newCDSUpdate(map[string]ClusterUpdate{
|
||||
v2Client.r.NewClusters(map[string]ClusterUpdate{
|
||||
testCDSName + "2": wantUpdate2,
|
||||
})
@ -1,93 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/xds/internal"
|
||||
)
|
||||
|
||||
// OverloadDropConfig contains the config to drop overloads.
|
||||
type OverloadDropConfig struct {
|
||||
Category string
|
||||
Numerator uint32
|
||||
Denominator uint32
|
||||
}
|
||||
|
||||
// EndpointHealthStatus represents the health status of an endpoint.
|
||||
type EndpointHealthStatus int32
|
||||
|
||||
const (
|
||||
// EndpointHealthStatusUnknown represents HealthStatus UNKNOWN.
|
||||
EndpointHealthStatusUnknown EndpointHealthStatus = iota
|
||||
// EndpointHealthStatusHealthy represents HealthStatus HEALTHY.
|
||||
EndpointHealthStatusHealthy
|
||||
// EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY.
|
||||
EndpointHealthStatusUnhealthy
|
||||
// EndpointHealthStatusDraining represents HealthStatus DRAINING.
|
||||
EndpointHealthStatusDraining
|
||||
// EndpointHealthStatusTimeout represents HealthStatus TIMEOUT.
|
||||
EndpointHealthStatusTimeout
|
||||
// EndpointHealthStatusDegraded represents HealthStatus DEGRADED.
|
||||
EndpointHealthStatusDegraded
|
||||
)
|
||||
|
||||
// Endpoint contains information of an endpoint.
|
||||
type Endpoint struct {
|
||||
Address string
|
||||
HealthStatus EndpointHealthStatus
|
||||
Weight uint32
|
||||
}
|
||||
|
||||
// Locality contains information of a locality.
|
||||
type Locality struct {
|
||||
Endpoints []Endpoint
|
||||
ID internal.LocalityID
|
||||
Priority uint32
|
||||
Weight uint32
|
||||
}
|
||||
|
||||
// EndpointsUpdate contains an EDS update.
|
||||
type EndpointsUpdate struct {
|
||||
Drops []OverloadDropConfig
|
||||
Localities []Locality
|
||||
}
|
||||
|
||||
// WatchEndpoints uses EDS to discover endpoints in the provided clusterName.
|
||||
//
|
||||
// WatchEndpoints can be called multiple times, with same or different
|
||||
// clusterNames. Each call will start an independent watcher for the resource.
|
||||
//
|
||||
// Note that during race (e.g. an xDS response is received while the user is
|
||||
// calling cancel()), there's a small window where the callback can be called
|
||||
// after the watcher is canceled. The caller needs to handle this case.
|
||||
func (c *Client) WatchEndpoints(clusterName string, cb func(EndpointsUpdate, error)) (cancel func()) {
|
||||
wi := &watchInfo{
|
||||
c: c,
|
||||
typeURL: edsURL,
|
||||
target: clusterName,
|
||||
edsCallback: cb,
|
||||
}
|
||||
|
||||
wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
|
||||
wi.timeout()
|
||||
})
|
||||
return c.watch(wi)
|
||||
}
|
@ -24,9 +24,9 @@ import (
|
|||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
|
||||
"google.golang.org/grpc/xds/internal"
|
||||
"google.golang.org/grpc/xds/internal/testutils"
|
||||
"google.golang.org/grpc/xds/internal/version"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -57,7 +57,7 @@ type endpointsUpdateErr struct {
|
|||
// - an update for another resource name (which doesn't trigger callback)
|
||||
// - an update is received after cancel()
|
||||
func (s) TestEndpointsWatch(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -72,12 +72,12 @@ func (s) TestEndpointsWatch(t *testing.T) {
|
|||
cancelWatch := c.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
|
||||
endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[edsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2EndpointsURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
|
||||
v2Client.r.newEDSUpdate(map[string]EndpointsUpdate{
|
||||
v2Client.r.NewEndpoints(map[string]EndpointsUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -86,7 +86,7 @@ func (s) TestEndpointsWatch(t *testing.T) {
|
|||
}
|
||||
|
||||
// Another update for a different resource name.
|
||||
v2Client.r.newEDSUpdate(map[string]EndpointsUpdate{
|
||||
v2Client.r.NewEndpoints(map[string]EndpointsUpdate{
|
||||
"randomName": {},
|
||||
})
|
||||
|
||||
|
@ -96,7 +96,7 @@ func (s) TestEndpointsWatch(t *testing.T) {
|
|||
|
||||
// Cancel watch, and send update again.
|
||||
cancelWatch()
|
||||
v2Client.r.newEDSUpdate(map[string]EndpointsUpdate{
|
||||
v2Client.r.NewEndpoints(map[string]EndpointsUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -108,7 +108,7 @@ func (s) TestEndpointsWatch(t *testing.T) {
|
|||
// TestEndpointsTwoWatchSameResourceName covers the case where an update is received
|
||||
// after two watch() for the same resource name.
|
||||
func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -130,13 +130,13 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {
|
|||
cancelLastWatch = c.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
|
||||
endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[edsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2EndpointsURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
|
||||
v2Client.r.newEDSUpdate(map[string]EndpointsUpdate{
|
||||
v2Client.r.NewEndpoints(map[string]EndpointsUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -148,7 +148,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {
|
|||
|
||||
// Cancel the last watch, and send update again.
|
||||
cancelLastWatch()
|
||||
v2Client.r.newEDSUpdate(map[string]EndpointsUpdate{
|
||||
v2Client.r.NewEndpoints(map[string]EndpointsUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -166,7 +166,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {
|
|||
// TestEndpointsThreeWatchDifferentResourceName covers the case where an update is
|
||||
// received after three watch() for different resource names.
|
||||
func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -187,7 +187,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {
|
|||
c.WatchEndpoints(testCDSName+"1", func(update EndpointsUpdate, err error) {
|
||||
endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[edsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2EndpointsURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
@ -197,13 +197,13 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {
|
|||
c.WatchEndpoints(testCDSName+"2", func(update EndpointsUpdate, err error) {
|
||||
endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[edsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2EndpointsURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate1 := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
|
||||
wantUpdate2 := EndpointsUpdate{Localities: []Locality{testLocalities[1]}}
|
||||
v2Client.r.newEDSUpdate(map[string]EndpointsUpdate{
|
||||
v2Client.r.NewEndpoints(map[string]EndpointsUpdate{
|
||||
testCDSName + "1": wantUpdate1,
|
||||
testCDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
@ -222,7 +222,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {
|
|||
// TestEndpointsWatchAfterCache covers the case where watch is called after the update
|
||||
// is in cache.
|
||||
func (s) TestEndpointsWatchAfterCache(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -237,12 +237,12 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) {
|
|||
c.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
|
||||
endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[edsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2EndpointsURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
|
||||
v2Client.r.newEDSUpdate(map[string]EndpointsUpdate{
|
||||
v2Client.r.NewEndpoints(map[string]EndpointsUpdate{
|
||||
testCDSName: wantUpdate,
|
||||
})
|
||||
|
||||
|
@ -255,7 +255,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) {
|
|||
c.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
|
||||
endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if n, err := v2Client.addWatches[edsURL].Receive(); err == nil {
|
||||
if n, err := v2Client.addWatches[version.V2EndpointsURL].Receive(); err == nil {
|
||||
t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err)
|
||||
}
|
||||
|
||||
|
@ -280,7 +280,7 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) {
|
|||
defaultWatchExpiryTimeout = oldWatchExpiryTimeout
|
||||
}()
|
||||
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -295,7 +295,7 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) {
|
|||
c.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
|
||||
endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[edsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2EndpointsURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
@ -1,47 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type ldsUpdate struct {
|
||||
routeName string
|
||||
}
|
||||
type ldsCallbackFunc func(ldsUpdate, error)
|
||||
|
||||
// watchLDS starts a listener watcher for the service.
|
||||
//
|
||||
// Note that during race (e.g. an xDS response is received while the user is
|
||||
// calling cancel()), there's a small window where the callback can be called
|
||||
// after the watcher is canceled. The caller needs to handle this case.
|
||||
func (c *Client) watchLDS(serviceName string, cb ldsCallbackFunc) (cancel func()) {
|
||||
wi := &watchInfo{
|
||||
c: c,
|
||||
typeURL: ldsURL,
|
||||
target: serviceName,
|
||||
ldsCallback: cb,
|
||||
}
|
||||
|
||||
wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
|
||||
wi.timeout()
|
||||
})
|
||||
return c.watch(wi)
|
||||
}
|
@ -22,10 +22,11 @@ import (
|
|||
"testing"
|
||||
|
||||
"google.golang.org/grpc/xds/internal/testutils"
|
||||
"google.golang.org/grpc/xds/internal/version"
|
||||
)
|
||||
|
||||
type ldsUpdateErr struct {
|
||||
u ldsUpdate
|
||||
u ListenerUpdate
|
||||
err error
|
||||
}
|
||||
|
||||
|
@ -34,7 +35,7 @@ type ldsUpdateErr struct {
|
|||
// - an update for another resource name
|
||||
// - an update is received after cancel()
|
||||
func (s) TestLDSWatch(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -46,47 +47,47 @@ func (s) TestLDSWatch(t *testing.T) {
|
|||
v2Client := <-v2ClientCh
|
||||
|
||||
ldsUpdateCh := testutils.NewChannel()
|
||||
cancelWatch := c.watchLDS(testLDSName, func(update ldsUpdate, err error) {
|
||||
cancelWatch := c.watchLDS(testLDSName, func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := ldsUpdate{routeName: testRDSName}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
wantUpdate := ListenerUpdate{RouteConfigName: testRDSName}
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: wantUpdate,
|
||||
})
|
||||
|
||||
if u, err := ldsUpdateCh.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Another update, with an extra resource for a different resource name.
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: wantUpdate,
|
||||
"randomName": {},
|
||||
})
|
||||
|
||||
if u, err := ldsUpdateCh.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
|
||||
// Cancel watch, and send update again.
|
||||
cancelWatch()
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: wantUpdate,
|
||||
})
|
||||
|
||||
if u, err := ldsUpdateCh.TimedReceive(chanRecvTimeout); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected ldsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLDSTwoWatchSameResourceName covers the case where an update is received
|
||||
// after two watch() for the same resource name.
|
||||
func (s) TestLDSTwoWatchSameResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -105,46 +106,46 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) {
|
|||
for i := 0; i < count; i++ {
|
||||
ldsUpdateCh := testutils.NewChannel()
|
||||
ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh)
|
||||
cancelLastWatch = c.watchLDS(testLDSName, func(update ldsUpdate, err error) {
|
||||
cancelLastWatch = c.watchLDS(testLDSName, func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wantUpdate := ldsUpdate{routeName: testRDSName}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
wantUpdate := ListenerUpdate{RouteConfigName: testRDSName}
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: wantUpdate,
|
||||
})
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if u, err := ldsUpdateChs[i].Receive(); err != nil || u != (ldsUpdateErr{wantUpdate, nil}) {
|
||||
t.Errorf("i=%v, unexpected ldsUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
t.Errorf("i=%v, unexpected ListenerUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel the last watch, and send update again.
|
||||
cancelLastWatch()
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: wantUpdate,
|
||||
})
|
||||
|
||||
for i := 0; i < count-1; i++ {
|
||||
if u, err := ldsUpdateChs[i].Receive(); err != nil || u != (ldsUpdateErr{wantUpdate, nil}) {
|
||||
t.Errorf("i=%v, unexpected ldsUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
t.Errorf("i=%v, unexpected ListenerUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
}
|
||||
}
|
||||
|
||||
if u, err := ldsUpdateChs[count-1].TimedReceive(chanRecvTimeout); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected ldsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLDSThreeWatchDifferentResourceName covers the case where an update is
|
||||
// received after three watch() for different resource names.
|
||||
func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -162,45 +163,45 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) {
|
|||
for i := 0; i < count; i++ {
|
||||
ldsUpdateCh := testutils.NewChannel()
|
||||
ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh)
|
||||
c.watchLDS(testLDSName+"1", func(update ldsUpdate, err error) {
|
||||
c.watchLDS(testLDSName+"1", func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Third watch for a different name.
|
||||
ldsUpdateCh2 := testutils.NewChannel()
|
||||
c.watchLDS(testLDSName+"2", func(update ldsUpdate, err error) {
|
||||
c.watchLDS(testLDSName+"2", func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate1 := ldsUpdate{routeName: testRDSName + "1"}
|
||||
wantUpdate2 := ldsUpdate{routeName: testRDSName + "2"}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
wantUpdate1 := ListenerUpdate{RouteConfigName: testRDSName + "1"}
|
||||
wantUpdate2 := ListenerUpdate{RouteConfigName: testRDSName + "2"}
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName + "1": wantUpdate1,
|
||||
testLDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if u, err := ldsUpdateChs[i].Receive(); err != nil || u != (ldsUpdateErr{wantUpdate1, nil}) {
|
||||
t.Errorf("i=%v, unexpected ldsUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
t.Errorf("i=%v, unexpected ListenerUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
}
|
||||
}
|
||||
|
||||
if u, err := ldsUpdateCh2.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate2, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLDSWatchAfterCache covers the case where watch is called after the update
|
||||
// is in cache.
|
||||
func (s) TestLDSWatchAfterCache(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -212,39 +213,39 @@ func (s) TestLDSWatchAfterCache(t *testing.T) {
|
|||
v2Client := <-v2ClientCh
|
||||
|
||||
ldsUpdateCh := testutils.NewChannel()
|
||||
c.watchLDS(testLDSName, func(update ldsUpdate, err error) {
|
||||
c.watchLDS(testLDSName, func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := ldsUpdate{routeName: testRDSName}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
wantUpdate := ListenerUpdate{RouteConfigName: testRDSName}
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: wantUpdate,
|
||||
})
|
||||
|
||||
if u, err := ldsUpdateCh.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Another watch for the resource in cache.
|
||||
ldsUpdateCh2 := testutils.NewChannel()
|
||||
c.watchLDS(testLDSName, func(update ldsUpdate, err error) {
|
||||
c.watchLDS(testLDSName, func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if n, err := v2Client.addWatches[ldsURL].Receive(); err == nil {
|
||||
if n, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err == nil {
|
||||
t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err)
|
||||
}
|
||||
|
||||
// New watch should receive the update.
|
||||
if u, err := ldsUpdateCh2.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Old watch should see nothing.
|
||||
if u, err := ldsUpdateCh.TimedReceive(chanRecvTimeout); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected ldsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -255,7 +256,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) {
|
|||
// - one more update without the removed resource
|
||||
// - the callback (above) shouldn't receive any update
|
||||
func (s) TestLDSResourceRemoved(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -267,63 +268,63 @@ func (s) TestLDSResourceRemoved(t *testing.T) {
|
|||
v2Client := <-v2ClientCh
|
||||
|
||||
ldsUpdateCh1 := testutils.NewChannel()
|
||||
c.watchLDS(testLDSName+"1", func(update ldsUpdate, err error) {
|
||||
c.watchLDS(testLDSName+"1", func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh1.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
// Another watch for a different name.
|
||||
ldsUpdateCh2 := testutils.NewChannel()
|
||||
c.watchLDS(testLDSName+"2", func(update ldsUpdate, err error) {
|
||||
c.watchLDS(testLDSName+"2", func(update ListenerUpdate, err error) {
|
||||
ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate1 := ldsUpdate{routeName: testEDSName + "1"}
|
||||
wantUpdate2 := ldsUpdate{routeName: testEDSName + "2"}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
wantUpdate1 := ListenerUpdate{RouteConfigName: testEDSName + "1"}
|
||||
wantUpdate2 := ListenerUpdate{RouteConfigName: testEDSName + "2"}
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName + "1": wantUpdate1,
|
||||
testLDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
||||
if u, err := ldsUpdateCh1.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate1, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
if u, err := ldsUpdateCh2.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate2, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Send another update to remove resource 1.
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
||||
// watcher 1 should get an error.
|
||||
if u, err := ldsUpdateCh1.Receive(); err != nil || ErrType(u.(ldsUpdateErr).err) != ErrorTypeResourceNotFound {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err)
|
||||
}
|
||||
|
||||
// watcher 2 should get the same update again.
|
||||
if u, err := ldsUpdateCh2.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate2, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Send one more update without resource 1.
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
||||
// watcher 1 should get an error.
|
||||
if u, err := ldsUpdateCh1.Receive(); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected ldsUpdate: %v, want receiving from channel timeout", u)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u)
|
||||
}
|
||||
|
||||
// watcher 2 should get the same update again.
|
||||
if u, err := ldsUpdateCh2.Receive(); err != nil || u != (ldsUpdateErr{wantUpdate2, nil}) {
|
||||
t.Errorf("unexpected ldsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,73 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Int64Range is a range used for header range matching.
|
||||
type Int64Range struct {
|
||||
Start int64 `json:"start"`
|
||||
End int64 `json:"end"`
|
||||
}
|
||||
|
||||
// HeaderMatcher represents header matchers.
|
||||
type HeaderMatcher struct {
|
||||
Name string `json:"name"`
|
||||
InvertMatch *bool `json:"invertMatch,omitempty"`
|
||||
ExactMatch *string `json:"exactMatch,omitempty"`
|
||||
RegexMatch *string `json:"regexMatch,omitempty"`
|
||||
PrefixMatch *string `json:"prefixMatch,omitempty"`
|
||||
SuffixMatch *string `json:"suffixMatch,omitempty"`
|
||||
RangeMatch *Int64Range `json:"rangeMatch,omitempty"`
|
||||
PresentMatch *bool `json:"presentMatch,omitempty"`
|
||||
}
|
||||
|
||||
// Route represents a route with matchers and an action.
|
||||
type Route struct {
|
||||
Path, Prefix, Regex *string
|
||||
Headers []*HeaderMatcher
|
||||
Fraction *uint32
|
||||
Action map[string]uint32 // action is weighted clusters.
|
||||
}
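// As a minimal sketch of how these matchers compose (all values below are
// assumed for illustration; newStringP is the test helper used elsewhere in
// this commit), a prefix route that also requires an exact header match and
// splits traffic 80/20 across two clusters could look like:
//
//	route := &Route{
//		Prefix:  newStringP("/service/"),
//		Headers: []*HeaderMatcher{{Name: "env", ExactMatch: newStringP("prod")}},
//		Action:  map[string]uint32{"cluster-a": 80, "cluster-b": 20},
//	}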
|
||||
|
||||
type rdsUpdate struct {
|
||||
routes []*Route
|
||||
}
|
||||
type rdsCallbackFunc func(rdsUpdate, error)
|
||||
|
||||
// watchRDS starts a RouteConfiguration watcher for the given route name.
|
||||
//
|
||||
// Note that during race (e.g. an xDS response is received while the user is
|
||||
// calling cancel()), there's a small window where the callback can be called
|
||||
// after the watcher is canceled. The caller needs to handle this case.
|
||||
func (c *Client) watchRDS(routeName string, cb rdsCallbackFunc) (cancel func()) {
|
||||
wi := &watchInfo{
|
||||
c: c,
|
||||
typeURL: rdsURL,
|
||||
target: routeName,
|
||||
rdsCallback: cb,
|
||||
}
|
||||
|
||||
wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() {
|
||||
wi.timeout()
|
||||
})
|
||||
return c.watch(wi)
|
||||
}
|
|
@ -23,10 +23,11 @@ import (
|
|||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"google.golang.org/grpc/xds/internal/testutils"
|
||||
"google.golang.org/grpc/xds/internal/version"
|
||||
)
|
||||
|
||||
type rdsUpdateErr struct {
|
||||
u rdsUpdate
|
||||
u RouteConfigUpdate
|
||||
err error
|
||||
}
|
||||
|
||||
|
@ -35,7 +36,7 @@ type rdsUpdateErr struct {
|
|||
// - an update for another resource name (which doesn't trigger callback)
|
||||
// - an update is received after cancel()
|
||||
func (s) TestRDSWatch(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -47,46 +48,46 @@ func (s) TestRDSWatch(t *testing.T) {
|
|||
v2Client := <-v2ClientCh
|
||||
|
||||
rdsUpdateCh := testutils.NewChannel()
|
||||
cancelWatch := c.watchRDS(testRDSName, func(update rdsUpdate, err error) {
|
||||
cancelWatch := c.watchRDS(testRDSName, func(update RouteConfigUpdate, err error) {
|
||||
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := rdsUpdate{routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
wantUpdate := RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: wantUpdate,
|
||||
})
|
||||
|
||||
if u, err := rdsUpdateCh.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdate{}, rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected rdsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
if u, err := rdsUpdateCh.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Another update for a different resource name.
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
"randomName": {},
|
||||
})
|
||||
|
||||
if u, err := rdsUpdateCh.TimedReceive(chanRecvTimeout); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected rdsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
|
||||
// Cancel watch, and send update again.
|
||||
cancelWatch()
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: wantUpdate,
|
||||
})
|
||||
|
||||
if u, err := rdsUpdateCh.TimedReceive(chanRecvTimeout); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected rdsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRDSTwoWatchSameResourceName covers the case where an update is received
|
||||
// after two watch() for the same resource name.
|
||||
func (s) TestRDSTwoWatchSameResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -105,46 +106,46 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) {
|
|||
for i := 0; i < count; i++ {
|
||||
rdsUpdateCh := testutils.NewChannel()
|
||||
rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh)
|
||||
cancelLastWatch = c.watchRDS(testRDSName, func(update rdsUpdate, err error) {
|
||||
cancelLastWatch = c.watchRDS(testRDSName, func(update RouteConfigUpdate, err error) {
|
||||
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wantUpdate := rdsUpdate{routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
wantUpdate := RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: wantUpdate,
|
||||
})
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if u, err := rdsUpdateChs[i].Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdate{}, rdsUpdateErr{})) {
|
||||
t.Errorf("i=%v, unexpected rdsUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
if u, err := rdsUpdateChs[i].Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
|
||||
t.Errorf("i=%v, unexpected RouteConfigUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel the last watch, and send update again.
|
||||
cancelLastWatch()
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: wantUpdate,
|
||||
})
|
||||
|
||||
for i := 0; i < count-1; i++ {
|
||||
if u, err := rdsUpdateChs[i].Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdate{}, rdsUpdateErr{})) {
|
||||
t.Errorf("i=%v, unexpected rdsUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
if u, err := rdsUpdateChs[i].Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
|
||||
t.Errorf("i=%v, unexpected RouteConfigUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
}
|
||||
}
|
||||
|
||||
if u, err := rdsUpdateChs[count-1].TimedReceive(chanRecvTimeout); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected rdsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRDSThreeWatchDifferentResourceName covers the case where an update is
|
||||
// received after three watch() for different resource names.
|
||||
func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -162,45 +163,45 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) {
|
|||
for i := 0; i < count; i++ {
|
||||
rdsUpdateCh := testutils.NewChannel()
|
||||
rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh)
|
||||
c.watchRDS(testRDSName+"1", func(update rdsUpdate, err error) {
|
||||
c.watchRDS(testRDSName+"1", func(update RouteConfigUpdate, err error) {
|
||||
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); i == 0 && err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); i == 0 && err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Third watch for a different name.
|
||||
rdsUpdateCh2 := testutils.NewChannel()
|
||||
c.watchRDS(testRDSName+"2", func(update rdsUpdate, err error) {
|
||||
c.watchRDS(testRDSName+"2", func(update RouteConfigUpdate, err error) {
|
||||
rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate1 := rdsUpdate{routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "1": 1}}}}
|
||||
wantUpdate2 := rdsUpdate{routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "2": 1}}}}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
wantUpdate1 := RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "1": 1}}}}
|
||||
wantUpdate2 := RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "2": 1}}}}
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName + "1": wantUpdate1,
|
||||
testRDSName + "2": wantUpdate2,
|
||||
})
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if u, err := rdsUpdateChs[i].Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate1, nil}, cmp.AllowUnexported(rdsUpdate{}, rdsUpdateErr{})) {
|
||||
t.Errorf("i=%v, unexpected rdsUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
if u, err := rdsUpdateChs[i].Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate1, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
|
||||
t.Errorf("i=%v, unexpected RouteConfigUpdate: %v, error receiving from channel: %v", i, u, err)
|
||||
}
|
||||
}
|
||||
|
||||
if u, err := rdsUpdateCh2.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate2, nil}, cmp.AllowUnexported(rdsUpdate{}, rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected rdsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
if u, err := rdsUpdateCh2.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate2, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRDSWatchAfterCache covers the case where watch is called after the update
|
||||
// is in cache.
|
||||
func (s) TestRDSWatchAfterCache(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -212,38 +213,38 @@ func (s) TestRDSWatchAfterCache(t *testing.T) {
|
|||
v2Client := <-v2ClientCh
|
||||
|
||||
rdsUpdateCh := testutils.NewChannel()
|
||||
c.watchRDS(testRDSName, func(update rdsUpdate, err error) {
|
||||
c.watchRDS(testRDSName, func(update RouteConfigUpdate, err error) {
|
||||
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
wantUpdate := rdsUpdate{routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
wantUpdate := RouteConfigUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: wantUpdate,
|
||||
})
|
||||
|
||||
if u, err := rdsUpdateCh.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdate{}, rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected rdsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
if u, err := rdsUpdateCh.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Another watch for the resource in cache.
|
||||
rdsUpdateCh2 := testutils.NewChannel()
|
||||
c.watchRDS(testRDSName, func(update rdsUpdate, err error) {
|
||||
c.watchRDS(testRDSName, func(update RouteConfigUpdate, err error) {
|
||||
rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err})
|
||||
})
|
||||
if n, err := v2Client.addWatches[rdsURL].Receive(); err == nil {
|
||||
if n, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err == nil {
|
||||
t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err)
|
||||
}
|
||||
|
||||
// New watch should receive the update.
|
||||
if u, err := rdsUpdateCh2.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdate{}, rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected rdsUpdate: %v, error receiving from channel: %v", u, err)
|
||||
if u, err := rdsUpdateCh2.Receive(); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err)
|
||||
}
|
||||
|
||||
// Old watch should see nothing.
|
||||
if u, err := rdsUpdateCh.TimedReceive(chanRecvTimeout); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected rdsUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,135 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ServiceUpdate contains an update about the service.
|
||||
type ServiceUpdate struct {
|
||||
// Routes contain matchers+actions to route RPCs.
|
||||
Routes []*Route
|
||||
}
|
||||
|
||||
// WatchService uses LDS and RDS to discover information about the provided
|
||||
// serviceName.
|
||||
//
|
||||
// WatchService can only be called once. The second call will not start a
|
||||
// watcher and the callback will get an error. This is because an xDS
|
||||
// client is expected to be used only by one ClientConn.
|
||||
//
|
||||
// Note that during race (e.g. an xDS response is received while the user is
|
||||
// calling cancel()), there's a small window where the callback can be called
|
||||
// after the watcher is canceled. The caller needs to handle this case.
|
||||
func (c *Client) WatchService(serviceName string, cb func(ServiceUpdate, error)) (cancel func()) {
|
||||
c.mu.Lock()
|
||||
if len(c.ldsWatchers) != 0 {
|
||||
go cb(ServiceUpdate{}, fmt.Errorf("unexpected WatchService when there's another service being watched"))
|
||||
c.mu.Unlock()
|
||||
return func() {}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
|
||||
w := &serviceUpdateWatcher{c: c, serviceCb: cb}
|
||||
w.ldsCancel = c.watchLDS(serviceName, w.handleLDSResp)
|
||||
|
||||
return w.close
|
||||
}
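// A minimal usage sketch for this API (the target name and callback body are
// assumed, not part of this commit):
//
//	cancel := c.WatchService("foo.example.com", func(su ServiceUpdate, err error) {
//		if err != nil {
//			// Handle watch errors, e.g. resource-not-found.
//			return
//		}
//		// Consume su.Routes.
//	})
//	defer cancel()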
|
||||
|
||||
// serviceUpdateWatcher handles LDS and RDS responses, and calls the service
|
||||
// callback at the right time.
|
||||
type serviceUpdateWatcher struct {
|
||||
c *Client
|
||||
ldsCancel func()
|
||||
serviceCb func(ServiceUpdate, error)
|
||||
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
rdsName string
|
||||
rdsCancel func()
|
||||
}
|
||||
|
||||
func (w *serviceUpdateWatcher) handleLDSResp(update ldsUpdate, err error) {
|
||||
w.c.logger.Infof("xds: client received LDS update: %+v, err: %v", update, err)
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if w.closed {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
// We check the error type and do different things. For now, the only
|
||||
// type we check is ResourceNotFound, which indicates the LDS resource
|
||||
// was removed, and besides sending the error to the callback, we also
|
||||
// cancel the RDS watch.
|
||||
if ErrType(err) == ErrorTypeResourceNotFound && w.rdsCancel != nil {
|
||||
w.rdsCancel()
|
||||
w.rdsName = ""
|
||||
w.rdsCancel = nil
|
||||
}
|
||||
// The other error cases still return early without canceling the
|
||||
// existing RDS watch.
|
||||
w.serviceCb(ServiceUpdate{}, err)
|
||||
return
|
||||
}
|
||||
|
||||
if w.rdsName == update.routeName {
|
||||
// If the new routeName is the same as the previous, don't cancel and
|
||||
// restart the RDS watch.
|
||||
return
|
||||
}
|
||||
w.rdsName = update.routeName
|
||||
if w.rdsCancel != nil {
|
||||
w.rdsCancel()
|
||||
}
|
||||
w.rdsCancel = w.c.watchRDS(update.routeName, w.handleRDSResp)
|
||||
}
|
||||
|
||||
func (w *serviceUpdateWatcher) handleRDSResp(update rdsUpdate, err error) {
|
||||
w.c.logger.Infof("xds: client received RDS update: %+v, err: %v", update, err)
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if w.closed {
|
||||
return
|
||||
}
|
||||
if w.rdsCancel == nil {
|
||||
// This means only the RDS watch was canceled, which can happen if the LDS
|
||||
// resource is removed.
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
w.serviceCb(ServiceUpdate{}, err)
|
||||
return
|
||||
}
|
||||
w.serviceCb(ServiceUpdate{
|
||||
Routes: update.routes,
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (w *serviceUpdateWatcher) close() {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.closed = true
|
||||
w.ldsCancel()
|
||||
if w.rdsCancel != nil {
|
||||
w.rdsCancel()
|
||||
w.rdsCancel = nil
|
||||
}
|
||||
}
|
|
@ -19,16 +19,13 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
|
||||
"google.golang.org/grpc/xds/internal/testutils"
|
||||
"google.golang.org/grpc/xds/internal/testutils/fakeserver"
|
||||
"google.golang.org/grpc/xds/internal/version"
|
||||
)
|
||||
|
||||
type serviceUpdateErr struct {
|
||||
|
@ -42,7 +39,7 @@ var serviceCmpOpts = []cmp.Option{cmp.AllowUnexported(serviceUpdateErr{}), cmpop
|
|||
// - an update is received after a watch()
|
||||
// - an update with routes received
|
||||
func (s) TestServiceWatch(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -60,17 +57,17 @@ func (s) TestServiceWatch(t *testing.T) {
|
|||
|
||||
wantUpdate := ServiceUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{wantUpdate, nil}, serviceCmpOpts...) {
|
||||
|
@ -83,9 +80,9 @@ func (s) TestServiceWatch(t *testing.T) {
|
|||
Action: map[string]uint32{testCDSName: 1},
|
||||
}},
|
||||
}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {
|
||||
routes: []*Route{{
|
||||
Routes: []*Route{{
|
||||
Prefix: newStringP(""),
|
||||
Action: map[string]uint32{testCDSName: 1},
|
||||
}},
|
||||
|
@ -100,7 +97,7 @@ func (s) TestServiceWatch(t *testing.T) {
|
|||
// response, the second LDS response triggers a new RDS watch, and an update of
|
||||
// the old RDS watch doesn't trigger an update to the service callback.
|
||||
func (s) TestServiceWatchLDSUpdate(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -118,17 +115,17 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) {
|
|||
|
||||
wantUpdate := ServiceUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{wantUpdate, nil}, serviceCmpOpts...) {
|
||||
|
@ -136,16 +133,16 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Another LDS update with a different RDS_name.
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName + "2"},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName + "2"},
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
|
||||
// Another update for the old name.
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != testutils.ErrRecvTimeout {
|
||||
|
@ -154,8 +151,8 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) {
|
|||
|
||||
wantUpdate2 := ServiceUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "2": 1}}}}
|
||||
// RDS update for the new name.
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName + "2": {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "2": 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName + "2": {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "2": 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{wantUpdate2, nil}, serviceCmpOpts...) {
|
||||
|
@ -167,7 +164,7 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) {
|
|||
// error (because only one is allowed). But the first watch still receives
|
||||
// updates.
|
||||
func (s) TestServiceWatchSecond(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -185,17 +182,17 @@ func (s) TestServiceWatchSecond(t *testing.T) {
|
|||
|
||||
wantUpdate := ServiceUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{wantUpdate, nil}, serviceCmpOpts...) {
|
||||
|
@ -222,11 +219,11 @@ func (s) TestServiceWatchSecond(t *testing.T) {
|
|||
|
||||
// Send update again, first callback should be called, second should
|
||||
// timeout.
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{wantUpdate, nil}, serviceCmpOpts...) {
|
||||
|
@ -242,135 +239,130 @@ func (s) TestServiceWatchSecond(t *testing.T) {
|
|||
// does not respond to the requests being sent out as part of registering a
|
||||
// service update watcher. The callback will get an error.
|
||||
func (s) TestServiceWatchWithNoResponseFromServer(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
xdsClient, err := New(clientOpts(fakeServer.Address))
|
||||
if err != nil {
|
||||
t.Fatalf("New returned error: %v", err)
|
||||
}
|
||||
defer xdsClient.Close()
|
||||
t.Log("Created an xdsClient...")
|
||||
|
||||
oldWatchExpiryTimeout := defaultWatchExpiryTimeout
|
||||
defaultWatchExpiryTimeout = 500 * time.Millisecond
|
||||
defer func() {
|
||||
defaultWatchExpiryTimeout = oldWatchExpiryTimeout
|
||||
}()
|
||||
|
||||
callbackCh := testutils.NewChannel()
|
||||
cancelWatch := xdsClient.WatchService(goodLDSTarget1, func(su ServiceUpdate, err error) {
|
||||
if su.Routes != nil {
|
||||
callbackCh.Send(fmt.Errorf("got WeightedCluster: %+v, want nil", su.Routes))
|
||||
return
|
||||
}
|
||||
if err == nil {
|
||||
callbackCh.Send(errors.New("xdsClient.WatchService returned error non-nil error"))
|
||||
return
|
||||
}
|
||||
callbackCh.Send(nil)
|
||||
})
|
||||
defer cancelWatch()
|
||||
t.Log("Registered a watcher for service updates...")
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
// Wait for one request from the client, but send no reponses.
|
||||
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
|
||||
t.Fatalf("Timeout expired when expecting an LDS request")
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
v2Client := <-v2ClientCh
|
||||
|
||||
serviceUpdateCh := testutils.NewChannel()
|
||||
c.WatchService(testLDSName, func(update ServiceUpdate, err error) {
|
||||
serviceUpdateCh.Send(serviceUpdateErr{u: update, err: err})
|
||||
})
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
u, err := serviceUpdateCh.TimedReceive(defaultWatchExpiryTimeout * 2)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get serviceUpdate: %v", err)
|
||||
}
|
||||
uu := u.(serviceUpdateErr)
|
||||
if !cmp.Equal(uu.u, ServiceUpdate{}) {
|
||||
t.Errorf("unexpected serviceUpdate: %v, want %v", uu.u, ServiceUpdate{})
|
||||
}
|
||||
if uu.err == nil {
|
||||
t.Errorf("unexpected serviceError: <nil>, want error watcher timeout")
|
||||
}
|
||||
waitForNilErr(t, callbackCh)
|
||||
}
|
||||
|
||||
// TestServiceWatchEmptyRDS tests the case where the underlying v2Client
|
||||
// receives an empty RDS response. The callback will get an error.
|
||||
func (s) TestServiceWatchEmptyRDS(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
xdsClient, err := New(clientOpts(fakeServer.Address))
|
||||
if err != nil {
|
||||
t.Fatalf("New returned error: %v", err)
|
||||
}
|
||||
defer xdsClient.Close()
|
||||
t.Log("Created an xdsClient...")
|
||||
|
||||
oldWatchExpiryTimeout := defaultWatchExpiryTimeout
|
||||
defaultWatchExpiryTimeout = 500 * time.Millisecond
|
||||
defer func() {
|
||||
defaultWatchExpiryTimeout = oldWatchExpiryTimeout
|
||||
}()
|
||||
|
||||
callbackCh := testutils.NewChannel()
|
||||
cancelWatch := xdsClient.WatchService(goodLDSTarget1, func(su ServiceUpdate, err error) {
|
||||
if su.Routes != nil {
|
||||
callbackCh.Send(fmt.Errorf("got WeightedCluster: %+v, want nil", su.Routes))
|
||||
return
|
||||
}
|
||||
if err == nil {
|
||||
callbackCh.Send(errors.New("xdsClient.WatchService returned error non-nil error"))
|
||||
return
|
||||
}
|
||||
callbackCh.Send(nil)
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
v2Client := <-v2ClientCh
|
||||
|
||||
serviceUpdateCh := testutils.NewChannel()
|
||||
c.WatchService(testLDSName, func(update ServiceUpdate, err error) {
|
||||
serviceUpdateCh.Send(serviceUpdateErr{u: update, err: err})
|
||||
})
|
||||
defer cancelWatch()
|
||||
t.Log("Registered a watcher for service updates...")
|
||||
|
||||
// Make the fakeServer send LDS response.
|
||||
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
|
||||
t.Fatalf("Timeout expired when expecting an LDS request")
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1}
|
||||
|
||||
// Make the fakeServer send an empty RDS response.
|
||||
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
|
||||
t.Fatalf("Timeout expired when expecting an RDS request")
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{})
|
||||
u, err := serviceUpdateCh.TimedReceive(defaultWatchExpiryTimeout * 2)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get serviceUpdate: %v", err)
|
||||
}
|
||||
uu := u.(serviceUpdateErr)
|
||||
if !cmp.Equal(uu.u, ServiceUpdate{}) {
|
||||
t.Errorf("unexpected serviceUpdate: %v, want %v", uu.u, ServiceUpdate{})
|
||||
}
|
||||
if uu.err == nil {
|
||||
t.Errorf("unexpected serviceError: <nil>, want error watcher timeout")
|
||||
}
|
||||
fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: noVirtualHostsInRDSResponse}
|
||||
waitForNilErr(t, callbackCh)
|
||||
}
|
||||
|
||||
// TestServiceWatchWithClientClose tests the case where xDS responses are
|
||||
// received after the client is closed, and we make sure that the registered
|
||||
// watcher callback is not invoked.
|
||||
func (s) TestServiceWatchWithClientClose(t *testing.T) {
|
||||
fakeServer, cleanup, err := fakeserver.StartServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start fake xDS server: %v", err)
|
||||
}
|
||||
oldWatchExpiryTimeout := defaultWatchExpiryTimeout
|
||||
defaultWatchExpiryTimeout = 500 * time.Millisecond
|
||||
defer func() {
|
||||
defaultWatchExpiryTimeout = oldWatchExpiryTimeout
|
||||
}()
|
||||
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
xdsClient, err := New(clientOpts(fakeServer.Address))
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
if err != nil {
|
||||
t.Fatalf("New returned error: %v", err)
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
defer xdsClient.Close()
|
||||
t.Log("Created an xdsClient...")
|
||||
defer c.Close()
|
||||
|
||||
callbackCh := testutils.NewChannel()
|
||||
cancelWatch := xdsClient.WatchService(goodLDSTarget1, func(su ServiceUpdate, err error) {
|
||||
callbackCh.Send(errors.New("watcher callback invoked after client close"))
|
||||
v2Client := <-v2ClientCh
|
||||
|
||||
serviceUpdateCh := testutils.NewChannel()
|
||||
c.WatchService(testLDSName, func(update ServiceUpdate, err error) {
|
||||
serviceUpdateCh.Send(serviceUpdateErr{u: update, err: err})
|
||||
})
|
||||
defer cancelWatch()
|
||||
t.Log("Registered a watcher for service updates...")
|
||||
|
||||
// Make the fakeServer send LDS response.
|
||||
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
|
||||
t.Fatalf("Timeout expired when expecting an LDS request")
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1}
|
||||
|
||||
xdsClient.Close()
|
||||
t.Log("Closing the xdsClient...")
|
||||
|
||||
// Push an RDS response from the fakeserver
|
||||
fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodRDSResponse1}
|
||||
if cbErr, err := callbackCh.Receive(); err != testutils.ErrRecvTimeout {
|
||||
t.Fatal(cbErr)
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
// Client is closed before it receives the RDS response.
|
||||
c.Close()
|
||||
if u, err := serviceUpdateCh.TimedReceive(defaultWatchExpiryTimeout * 2); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected serviceUpdate: %v, %v, want channel recv timeout", u, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -378,7 +370,7 @@ func (s) TestServiceWatchWithClientClose(t *testing.T) {
|
|||
// update contains the same RDS name as the previous, the RDS watch isn't
|
||||
// canceled and restarted.
|
||||
func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -396,17 +388,17 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) {
|
|||
|
||||
wantUpdate := ServiceUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{wantUpdate, nil}, serviceCmpOpts...) {
|
||||
|
@ -414,10 +406,10 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Another LDS update with the same RDS_name.
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if v, err := v2Client.removeWatches[rdsURL].Receive(); err == nil {
|
||||
if v, err := v2Client.removeWatches[version.V2RouteConfigURL].Receive(); err == nil {
|
||||
t.Fatalf("unexpected rds watch cancel: %v", v)
|
||||
}
|
||||
}
|
||||
|
@ -429,7 +421,7 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) {
|
|||
// - one more update without the removed resource
|
||||
// - the callback (above) shouldn't receive any update
|
||||
func (s) TestServiceResourceRemoved(t *testing.T) {
|
||||
v2ClientCh, cleanup := overrideNewXDSV2Client()
|
||||
v2ClientCh, cleanup := overrideNewAPIClient()
|
||||
defer cleanup()
|
||||
|
||||
c, err := New(clientOpts(testXDSServer))
|
||||
|
@ -447,17 +439,17 @@ func (s) TestServiceResourceRemoved(t *testing.T) {
|
|||
|
||||
wantUpdate := ServiceUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}}
|
||||
|
||||
if _, err := v2Client.addWatches[ldsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2ListenerURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName: 1}}}},
|
||||
})
|
||||
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{wantUpdate, nil}, serviceCmpOpts...) {
|
||||
|
@ -466,8 +458,8 @@ func (s) TestServiceResourceRemoved(t *testing.T) {
|
|||
|
||||
// Remove LDS resource, should cancel the RDS watch, and trigger resource
|
||||
// removed error.
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{})
|
||||
if _, err := v2Client.removeWatches[rdsURL].Receive(); err != nil {
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{})
|
||||
if _, err := v2Client.removeWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want watch to be canceled, got error %v", err)
|
||||
}
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || ErrType(u.(serviceUpdateErr).err) != ErrorTypeResourceNotFound {
|
||||
|
@ -476,8 +468,8 @@ func (s) TestServiceResourceRemoved(t *testing.T) {
|
|||
|
||||
// Send RDS update for the removed LDS resource, expect no updates to
|
||||
// callback, because RDS should be canceled.
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "new": 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "new": 1}}}},
|
||||
})
|
||||
if u, err := serviceUpdateCh.Receive(); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected serviceUpdate: %v, want receiving from channel timeout", u)
|
||||
|
@ -486,18 +478,18 @@ func (s) TestServiceResourceRemoved(t *testing.T) {
|
|||
// Add LDS resource, but not RDS resource, should
|
||||
// - start a new RDS watch
|
||||
// - timeout on service channel, because RDS cache was cleared
|
||||
v2Client.r.newLDSUpdate(map[string]ldsUpdate{
|
||||
testLDSName: {routeName: testRDSName},
|
||||
v2Client.r.NewListeners(map[string]ListenerUpdate{
|
||||
testLDSName: {RouteConfigName: testRDSName},
|
||||
})
|
||||
if _, err := v2Client.addWatches[rdsURL].Receive(); err != nil {
|
||||
if _, err := v2Client.addWatches[version.V2RouteConfigURL].Receive(); err != nil {
|
||||
t.Fatalf("want new watch to start, got error %v", err)
|
||||
}
|
||||
if u, err := serviceUpdateCh.Receive(); err != testutils.ErrRecvTimeout {
|
||||
t.Errorf("unexpected serviceUpdate: %v, want receiving from channel timeout", u)
|
||||
}
|
||||
|
||||
v2Client.r.newRDSUpdate(map[string]rdsUpdate{
|
||||
testRDSName: {routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "new2": 1}}}},
|
||||
v2Client.r.NewRouteConfigs(map[string]RouteConfigUpdate{
|
||||
testRDSName: {Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "new2": 1}}}},
|
||||
})
|
||||
if u, err := serviceUpdateCh.Receive(); err != nil || !cmp.Equal(u, serviceUpdateErr{ServiceUpdate{Routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{testCDSName + "new2": 1}}}}, nil}, serviceCmpOpts...) {
|
||||
t.Errorf("unexpected serviceUpdate: %v, error receiving from channel: %v", u, err)
|
||||
|
|
|
@ -0,0 +1,507 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
|
||||
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||
v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
|
||||
v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
|
||||
v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
|
||||
v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
|
||||
v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3"
|
||||
"github.com/golang/protobuf/proto"
|
||||
anypb "github.com/golang/protobuf/ptypes/any"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/xds/internal"
|
||||
"google.golang.org/grpc/xds/internal/version"
|
||||
)
|
||||
|
||||
// UnmarshalListener processes resources received in an LDS response, validates
|
||||
// them, and transforms them into a native struct which contains only fields we
|
||||
// are interested in.
|
||||
func UnmarshalListener(resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdate, error) {
|
||||
update := make(map[string]ListenerUpdate)
|
||||
for _, r := range resources {
|
||||
if t := r.GetTypeUrl(); t != version.V2ListenerURL && t != version.V3ListenerURL {
|
||||
return nil, fmt.Errorf("xds: unexpected resource type: %s in LDS response", t)
|
||||
}
|
||||
lis := &v3listenerpb.Listener{}
|
||||
if err := proto.Unmarshal(r.GetValue(), lis); err != nil {
|
||||
return nil, fmt.Errorf("xds: failed to unmarshal resource in LDS response: %v", err)
|
||||
}
|
||||
logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, lis)
|
||||
routeName, err := getRouteConfigNameFromListener(lis, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
update[lis.GetName()] = ListenerUpdate{RouteConfigName: routeName}
|
||||
}
|
||||
return update, nil
|
||||
}
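// A hedged sketch of feeding UnmarshalListener a well-formed v3 resource,
// built only from the messages imported above (all names here are
// illustrative, not part of this commit):
//
//	hcm, _ := proto.Marshal(&v3httppb.HttpConnectionManager{
//		RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{
//			ConfigSource: &v3corepb.ConfigSource{
//				ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}},
//			},
//			RouteConfigName: "example-route",
//		}},
//	})
//	lis, _ := proto.Marshal(&v3listenerpb.Listener{
//		Name: "example-listener",
//		ApiListener: &v3listenerpb.ApiListener{
//			ApiListener: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL, Value: hcm},
//		},
//	})
//	updates, err := UnmarshalListener([]*anypb.Any{{TypeUrl: version.V3ListenerURL, Value: lis}}, nil)
//	// On success, updates["example-listener"].RouteConfigName is "example-route".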
|
||||
|
||||
// getRouteConfigNameFromListener checks if the provided Listener proto meets
|
||||
// the expected criteria. If so, it returns a non-empty routeConfigName.
|
||||
func getRouteConfigNameFromListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (string, error) {
|
||||
if lis.GetApiListener() == nil {
|
||||
return "", fmt.Errorf("xds: no api_listener field in LDS response %+v", lis)
|
||||
}
|
||||
apiLisAny := lis.GetApiListener().GetApiListener()
|
||||
if t := apiLisAny.GetTypeUrl(); t != version.V3HTTPConnManagerURL && t != version.V2HTTPConnManagerURL {
|
||||
return "", fmt.Errorf("xds: unexpected resource type: %s in LDS response", t)
|
||||
}
|
||||
apiLis := &v3httppb.HttpConnectionManager{}
|
||||
if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil {
|
||||
return "", fmt.Errorf("xds: failed to unmarshal api_listner in LDS response: %v", err)
|
||||
}
|
||||
|
||||
logger.Infof("Resource with type %T, contains %v", apiLis, apiLis)
|
||||
switch apiLis.RouteSpecifier.(type) {
|
||||
case *v3httppb.HttpConnectionManager_Rds:
|
||||
if apiLis.GetRds().GetConfigSource().GetAds() == nil {
|
||||
return "", fmt.Errorf("xds: ConfigSource is not ADS in LDS response: %+v", lis)
|
||||
}
|
||||
name := apiLis.GetRds().GetRouteConfigName()
|
||||
if name == "" {
|
||||
return "", fmt.Errorf("xds: empty route_config_name in LDS response: %+v", lis)
|
||||
}
|
||||
return name, nil
|
||||
case *v3httppb.HttpConnectionManager_RouteConfig:
|
||||
// TODO: Add support for specifying the RouteConfiguration inline
|
||||
// in the LDS response.
|
||||
return "", fmt.Errorf("xds: LDS response contains RDS config inline. Not supported for now: %+v", apiLis)
|
||||
case nil:
|
||||
return "", fmt.Errorf("xds: no RouteSpecifier in received LDS response: %+v", apiLis)
|
||||
default:
|
||||
return "", fmt.Errorf("xds: unsupported type %T for RouteSpecifier in received LDS response", apiLis.RouteSpecifier)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalRouteConfig processes resources received in an RDS response,
|
||||
// validates them, and transforms them into a native struct which contains only
|
||||
// fields we are interested in. The provided hostname determines the route
|
||||
// configuration resources of interest.
|
||||
func UnmarshalRouteConfig(resources []*anypb.Any, hostname string, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdate, error) {
|
||||
update := make(map[string]RouteConfigUpdate)
|
||||
for _, r := range resources {
|
||||
if t := r.GetTypeUrl(); t != version.V2RouteConfigURL && t != version.V3RouteConfigURL {
|
||||
return nil, fmt.Errorf("xds: unexpected resource type: %s in RDS response", t)
|
||||
}
|
||||
rc := &v3routepb.RouteConfiguration{}
|
||||
if err := proto.Unmarshal(r.GetValue(), rc); err != nil {
|
||||
return nil, fmt.Errorf("xds: failed to unmarshal resource in RDS response: %v", err)
|
||||
}
|
||||
logger.Infof("Resource with name: %v, type: %T, contains: %v. Picking routes for current watching hostname %v", rc.GetName(), rc, rc, hostname)
|
||||
|
||||
// Use the hostname (resourceName for LDS) to find the routes.
|
||||
u, err := generateRDSUpdateFromRouteConfiguration(rc, hostname, logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("xds: received invalid RouteConfiguration in RDS response: %+v with err: %v", rc, err)
|
||||
}
|
||||
update[rc.GetName()] = u
|
||||
}
|
||||
return update, nil
|
||||
}
|
||||
|
||||
// generateRDSUpdateFromRouteConfiguration checks if the provided
|
||||
// RouteConfiguration meets the expected criteria. If so, it returns a
|
||||
// RouteConfigUpdate with nil error.
|
||||
//
|
||||
// A RouteConfiguration resource is considered valid only if it contains a
|
||||
// VirtualHost whose domain field matches the server name from the URI passed
|
||||
// to the gRPC channel, and it contains a clusterName or a weighted cluster.
|
||||
//
|
||||
// The RouteConfiguration includes a list of VirtualHosts, which may have zero
|
||||
// or more elements. We are interested in the element whose domains field
|
||||
// matches the server name specified in the "xds:" URI. The only field in the
|
||||
// VirtualHost proto that we are interested in is the list of routes. We
|
||||
// only look at the last route in the list (the default route), whose match
|
||||
// field must be empty and whose route field must be set. Inside that route
|
||||
// message, the cluster field will contain the clusterName or weighted clusters
|
||||
// we are looking for.
|
||||
func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, host string, logger *grpclog.PrefixLogger) (RouteConfigUpdate, error) {
|
||||
//
|
||||
// Currently this returns an empty update on error, and the caller will
// return an error.
|
||||
// But the error doesn't contain details of why the response is invalid
|
||||
// (mismatched domain or empty route).
|
||||
//
|
||||
// For logging purposes, we can log in line. But if we want to populate
|
||||
// error details for nack, a detailed error needs to be returned.
|
||||
vh := findBestMatchingVirtualHost(host, rc.GetVirtualHosts())
|
||||
if vh == nil {
|
||||
// No matching virtual host found.
|
||||
return RouteConfigUpdate{}, fmt.Errorf("no matching virtual host found")
|
||||
}
|
||||
if len(vh.Routes) == 0 {
|
||||
// The matched virtual host has no routes; this is invalid because there
|
||||
// should be at least one default route.
|
||||
return RouteConfigUpdate{}, fmt.Errorf("matched virtual host has no routes")
|
||||
}
|
||||
|
||||
routes, err := routesProtoToSlice(vh.Routes, logger)
|
||||
if err != nil {
|
||||
return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err)
|
||||
}
|
||||
return RouteConfigUpdate{Routes: routes}, nil
|
||||
}
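// For example (all values assumed for illustration), a RouteConfiguration
// whose only virtual host matches the watched hostname and carries a single
// catch-all route:
//
//	rc := &v3routepb.RouteConfiguration{
//		Name: "example-route",
//		VirtualHosts: []*v3routepb.VirtualHost{{
//			Domains: []string{"example.com"},
//			Routes: []*v3routepb.Route{{
//				Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}},
//				Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{
//					ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: "example-cluster"},
//				}},
//			}},
//		}},
//	}
//	u, _ := generateRDSUpdateFromRouteConfiguration(rc, "example.com", nil)
//	// u.Routes[0].Action is map[string]uint32{"example-cluster": 1}.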

func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger) ([]*Route, error) {
    var routesRet []*Route

    for _, r := range routes {
        match := r.GetMatch()
        if match == nil {
            return nil, fmt.Errorf("route %+v doesn't have a match", r)
        }

        if len(match.GetQueryParameters()) != 0 {
            // Ignore routes with query parameter matchers.
            logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r)
            continue
        }

        if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil && !caseSensitive.Value {
            return nil, fmt.Errorf("route %+v has case-sensitive false", r)
        }

        pathSp := match.GetPathSpecifier()
        if pathSp == nil {
            return nil, fmt.Errorf("route %+v doesn't have a path specifier", r)
        }

        var route Route
        switch pt := pathSp.(type) {
        case *v3routepb.RouteMatch_Prefix:
            route.Prefix = &pt.Prefix
        case *v3routepb.RouteMatch_Path:
            route.Path = &pt.Path
        case *v3routepb.RouteMatch_SafeRegex:
            route.Regex = &pt.SafeRegex.Regex
        default:
            logger.Warningf("route %+v has an unrecognized path specifier: %+v", r, pt)
            continue
        }

        for _, h := range match.GetHeaders() {
            var header HeaderMatcher
            switch ht := h.GetHeaderMatchSpecifier().(type) {
            case *v3routepb.HeaderMatcher_ExactMatch:
                header.ExactMatch = &ht.ExactMatch
            case *v3routepb.HeaderMatcher_SafeRegexMatch:
                header.RegexMatch = &ht.SafeRegexMatch.Regex
            case *v3routepb.HeaderMatcher_RangeMatch:
                header.RangeMatch = &Int64Range{
                    Start: ht.RangeMatch.Start,
                    End:   ht.RangeMatch.End,
                }
            case *v3routepb.HeaderMatcher_PresentMatch:
                header.PresentMatch = &ht.PresentMatch
            case *v3routepb.HeaderMatcher_PrefixMatch:
                header.PrefixMatch = &ht.PrefixMatch
            case *v3routepb.HeaderMatcher_SuffixMatch:
                header.SuffixMatch = &ht.SuffixMatch
            default:
                logger.Warningf("route %+v has an unrecognized header matcher: %+v", r, ht)
                continue
            }
            header.Name = h.GetName()
            invert := h.GetInvertMatch()
            header.InvertMatch = &invert
            route.Headers = append(route.Headers, &header)
        }

        if fr := match.GetRuntimeFraction(); fr != nil {
            d := fr.GetDefaultValue()
            n := d.GetNumerator()
            switch d.GetDenominator() {
            case v3typepb.FractionalPercent_HUNDRED:
                n *= 10000
            case v3typepb.FractionalPercent_TEN_THOUSAND:
                n *= 100
            case v3typepb.FractionalPercent_MILLION:
            }
            route.Fraction = &n
        }

        clusters := make(map[string]uint32)
        switch a := r.GetRoute().GetClusterSpecifier().(type) {
        case *v3routepb.RouteAction_Cluster:
            clusters[a.Cluster] = 1
        case *v3routepb.RouteAction_WeightedClusters:
            wcs := a.WeightedClusters
            var totalWeight uint32
            for _, c := range wcs.Clusters {
                w := c.GetWeight().GetValue()
                clusters[c.GetName()] = w
                totalWeight += w
            }
            if totalWeight != wcs.GetTotalWeight().GetValue() {
                return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total weight, got: %v, want %v", r, a, totalWeight, wcs.GetTotalWeight().GetValue())
            }
        case *v3routepb.RouteAction_ClusterHeader:
            continue
        }

        route.Action = clusters
        routesRet = append(routesRet, &route)
    }
    return routesRet, nil
}
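
// fractionPerMillion is an editorial sketch (hypothetical helper, not part of
// this change) restating the normalization done above for runtime fractions:
// every FractionalPercent is scaled to a numerator out of one million, so
// 5/HUNDRED, 500/TEN_THOUSAND and 50000/MILLION all yield 50000.
func fractionPerMillion(d *v3typepb.FractionalPercent) uint32 {
    n := d.GetNumerator()
    switch d.GetDenominator() {
    case v3typepb.FractionalPercent_HUNDRED:
        n *= 10000 // 1/100 == 10000/1000000
    case v3typepb.FractionalPercent_TEN_THOUSAND:
        n *= 100 // 1/10000 == 100/1000000
    case v3typepb.FractionalPercent_MILLION:
        // Already expressed per million.
    }
    return n
}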

type domainMatchType int

const (
    domainMatchTypeInvalid domainMatchType = iota
    domainMatchTypeUniversal
    domainMatchTypePrefix
    domainMatchTypeSuffix
    domainMatchTypeExact
)

// Exact > Suffix > Prefix > Universal > Invalid.
func (t domainMatchType) betterThan(b domainMatchType) bool {
    return t > b
}

func matchTypeForDomain(d string) domainMatchType {
    if d == "" {
        return domainMatchTypeInvalid
    }
    if d == "*" {
        return domainMatchTypeUniversal
    }
    if strings.HasPrefix(d, "*") {
        return domainMatchTypeSuffix
    }
    if strings.HasSuffix(d, "*") {
        return domainMatchTypePrefix
    }
    if strings.Contains(d, "*") {
        return domainMatchTypeInvalid
    }
    return domainMatchTypeExact
}

func match(domain, host string) (domainMatchType, bool) {
    switch typ := matchTypeForDomain(domain); typ {
    case domainMatchTypeInvalid:
        return typ, false
    case domainMatchTypeUniversal:
        return typ, true
    case domainMatchTypePrefix:
        // abc.*
        return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*"))
    case domainMatchTypeSuffix:
        // *.123
        return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*"))
    case domainMatchTypeExact:
        return typ, domain == host
    default:
        return domainMatchTypeInvalid, false
    }
}
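
// Worked example (editorial sketch, not from the original source): how match
// classifies a few domain patterns against the host "foo.bar.com":
//
//   match("foo.bar.com", "foo.bar.com") returns (domainMatchTypeExact, true)
//   match("*.bar.com", "foo.bar.com")   returns (domainMatchTypeSuffix, true)
//   match("foo.*", "foo.bar.com")       returns (domainMatchTypePrefix, true)
//   match("*", "foo.bar.com")           returns (domainMatchTypeUniversal, true)
//   match("fo*com", "foo.bar.com")      returns (domainMatchTypeInvalid, false)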

// findBestMatchingVirtualHost returns the virtual host whose domains field
// best matches host.
//
// The domains field supports 4 different matching pattern types:
// - Exact match
// - Suffix match (e.g. “*ABC”)
// - Prefix match (e.g. “ABC*”)
// - Universal match (e.g. “*”)
//
// The best match is defined as:
// - A match is better if its matching pattern type is better.
//   - Exact match > suffix match > prefix match > universal match.
// - If two matches are of the same pattern type, the longer match is better.
//   - This compares the length of the matching pattern, e.g. “*ABCDE” >
//     “*ABC”.
func findBestMatchingVirtualHost(host string, vHosts []*v3routepb.VirtualHost) *v3routepb.VirtualHost {
    var (
        matchVh   *v3routepb.VirtualHost
        matchType = domainMatchTypeInvalid
        matchLen  int
    )
    for _, vh := range vHosts {
        for _, domain := range vh.GetDomains() {
            typ, matched := match(domain, host)
            if typ == domainMatchTypeInvalid {
                // The RDS response is invalid.
                return nil
            }
            if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched {
                // The previous match has a better type, or the previous match
                // has a better length, or this domain isn't a match.
                continue
            }
            matchVh = vh
            matchType = typ
            matchLen = len(domain)
        }
    }
    return matchVh
}
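
// pickVirtualHost is an editorial sketch (hypothetical helper, not part of
// this change) showing the selection order above: an exact domain beats a
// suffix pattern, which beats a prefix pattern, which beats "*"; ties within
// one pattern type are broken by pattern length.
func pickVirtualHost() *v3routepb.VirtualHost {
    vhSuffix := &v3routepb.VirtualHost{Domains: []string{"*.example.com"}}
    vhExact := &v3routepb.VirtualHost{Domains: []string{"foo.example.com"}}
    // Returns vhExact: both virtual hosts match the host, but the exact
    // match wins over the suffix match.
    return findBestMatchingVirtualHost("foo.example.com", []*v3routepb.VirtualHost{vhSuffix, vhExact})
}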

// UnmarshalCluster processes resources received in a CDS response, validates
// them, and transforms them into a native struct which contains only fields we
// are interested in.
func UnmarshalCluster(resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdate, error) {
    update := make(map[string]ClusterUpdate)
    for _, r := range resources {
        if t := r.GetTypeUrl(); t != version.V2ClusterURL && t != version.V3ClusterURL {
            return nil, fmt.Errorf("xds: unexpected resource type: %s in CDS response", t)
        }

        cluster := &v3clusterpb.Cluster{}
        if err := proto.Unmarshal(r.GetValue(), cluster); err != nil {
            return nil, fmt.Errorf("xds: failed to unmarshal resource in CDS response: %v", err)
        }
        logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, cluster)
        cu, err := validateCluster(cluster)
        if err != nil {
            return nil, err
        }

        // If the Cluster message in the CDS response did not contain a
        // serviceName, we will just use the clusterName for EDS.
        if cu.ServiceName == "" {
            cu.ServiceName = cluster.GetName()
        }
        logger.Debugf("Resource with name %v, value %+v added to cache", cluster.GetName(), cu)
        update[cluster.GetName()] = cu
    }
    return update, nil
}

func validateCluster(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) {
    emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false}
    switch {
    case cluster.GetType() != v3clusterpb.Cluster_EDS:
        return emptyUpdate, fmt.Errorf("xds: unexpected cluster type %v in response: %+v", cluster.GetType(), cluster)
    case cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil:
        return emptyUpdate, fmt.Errorf("xds: unexpected edsConfig in response: %+v", cluster)
    case cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN:
        return emptyUpdate, fmt.Errorf("xds: unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
    }

    return ClusterUpdate{
        ServiceName: cluster.GetEdsClusterConfig().GetServiceName(),
        EnableLRS:   cluster.GetLrsServer().GetSelf() != nil,
    }, nil
}
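
// minimalValidCluster is an editorial sketch (hypothetical helper, not part of
// this change): the smallest v3 Cluster that validateCluster accepts — EDS
// discovery type, an ADS-backed EDS config, and ROUND_ROBIN LB policy. With no
// LrsServer set, the resulting update has EnableLRS false.
func minimalValidCluster() *v3clusterpb.Cluster {
    return &v3clusterpb.Cluster{
        Name:                 "cluster-A",
        ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
        EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
            EdsConfig: &v3corepb.ConfigSource{
                ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}},
            },
        },
        LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
    }
}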

// UnmarshalEndpoints processes resources received in an EDS response,
// validates them, and transforms them into a native struct which contains only
// fields we are interested in.
func UnmarshalEndpoints(resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdate, error) {
    update := make(map[string]EndpointsUpdate)
    for _, r := range resources {
        if t := r.GetTypeUrl(); t != version.V2EndpointsURL && t != version.V3EndpointsURL {
            return nil, fmt.Errorf("xds: unexpected resource type: %s in EDS response", t)
        }

        cla := &v3endpointpb.ClusterLoadAssignment{}
        if err := proto.Unmarshal(r.GetValue(), cla); err != nil {
            return nil, fmt.Errorf("xds: failed to unmarshal resource in EDS response: %v", err)
        }
        logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, cla)

        u, err := parseEDSRespProto(cla)
        if err != nil {
            return nil, err
        }
        update[cla.GetClusterName()] = u
    }
    return update, nil
}

func parseAddress(socketAddress *v3corepb.SocketAddress) string {
    return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue())))
}

func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig {
    percentage := dropPolicy.GetDropPercentage()
    var (
        numerator   = percentage.GetNumerator()
        denominator uint32
    )
    switch percentage.GetDenominator() {
    case v3typepb.FractionalPercent_HUNDRED:
        denominator = 100
    case v3typepb.FractionalPercent_TEN_THOUSAND:
        denominator = 10000
    case v3typepb.FractionalPercent_MILLION:
        denominator = 1000000
    }
    return OverloadDropConfig{
        Category:    dropPolicy.GetCategory(),
        Numerator:   numerator,
        Denominator: denominator,
    }
}
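
// exampleDropConfig is an editorial sketch (hypothetical helper, not part of
// this change): a "throttle" drop policy of 3/HUNDRED parses into
// OverloadDropConfig{Category: "throttle", Numerator: 3, Denominator: 100},
// i.e. the balancer should drop 3 of every 100 requests in that category.
func exampleDropConfig() OverloadDropConfig {
    return parseDropPolicy(&v3endpointpb.ClusterLoadAssignment_Policy_DropOverload{
        Category: "throttle",
        DropPercentage: &v3typepb.FractionalPercent{
            Numerator:   3,
            Denominator: v3typepb.FractionalPercent_HUNDRED,
        },
    })
}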

func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint {
    endpoints := make([]Endpoint, 0, len(lbEndpoints))
    for _, lbEndpoint := range lbEndpoints {
        endpoints = append(endpoints, Endpoint{
            HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()),
            Address:      parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()),
            Weight:       lbEndpoint.GetLoadBalancingWeight().GetValue(),
        })
    }
    return endpoints
}
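
// Editorial example (not part of this change): an LbEndpoint whose socket
// address is 10.0.0.1 on port 8080 parses to Endpoint{Address: "10.0.0.1:8080"}.
// Because parseAddress uses net.JoinHostPort, IPv6 literals come out
// bracketed, e.g. "[2001:db8::1]:8080".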

func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) {
    ret := EndpointsUpdate{}
    for _, dropPolicy := range m.GetPolicy().GetDropOverloads() {
        ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy))
    }
    priorities := make(map[uint32]struct{})
    for _, locality := range m.Endpoints {
        l := locality.GetLocality()
        if l == nil {
            return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality)
        }
        lid := internal.LocalityID{
            Region:  l.Region,
            Zone:    l.Zone,
            SubZone: l.SubZone,
        }
        priority := locality.GetPriority()
        priorities[priority] = struct{}{}
        ret.Localities = append(ret.Localities, Locality{
            ID:        lid,
            Endpoints: parseEndpoints(locality.GetLbEndpoints()),
            Weight:    locality.GetLoadBalancingWeight().GetValue(),
            Priority:  priority,
        })
    }
    for i := 0; i < len(priorities); i++ {
        if _, ok := priorities[uint32(i)]; !ok {
            return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities)
        }
    }
    return ret, nil
}
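
// Editorial note (sketch, not part of this change): the final loop above
// enforces that locality priorities form a contiguous range starting at 0.
// With N distinct priorities received, every value in [0, N) must be present:
// priorities {0, 1, 2} are accepted, while {0, 2} is rejected because
// priority 1 is missing.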

@ -0,0 +1,122 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package tests

import (
    "testing"

    "google.golang.org/grpc"
    "google.golang.org/grpc/internal/grpctest"
    xdsclient "google.golang.org/grpc/xds/internal/client"
    "google.golang.org/grpc/xds/internal/client/bootstrap"
    _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 API client.
    "google.golang.org/grpc/xds/internal/testutils"
    "google.golang.org/grpc/xds/internal/version"
)

type s struct {
    grpctest.Tester
}

func Test(t *testing.T) {
    grpctest.RunSubTests(t, s{})
}

const (
    testXDSServer = "xds-server"
)

func clientOpts(balancerName string) xdsclient.Options {
    return xdsclient.Options{
        Config: bootstrap.Config{
            BalancerName: balancerName,
            Creds:        grpc.WithInsecure(),
            NodeProto:    testutils.EmptyNodeProtoV2,
        },
    }
}

func (s) TestNew(t *testing.T) {
    tests := []struct {
        name    string
        opts    xdsclient.Options
        wantErr bool
    }{
        {name: "empty-opts", opts: xdsclient.Options{}, wantErr: true},
        {
            name: "empty-balancer-name",
            opts: xdsclient.Options{
                Config: bootstrap.Config{
                    Creds:     grpc.WithInsecure(),
                    NodeProto: testutils.EmptyNodeProtoV2,
                },
            },
            wantErr: true,
        },
        {
            name: "empty-dial-creds",
            opts: xdsclient.Options{
                Config: bootstrap.Config{
                    BalancerName: testXDSServer,
                    NodeProto:    testutils.EmptyNodeProtoV2,
                },
            },
            wantErr: true,
        },
        {
            name: "empty-node-proto",
            opts: xdsclient.Options{
                Config: bootstrap.Config{
                    BalancerName: testXDSServer,
                    Creds:        grpc.WithInsecure(),
                },
            },
            wantErr: true,
        },
        {
            name: "node-proto-version-mismatch",
            opts: xdsclient.Options{
                Config: bootstrap.Config{
                    BalancerName: testXDSServer,
                    Creds:        grpc.WithInsecure(),
                    NodeProto:    testutils.EmptyNodeProtoV3,
                    TransportAPI: version.TransportV2,
                },
            },
            wantErr: true,
        },
        // TODO(easwars): Add cases for v3 API client.
        {
            name: "happy-case",
            opts: clientOpts(testXDSServer),
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            c, err := xdsclient.New(test.opts)
            if (err != nil) != test.wantErr {
                t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErr)
            }
            if c != nil {
                c.Close()
            }
        })
    }
}

@ -1,209 +0,0 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package client

import (
    "reflect"
    "testing"
    "time"

    xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
    "github.com/google/go-cmp/cmp"
    "google.golang.org/grpc"
    "google.golang.org/grpc/xds/internal/testutils"
    "google.golang.org/grpc/xds/internal/testutils/fakeserver"
)

type watchHandleTestcase struct {
    typeURL      string
    resourceName string

    responseToHandle *xdspb.DiscoveryResponse
    wantHandleErr    bool
    wantUpdate       interface{}
    wantUpdateErr    bool
}

type testUpdateReceiver struct {
    f func(typeURL string, d map[string]interface{})
}

func (t *testUpdateReceiver) newLDSUpdate(d map[string]ldsUpdate) {
    dd := make(map[string]interface{})
    for k, v := range d {
        dd[k] = v
    }
    t.newUpdate(ldsURL, dd)
}

func (t *testUpdateReceiver) newRDSUpdate(d map[string]rdsUpdate) {
    dd := make(map[string]interface{})
    for k, v := range d {
        dd[k] = v
    }
    t.newUpdate(rdsURL, dd)
}

func (t *testUpdateReceiver) newCDSUpdate(d map[string]ClusterUpdate) {
    dd := make(map[string]interface{})
    for k, v := range d {
        dd[k] = v
    }
    t.newUpdate(cdsURL, dd)
}

func (t *testUpdateReceiver) newEDSUpdate(d map[string]EndpointsUpdate) {
    dd := make(map[string]interface{})
    for k, v := range d {
        dd[k] = v
    }
    t.newUpdate(edsURL, dd)
}

func (t *testUpdateReceiver) newUpdate(typeURL string, d map[string]interface{}) {
    t.f(typeURL, d)
}

// testWatchHandle is called to test response handling for each xDS.
//
// It starts the xDS watch as configured in test, waits for the fake xds server
// to receive the request (so the watch callback is installed), and calls
// handleXDSResp with responseToHandle (if it's set). It then compares the
// update received by the watch callback with the expected results.
func testWatchHandle(t *testing.T, test *watchHandleTestcase) {
    fakeServer, cc, cleanup := startServerAndGetCC(t)
    defer cleanup()

    type updateErr struct {
        u   interface{}
        err error
    }
    gotUpdateCh := testutils.NewChannel()

    v2c := newV2Client(&testUpdateReceiver{
        f: func(typeURL string, d map[string]interface{}) {
            if typeURL == test.typeURL {
                if u, ok := d[test.resourceName]; ok {
                    gotUpdateCh.Send(updateErr{u, nil})
                }
            }
        },
    }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
    defer v2c.close()

    // RDS needs an existing LDS watch for the hostname.
    if test.typeURL == rdsURL {
        doLDS(t, v2c, fakeServer)
    }

    // Register the watcher; this will also trigger the v2Client to send the
    // xDS request.
    v2c.addWatch(test.typeURL, test.resourceName)

    // Wait till the request makes it to the fakeServer. This ensures that
    // the watch request has been processed by the v2Client.
    if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
        t.Fatalf("Timeout waiting for an xDS request: %v", err)
    }

    // Directly push the response through a call to handleXDSResp. This
    // bypasses the fakeServer, so it's only testing the handle logic. Client
    // response processing is covered elsewhere.
    //
    // Also note that this won't trigger ACK, so there's no need to clear the
    // request channel afterwards.
    var handleXDSResp func(response *xdspb.DiscoveryResponse) error
    switch test.typeURL {
    case ldsURL:
        handleXDSResp = v2c.handleLDSResponse
    case rdsURL:
        handleXDSResp = v2c.handleRDSResponse
    case cdsURL:
        handleXDSResp = v2c.handleCDSResponse
    case edsURL:
        handleXDSResp = v2c.handleEDSResponse
    }
    if err := handleXDSResp(test.responseToHandle); (err != nil) != test.wantHandleErr {
        t.Fatalf("handleXDSResp() returned err: %v, wantErr: %v", err, test.wantHandleErr)
    }

    // If the test doesn't expect the callback to be invoked, verify that no
    // update or error is pushed to the callback.
    //
    // Cannot directly compare test.wantUpdate with nil (typed vs non-typed
    // nil: https://golang.org/doc/faq#nil_error).
    if c := test.wantUpdate; c == nil || (reflect.ValueOf(c).Kind() == reflect.Ptr && reflect.ValueOf(c).IsNil()) {
        update, err := gotUpdateCh.Receive()
        if err == testutils.ErrRecvTimeout {
            return
        }
        t.Fatalf("Unexpected update: %+v", update)
    }

    wantUpdate := reflect.ValueOf(test.wantUpdate).Elem().Interface()
    uErr, err := gotUpdateCh.Receive()
    if err == testutils.ErrRecvTimeout {
        t.Fatal("Timeout expecting xDS update")
    }
    gotUpdate := uErr.(updateErr).u
    opt := cmp.AllowUnexported(rdsUpdate{}, ldsUpdate{}, ClusterUpdate{}, EndpointsUpdate{})
    if diff := cmp.Diff(gotUpdate, wantUpdate, opt); diff != "" {
        t.Fatalf("got update: %+v, want %+v, diff: %s", gotUpdate, wantUpdate, diff)
    }
    gotUpdateErr := uErr.(updateErr).err
    if (gotUpdateErr != nil) != test.wantUpdateErr {
        t.Fatalf("got xDS update error {%v}, wantErr: %v", gotUpdateErr, test.wantUpdateErr)
    }
}

// startServerAndGetCC starts a fake XDS server and also returns a ClientConn
// connected to it.
func startServerAndGetCC(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) {
    t.Helper()

    fs, sCleanup, err := fakeserver.StartServer()
    if err != nil {
        t.Fatalf("Failed to start fake xDS server: %v", err)
    }

    cc, ccCleanup, err := fs.XDSClientConn()
    if err != nil {
        sCleanup()
        t.Fatalf("Failed to get a clientConn to the fake xDS server: %v", err)
    }
    return fs, cc, func() {
        sCleanup()
        ccCleanup()
    }
}

// waitForNilErr waits for a nil error value to be received on the
// provided channel.
func waitForNilErr(t *testing.T, ch *testutils.Channel) {
    t.Helper()

    val, err := ch.Receive()
    if err == testutils.ErrRecvTimeout {
        t.Fatalf("Timeout expired when expecting update")
    }
    if val != nil {
        if cbErr := val.(error); cbErr != nil {
            t.Fatal(cbErr)
        }
    }
}

@ -16,38 +16,68 @@
 *
 */

package client
// Package v2 provides xDS v2 transport protocol specific functionality.
package v2

import (
    "context"
    "fmt"
    "sync"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/internal/buffer"
    "google.golang.org/grpc/internal/grpclog"
    xdsclient "google.golang.org/grpc/xds/internal/client"
    "google.golang.org/grpc/xds/internal/version"

    xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
    corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
    adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
    v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
    v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
    v2adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
)

type adsStream adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient

var _ xdsv2Client = &v2Client{}

// updateHandler handles the update (parsed from xds responses). It's
// implemented by the upper level Client.
//
// It's an interface to be overridden in test.
type updateHandler interface {
    newLDSUpdate(d map[string]ldsUpdate)
    newRDSUpdate(d map[string]rdsUpdate)
    newCDSUpdate(d map[string]ClusterUpdate)
    newEDSUpdate(d map[string]EndpointsUpdate)
func init() {
    xdsclient.RegisterAPIClientBuilder(clientBuilder{})
}

// v2Client performs the actual xDS RPCs using the xDS v2 API. It creates a
type clientBuilder struct{}

func (clientBuilder) Build(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) {
    return newClient(cc, opts)
}

func (clientBuilder) Version() version.TransportAPI {
    return version.TransportV2
}

func newClient(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) {
    nodeProto, ok := opts.NodeProto.(*v2corepb.Node)
    if !ok {
        return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, v2corepb.Node{})
    }
    v2c := &client{
        cc:        cc,
        parent:    opts.Parent,
        nodeProto: nodeProto,
        backoff:   opts.Backoff,
        logger:    opts.Logger,

        streamCh: make(chan adsStream, 1),
        sendCh:   buffer.NewUnbounded(),

        watchMap:   make(map[string]map[string]bool),
        versionMap: make(map[string]string),
        nonceMap:   make(map[string]string),
    }
    v2c.ctx, v2c.cancelCtx = context.WithCancel(context.Background())

    go v2c.run()
    return v2c, nil
}
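
// Editorial sketch (hypothetical, not part of this change): Build fails fast
// when handed a node proto of the wrong transport version, because newClient
// type-asserts on *v2corepb.Node. Assuming v3corepb aliases the v3 core proto
// package, a call like
//
//	_, err := clientBuilder{}.Build(cc, xdsclient.BuildOptions{NodeProto: &v3corepb.Node{}})
//
// returns a non-nil err from that assertion.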

type adsStream v2adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient

// client performs the actual xDS RPCs using the xDS v2 API. It creates a
// single ADS stream on which the different types of xDS requests and responses
// are multiplexed.
//
@ -55,18 +85,14 @@ type updateHandler interface {
// and do ACK/NACK. It's a naive implementation that sends whatever the upper
// layer tells it to send. It will call the callback with everything in every
// response. It doesn't keep a cache of responses, or check for duplicates.
//
// The reason for splitting this out from the top level xdsClient object is
// because there is already an xDS v3Alpha API in development. If and when we
// want to switch to that, this separation will ease that process.
type v2Client struct {
type client struct {
    ctx       context.Context
    cancelCtx context.CancelFunc
    parent    updateHandler
    parent    xdsclient.UpdateHandler

    // ClientConn to the xDS gRPC server. Owned by the parent xdsClient.
    cc        *grpc.ClientConn
    nodeProto *corepb.Node
    nodeProto *v2corepb.Node
    backoff   func(int) time.Duration

    logger *grpclog.PrefixLogger

@ -102,38 +128,31 @@ type v2Client struct {
    hostname string
}

// newV2Client creates a new v2Client initialized with the passed arguments.
func newV2Client(parent updateHandler, cc *grpc.ClientConn, nodeProto *corepb.Node, backoff func(int) time.Duration, logger *grpclog.PrefixLogger) *v2Client {
    v2c := &v2Client{
        cc:        cc,
        parent:    parent,
        nodeProto: nodeProto,
        backoff:   backoff,
func (v2c *client) AddWatch(resourceType, resourceName string) {
    v2c.sendCh.Put(&watchAction{
        typeURL:  resourceType,
        remove:   false,
        resource: resourceName,
    })
}

        logger: logger,

        streamCh: make(chan adsStream, 1),
        sendCh:   buffer.NewUnbounded(),

        watchMap:   make(map[string]map[string]bool),
        versionMap: make(map[string]string),
        nonceMap:   make(map[string]string),
    }
    v2c.ctx, v2c.cancelCtx = context.WithCancel(context.Background())

    go v2c.run()
    return v2c
func (v2c *client) RemoveWatch(resourceType, resourceName string) {
    v2c.sendCh.Put(&watchAction{
        typeURL:  resourceType,
        remove:   true,
        resource: resourceName,
    })
}

// close cleans up resources and goroutines allocated by this client.
func (v2c *v2Client) close() {
func (v2c *client) Close() {
    v2c.cancelCtx()
}

// run starts an ADS stream (and backs off exponentially, if the previous
// stream failed without receiving a single reply) and runs the sender and
// receiver routines to send and receive data from the stream respectively.
func (v2c *v2Client) run() {
func (v2c *client) run() {
    go v2c.send()
    // TODO: start a goroutine monitoring ClientConn's connectivity state, and
    // report error (and log) when state is transient failure.
@ -159,7 +178,7 @@ func (v2c *v2Client) run() {
    }

    retries++
    cli := adsgrpc.NewAggregatedDiscoveryServiceClient(v2c.cc)
    cli := v2adsgrpc.NewAggregatedDiscoveryServiceClient(v2c.cc)
    stream, err := cli.StreamAggregatedResources(v2c.ctx, grpc.WaitForReady(true))
    if err != nil {
        v2c.logger.Warningf("xds: ADS stream creation failed: %v", err)
@ -187,8 +206,8 @@ func (v2c *v2Client) run() {
// - If this is an ack, version will be the version from the response
// - If this is a nack, version will be the previous acked version (from
//   versionMap). If there was no ack before, it will be an empty string
func (v2c *v2Client) sendRequest(stream adsStream, resourceNames []string, typeURL, version, nonce string) bool {
    req := &xdspb.DiscoveryRequest{
func (v2c *client) sendRequest(stream adsStream, resourceNames []string, typeURL, version, nonce string) bool {
    req := &v2xdspb.DiscoveryRequest{
        Node:          v2c.nodeProto,
        TypeUrl:       typeURL,
        ResourceNames: resourceNames,
@ -210,7 +229,7 @@ func (v2c *v2Client) sendRequest(stream adsStream, resourceNames []string, typeU
// that here because the stream has just started and Send() usually returns
// quickly (once it pushes the message onto the transport layer) and is only
// ever blocked if we don't have enough flow control quota.
func (v2c *v2Client) sendExisting(stream adsStream) bool {
func (v2c *client) sendExisting(stream adsStream) bool {
    v2c.mu.Lock()
    defer v2c.mu.Unlock()

@ -236,7 +255,7 @@ type watchAction struct {
// processWatchInfo pulls the fields needed by the request from a watchAction.
//
// It also updates the watch map in v2c.
func (v2c *v2Client) processWatchInfo(t *watchAction) (target []string, typeURL, version, nonce string, send bool) {
func (v2c *client) processWatchInfo(t *watchAction) (target []string, typeURL, ver, nonce string, send bool) {
    v2c.mu.Lock()
    defer v2c.mu.Unlock()

@ -258,7 +277,7 @@ func (v2c *v2Client) processWatchInfo(t *watchAction) (target []string, typeURL,

    // Special handling for LDS, because RDS needs the LDS resource_name for
    // response host matching.
    if t.typeURL == ldsURL {
    if t.typeURL == version.V2ListenerURL {
        // Set hostname to the first LDS resource_name, and reset it when the
        // last LDS watch is removed. The upper level Client isn't expected to
        // watchLDS more than once.
@ -275,9 +294,9 @@ func (v2c *v2Client) processWatchInfo(t *watchAction) (target []string, typeURL,
    // We don't reset version or nonce when a new watch is started. The version
    // and nonce from the previous response are carried by the request unless
    // the stream is recreated.
    version = v2c.versionMap[typeURL]
    ver = v2c.versionMap[typeURL]
    nonce = v2c.nonceMap[typeURL]
    return target, typeURL, version, nonce, send
    return target, typeURL, ver, nonce, send
}

type ackAction struct {

@ -293,7 +312,7 @@ type ackAction struct {
// processAckInfo pulls the fields needed by the ack request from an ackAction.
//
// If no active watch is found for this ack, it returns false for send.
func (v2c *v2Client) processAckInfo(t *ackAction, stream adsStream) (target []string, typeURL, version, nonce string, send bool) {
func (v2c *client) processAckInfo(t *ackAction, stream adsStream) (target []string, typeURL, version, nonce string, send bool) {
    if t.stream != stream {
        // If ACK's stream isn't the current sending stream, this means the ACK
        // was pushed to the queue before the old stream broke, and a new stream has
@ -353,7 +372,7 @@ func (v2c *v2Client) processAckInfo(t *ackAction, stream adsStream) (target []st
// Note that this goroutine doesn't do anything to the old stream when there's a
// new one. In fact, there should be only one stream in progress, and a new one
// should only be created when the old one fails (recv returns an error).
func (v2c *v2Client) send() {
func (v2c *client) send() {
    var stream adsStream
    for {
        select {
@ -398,7 +417,7 @@ func (v2c *v2Client) send() {

// recv receives xDS responses on the provided ADS stream and branches out to
// message specific handlers.
func (v2c *v2Client) recv(stream adsStream) bool {
func (v2c *client) recv(stream adsStream) bool {
    success := false
    for {
        resp, err := stream.Recv()
@ -409,15 +428,20 @@ func (v2c *v2Client) recv(stream adsStream) bool {
        }
        v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl())
        v2c.logger.Debugf("ADS response received: %v", resp)

        // Note that the xDS transport protocol is versioned independently of
        // the resource types, and it is supported to transfer older versions
        // of resource types using new versions of the transport protocol, or
        // vice-versa. Hence we need to handle v3 type_urls as well here.
        var respHandleErr error
        switch resp.GetTypeUrl() {
        case ldsURL:
        case version.V2ListenerURL, version.V3ListenerURL:
            respHandleErr = v2c.handleLDSResponse(resp)
        case rdsURL:
        case version.V2RouteConfigURL, version.V3RouteConfigURL:
            respHandleErr = v2c.handleRDSResponse(resp)
        case cdsURL:
        case version.V2ClusterURL, version.V3ClusterURL:
            respHandleErr = v2c.handleCDSResponse(resp)
        case edsURL:
        case version.V2EndpointsURL, version.V3EndpointsURL:
            respHandleErr = v2c.handleEDSResponse(resp)
        default:
            v2c.logger.Warningf("Resource type %v unknown in response from server", resp.GetTypeUrl())
@ -446,25 +470,56 @@ func (v2c *v2Client) recv(stream adsStream) bool {
    }
}

func (v2c *v2Client) addWatch(resourceType, resourceName string) {
    v2c.sendCh.Put(&watchAction{
        typeURL:  resourceType,
        remove:   false,
        resource: resourceName,
    })
}

func (v2c *v2Client) removeWatch(resourceType, resourceName string) {
    v2c.sendCh.Put(&watchAction{
        typeURL:  resourceType,
        remove:   true,
        resource: resourceName,
    })
}

func mapToSlice(m map[string]bool) (ret []string) {
    for i := range m {
        ret = append(ret, i)
    }
    return
}

// handleLDSResponse processes an LDS response received from the xDS server. On
// receipt of a good response, it also invokes the registered watcher callback.
func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error {
    update, err := xdsclient.UnmarshalListener(resp.GetResources(), v2c.logger)
    if err != nil {
        return err
    }
    v2c.parent.NewListeners(update)
    return nil
}

// handleRDSResponse processes an RDS response received from the xDS server. On
// receipt of a good response, it caches validated resources and also invokes
// the registered watcher callback.
func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error {
    v2c.mu.Lock()
    hostname := v2c.hostname
    v2c.mu.Unlock()

    update, err := xdsclient.UnmarshalRouteConfig(resp.GetResources(), hostname, v2c.logger)
    if err != nil {
        return err
    }
    v2c.parent.NewRouteConfigs(update)
    return nil
}

// handleCDSResponse processes a CDS response received from the xDS server. On
// receipt of a good response, it also invokes the registered watcher callback.
func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error {
    update, err := xdsclient.UnmarshalCluster(resp.GetResources(), v2c.logger)
    if err != nil {
        return err
    }
    v2c.parent.NewClusters(update)
    return nil
}

func (v2c *client) handleEDSResponse(resp *v2xdspb.DiscoveryResponse) error {
    update, err := xdsclient.UnmarshalEndpoints(resp.GetResources(), v2c.logger)
    if err != nil {
        return err
    }
    v2c.parent.NewEndpoints(update)
    return nil
}

@ -15,7 +15,7 @@
 * limitations under the License.
 */

package client
package v2

import (
    "fmt"

@ -30,42 +30,46 @@ import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/xds/internal/testutils"
    "google.golang.org/grpc/xds/internal/testutils/fakeserver"
    "google.golang.org/grpc/xds/internal/version"
)

func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *v2Client, cbLDS, cbRDS, cbCDS, cbEDS *testutils.Channel, cleanup func()) {
func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *client, cbLDS, cbRDS, cbCDS, cbEDS *testutils.Channel, cleanup func()) {
    cbLDS = testutils.NewChannel()
    cbRDS = testutils.NewChannel()
    cbCDS = testutils.NewChannel()
    cbEDS = testutils.NewChannel()
    v2c = newV2Client(&testUpdateReceiver{
    v2c, err := newV2Client(&testUpdateReceiver{
        f: func(typeURL string, d map[string]interface{}) {
            t.Logf("Received %s callback with {%+v}", typeURL, d)
            switch typeURL {
            case ldsURL:
            case version.V2ListenerURL:
                if _, ok := d[goodLDSTarget1]; ok {
                    cbLDS.Send(struct{}{})
                }
            case rdsURL:
            case version.V2RouteConfigURL:
                if _, ok := d[goodRouteName1]; ok {
                    cbRDS.Send(struct{}{})
                }
            case cdsURL:
            case version.V2ClusterURL:
                if _, ok := d[goodClusterName1]; ok {
                    cbCDS.Send(struct{}{})
                }
            case edsURL:
            case version.V2EndpointsURL:
                if _, ok := d[goodEDSName]; ok {
                    cbEDS.Send(struct{}{})
                }
            }
        },
    }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
    t.Log("Started xds v2Client...")
    return v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2c.close
    if err != nil {
        t.Fatal(err)
    }
    t.Log("Started xds client...")
    return v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2c.Close
}

// compareXDSRequest reads a request from the channel and compares it with want.
func compareXDSRequest(ch *testutils.Channel, want *xdspb.DiscoveryRequest, version, nonce string) error {
func compareXDSRequest(ch *testutils.Channel, want *xdspb.DiscoveryRequest, ver, nonce string) error {
    val, err := ch.Receive()
    if err != nil {
        return err

@ -75,7 +79,7 @@ func compareXDSRequest(ch *testutils.Channel, want *xdspb.DiscoveryRequest, vers
        return fmt.Errorf("unexpected error from request: %v", req.Err)
    }
    wantClone := proto.Clone(want).(*xdspb.DiscoveryRequest)
    wantClone.VersionInfo = version
    wantClone.VersionInfo = ver
    wantClone.ResponseNonce = nonce
    if !cmp.Equal(req.Req, wantClone, cmp.Comparer(proto.Equal)) {
        return fmt.Errorf("received request different from want, diff: %s", cmp.Diff(req.Req, wantClone))
@ -83,9 +87,9 @@ func compareXDSRequest(ch *testutils.Channel, want *xdspb.DiscoveryRequest, vers
    return nil
}

func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion *xdspb.DiscoveryResponse, version int) (nonce string) {
func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion *xdspb.DiscoveryResponse, ver int) (nonce string) {
    respToSend := proto.Clone(respWithoutVersion).(*xdspb.DiscoveryResponse)
    respToSend.VersionInfo = strconv.Itoa(version)
    respToSend.VersionInfo = strconv.Itoa(ver)
    nonce = strconv.Itoa(int(time.Now().UnixNano()))
    respToSend.Nonce = nonce
    ch <- &fakeserver.Response{Resp: respToSend}
@ -94,25 +98,25 @@ func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion *

// startXDS calls watch to send the first request. It then sends a good response
// and checks for ack.
func startXDS(t *testing.T, xdsname string, v2c *v2Client, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) {
func startXDS(t *testing.T, xdsname string, v2c *client, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) {
    var (
        nameToWatch, typeURLToWatch string
    )
    switch xdsname {
    case "LDS":
        typeURLToWatch = ldsURL
        typeURLToWatch = version.V2ListenerURL
        nameToWatch = goodLDSTarget1
    case "RDS":
        typeURLToWatch = rdsURL
        typeURLToWatch = version.V2RouteConfigURL
        nameToWatch = goodRouteName1
    case "CDS":
        typeURLToWatch = cdsURL
        typeURLToWatch = version.V2ClusterURL
        nameToWatch = goodClusterName1
    case "EDS":
        typeURLToWatch = edsURL
        typeURLToWatch = version.V2EndpointsURL
        nameToWatch = goodEDSName
    }
    v2c.addWatch(typeURLToWatch, nameToWatch)
    v2c.AddWatch(typeURLToWatch, nameToWatch)

    if err := compareXDSRequest(reqChan, req, preVersion, preNonce); err != nil {
        t.Fatalf("Failed to receive %s request: %v", xdsname, err)
@ -125,11 +129,11 @@ func startXDS(t *testing.T, xdsname string, v2c *v2Client, reqChan *testutils.Ch
//
// It also waits and checks that the ack request contains the given version, and
// the generated nonce.
func sendGoodResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, version int, goodResp *xdspb.DiscoveryResponse, wantReq *xdspb.DiscoveryRequest, callbackCh *testutils.Channel) (string, error) {
    nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodResp, version)
func sendGoodResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, ver int, goodResp *xdspb.DiscoveryResponse, wantReq *xdspb.DiscoveryRequest, callbackCh *testutils.Channel) (string, error) {
    nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodResp, ver)
    t.Logf("Good %s response pushed to fakeServer...", xdsname)

    if err := compareXDSRequest(fakeServer.XDSRequestChan, wantReq, strconv.Itoa(version), nonce); err != nil {
    if err := compareXDSRequest(fakeServer.XDSRequestChan, wantReq, strconv.Itoa(ver), nonce); err != nil {
        return "", fmt.Errorf("failed to receive %s request: %v", xdsname, err)
    }
    t.Logf("Good %s response acked", xdsname)
@ -145,24 +149,24 @@ func sendGoodResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, v
// be nacked, so we expect a request with the previous version (version-1).
//
// But the nonce in the request should be the new nonce.
func sendBadResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, version int, wantReq *xdspb.DiscoveryRequest) error {
func sendBadResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, ver int, wantReq *xdspb.DiscoveryRequest) error {
    var typeURL string
    switch xdsname {
    case "LDS":
        typeURL = ldsURL
        typeURL = version.V2ListenerURL
    case "RDS":
        typeURL = rdsURL
        typeURL = version.V2RouteConfigURL
    case "CDS":
        typeURL = cdsURL
        typeURL = version.V2ClusterURL
    case "EDS":
        typeURL = edsURL
        typeURL = version.V2EndpointsURL
    }
    nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{
        Resources: []*anypb.Any{{}},
        TypeUrl:   typeURL,
    }, version)
    }, ver)
    t.Logf("Bad %s response pushed to fakeServer...", xdsname)
    if err := compareXDSRequest(fakeServer.XDSRequestChan, wantReq, strconv.Itoa(version-1), nonce); err != nil {
    if err := compareXDSRequest(fakeServer.XDSRequestChan, wantReq, strconv.Itoa(ver-1), nonce); err != nil {
        return fmt.Errorf("failed to receive %s request: %v", xdsname, err)
    }
    t.Logf("Bad %s response nacked", xdsname)

@ -262,7 +266,7 @@ func (s) TestV2ClientAckFirstIsNack(t *testing.T) {

    nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{
        Resources: []*anypb.Any{{}},
        TypeUrl:   ldsURL,
        TypeUrl:   version.V2ListenerURL,
    }, versionLDS)
    t.Logf("Bad response pushed to fakeServer...")

@ -303,7 +307,7 @@ func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) {
    // This is an invalid response after the new watch.
    nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{
        Resources: []*anypb.Any{{}},
        TypeUrl:   ldsURL,
        TypeUrl:   version.V2ListenerURL,
    }, versionLDS)
    t.Logf("Bad response pushed to fakeServer...")

@ -332,7 +336,7 @@ func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) {
    defer v2cCleanup()

    // Start a CDS watch.
    v2c.addWatch(cdsURL, goodClusterName1)
    v2c.AddWatch(version.V2ClusterURL, goodClusterName1)
    if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, "", ""); err != nil {
        t.Fatal(err)
    }
@ -346,14 +350,14 @@ func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) {
    }
    // Cancel the CDS watch, and start a new one. The new watch should have the
    // version from the response above.
    v2c.removeWatch(cdsURL, goodClusterName1)
    v2c.RemoveWatch(version.V2ClusterURL, goodClusterName1)
    // Wait for a request with no resource names, because the only watch was
    // removed.
    emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: cdsURL}
    emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL}
    if err := compareXDSRequest(fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce); err != nil {
        t.Fatalf("Failed to receive %s request: %v", "CDS", err)
    }
    v2c.addWatch(cdsURL, goodClusterName1)
    v2c.AddWatch(version.V2ClusterURL, goodClusterName1)
    // Wait for a request with correct resource names and version.
    if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS), nonce); err != nil {
        t.Fatalf("Failed to receive %s request: %v", "CDS", err)
@ -387,7 +391,7 @@ func (s) TestV2ClientAckCancelResponseRace(t *testing.T) {
    defer v2cCleanup()

    // Start a CDS watch.
    v2c.addWatch(cdsURL, goodClusterName1)
    v2c.AddWatch(version.V2ClusterURL, goodClusterName1)
    if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, "", ""); err != nil {
        t.Fatalf("Failed to receive %s request: %v", "CDS", err)
    }
@ -400,10 +404,10 @@ func (s) TestV2ClientAckCancelResponseRace(t *testing.T) {
    }
    // Cancel the watch before the next response is sent. This mimics the case
    // where the watch is canceled while the response is on the wire.
    v2c.removeWatch(cdsURL, goodClusterName1)
    v2c.RemoveWatch(version.V2ClusterURL, goodClusterName1)
    // Wait for a request with no resource names, because the only watch was
    // removed.
    emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: cdsURL}
    emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL}
    if err := compareXDSRequest(fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce); err != nil {
        t.Fatalf("Failed to receive %s request: %v", "CDS", err)
    }
@ -428,7 +432,7 @@ func (s) TestV2ClientAckCancelResponseRace(t *testing.T) {

    // Start a new watch. The new watch should have the nonce from the response
    // above, and version from the first good response.
    v2c.addWatch(cdsURL, goodClusterName1)
    v2c.AddWatch(version.V2ClusterURL, goodClusterName1)
    if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS-1), nonce); err != nil {
        t.Fatalf("Failed to receive %s request: %v", "CDS", err)
    }
@ -16,7 +16,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
package client
|
||||
package v2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
@ -26,7 +26,8 @@ import (
|
|||
corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
|
||||
"github.com/golang/protobuf/proto"
|
||||
anypb "github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
xdsclient "google.golang.org/grpc/xds/internal/client"
|
||||
"google.golang.org/grpc/xds/internal/version"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -34,115 +35,68 @@ const (
|
|||
serviceName2 = "bar-service"
|
||||
)
|
||||
|
||||
func (s) TestValidateCluster(t *testing.T) {
|
||||
emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false}
|
||||
tests := []struct {
|
||||
name string
|
||||
cluster *xdspb.Cluster
|
||||
wantUpdate ClusterUpdate
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "non-eds-cluster-type",
|
||||
cluster: &xdspb.Cluster{
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_STATIC},
|
||||
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
|
||||
EdsConfig: &corepb.ConfigSource{
|
||||
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
|
||||
Ads: &corepb.AggregatedConfigSource{},
|
||||
},
|
||||
},
|
||||
var (
|
||||
badlyMarshaledCDSResponse = &xdspb.DiscoveryResponse{
|
||||
Resources: []*anypb.Any{
|
||||
{
|
||||
TypeUrl: version.V2ClusterURL,
|
||||
Value: []byte{1, 2, 3, 4},
|
||||
},
|
||||
},
|
||||
TypeUrl: version.V2ClusterURL,
|
||||
}
|
||||
goodCluster1 = &xdspb.Cluster{
|
||||
Name: goodClusterName1,
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
|
||||
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
|
||||
EdsConfig: &corepb.ConfigSource{
|
||||
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
|
||||
Ads: &corepb.AggregatedConfigSource{},
|
||||
},
|
||||
LbPolicy: xdspb.Cluster_LEAST_REQUEST,
|
||||
},
|
||||
wantUpdate: emptyUpdate,
|
||||
wantErr: true,
|
||||
ServiceName: serviceName1,
|
||||
},
|
||||
{
|
||||
name: "no-eds-config",
|
||||
cluster: &xdspb.Cluster{
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
|
||||
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
|
||||
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
|
||||
LrsServer: &corepb.ConfigSource{
|
||||
ConfigSourceSpecifier: &corepb.ConfigSource_Self{
|
||||
Self: &corepb.SelfConfigSource{},
|
||||
},
|
||||
wantUpdate: emptyUpdate,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no-ads-config-source",
|
||||
cluster: &xdspb.Cluster{
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
|
||||
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{},
|
||||
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
|
||||
},
|
||||
wantUpdate: emptyUpdate,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non-round-robin-lb-policy",
|
||||
cluster: &xdspb.Cluster{
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
|
||||
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
|
||||
EdsConfig: &corepb.ConfigSource{
|
||||
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
|
||||
Ads: &corepb.AggregatedConfigSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
LbPolicy: xdspb.Cluster_LEAST_REQUEST,
|
||||
},
|
||||
wantUpdate: emptyUpdate,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "happy-case-no-service-name-no-lrs",
|
||||
cluster: &xdspb.Cluster{
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
|
||||
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
|
||||
EdsConfig: &corepb.ConfigSource{
|
||||
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
|
||||
Ads: &corepb.AggregatedConfigSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
|
||||
},
|
||||
wantUpdate: emptyUpdate,
|
||||
},
|
||||
{
|
||||
name: "happy-case-no-lrs",
|
||||
cluster: &xdspb.Cluster{
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
|
||||
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
|
||||
EdsConfig: &corepb.ConfigSource{
|
||||
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
|
||||
Ads: &corepb.AggregatedConfigSource{},
|
||||
},
|
||||
},
|
||||
ServiceName: serviceName1,
|
||||
},
|
||||
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
|
||||
},
|
||||
wantUpdate: ClusterUpdate{ServiceName: serviceName1, EnableLRS: false},
|
||||
},
|
||||
{
|
||||
name: "happiest-case",
|
||||
cluster: goodCluster1,
|
||||
wantUpdate: ClusterUpdate{ServiceName: serviceName1, EnableLRS: true},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
gotUpdate, gotErr := validateCluster(test.cluster)
|
||||
if (gotErr != nil) != test.wantErr {
|
||||
t.Errorf("validateCluster(%+v) returned error: %v, wantErr: %v", test.cluster, gotErr, test.wantErr)
|
||||
}
|
||||
if !cmp.Equal(gotUpdate, test.wantUpdate) {
|
||||
t.Errorf("validateCluster(%+v) = %v, want: %v", test.cluster, gotUpdate, test.wantUpdate)
|
||||
}
|
||||
})
|
||||
marshaledCluster1, _ = proto.Marshal(goodCluster1)
|
||||
goodCluster2 = &xdspb.Cluster{
|
||||
Name: goodClusterName2,
|
||||
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
|
||||
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
|
||||
EdsConfig: &corepb.ConfigSource{
|
||||
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
|
||||
Ads: &corepb.AggregatedConfigSource{},
|
||||
},
|
||||
},
|
||||
ServiceName: serviceName2,
|
||||
},
|
||||
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
|
||||
}
|
||||
}
|
||||
marshaledCluster2, _ = proto.Marshal(goodCluster2)
|
||||
goodCDSResponse1 = &xdspb.DiscoveryResponse{
|
||||
Resources: []*anypb.Any{
|
||||
{
|
||||
TypeUrl: version.V2ClusterURL,
|
||||
Value: marshaledCluster1,
|
||||
},
|
||||
},
|
||||
TypeUrl: version.V2ClusterURL,
|
||||
}
|
||||
goodCDSResponse2 = &xdspb.DiscoveryResponse{
|
||||
Resources: []*anypb.Any{
|
||||
{
|
||||
TypeUrl: version.V2ClusterURL,
|
||||
Value: marshaledCluster2,
|
||||
},
|
||||
},
|
||||
TypeUrl: version.V2ClusterURL,
|
||||
}
|
||||
)
|
||||
|
||||
// TestCDSHandleResponse starts a fake xDS server, makes a ClientConn to it,
|
||||
// and creates a v2Client using it. Then, it registers a CDS watcher and tests
|
||||
|
@ -152,7 +106,7 @@ func (s) TestCDSHandleResponse(t *testing.T) {
name string
cdsResponse *xdspb.DiscoveryResponse
wantErr bool
wantUpdate *ClusterUpdate
wantUpdate *xdsclient.ClusterUpdate
wantUpdateErr bool
}{
// Badly marshaled CDS response.
@ -192,14 +146,14 @@ func (s) TestCDSHandleResponse(t *testing.T) {
name: "one-good-cluster",
cdsResponse: goodCDSResponse1,
wantErr: false,
wantUpdate: &ClusterUpdate{ServiceName: serviceName1, EnableLRS: true},
wantUpdate: &xdsclient.ClusterUpdate{ServiceName: serviceName1, EnableLRS: true},
wantUpdateErr: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testWatchHandle(t, &watchHandleTestcase{
typeURL: cdsURL,
typeURL: version.V2ClusterURL,
resourceName: goodClusterName1,

responseToHandle: test.cdsResponse,
@ -217,10 +171,13 @@ func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) {
_, cc, cleanup := startServerAndGetCC(t)
defer cleanup()

v2c := newV2Client(&testUpdateReceiver{
v2c, err := newV2Client(&testUpdateReceiver{
f: func(string, map[string]interface{}) {},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
defer v2c.close()
if err != nil {
t.Fatal(err)
}
defer v2c.Close()

if v2c.handleCDSResponse(badResourceTypeInLDSResponse) == nil {
t.Fatal("v2c.handleCDSResponse() succeeded, should have failed")
@ -230,66 +187,3 @@ func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) {
t.Fatal("v2c.handleCDSResponse() succeeded, should have failed")
}
}

var (
badlyMarshaledCDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: cdsURL,
Value: []byte{1, 2, 3, 4},
},
},
TypeUrl: cdsURL,
}
goodCluster1 = &xdspb.Cluster{
Name: goodClusterName1,
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
EdsConfig: &corepb.ConfigSource{
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
Ads: &corepb.AggregatedConfigSource{},
},
},
ServiceName: serviceName1,
},
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
LrsServer: &corepb.ConfigSource{
ConfigSourceSpecifier: &corepb.ConfigSource_Self{
Self: &corepb.SelfConfigSource{},
},
},
}
marshaledCluster1, _ = proto.Marshal(goodCluster1)
goodCluster2 = &xdspb.Cluster{
Name: goodClusterName2,
ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
EdsConfig: &corepb.ConfigSource{
ConfigSourceSpecifier: &corepb.ConfigSource_Ads{
Ads: &corepb.AggregatedConfigSource{},
},
},
ServiceName: serviceName2,
},
LbPolicy: xdspb.Cluster_ROUND_ROBIN,
}
marshaledCluster2, _ = proto.Marshal(goodCluster2)
goodCDSResponse1 = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: cdsURL,
Value: marshaledCluster1,
},
},
TypeUrl: cdsURL,
}
goodCDSResponse2 = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: cdsURL,
Value: marshaledCluster2,
},
},
TypeUrl: cdsURL,
}
)
@ -13,157 +13,75 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package client
package v2

import (
"testing"
"time"

xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
"github.com/golang/protobuf/ptypes"
anypb "github.com/golang/protobuf/ptypes/any"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/xds/internal"
xdsclient "google.golang.org/grpc/xds/internal/client"
"google.golang.org/grpc/xds/internal/testutils"
"google.golang.org/grpc/xds/internal/version"
)

func (s) TestEDSParseRespProto(t *testing.T) {
tests := []struct {
name string
m *xdspb.ClusterLoadAssignment
want EndpointsUpdate
wantErr bool
}{
{
name: "missing-priority",
m: func() *xdspb.ClusterLoadAssignment {
clab0 := NewClusterLoadAssignmentBuilder("test", nil)
clab0.AddLocality("locality-1", 1, 0, []string{"addr1:314"}, nil)
clab0.AddLocality("locality-2", 1, 2, []string{"addr2:159"}, nil)
return clab0.Build()
}(),
want: EndpointsUpdate{},
wantErr: true,
},
{
name: "missing-locality-ID",
m: func() *xdspb.ClusterLoadAssignment {
clab0 := NewClusterLoadAssignmentBuilder("test", nil)
clab0.AddLocality("", 1, 0, []string{"addr1:314"}, nil)
return clab0.Build()
}(),
want: EndpointsUpdate{},
wantErr: true,
},
{
name: "good",
m: func() *xdspb.ClusterLoadAssignment {
clab0 := NewClusterLoadAssignmentBuilder("test", nil)
clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, &AddLocalityOptions{
Health: []corepb.HealthStatus{corepb.HealthStatus_UNHEALTHY},
Weight: []uint32{271},
})
clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, &AddLocalityOptions{
Health: []corepb.HealthStatus{corepb.HealthStatus_DRAINING},
Weight: []uint32{828},
})
return clab0.Build()
}(),
want: EndpointsUpdate{
Drops: nil,
Localities: []Locality{
{
Endpoints: []Endpoint{{
Address: "addr1:314",
HealthStatus: EndpointHealthStatusUnhealthy,
Weight: 271,
}},
ID: internal.LocalityID{SubZone: "locality-1"},
Priority: 1,
Weight: 1,
},
{
Endpoints: []Endpoint{{
Address: "addr2:159",
HealthStatus: EndpointHealthStatusDraining,
Weight: 828,
}},
ID: internal.LocalityID{SubZone: "locality-2"},
Priority: 0,
Weight: 1,
},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ParseEDSRespProto(tt.m)
if (err != nil) != tt.wantErr {
t.Errorf("ParseEDSRespProto() error = %v, wantErr %v", err, tt.wantErr)
return
}
if d := cmp.Diff(got, tt.want); d != "" {
t.Errorf("ParseEDSRespProto() got = %v, want %v, diff: %v", got, tt.want, d)
}
})
}
}

var (
badlyMarshaledEDSResponse = &xdspb.DiscoveryResponse{
badlyMarshaledEDSResponse = &v2xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: edsURL,
TypeUrl: version.V2EndpointsURL,
Value: []byte{1, 2, 3, 4},
},
},
TypeUrl: edsURL,
TypeUrl: version.V2EndpointsURL,
}
badResourceTypeInEDSResponse = &xdspb.DiscoveryResponse{
badResourceTypeInEDSResponse = &v2xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: httpConnManagerURL,
Value: marshaledConnMgr1,
},
},
TypeUrl: edsURL,
TypeUrl: version.V2EndpointsURL,
}
goodEDSResponse1 = &xdspb.DiscoveryResponse{
goodEDSResponse1 = &v2xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
func() *anypb.Any {
clab0 := NewClusterLoadAssignmentBuilder(goodEDSName, nil)
clab0 := testutils.NewClusterLoadAssignmentBuilder(goodEDSName, nil)
clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, nil)
clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, nil)
a, _ := ptypes.MarshalAny(clab0.Build())
return a
}(),
},
TypeUrl: edsURL,
TypeUrl: version.V2EndpointsURL,
}
goodEDSResponse2 = &xdspb.DiscoveryResponse{
goodEDSResponse2 = &v2xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
func() *anypb.Any {
clab0 := NewClusterLoadAssignmentBuilder("not-goodEDSName", nil)
clab0 := testutils.NewClusterLoadAssignmentBuilder("not-goodEDSName", nil)
clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, nil)
clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, nil)
a, _ := ptypes.MarshalAny(clab0.Build())
return a
}(),
},
TypeUrl: edsURL,
TypeUrl: version.V2EndpointsURL,
}
)

func (s) TestEDSHandleResponse(t *testing.T) {
tests := []struct {
name string
edsResponse *xdspb.DiscoveryResponse
edsResponse *v2xdspb.DiscoveryResponse
wantErr bool
wantUpdate *EndpointsUpdate
wantUpdate *xdsclient.EndpointsUpdate
wantUpdateErr bool
}{
// Any in resource is badly marshaled.
@ -195,16 +113,16 @@ func (s) TestEDSHandleResponse(t *testing.T) {
name: "one-good-assignment",
edsResponse: goodEDSResponse1,
wantErr: false,
wantUpdate: &EndpointsUpdate{
Localities: []Locality{
wantUpdate: &xdsclient.EndpointsUpdate{
Localities: []xdsclient.Locality{
{
Endpoints: []Endpoint{{Address: "addr1:314"}},
Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}},
ID: internal.LocalityID{SubZone: "locality-1"},
Priority: 1,
Weight: 1,
},
{
Endpoints: []Endpoint{{Address: "addr2:159"}},
Endpoints: []xdsclient.Endpoint{{Address: "addr2:159"}},
ID: internal.LocalityID{SubZone: "locality-2"},
Priority: 0,
Weight: 1,
@ -217,7 +135,7 @@ func (s) TestEDSHandleResponse(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testWatchHandle(t, &watchHandleTestcase{
typeURL: edsURL,
typeURL: version.V2EndpointsURL,
resourceName: goodEDSName,
responseToHandle: test.edsResponse,
wantHandleErr: test.wantErr,
@ -234,10 +152,13 @@ func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) {
_, cc, cleanup := startServerAndGetCC(t)
defer cleanup()

v2c := newV2Client(&testUpdateReceiver{
v2c, err := newV2Client(&testUpdateReceiver{
f: func(string, map[string]interface{}) {},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
defer v2c.close()
if err != nil {
t.Fatal(err)
}
defer v2c.Close()

if v2c.handleEDSResponse(badResourceTypeInEDSResponse) == nil {
t.Fatal("v2c.handleEDSResponse() succeeded, should have failed")
@ -16,114 +16,26 @@
*
*/

package client
package v2

import (
"testing"
"time"

"github.com/golang/protobuf/proto"

xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
basepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2"
anypb "github.com/golang/protobuf/ptypes/any"
v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
xdsclient "google.golang.org/grpc/xds/internal/client"
"google.golang.org/grpc/xds/internal/version"
)

func (s) TestLDSGetRouteConfig(t *testing.T) {
tests := []struct {
name string
lis *xdspb.Listener
wantRoute string
wantErr bool
}{
{
name: "no-apiListener-field",
lis: &xdspb.Listener{},
wantRoute: "",
wantErr: true,
},
{
name: "badly-marshaled-apiListener",
lis: badAPIListener1,
wantRoute: "",
wantErr: true,
},
{
name: "wrong-type-in-apiListener",
lis: badResourceListener,
wantRoute: "",
wantErr: true,
},
{
name: "empty-httpConnMgr-in-apiListener",
lis: listenerWithEmptyHTTPConnMgr,
wantRoute: "",
wantErr: true,
},
{
name: "scopedRoutes-routeConfig-in-apiListener",
lis: listenerWithScopedRoutesRouteConfig,
wantRoute: "",
wantErr: true,
},
{
name: "rds.ConfigSource-in-apiListener-is-not-ADS",
lis: &xdspb.Listener{
Name: goodLDSTarget1,
ApiListener: &listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: httpConnManagerURL,
Value: func() []byte {
cm := &httppb.HttpConnectionManager{
RouteSpecifier: &httppb.HttpConnectionManager_Rds{
Rds: &httppb.Rds{
ConfigSource: &basepb.ConfigSource{
ConfigSourceSpecifier: &basepb.ConfigSource_Path{
Path: "/some/path",
},
},
RouteConfigName: goodRouteName1}}}
mcm, _ := proto.Marshal(cm)
return mcm
}()}}},
wantRoute: "",
wantErr: true,
},
{
name: "goodListener1",
lis: goodListener1,
wantRoute: goodRouteName1,
wantErr: false,
},
}
_, cc, cleanup := startServerAndGetCC(t)
defer cleanup()
v2c := newV2Client(nil, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
defer v2c.close()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
gotRoute, err := v2c.getRouteConfigNameFromListener(test.lis)
if gotRoute != test.wantRoute {
t.Errorf("getRouteConfigNameFromListener(%+v) = %v, want %v", test.lis, gotRoute, test.wantRoute)
}
if (err != nil) != test.wantErr {
t.Errorf("getRouteConfigNameFromListener(%+v) = %v, want %v", test.lis, err, test.wantErr)
}
})
}
}

// TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it,
// and creates a v2Client using it. Then, it registers a watchLDS and tests
// and creates a client using it. Then, it registers a watchLDS and tests
// different LDS responses.
func (s) TestLDSHandleResponse(t *testing.T) {
tests := []struct {
name string
ldsResponse *xdspb.DiscoveryResponse
ldsResponse *v2xdspb.DiscoveryResponse
wantErr bool
wantUpdate *ldsUpdate
wantUpdate *xdsclient.ListenerUpdate
wantUpdateErr bool
}{
// Badly marshaled LDS response.
@ -157,7 +69,7 @@ func (s) TestLDSHandleResponse(t *testing.T) {
name: "one-good-listener",
ldsResponse: goodLDSResponse1,
wantErr: false,
wantUpdate: &ldsUpdate{routeName: goodRouteName1},
wantUpdate: &xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1},
wantUpdateErr: false,
},
// Response contains multiple good listeners, including the one we are
@ -166,7 +78,7 @@ func (s) TestLDSHandleResponse(t *testing.T) {
name: "multiple-good-listener",
ldsResponse: ldsResponseWithMultipleResources,
wantErr: false,
wantUpdate: &ldsUpdate{routeName: goodRouteName1},
wantUpdate: &xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1},
wantUpdateErr: false,
},
// Response contains two good listeners (one interesting and one
@ -201,7 +113,7 @@ func (s) TestLDSHandleResponse(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testWatchHandle(t, &watchHandleTestcase{
typeURL: ldsURL,
typeURL: version.V2ListenerURL,
resourceName: goodLDSTarget1,
responseToHandle: test.ldsResponse,
wantHandleErr: test.wantErr,
@ -212,16 +124,19 @@ func (s) TestLDSHandleResponse(t *testing.T) {
}
}

// TestLDSHandleResponseWithoutWatch tests the case where the v2Client receives
// TestLDSHandleResponseWithoutWatch tests the case where the client receives
// an LDS response without a registered watcher.
func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) {
_, cc, cleanup := startServerAndGetCC(t)
defer cleanup()

v2c := newV2Client(&testUpdateReceiver{
v2c, err := newV2Client(&testUpdateReceiver{
f: func(string, map[string]interface{}) {},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
defer v2c.close()
if err != nil {
t.Fatal(err)
}
defer v2c.Close()

if v2c.handleLDSResponse(badResourceTypeInLDSResponse) == nil {
t.Fatal("v2c.handleLDSResponse() succeeded, should have failed")
@ -0,0 +1,167 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package v2

import (
"testing"
"time"

xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
xdsclient "google.golang.org/grpc/xds/internal/client"
"google.golang.org/grpc/xds/internal/testutils/fakeserver"
"google.golang.org/grpc/xds/internal/version"
)

// doLDS makes an LDS watch, and waits for the response and ack to finish.
//
// This is called by RDS tests to start LDS first, because LDS is a
// prerequisite for RDS, and RDS handling would fail without an existing LDS
// watch.
func doLDS(t *testing.T, v2c xdsclient.APIClient, fakeServer *fakeserver.Server) {
v2c.AddWatch(version.V2ListenerURL, goodLDSTarget1)
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
t.Fatalf("Timeout waiting for LDS request: %v", err)
}
}
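
A quick sketch (using only identifiers defined in this file) of the call order this helper enables; RDS handling only works once an LDS watch for the host exists:

// Install the LDS watch first; doLDS blocks until the fake server sees it.
doLDS(t, v2c, fakeServer)
// Only then is it safe to watch and handle RDS for the route config name.
v2c.AddWatch(version.V2RouteConfigURL, goodRouteName1)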

// TestRDSHandleResponseWithRouting starts a fake xDS server, makes a ClientConn
// to it, and creates a v2Client using it. Then, it registers an LDS and RDS
// watcher and tests different RDS responses.
func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
tests := []struct {
name string
rdsResponse *xdspb.DiscoveryResponse
wantErr bool
wantUpdate *xdsclient.RouteConfigUpdate
wantUpdateErr bool
}{
// Badly marshaled RDS response.
{
name: "badly-marshaled-response",
rdsResponse: badlyMarshaledRDSResponse,
wantErr: true,
wantUpdate: nil,
wantUpdateErr: false,
},
// Response does not contain RouteConfiguration proto.
{
name: "no-route-config-in-response",
rdsResponse: badResourceTypeInRDSResponse,
wantErr: true,
wantUpdate: nil,
wantUpdateErr: false,
},
// No VirtualHosts in the response. Just one test case here for a bad
// RouteConfiguration, since the others are covered in
// TestGetClusterFromRouteConfiguration.
{
name: "no-virtual-hosts-in-response",
rdsResponse: noVirtualHostsInRDSResponse,
wantErr: true,
wantUpdate: nil,
wantUpdateErr: false,
},
// Response contains one good RouteConfiguration, uninteresting though.
{
name: "one-uninteresting-route-config",
rdsResponse: goodRDSResponse2,
wantErr: false,
wantUpdate: nil,
wantUpdateErr: false,
},
// Response contains one good interesting RouteConfiguration.
{
name: "one-good-route-config",
rdsResponse: goodRDSResponse1,
wantErr: false,
wantUpdate: &xdsclient.RouteConfigUpdate{Routes: []*xdsclient.Route{{Prefix: newStringP(""), Action: map[string]uint32{goodClusterName1: 1}}}},
wantUpdateErr: false,
},
{
name: "one-good-route-config with routes",
rdsResponse: goodRDSResponse1,
wantErr: false,
wantUpdate: &xdsclient.RouteConfigUpdate{
// Instead of just weighted targets when routing is disabled,
// this result contains a route with prefix "", and action as
// weighted targets.
Routes: []*xdsclient.Route{{
Prefix: newStringP(""),
Action: map[string]uint32{goodClusterName1: 1},
}},
},
wantUpdateErr: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testWatchHandle(t, &watchHandleTestcase{
typeURL: version.V2RouteConfigURL,
resourceName: goodRouteName1,
responseToHandle: test.rdsResponse,
wantHandleErr: test.wantErr,
wantUpdate: test.wantUpdate,
wantUpdateErr: test.wantUpdateErr,
})
})
}
}

// TestRDSHandleResponseWithoutLDSWatch tests the case where the v2Client
// receives an RDS response without a registered LDS watcher.
func (s) TestRDSHandleResponseWithoutLDSWatch(t *testing.T) {
_, cc, cleanup := startServerAndGetCC(t)
defer cleanup()

v2c, err := newV2Client(&testUpdateReceiver{
f: func(string, map[string]interface{}) {},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
if err != nil {
t.Fatal(err)
}
defer v2c.Close()

if v2c.handleRDSResponse(goodRDSResponse1) == nil {
t.Fatal("v2c.handleRDSResponse() succeeded, should have failed")
}
}

// TestRDSHandleResponseWithoutRDSWatch tests the case where the v2Client
// receives an RDS response without a registered RDS watcher.
func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) {
fakeServer, cc, cleanup := startServerAndGetCC(t)
defer cleanup()

v2c, err := newV2Client(&testUpdateReceiver{
f: func(string, map[string]interface{}) {},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
if err != nil {
t.Fatal(err)
}
defer v2c.Close()
doLDS(t, v2c, fakeServer)

if v2c.handleRDSResponse(badResourceTypeInRDSResponse) == nil {
t.Fatal("v2c.handleRDSResponse() succeeded, should have failed")
}

if v2c.handleRDSResponse(goodRDSResponse1) != nil {
t.Fatal("v2c.handleRDSResponse() failed, should have succeeded")
}
}
@ -16,19 +16,25 @@
*
*/

package client
package v2

import (
"errors"
"reflect"
"testing"
"time"

"github.com/golang/protobuf/proto"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpctest"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/resolver/manual"
xdsclient "google.golang.org/grpc/xds/internal/client"
"google.golang.org/grpc/xds/internal/testutils"
"google.golang.org/grpc/xds/internal/testutils/fakeserver"
"google.golang.org/grpc/xds/internal/version"

xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
basepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
@ -39,8 +45,15 @@ import (
structpb "github.com/golang/protobuf/ptypes/struct"
)

type s struct {
grpctest.Tester
}

func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}

const (
defaultTestTimeout = 1 * time.Second
goodLDSTarget1 = "lds.target.good:1111"
goodLDSTarget2 = "lds.target.good:2222"
goodRouteName1 = "GoodRouteConfig1"
@ -67,22 +80,22 @@ var (
}
goodLDSRequest = &xdspb.DiscoveryRequest{
Node: goodNodeProto,
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
ResourceNames: []string{goodLDSTarget1},
}
goodRDSRequest = &xdspb.DiscoveryRequest{
Node: goodNodeProto,
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
ResourceNames: []string{goodRouteName1},
}
goodCDSRequest = &xdspb.DiscoveryRequest{
Node: goodNodeProto,
TypeUrl: cdsURL,
TypeUrl: version.V2ClusterURL,
ResourceNames: []string{goodClusterName1},
}
goodEDSRequest = &xdspb.DiscoveryRequest{
Node: goodNodeProto,
TypeUrl: edsURL,
TypeUrl: version.V2EndpointsURL,
ResourceNames: []string{goodEDSName},
}
goodHTTPConnManager1 = &httppb.HttpConnectionManager{
@ -96,17 +109,7 @@ var (
},
}
marshaledConnMgr1, _ = proto.Marshal(goodHTTPConnManager1)
emptyHTTPConnManager = &httppb.HttpConnectionManager{
RouteSpecifier: &httppb.HttpConnectionManager_Rds{
Rds: &httppb.Rds{},
},
}
emptyMarshaledConnMgr, _ = proto.Marshal(emptyHTTPConnManager)
connMgrWithScopedRoutes = &httppb.HttpConnectionManager{
RouteSpecifier: &httppb.HttpConnectionManager_ScopedRoutes{},
}
marshaledConnMgrWithScopedRoutes, _ = proto.Marshal(connMgrWithScopedRoutes)
goodListener1 = &xdspb.Listener{
goodListener1 = &xdspb.Listener{
Name: goodLDSTarget1,
ApiListener: &listenerpb.ApiListener{
ApiListener: &anypb.Any{
@ -128,16 +131,7 @@ var (
marshaledListener2, _ = proto.Marshal(goodListener2)
noAPIListener = &xdspb.Listener{Name: goodLDSTarget1}
marshaledNoAPIListener, _ = proto.Marshal(noAPIListener)
badAPIListener1 = &xdspb.Listener{
Name: goodLDSTarget1,
ApiListener: &listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: httpConnManagerURL,
Value: []byte{1, 2, 3, 4},
},
},
}
badAPIListener2 = &xdspb.Listener{
badAPIListener2 = &xdspb.Listener{
Name: goodLDSTarget2,
ApiListener: &listenerpb.ApiListener{
ApiListener: &anypb.Any{
@ -147,60 +141,33 @@ var (
},
}
badlyMarshaledAPIListener2, _ = proto.Marshal(badAPIListener2)
badResourceListener = &xdspb.Listener{
Name: goodLDSTarget1,
ApiListener: &listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: ldsURL,
Value: marshaledListener1,
},
},
}
listenerWithEmptyHTTPConnMgr = &xdspb.Listener{
Name: goodLDSTarget1,
ApiListener: &listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: httpConnManagerURL,
Value: emptyMarshaledConnMgr,
},
},
}
listenerWithScopedRoutesRouteConfig = &xdspb.Listener{
Name: goodLDSTarget1,
ApiListener: &listenerpb.ApiListener{
ApiListener: &anypb.Any{
TypeUrl: httpConnManagerURL,
Value: marshaledConnMgrWithScopedRoutes,
},
},
}
goodLDSResponse1 = &xdspb.DiscoveryResponse{
goodLDSResponse1 = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: marshaledListener1,
},
},
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
}
goodLDSResponse2 = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: marshaledListener2,
},
},
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
}
emptyLDSResponse = &xdspb.DiscoveryResponse{TypeUrl: ldsURL}
emptyLDSResponse = &xdspb.DiscoveryResponse{TypeUrl: version.V2ListenerURL}
badlyMarshaledLDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: []byte{1, 2, 3, 4},
},
},
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
}
badResourceTypeInLDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
@ -209,55 +176,55 @@ var (
Value: marshaledConnMgr1,
},
},
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
}
ldsResponseWithMultipleResources = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: marshaledListener2,
},
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: marshaledListener1,
},
},
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
}
noAPIListenerLDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: marshaledNoAPIListener,
},
},
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
}
goodBadUglyLDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: marshaledListener2,
},
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: marshaledListener1,
},
{
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
Value: badlyMarshaledAPIListener2,
},
},
TypeUrl: ldsURL,
TypeUrl: version.V2ListenerURL,
}
badlyMarshaledRDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
Value: []byte{1, 2, 3, 4},
},
},
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
}
badResourceTypeInRDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
@ -266,21 +233,18 @@ var (
Value: marshaledConnMgr1,
},
},
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
}
emptyRouteConfig = &xdspb.RouteConfiguration{}
marshaledEmptyRouteConfig, _ = proto.Marshal(emptyRouteConfig)
noDomainsInRouteConfig = &xdspb.RouteConfiguration{
VirtualHosts: []*routepb.VirtualHost{{}},
}
noVirtualHostsInRDSResponse = &xdspb.DiscoveryResponse{
noVirtualHostsInRDSResponse = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
Value: marshaledEmptyRouteConfig,
},
},
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
}
goodRouteConfig1 = &xdspb.RouteConfiguration{
Name: goodRouteName1,
@ -349,23 +313,201 @@ var (
goodRDSResponse1 = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
Value: marshaledGoodRouteConfig1,
},
},
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
}
goodRDSResponse2 = &xdspb.DiscoveryResponse{
Resources: []*anypb.Any{
{
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
Value: marshaledGoodRouteConfig2,
},
},
TypeUrl: rdsURL,
TypeUrl: version.V2RouteConfigURL,
}
)

type watchHandleTestcase struct {
typeURL string
resourceName string

responseToHandle *xdspb.DiscoveryResponse
wantHandleErr bool
wantUpdate interface{}
wantUpdateErr bool
}

type testUpdateReceiver struct {
f func(typeURL string, d map[string]interface{})
}

func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdate) {
dd := make(map[string]interface{})
for k, v := range d {
dd[k] = v
}
t.newUpdate(version.V2ListenerURL, dd)
}

func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdate) {
dd := make(map[string]interface{})
for k, v := range d {
dd[k] = v
}
t.newUpdate(version.V2RouteConfigURL, dd)
}

func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdate) {
dd := make(map[string]interface{})
for k, v := range d {
dd[k] = v
}
t.newUpdate(version.V2ClusterURL, dd)
}

func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdate) {
dd := make(map[string]interface{})
for k, v := range d {
dd[k] = v
}
t.newUpdate(version.V2EndpointsURL, dd)
}

func (t *testUpdateReceiver) newUpdate(typeURL string, d map[string]interface{}) {
t.f(typeURL, d)
}

// testWatchHandle is called to test response handling for each xDS.
//
// It starts the xDS watch as configured in test, waits for the fake xds server
// to receive the request (so watch callback is installed), and calls
// handleXDSResp with responseToHandle (if it's set). It then compares the
// update received by watch callback with the expected results.
func testWatchHandle(t *testing.T, test *watchHandleTestcase) {
fakeServer, cc, cleanup := startServerAndGetCC(t)
defer cleanup()

type updateErr struct {
u interface{}
err error
}
gotUpdateCh := testutils.NewChannel()

v2c, err := newV2Client(&testUpdateReceiver{
f: func(typeURL string, d map[string]interface{}) {
if typeURL == test.typeURL {
if u, ok := d[test.resourceName]; ok {
gotUpdateCh.Send(updateErr{u, nil})
}
}
},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
if err != nil {
t.Fatal(err)
}
defer v2c.Close()

// RDS needs an existing LDS watch for the hostname.
if test.typeURL == version.V2RouteConfigURL {
doLDS(t, v2c, fakeServer)
}

// Register the watcher, this will also trigger the v2Client to send the xDS
// request.
v2c.AddWatch(test.typeURL, test.resourceName)

// Wait till the request makes it to the fakeServer. This ensures that
// the watch request has been processed by the v2Client.
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
t.Fatalf("Timeout waiting for an xDS request: %v", err)
}

// Directly push the response through a call to handleXDSResp. This bypasses
// the fakeServer, so it's only testing the handle logic. Client response
// processing is covered elsewhere.
//
// Also note that this won't trigger ACK, so there's no need to clear the
// request channel afterwards.
var handleXDSResp func(response *xdspb.DiscoveryResponse) error
switch test.typeURL {
case version.V2ListenerURL:
handleXDSResp = v2c.handleLDSResponse
case version.V2RouteConfigURL:
handleXDSResp = v2c.handleRDSResponse
case version.V2ClusterURL:
handleXDSResp = v2c.handleCDSResponse
case version.V2EndpointsURL:
handleXDSResp = v2c.handleEDSResponse
}
if err := handleXDSResp(test.responseToHandle); (err != nil) != test.wantHandleErr {
t.Fatalf("v2c.handleRDSResponse() returned err: %v, wantErr: %v", err, test.wantHandleErr)
}

// If the test doesn't expect the callback to be invoked, verify that no
// update or error is pushed to the callback.
//
// Cannot directly compare test.wantUpdate with nil (typed vs non-typed nil:
// https://golang.org/doc/faq#nil_error).
if c := test.wantUpdate; c == nil || (reflect.ValueOf(c).Kind() == reflect.Ptr && reflect.ValueOf(c).IsNil()) {
update, err := gotUpdateCh.Receive()
if err == testutils.ErrRecvTimeout {
return
}
t.Fatalf("Unexpected update: +%v", update)
|
||||
}

wantUpdate := reflect.ValueOf(test.wantUpdate).Elem().Interface()
uErr, err := gotUpdateCh.Receive()
if err == testutils.ErrRecvTimeout {
t.Fatal("Timeout expecting xDS update")
}
gotUpdate := uErr.(updateErr).u
if diff := cmp.Diff(gotUpdate, wantUpdate); diff != "" {
t.Fatalf("got update : %+v, want %+v, diff: %s", gotUpdate, wantUpdate, diff)
}
gotUpdateErr := uErr.(updateErr).err
if (gotUpdateErr != nil) != test.wantUpdateErr {
t.Fatalf("got xDS update error {%v}, wantErr: %v", gotUpdateErr, test.wantUpdateErr)
}
}

// startServerAndGetCC starts a fake XDS server and also returns a ClientConn
// connected to it.
func startServerAndGetCC(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) {
t.Helper()

fs, sCleanup, err := fakeserver.StartServer()
if err != nil {
t.Fatalf("Failed to start fake xDS server: %v", err)
}

cc, ccCleanup, err := fs.XDSClientConn()
if err != nil {
sCleanup()
t.Fatalf("Failed to get a clientConn to the fake xDS server: %v", err)
}
return fs, cc, func() {
sCleanup()
ccCleanup()
}
}

func newV2Client(p xdsclient.UpdateHandler, cc *grpc.ClientConn, n *basepb.Node, b func(int) time.Duration, l *grpclog.PrefixLogger) (*client, error) {
c, err := newClient(cc, xdsclient.BuildOptions{
Parent: p,
NodeProto: n,
Backoff: b,
Logger: l,
})
if err != nil {
return nil, err
}
return c.(*client), nil
}

// TestV2ClientBackoffAfterRecvError verifies if the v2Client backoffs when it
// encounters a Recv error while receiving an LDS response.
func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) {
@ -381,14 +523,17 @@ func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) {
}

callbackCh := make(chan struct{})
v2c := newV2Client(&testUpdateReceiver{
v2c, err := newV2Client(&testUpdateReceiver{
f: func(string, map[string]interface{}) { close(callbackCh) },
}, cc, goodNodeProto, clientBackoff, nil)
defer v2c.close()
if err != nil {
t.Fatal(err)
}
defer v2c.Close()
t.Log("Started xds v2Client...")

// v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) {})
v2c.addWatch(ldsURL, goodLDSTarget1)
v2c.AddWatch(version.V2ListenerURL, goodLDSTarget1)
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
t.Fatalf("Timeout expired when expecting an LDS request")
}
@ -397,7 +542,7 @@ func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) {
fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")}
t.Log("Bad LDS response pushed to fakeServer...")

timer := time.NewTimer(defaultTestTimeout)
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:
t.Fatal("Timeout when expecting LDS update")
@ -417,9 +562,9 @@ func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) {
defer cleanup()

callbackCh := testutils.NewChannel()
v2c := newV2Client(&testUpdateReceiver{
v2c, err := newV2Client(&testUpdateReceiver{
f: func(typeURL string, d map[string]interface{}) {
if typeURL == ldsURL {
if typeURL == version.V2ListenerURL {
if u, ok := d[goodLDSTarget1]; ok {
t.Logf("Received LDS callback with ldsUpdate {%+v}", u)
callbackCh.Send(struct{}{})
@ -427,10 +572,13 @@ func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) {
}
},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
defer v2c.close()
if err != nil {
t.Fatal(err)
}
defer v2c.Close()
t.Log("Started xds v2Client...")

v2c.addWatch(ldsURL, goodLDSTarget1)
v2c.AddWatch(version.V2ListenerURL, goodLDSTarget1)
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
t.Fatalf("Timeout expired when expecting an LDS request")
}
@ -467,12 +615,6 @@ func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) {
// level). And when the stream is re-created, the watcher should get future
// updates.
func (s) TestV2ClientWatchWithoutStream(t *testing.T) {
oldWatchExpiryTimeout := defaultWatchExpiryTimeout
defaultWatchExpiryTimeout = 500 * time.Millisecond
defer func() {
defaultWatchExpiryTimeout = oldWatchExpiryTimeout
}()

fakeServer, sCleanup, err := fakeserver.StartServer()
if err != nil {
t.Fatalf("Failed to start fake xDS server: %v", err)
@ -490,9 +632,9 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) {
defer cc.Close()

callbackCh := testutils.NewChannel()
v2c := newV2Client(&testUpdateReceiver{
v2c, err := newV2Client(&testUpdateReceiver{
f: func(typeURL string, d map[string]interface{}) {
if typeURL == ldsURL {
if typeURL == version.V2ListenerURL {
if u, ok := d[goodLDSTarget1]; ok {
t.Logf("Received LDS callback with ldsUpdate {%+v}", u)
callbackCh.Send(u)
@ -500,12 +642,15 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) {
}
},
}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
defer v2c.close()
if err != nil {
t.Fatal(err)
}
defer v2c.Close()
t.Log("Started xds v2Client...")

// This watch is started when the xds-ClientConn is in Transient Failure,
// and no xds stream is created.
v2c.addWatch(ldsURL, goodLDSTarget1)
v2c.AddWatch(version.V2ListenerURL, goodLDSTarget1)

// The watcher should receive an update, with a timeout error in it.
if v, err := callbackCh.TimedReceive(100 * time.Millisecond); err == nil {
@ -528,7 +673,11 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) {

if v, err := callbackCh.Receive(); err != nil {
t.Fatal("Timeout when expecting LDS update")
} else if _, ok := v.(ldsUpdate); !ok {
} else if _, ok := v.(xdsclient.ListenerUpdate); !ok {
t.Fatalf("Expect an LDS update from watcher, got %v", v)
}
}

func newStringP(s string) *string {
return &s
}
@ -1,75 +0,0 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package client

import (
"fmt"

xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
"github.com/golang/protobuf/ptypes"
)

// handleCDSResponse processes a CDS response received from the xDS server. On
// receipt of a good response, it also invokes the registered watcher callback.
func (v2c *v2Client) handleCDSResponse(resp *xdspb.DiscoveryResponse) error {
returnUpdate := make(map[string]ClusterUpdate)
for _, r := range resp.GetResources() {
var resource ptypes.DynamicAny
if err := ptypes.UnmarshalAny(r, &resource); err != nil {
return fmt.Errorf("xds: failed to unmarshal resource in CDS response: %v", err)
}
cluster, ok := resource.Message.(*xdspb.Cluster)
if !ok {
return fmt.Errorf("xds: unexpected resource type: %T in CDS response", resource.Message)
}
v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, cluster)
update, err := validateCluster(cluster)
if err != nil {
return err
}

// If the Cluster message in the CDS response did not contain a
// serviceName, we will just use the clusterName for EDS.
if update.ServiceName == "" {
update.ServiceName = cluster.GetName()
}
v2c.logger.Debugf("Resource with name %v, type %T, value %+v added to cache", cluster.GetName(), update, update)
returnUpdate[cluster.GetName()] = update
}

v2c.parent.newCDSUpdate(returnUpdate)
return nil
}

func validateCluster(cluster *xdspb.Cluster) (ClusterUpdate, error) {
emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false}
switch {
case cluster.GetType() != xdspb.Cluster_EDS:
return emptyUpdate, fmt.Errorf("xds: unexpected cluster type %v in response: %+v", cluster.GetType(), cluster)
case cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil:
return emptyUpdate, fmt.Errorf("xds: unexpected edsConfig in response: %+v", cluster)
case cluster.GetLbPolicy() != xdspb.Cluster_ROUND_ROBIN:
return emptyUpdate, fmt.Errorf("xds: unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
}

return ClusterUpdate{
ServiceName: cluster.GetEdsClusterConfig().GetServiceName(),
EnableLRS: cluster.GetLrsServer().GetSelf() != nil,
}, nil
}
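
For reference, a minimal sketch of a Cluster that passes all three checks above (EDS discovery type, ADS-sourced EDS config, round-robin LB policy); names are illustrative, not from the commit, and corepb aliases the envoy v2 core package as in the test files above:

cluster := &xdspb.Cluster{
	Name:                 "example-cluster",
	ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS},
	EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{
		EdsConfig: &corepb.ConfigSource{
			ConfigSourceSpecifier: &corepb.ConfigSource_Ads{Ads: &corepb.AggregatedConfigSource{}},
		},
		ServiceName: "example-eds-service", // copied into ClusterUpdate.ServiceName
	},
	LbPolicy: xdspb.Cluster_ROUND_ROBIN,
	// A self-referencing LrsServer is what flips EnableLRS to true.
	LrsServer: &corepb.ConfigSource{
		ConfigSourceSpecifier: &corepb.ConfigSource_Self{Self: &corepb.SelfConfigSource{}},
	},
}
update, err := validateCluster(cluster)
// update == ClusterUpdate{ServiceName: "example-eds-service", EnableLRS: true}, err == nil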
@ -1,128 +0,0 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// All structs/functions in this file should be unexported. They are used in EDS
// balancer tests now, to generate test inputs. Eventually, EDS balancer tests
// should generate EndpointsUpdate directly, instead of generating and parsing the
// proto message.
// TODO: unexport everything in this file.

package client

import (
"fmt"
"net"
"strconv"

xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
typepb "github.com/envoyproxy/go-control-plane/envoy/type"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
)

// ClusterLoadAssignmentBuilder builds a ClusterLoadAssignment, aka EDS
// response.
type ClusterLoadAssignmentBuilder struct {
v *xdspb.ClusterLoadAssignment
}

// NewClusterLoadAssignmentBuilder creates a ClusterLoadAssignmentBuilder.
func NewClusterLoadAssignmentBuilder(clusterName string, dropPercents []uint32) *ClusterLoadAssignmentBuilder {
var drops []*xdspb.ClusterLoadAssignment_Policy_DropOverload
for i, d := range dropPercents {
drops = append(drops, &xdspb.ClusterLoadAssignment_Policy_DropOverload{
Category: fmt.Sprintf("test-drop-%d", i),
DropPercentage: &typepb.FractionalPercent{
Numerator: d,
Denominator: typepb.FractionalPercent_HUNDRED,
},
})
}

return &ClusterLoadAssignmentBuilder{
v: &xdspb.ClusterLoadAssignment{
ClusterName: clusterName,
Policy: &xdspb.ClusterLoadAssignment_Policy{
DropOverloads: drops,
},
},
}
}

// AddLocalityOptions contains options when adding locality to the builder.
type AddLocalityOptions struct {
Health []corepb.HealthStatus
Weight []uint32
}

// AddLocality adds a locality to the builder.
func (clab *ClusterLoadAssignmentBuilder) AddLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *AddLocalityOptions) {
var lbEndPoints []*endpointpb.LbEndpoint
for i, a := range addrsWithPort {
host, portStr, err := net.SplitHostPort(a)
if err != nil {
panic("failed to split " + a)
}
port, err := strconv.Atoi(portStr)
if err != nil {
panic("failed to atoi " + portStr)
}

lbe := &endpointpb.LbEndpoint{
HostIdentifier: &endpointpb.LbEndpoint_Endpoint{
Endpoint: &endpointpb.Endpoint{
Address: &corepb.Address{
Address: &corepb.Address_SocketAddress{
SocketAddress: &corepb.SocketAddress{
Protocol: corepb.SocketAddress_TCP,
Address: host,
PortSpecifier: &corepb.SocketAddress_PortValue{
PortValue: uint32(port)}}}}}},
}
if opts != nil {
if i < len(opts.Health) {
lbe.HealthStatus = opts.Health[i]
}
if i < len(opts.Weight) {
lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]}
}
}
lbEndPoints = append(lbEndPoints, lbe)
}

var localityID *corepb.Locality
if subzone != "" {
localityID = &corepb.Locality{
Region: "",
Zone: "",
SubZone: subzone,
}
}

clab.v.Endpoints = append(clab.v.Endpoints, &endpointpb.LocalityLbEndpoints{
Locality: localityID,
LbEndpoints: lbEndPoints,
LoadBalancingWeight: &wrapperspb.UInt32Value{Value: weight},
Priority: priority,
})
}

// Build builds ClusterLoadAssignment.
func (clab *ClusterLoadAssignmentBuilder) Build() *xdspb.ClusterLoadAssignment {
return clab.v
}
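
A small usage sketch (values are illustrative): build an assignment that drops 10% of requests under the generated category "test-drop-0" and carries one locality with a single endpoint:

clab := NewClusterLoadAssignmentBuilder("example-cluster", []uint32{10})
clab.AddLocality("example-sub-zone", 1, 0, []string{"127.0.0.1:8080"}, nil)
cla := clab.Build() // *xdspb.ClusterLoadAssignment with one DropOverload and one locality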
@ -1,88 +0,0 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package client

import (
"fmt"

xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
"github.com/golang/protobuf/ptypes"
)

// handleLDSResponse processes an LDS response received from the xDS server. On
// receipt of a good response, it also invokes the registered watcher callback.
func (v2c *v2Client) handleLDSResponse(resp *xdspb.DiscoveryResponse) error {
returnUpdate := make(map[string]ldsUpdate)
for _, r := range resp.GetResources() {
var resource ptypes.DynamicAny
if err := ptypes.UnmarshalAny(r, &resource); err != nil {
return fmt.Errorf("xds: failed to unmarshal resource in LDS response: %v", err)
}
lis, ok := resource.Message.(*xdspb.Listener)
if !ok {
return fmt.Errorf("xds: unexpected resource type: %T in LDS response", resource.Message)
}
v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, lis)
routeName, err := v2c.getRouteConfigNameFromListener(lis)
if err != nil {
return err
}
returnUpdate[lis.GetName()] = ldsUpdate{routeName: routeName}
}

v2c.parent.newLDSUpdate(returnUpdate)
return nil
}

// getRouteConfigNameFromListener checks if the provided Listener proto meets
// the expected criteria. If so, it returns a non-empty routeConfigName.
func (v2c *v2Client) getRouteConfigNameFromListener(lis *xdspb.Listener) (string, error) {
if lis.GetApiListener() == nil {
return "", fmt.Errorf("xds: no api_listener field in LDS response %+v", lis)
}
var apiAny ptypes.DynamicAny
if err := ptypes.UnmarshalAny(lis.GetApiListener().GetApiListener(), &apiAny); err != nil {
return "", fmt.Errorf("xds: failed to unmarshal api_listner in LDS response: %v", err)
|
||||
}
apiLis, ok := apiAny.Message.(*httppb.HttpConnectionManager)
if !ok {
return "", fmt.Errorf("xds: unexpected api_listener type: %T in LDS response", apiAny.Message)
}
v2c.logger.Infof("Resource with type %T, contains %v", apiLis, apiLis)
switch apiLis.RouteSpecifier.(type) {
case *httppb.HttpConnectionManager_Rds:
if apiLis.GetRds().GetConfigSource().GetAds() == nil {
return "", fmt.Errorf("xds: ConfigSource is not ADS in LDS response: %+v", lis)
}
name := apiLis.GetRds().GetRouteConfigName()
if name == "" {
return "", fmt.Errorf("xds: empty route_config_name in LDS response: %+v", lis)
}
return name, nil
case *httppb.HttpConnectionManager_RouteConfig:
// TODO: Add support for specifying the RouteConfiguration inline
// in the LDS response.
return "", fmt.Errorf("xds: LDS response contains RDS config inline. Not supported for now: %+v", apiLis)
case nil:
return "", fmt.Errorf("xds: no RouteSpecifier in received LDS response: %+v", apiLis)
default:
return "", fmt.Errorf("xds: unsupported type %T for RouteSpecifier in received LDS response", apiLis.RouteSpecifier)
}
}
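
For reference, a sketch of a Listener this function accepts (illustrative names; listenerpb, anypb, basepb, and proto are imported in the test files of this commit, not in this file): the api_listener wraps an HttpConnectionManager whose Rds config source is ADS and whose route_config_name is non-empty.

cm := &httppb.HttpConnectionManager{
	RouteSpecifier: &httppb.HttpConnectionManager_Rds{Rds: &httppb.Rds{
		ConfigSource: &basepb.ConfigSource{
			ConfigSourceSpecifier: &basepb.ConfigSource_Ads{Ads: &basepb.AggregatedConfigSource{}},
		},
		RouteConfigName: "example-route-config", // the value this function returns
	}},
}
mcm, _ := proto.Marshal(cm)
lis := &xdspb.Listener{
	Name: "example-listener",
	ApiListener: &listenerpb.ApiListener{
		ApiListener: &anypb.Any{TypeUrl: httpConnManagerURL, Value: mcm},
	},
}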
|
|
@ -1,323 +0,0 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package client

import (
	"fmt"
	"strings"

	xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
	typepb "github.com/envoyproxy/go-control-plane/envoy/type"
	"github.com/golang/protobuf/ptypes"
	"google.golang.org/grpc/internal/grpclog"
)

// handleRDSResponse processes an RDS response received from the xDS server. On
// receipt of a good response, it caches validated resources and also invokes
// the registered watcher callback.
func (v2c *v2Client) handleRDSResponse(resp *xdspb.DiscoveryResponse) error {
	v2c.mu.Lock()
	hostname := v2c.hostname
	v2c.mu.Unlock()

	returnUpdate := make(map[string]rdsUpdate)
	for _, r := range resp.GetResources() {
		var resource ptypes.DynamicAny
		if err := ptypes.UnmarshalAny(r, &resource); err != nil {
			return fmt.Errorf("xds: failed to unmarshal resource in RDS response: %v", err)
		}
		rc, ok := resource.Message.(*xdspb.RouteConfiguration)
		if !ok {
			return fmt.Errorf("xds: unexpected resource type: %T in RDS response", resource.Message)
		}
		v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v. Picking routes for current watching hostname %v", rc.GetName(), rc, rc, v2c.hostname)

		// Use the hostname (resourceName for LDS) to find the routes.
		u, err := generateRDSUpdateFromRouteConfiguration(rc, hostname, v2c.logger)
		if err != nil {
			return fmt.Errorf("xds: received invalid RouteConfiguration in RDS response: %+v with err: %v", rc, err)
		}
		// If we get here, it means that this resource was a good one.
		returnUpdate[rc.GetName()] = u
	}

	v2c.parent.newRDSUpdate(returnUpdate)
	return nil
}
// generateRDSUpdateFromRouteConfiguration checks if the provided
// RouteConfiguration meets the expected criteria. If so, it returns a rdsUpdate
// with nil error.
//
// A RouteConfiguration resource is considered valid only if it contains a
// VirtualHost whose domain field matches the server name from the URI passed
// to the gRPC channel, and it contains a clusterName or a weighted cluster.
//
// The RouteConfiguration includes a list of VirtualHosts, which may have zero
// or more elements. We are interested in the element whose domains field
// matches the server name specified in the "xds:" URI. The only field in the
// VirtualHost proto that we are interested in is the list of routes. We
// only look at the last route in the list (the default route), whose match
// field must be empty and whose route field must be set. Inside that route
// message, the cluster field will contain the clusterName or weighted clusters
// we are looking for.
func generateRDSUpdateFromRouteConfiguration(rc *xdspb.RouteConfiguration, host string, logger *grpclog.PrefixLogger) (rdsUpdate, error) {
	//
	// Currently this returns "" on error, and the caller will return an error.
	// But the error doesn't contain details of why the response is invalid
	// (mismatched domain or empty route).
	//
	// For logging purposes, we can log in line. But if we want to populate
	// error details for nack, a detailed error needs to be returned.
	vh := findBestMatchingVirtualHost(host, rc.GetVirtualHosts())
	if vh == nil {
		// No matching virtual host found.
		return rdsUpdate{}, fmt.Errorf("no matching virtual host found")
	}
	if len(vh.Routes) == 0 {
		// The matched virtual host has no routes; this is invalid because
		// there should be at least one default route.
		return rdsUpdate{}, fmt.Errorf("matched virtual host has no routes")
	}

	routes, err := routesProtoToSlice(vh.Routes, logger)
	if err != nil {
		return rdsUpdate{}, fmt.Errorf("received route is invalid: %v", err)
	}
	return rdsUpdate{routes: routes}, nil
}
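// For reference, a minimal sketch of a RouteConfiguration of the shape this
// function accepts; the route config name, domain, and cluster name below are
// placeholders, not values from the real tests:
//
//	rc := &xdspb.RouteConfiguration{
//		Name: "route_config_1",
//		VirtualHosts: []*routepb.VirtualHost{{
//			Domains: []string{"target.host.name"},
//			Routes: []*routepb.Route{{
//				Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/"}},
//				Action: &routepb.Route_Route{Route: &routepb.RouteAction{
//					ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: "cluster_1"},
//				}},
//			}},
//		}},
//	}
//
// generateRDSUpdateFromRouteConfiguration(rc, "target.host.name", nil) should
// then return an rdsUpdate whose single route has Prefix "/" and action
// map[string]uint32{"cluster_1": 1}.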
func routesProtoToSlice(routes []*routepb.Route, logger *grpclog.PrefixLogger) ([]*Route, error) {
	var routesRet []*Route

	for _, r := range routes {
		match := r.GetMatch()
		if match == nil {
			return nil, fmt.Errorf("route %+v doesn't have a match", r)
		}

		if len(match.GetQueryParameters()) != 0 {
			// Ignore route with query parameters.
			logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r)
			continue
		}

		if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil && !caseSensitive.Value {
			return nil, fmt.Errorf("route %+v has case-sensitive false", r)
		}

		pathSp := match.GetPathSpecifier()
		if pathSp == nil {
			return nil, fmt.Errorf("route %+v doesn't have a path specifier", r)
		}

		var route Route
		switch pt := pathSp.(type) {
		case *routepb.RouteMatch_Prefix:
			route.Prefix = &pt.Prefix
		case *routepb.RouteMatch_Path:
			route.Path = &pt.Path
		case *routepb.RouteMatch_SafeRegex:
			route.Regex = &pt.SafeRegex.Regex
		case *routepb.RouteMatch_Regex:
			return nil, fmt.Errorf("route %+v has Regex, expected SafeRegex instead", r)
		default:
			logger.Warningf("route %+v has an unrecognized path specifier: %+v", r, pt)
			continue
		}

		for _, h := range match.GetHeaders() {
			var header HeaderMatcher
			switch ht := h.GetHeaderMatchSpecifier().(type) {
			case *routepb.HeaderMatcher_ExactMatch:
				header.ExactMatch = &ht.ExactMatch
			case *routepb.HeaderMatcher_SafeRegexMatch:
				header.RegexMatch = &ht.SafeRegexMatch.Regex
			case *routepb.HeaderMatcher_RangeMatch:
				header.RangeMatch = &Int64Range{
					Start: ht.RangeMatch.Start,
					End:   ht.RangeMatch.End,
				}
			case *routepb.HeaderMatcher_PresentMatch:
				header.PresentMatch = &ht.PresentMatch
			case *routepb.HeaderMatcher_PrefixMatch:
				header.PrefixMatch = &ht.PrefixMatch
			case *routepb.HeaderMatcher_SuffixMatch:
				header.SuffixMatch = &ht.SuffixMatch
			case *routepb.HeaderMatcher_RegexMatch:
				return nil, fmt.Errorf("route %+v has a header matcher with Regex, expected SafeRegex instead", r)
			default:
				logger.Warningf("route %+v has an unrecognized header matcher: %+v", r, ht)
				continue
			}
			header.Name = h.GetName()
			invert := h.GetInvertMatch()
			header.InvertMatch = &invert
			route.Headers = append(route.Headers, &header)
		}

		if fr := match.GetRuntimeFraction(); fr != nil {
			d := fr.GetDefaultValue()
			n := d.GetNumerator()
			switch d.GetDenominator() {
			case typepb.FractionalPercent_HUNDRED:
				n *= 10000
			case typepb.FractionalPercent_TEN_THOUSAND:
				n *= 100
			case typepb.FractionalPercent_MILLION:
			}
			route.Fraction = &n
		}

		clusters := make(map[string]uint32)
		switch a := r.GetRoute().GetClusterSpecifier().(type) {
		case *routepb.RouteAction_Cluster:
			clusters[a.Cluster] = 1
		case *routepb.RouteAction_WeightedClusters:
			wcs := a.WeightedClusters
			var totalWeight uint32
			for _, c := range wcs.Clusters {
				w := c.GetWeight().GetValue()
				clusters[c.GetName()] = w
				totalWeight += w
			}
			if totalWeight != wcs.GetTotalWeight().GetValue() {
				return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total weight, got: %v, want %v", r, a, totalWeight, wcs.GetTotalWeight().GetValue())
			}
		case *routepb.RouteAction_ClusterHeader:
			continue
		}

		route.Action = clusters
		routesRet = append(routesRet, &route)
	}
	return routesRet, nil
}
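// The runtime fraction above is normalized to a numerator out of one million,
// so equal fractions compare equal regardless of the denominator they were
// expressed in. A sketch of the arithmetic for 1 out of HUNDRED (i.e. 1%):
//
//	d := &typepb.FractionalPercent{Numerator: 1, Denominator: typepb.FractionalPercent_HUNDRED}
//	n := d.GetNumerator() // 1
//	n *= 10000            // 10000 out of MILLION, still 1%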
func weightedClustersProtoToMap(wc *routepb.WeightedCluster) (map[string]uint32, error) {
	ret := make(map[string]uint32)
	var totalWeight uint32 = 100
	if t := wc.GetTotalWeight().GetValue(); t != 0 {
		totalWeight = t
	}
	for _, cw := range wc.Clusters {
		w := cw.Weight.GetValue()
		ret[cw.Name] = w
		totalWeight -= w
	}
	if totalWeight != 0 {
		return nil, fmt.Errorf("weights of clusters do not add up to total weight, difference: %v", totalWeight)
	}
	return ret, nil
}
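// Usage sketch with hypothetical cluster names: when TotalWeight is unset,
// the default total of 100 applies and the weights must sum to exactly 100:
//
//	wc := &routepb.WeightedCluster{
//		Clusters: []*routepb.WeightedCluster_ClusterWeight{
//			{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 20}},
//			{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 80}},
//		},
//	}
//	m, err := weightedClustersProtoToMap(wc) // map[a:20 b:80], nil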
type domainMatchType int

const (
	domainMatchTypeInvalid domainMatchType = iota
	domainMatchTypeUniversal
	domainMatchTypePrefix
	domainMatchTypeSuffix
	domainMatchTypeExact
)

// Exact > Suffix > Prefix > Universal > Invalid.
func (t domainMatchType) betterThan(b domainMatchType) bool {
	return t > b
}

func matchTypeForDomain(d string) domainMatchType {
	if d == "" {
		return domainMatchTypeInvalid
	}
	if d == "*" {
		return domainMatchTypeUniversal
	}
	if strings.HasPrefix(d, "*") {
		return domainMatchTypeSuffix
	}
	if strings.HasSuffix(d, "*") {
		return domainMatchTypePrefix
	}
	if strings.Contains(d, "*") {
		return domainMatchTypeInvalid
	}
	return domainMatchTypeExact
}

func match(domain, host string) (domainMatchType, bool) {
	switch typ := matchTypeForDomain(domain); typ {
	case domainMatchTypeInvalid:
		return typ, false
	case domainMatchTypeUniversal:
		return typ, true
	case domainMatchTypePrefix:
		// abc.*
		return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*"))
	case domainMatchTypeSuffix:
		// *.123
		return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*"))
	case domainMatchTypeExact:
		return typ, domain == host
	default:
		return domainMatchTypeInvalid, false
	}
}
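// Because the constants above are ordered by iota from worst to best,
// betterThan reduces to a plain integer comparison, and match pairs the
// pattern type with whether the host actually matched. For example:
//
//	domainMatchTypeExact.betterThan(domainMatchTypeSuffix) // true
//	typ, ok := match("*.bar.com", "foo.bar.com")           // domainMatchTypeSuffix, true
//	typ, ok = match("314.*", "314.bar.com")                // domainMatchTypePrefix, true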
// findBestMatchingVirtualHost returns the virtual host whose domains field
// best matches host.
//
// The domains field supports 4 different matching pattern types:
// - Exact match
// - Suffix match (e.g. "*ABC")
// - Prefix match (e.g. "ABC*")
// - Universal match (e.g. "*")
//
// The best match is defined as:
// - A match is better if its matching pattern type is better.
//   - Exact match > suffix match > prefix match > universal match.
// - If two matches are of the same pattern type, the longer match is better.
//   - This compares the length of the matching pattern, e.g. "*ABCDE" >
//     "*ABC".
func findBestMatchingVirtualHost(host string, vHosts []*routepb.VirtualHost) *routepb.VirtualHost {
	var (
		matchVh   *routepb.VirtualHost
		matchType = domainMatchTypeInvalid
		matchLen  int
	)
	for _, vh := range vHosts {
		for _, domain := range vh.GetDomains() {
			typ, matched := match(domain, host)
			if typ == domainMatchTypeInvalid {
				// The RDS response is invalid.
				return nil
			}
			if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched {
				// The previous match has a better type, or the previous match
				// has a better length, or this domain isn't a match.
				continue
			}
			matchVh = vh
			matchType = typ
			matchLen = len(domain)
		}
	}
	return matchVh
}
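// A sketch of the precedence rules with two hypothetical virtual hosts:
//
//	vh1 := &routepb.VirtualHost{Domains: []string{"*.bar.com"}}
//	vh2 := &routepb.VirtualHost{Domains: []string{"pi.foo.bar.com", "314.*"}}
//	vhs := []*routepb.VirtualHost{vh1, vh2}
//
//	findBestMatchingVirtualHost("pi.foo.bar.com", vhs) // vh2: exact beats suffix
//	findBestMatchingVirtualHost("314.bar.com", vhs)    // vh1: suffix beats prefix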
@ -1,699 +0,0 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package client

import (
	"testing"
	"time"

	xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
	typepb "github.com/envoyproxy/go-control-plane/envoy/type"
	"github.com/golang/protobuf/proto"
	wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"google.golang.org/grpc/xds/internal/testutils/fakeserver"
)
func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
	tests := []struct {
		name       string
		rc         *xdspb.RouteConfiguration
		wantUpdate rdsUpdate
		wantError  bool
	}{
		{
			name:      "no-virtual-hosts-in-rc",
			rc:        emptyRouteConfig,
			wantError: true,
		},
		{
			name:      "no-domains-in-rc",
			rc:        noDomainsInRouteConfig,
			wantError: true,
		},
		{
			name: "non-matching-domain-in-rc",
			rc: &xdspb.RouteConfiguration{
				VirtualHosts: []*routepb.VirtualHost{
					{Domains: []string{uninterestingDomain}},
				},
			},
			wantError: true,
		},
		{
			name: "no-routes-in-rc",
			rc: &xdspb.RouteConfiguration{
				VirtualHosts: []*routepb.VirtualHost{
					{Domains: []string{goodLDSTarget1}},
				},
			},
			wantError: true,
		},
		{
			name: "default-route-match-field-is-nil",
			rc: &xdspb.RouteConfiguration{
				VirtualHosts: []*routepb.VirtualHost{
					{
						Domains: []string{goodLDSTarget1},
						Routes: []*routepb.Route{
							{
								Action: &routepb.Route_Route{
									Route: &routepb.RouteAction{
										ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName1},
									},
								},
							},
						},
					},
				},
			},
			wantError: true,
		},
		{
			name: "default-route-match-field-is-non-nil",
			rc: &xdspb.RouteConfiguration{
				VirtualHosts: []*routepb.VirtualHost{
					{
						Domains: []string{goodLDSTarget1},
						Routes: []*routepb.Route{
							{
								Match:  &routepb.RouteMatch{},
								Action: &routepb.Route_Route{},
							},
						},
					},
				},
			},
			wantError: true,
		},
		{
			name: "default-route-routeaction-field-is-nil",
			rc: &xdspb.RouteConfiguration{
				VirtualHosts: []*routepb.VirtualHost{
					{
						Domains: []string{goodLDSTarget1},
						Routes:  []*routepb.Route{{}},
					},
				},
			},
			wantError: true,
		},
		{
			name: "default-route-cluster-field-is-empty",
			rc: &xdspb.RouteConfiguration{
				VirtualHosts: []*routepb.VirtualHost{
					{
						Domains: []string{goodLDSTarget1},
						Routes: []*routepb.Route{
							{
								Action: &routepb.Route_Route{
									Route: &routepb.RouteAction{
										ClusterSpecifier: &routepb.RouteAction_ClusterHeader{},
									},
								},
							},
						},
					},
				},
			},
			wantError: true,
		},
		{
			// The default route's match sets case-sensitive to false.
			name: "good-route-config-but-with-casesensitive-false",
			rc: &xdspb.RouteConfiguration{
				Name: goodRouteName1,
				VirtualHosts: []*routepb.VirtualHost{{
					Domains: []string{goodLDSTarget1},
					Routes: []*routepb.Route{{
						Match: &routepb.RouteMatch{
							PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/"},
							CaseSensitive: &wrapperspb.BoolValue{Value: false},
						},
						Action: &routepb.Route_Route{
							Route: &routepb.RouteAction{
								ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName1},
							}}}}}}},
			wantError: true,
		},
		{
			name:       "good-route-config-with-empty-string-route",
			rc:         goodRouteConfig1,
			wantUpdate: rdsUpdate{routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{goodClusterName1: 1}}}},
		},
		{
			// The default route's match is not the empty string but "/".
			name: "good-route-config-with-slash-string-route",
			rc: &xdspb.RouteConfiguration{
				Name: goodRouteName1,
				VirtualHosts: []*routepb.VirtualHost{{
					Domains: []string{goodLDSTarget1},
					Routes: []*routepb.Route{{
						Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/"}},
						Action: &routepb.Route_Route{
							Route: &routepb.RouteAction{
								ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName1},
							}}}}}}},
			wantUpdate: rdsUpdate{routes: []*Route{{Prefix: newStringP("/"), Action: map[string]uint32{goodClusterName1: 1}}}},
		},
		{
			// Cluster weights do not add up to the total weight.
			name: "route-config-with-weighted_clusters_weights_not_add_up",
			rc: &xdspb.RouteConfiguration{
				Name: goodRouteName1,
				VirtualHosts: []*routepb.VirtualHost{{
					Domains: []string{goodLDSTarget1},
					Routes: []*routepb.Route{{
						Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/"}},
						Action: &routepb.Route_Route{
							Route: &routepb.RouteAction{
								ClusterSpecifier: &routepb.RouteAction_WeightedClusters{
									WeightedClusters: &routepb.WeightedCluster{
										Clusters: []*routepb.WeightedCluster_ClusterWeight{
											{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}},
											{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}},
											{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}},
										},
										TotalWeight: &wrapperspb.UInt32Value{Value: 30},
									}}}}}}}}},
			wantError: true,
		},
		{
			name: "good-route-config-with-weighted_clusters",
			rc: &xdspb.RouteConfiguration{
				Name: goodRouteName1,
				VirtualHosts: []*routepb.VirtualHost{{
					Domains: []string{goodLDSTarget1},
					Routes: []*routepb.Route{{
						Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/"}},
						Action: &routepb.Route_Route{
							Route: &routepb.RouteAction{
								ClusterSpecifier: &routepb.RouteAction_WeightedClusters{
									WeightedClusters: &routepb.WeightedCluster{
										Clusters: []*routepb.WeightedCluster_ClusterWeight{
											{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}},
											{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}},
											{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}},
										},
										TotalWeight: &wrapperspb.UInt32Value{Value: 10},
									}}}}}}}}},
			wantUpdate: rdsUpdate{routes: []*Route{{Prefix: newStringP("/"), Action: map[string]uint32{"a": 2, "b": 3, "c": 5}}}},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, goodLDSTarget1, nil)
			if !cmp.Equal(gotUpdate, test.wantUpdate, cmp.AllowUnexported(rdsUpdate{})) || (gotError != nil) != test.wantError {
				t.Errorf("generateRDSUpdateFromRouteConfiguration(%+v, %v) = %v, want %v", test.rc, goodLDSTarget1, gotUpdate, test.wantUpdate)
			}
		})
	}
}
// doLDS makes an LDS watch, and waits for the response and ack to finish.
//
// This is called by RDS tests to start LDS first, because LDS is a
// prerequisite for RDS, and RDS handling would fail without an existing LDS
// watch.
func doLDS(t *testing.T, v2c *v2Client, fakeServer *fakeserver.Server) {
	v2c.addWatch(ldsURL, goodLDSTarget1)
	if _, err := fakeServer.XDSRequestChan.Receive(); err != nil {
		t.Fatalf("Timeout waiting for LDS request: %v", err)
	}
}
// TestRDSHandleResponseWithRouting starts a fake xDS server, makes a ClientConn
// to it, and creates a v2Client using it. Then, it registers an LDS and RDS
// watcher and tests different RDS responses.
func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
	tests := []struct {
		name          string
		rdsResponse   *xdspb.DiscoveryResponse
		wantErr       bool
		wantUpdate    *rdsUpdate
		wantUpdateErr bool
	}{
		// Badly marshaled RDS response.
		{
			name:          "badly-marshaled-response",
			rdsResponse:   badlyMarshaledRDSResponse,
			wantErr:       true,
			wantUpdate:    nil,
			wantUpdateErr: false,
		},
		// Response does not contain RouteConfiguration proto.
		{
			name:          "no-route-config-in-response",
			rdsResponse:   badResourceTypeInRDSResponse,
			wantErr:       true,
			wantUpdate:    nil,
			wantUpdateErr: false,
		},
		// No VirtualHosts in the response. Just one test case here for a bad
		// RouteConfiguration, since the others are covered in
		// TestGetClusterFromRouteConfiguration.
		{
			name:          "no-virtual-hosts-in-response",
			rdsResponse:   noVirtualHostsInRDSResponse,
			wantErr:       true,
			wantUpdate:    nil,
			wantUpdateErr: false,
		},
		// Response contains one good RouteConfiguration, uninteresting though.
		{
			name:          "one-uninteresting-route-config",
			rdsResponse:   goodRDSResponse2,
			wantErr:       false,
			wantUpdate:    nil,
			wantUpdateErr: false,
		},
		// Response contains one good interesting RouteConfiguration.
		{
			name:          "one-good-route-config",
			rdsResponse:   goodRDSResponse1,
			wantErr:       false,
			wantUpdate:    &rdsUpdate{routes: []*Route{{Prefix: newStringP(""), Action: map[string]uint32{goodClusterName1: 1}}}},
			wantUpdateErr: false,
		},
		{
			name:        "one-good-route-config with routes",
			rdsResponse: goodRDSResponse1,
			wantErr:     false,
			wantUpdate: &rdsUpdate{
				// Instead of just weighted targets when routing is disabled,
				// this result contains a route with prefix "", and action as
				// weighted targets.
				routes: []*Route{{
					Prefix: newStringP(""),
					Action: map[string]uint32{goodClusterName1: 1},
				}},
			},
			wantUpdateErr: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			testWatchHandle(t, &watchHandleTestcase{
				typeURL:          rdsURL,
				resourceName:     goodRouteName1,
				responseToHandle: test.rdsResponse,
				wantHandleErr:    test.wantErr,
				wantUpdate:       test.wantUpdate,
				wantUpdateErr:    test.wantUpdateErr,
			})
		})
	}
}
// TestRDSHandleResponseWithoutLDSWatch tests the case where the v2Client
// receives an RDS response without a registered LDS watcher.
func (s) TestRDSHandleResponseWithoutLDSWatch(t *testing.T) {
	_, cc, cleanup := startServerAndGetCC(t)
	defer cleanup()

	v2c := newV2Client(&testUpdateReceiver{
		f: func(string, map[string]interface{}) {},
	}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
	defer v2c.close()

	if v2c.handleRDSResponse(goodRDSResponse1) == nil {
		t.Fatal("v2c.handleRDSResponse() succeeded, should have failed")
	}
}
// TestRDSHandleResponseWithoutRDSWatch tests the case where the v2Client
// receives an RDS response without a registered RDS watcher.
func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) {
	fakeServer, cc, cleanup := startServerAndGetCC(t)
	defer cleanup()

	v2c := newV2Client(&testUpdateReceiver{
		f: func(string, map[string]interface{}) {},
	}, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)
	defer v2c.close()
	doLDS(t, v2c, fakeServer)

	if v2c.handleRDSResponse(badResourceTypeInRDSResponse) == nil {
		t.Fatal("v2c.handleRDSResponse() succeeded, should have failed")
	}

	if v2c.handleRDSResponse(goodRDSResponse1) != nil {
		t.Fatal("v2c.handleRDSResponse() failed, should have succeeded")
	}
}
func (s) TestMatchTypeForDomain(t *testing.T) {
	tests := []struct {
		d    string
		want domainMatchType
	}{
		{d: "", want: domainMatchTypeInvalid},
		{d: "*", want: domainMatchTypeUniversal},
		{d: "bar.*", want: domainMatchTypePrefix},
		{d: "*.abc.com", want: domainMatchTypeSuffix},
		{d: "foo.bar.com", want: domainMatchTypeExact},
		{d: "foo.*.com", want: domainMatchTypeInvalid},
	}
	for _, tt := range tests {
		if got := matchTypeForDomain(tt.d); got != tt.want {
			t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want)
		}
	}
}
func (s) TestMatch(t *testing.T) {
	tests := []struct {
		name        string
		domain      string
		host        string
		wantTyp     domainMatchType
		wantMatched bool
	}{
		{name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false},
		{name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false},
		{name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true},
		{name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true},
		{name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false},
		{name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true},
		{name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false},
		{name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: true},
		{name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if gotTyp, gotMatched := match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched {
				t.Errorf("match() = %v, %v, want %v, %v", gotTyp, gotMatched, tt.wantTyp, tt.wantMatched)
			}
		})
	}
}
func (s) TestFindBestMatchingVirtualHost(t *testing.T) {
	var (
		oneExactMatch = &routepb.VirtualHost{
			Name:    "one-exact-match",
			Domains: []string{"foo.bar.com"},
		}
		oneSuffixMatch = &routepb.VirtualHost{
			Name:    "one-suffix-match",
			Domains: []string{"*.bar.com"},
		}
		onePrefixMatch = &routepb.VirtualHost{
			Name:    "one-prefix-match",
			Domains: []string{"foo.bar.*"},
		}
		oneUniversalMatch = &routepb.VirtualHost{
			Name:    "one-universal-match",
			Domains: []string{"*"},
		}
		longExactMatch = &routepb.VirtualHost{
			Name:    "one-exact-match",
			Domains: []string{"v2.foo.bar.com"},
		}
		multipleMatch = &routepb.VirtualHost{
			Name:    "multiple-match",
			Domains: []string{"pi.foo.bar.com", "314.*", "*.159"},
		}
		vhs = []*routepb.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch}
	)

	tests := []struct {
		name   string
		host   string
		vHosts []*routepb.VirtualHost
		want   *routepb.VirtualHost
	}{
		{name: "exact-match", host: "foo.bar.com", vHosts: vhs, want: oneExactMatch},
		{name: "suffix-match", host: "123.bar.com", vHosts: vhs, want: oneSuffixMatch},
		{name: "prefix-match", host: "foo.bar.org", vHosts: vhs, want: onePrefixMatch},
		{name: "universal-match", host: "abc.123", vHosts: vhs, want: oneUniversalMatch},
		{name: "long-exact-match", host: "v2.foo.bar.com", vHosts: vhs, want: longExactMatch},
		// Matches suffix "*.bar.com" and exact "pi.foo.bar.com". Takes exact.
		{name: "multiple-match-exact", host: "pi.foo.bar.com", vHosts: vhs, want: multipleMatch},
		// Matches suffix "*.159" and prefix "foo.bar.*". Takes suffix.
		{name: "multiple-match-suffix", host: "foo.bar.159", vHosts: vhs, want: multipleMatch},
		// Matches suffix "*.bar.com" and prefix "314.*". Takes suffix.
		{name: "multiple-match-prefix", host: "314.bar.com", vHosts: vhs, want: oneSuffixMatch},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := findBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) {
				t.Errorf("findBestMatchingVirtualHost() = %v, want %v", got, tt.want)
			}
		})
	}
}
func (s) TestWeightedClustersProtoToMap(t *testing.T) {
	tests := []struct {
		name    string
		wc      *routepb.WeightedCluster
		want    map[string]uint32
		wantErr bool
	}{
		{
			name: "weights don't add up to non-default total",
			wc: &routepb.WeightedCluster{
				Clusters: []*routepb.WeightedCluster_ClusterWeight{
					{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 1}},
					{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 1}},
					{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 1}},
				},
				TotalWeight: &wrapperspb.UInt32Value{Value: 10},
			},
			wantErr: true,
		},
		{
			name: "weights don't add up to default total",
			wc: &routepb.WeightedCluster{
				Clusters: []*routepb.WeightedCluster_ClusterWeight{
					{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}},
					{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}},
					{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}},
				},
				TotalWeight: nil,
			},
			wantErr: true,
		},
		{
			name: "ok non-default total weight",
			wc: &routepb.WeightedCluster{
				Clusters: []*routepb.WeightedCluster_ClusterWeight{
					{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}},
					{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}},
					{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}},
				},
				TotalWeight: &wrapperspb.UInt32Value{Value: 10},
			},
			want: map[string]uint32{"a": 2, "b": 3, "c": 5},
		},
		{
			name: "ok default total weight is 100",
			wc: &routepb.WeightedCluster{
				Clusters: []*routepb.WeightedCluster_ClusterWeight{
					{Name: "a", Weight: &wrapperspb.UInt32Value{Value: 20}},
					{Name: "b", Weight: &wrapperspb.UInt32Value{Value: 30}},
					{Name: "c", Weight: &wrapperspb.UInt32Value{Value: 50}},
				},
				TotalWeight: nil,
			},
			want: map[string]uint32{"a": 20, "b": 30, "c": 50},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := weightedClustersProtoToMap(tt.wc)
			if (err != nil) != tt.wantErr {
				t.Errorf("weightedClustersProtoToMap() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !cmp.Equal(got, tt.want) {
				t.Errorf("weightedClustersProtoToMap() got = %v, want %v", got, tt.want)
			}
		})
	}
}
func TestRoutesProtoToSlice(t *testing.T) {
	tests := []struct {
		name       string
		routes     []*routepb.Route
		wantRoutes []*Route
		wantErr    bool
	}{
		{
			name: "no path",
			routes: []*routepb.Route{{
				Match: &routepb.RouteMatch{},
			}},
			wantErr: true,
		},
		{
			name: "path is regex instead of saferegex",
			routes: []*routepb.Route{{
				Match: &routepb.RouteMatch{
					PathSpecifier: &routepb.RouteMatch_Regex{Regex: "*"},
				},
			}},
			wantErr: true,
		},
		{
			name: "header contains regex",
			routes: []*routepb.Route{{
				Match: &routepb.RouteMatch{
					PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/"},
					Headers: []*routepb.HeaderMatcher{{
						Name: "th",
						HeaderMatchSpecifier: &routepb.HeaderMatcher_RegexMatch{
							RegexMatch: "*",
						},
					}},
				},
			}},
			wantErr: true,
		},
		{
			name: "case_sensitive is false",
			routes: []*routepb.Route{{
				Match: &routepb.RouteMatch{
					PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/"},
					CaseSensitive: &wrapperspb.BoolValue{Value: false},
				},
			}},
			wantErr: true,
		},
		{
			name: "good",
			routes: []*routepb.Route{
				{
					Match: &routepb.RouteMatch{
						PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/a/"},
						Headers: []*routepb.HeaderMatcher{
							{
								Name: "th",
								HeaderMatchSpecifier: &routepb.HeaderMatcher_PrefixMatch{
									PrefixMatch: "tv",
								},
								InvertMatch: true,
							},
						},
						RuntimeFraction: &corepb.RuntimeFractionalPercent{
							DefaultValue: &typepb.FractionalPercent{
								Numerator:   1,
								Denominator: typepb.FractionalPercent_HUNDRED,
							},
						},
					},
					Action: &routepb.Route_Route{
						Route: &routepb.RouteAction{
							ClusterSpecifier: &routepb.RouteAction_WeightedClusters{
								WeightedClusters: &routepb.WeightedCluster{
									Clusters: []*routepb.WeightedCluster_ClusterWeight{
										{Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}},
										{Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}},
									},
									TotalWeight: &wrapperspb.UInt32Value{Value: 100},
								}}}},
				},
			},
			wantRoutes: []*Route{{
				Prefix: newStringP("/a/"),
				Headers: []*HeaderMatcher{
					{
						Name:        "th",
						InvertMatch: newBoolP(true),
						PrefixMatch: newStringP("tv"),
					},
				},
				Fraction: newUInt32P(10000),
				Action:   map[string]uint32{"A": 40, "B": 60},
			}},
			wantErr: false,
		},
		{
			name: "query is ignored",
			routes: []*routepb.Route{
				{
					Match: &routepb.RouteMatch{
						PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/a/"},
					},
					Action: &routepb.Route_Route{
						Route: &routepb.RouteAction{
							ClusterSpecifier: &routepb.RouteAction_WeightedClusters{
								WeightedClusters: &routepb.WeightedCluster{
									Clusters: []*routepb.WeightedCluster_ClusterWeight{
										{Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}},
										{Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}},
									},
									TotalWeight: &wrapperspb.UInt32Value{Value: 100},
								}}}},
				},
				{
					Name: "with_query",
					Match: &routepb.RouteMatch{
						PathSpecifier:   &routepb.RouteMatch_Prefix{Prefix: "/b/"},
						QueryParameters: []*routepb.QueryParameterMatcher{{Name: "route_will_be_ignored"}},
					},
				},
			},
			// Only one route in the result, because the second one with query
			// parameters is ignored.
			wantRoutes: []*Route{{
				Prefix: newStringP("/a/"),
				Action: map[string]uint32{"A": 40, "B": 60},
			}},
			wantErr: false,
		},
	}

	cmpOpts := []cmp.Option{
		cmp.AllowUnexported(Route{}, HeaderMatcher{}, Int64Range{}),
		cmpopts.EquateEmpty(),
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := routesProtoToSlice(tt.routes, nil)
			if (err != nil) != tt.wantErr {
				t.Errorf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !cmp.Equal(got, tt.wantRoutes, cmpOpts...) {
				t.Errorf("routesProtoToSlice() got = %v, want %v, diff: %v", got, tt.wantRoutes, cmp.Diff(got, tt.wantRoutes, cmpOpts...))
			}
		})
	}
}
func newStringP(s string) *string {
	return &s
}

func newUInt32P(i uint32) *uint32 {
	return &i
}

func newBoolP(b bool) *bool {
	return &b
}
@ -27,7 +27,6 @@ import (
	"google.golang.org/grpc/serviceconfig"
	_ "google.golang.org/grpc/xds/internal/balancer/weightedtarget"
	_ "google.golang.org/grpc/xds/internal/balancer/xdsrouting"
	"google.golang.org/grpc/xds/internal/client"
	xdsclient "google.golang.org/grpc/xds/internal/client"
)
@ -312,13 +311,13 @@ func TestRoutesToJSON(t *testing.T) {
func TestServiceUpdateToJSON(t *testing.T) {
	tests := []struct {
		name     string
		su       client.ServiceUpdate
		su       xdsclient.ServiceUpdate
		wantJSON string
		wantErr  bool
	}{
		{
			name: "routing",
			su: client.ServiceUpdate{
			su: xdsclient.ServiceUpdate{
				Routes: []*xdsclient.Route{{
					Path:   newStringP("/service_1/method_1"),
					Action: map[string]uint32{"cluster_1": 75, "cluster_2": 25},
@ -319,11 +319,11 @@ func TestXDSResolverGoodServiceUpdate(t *testing.T) {
	defer replaceRandNumGenerator(0)()

	for _, tt := range []struct {
		su       client.ServiceUpdate
		su       xdsclient.ServiceUpdate
		wantJSON string
	}{
		{
			su: client.ServiceUpdate{Routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{testCluster1: 1}}}},
			su: xdsclient.ServiceUpdate{Routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{testCluster1: 1}}}},
			wantJSON: testOneClusterOnlyJSON,
		},
		{
@ -18,13 +18,25 @@
package testutils

import (
	"fmt"
	"net"
	"strconv"

	v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	v2endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	v2typepb "github.com/envoyproxy/go-control-plane/envoy/type"
	wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/grpc/xds/internal"
)

// EmptyNodeProtoV2 is a node proto with no fields set.
// EmptyNodeProtoV2 is a v2 Node proto with no fields set.
var EmptyNodeProtoV2 = &v2corepb.Node{}

// EmptyNodeProtoV3 is a v3 Node proto with no fields set.
var EmptyNodeProtoV3 = &v3corepb.Node{}

// LocalityIDToProto converts a LocalityID to its proto representation.
func LocalityIDToProto(l internal.LocalityID) *v2corepb.Locality {
	return &v2corepb.Locality{
@ -33,3 +45,101 @@ func LocalityIDToProto(l internal.LocalityID) *v2corepb.Locality {
		SubZone: l.SubZone,
	}
}

// The helper structs/functions related to EDS protos are used in EDS balancer
// tests now, to generate test inputs. Eventually, EDS balancer tests should
// generate EndpointsUpdate directly, instead of generating and parsing the
// proto message.
// TODO: Once EDS balancer tests don't use these, these can be moved to v2 client code.

// ClusterLoadAssignmentBuilder builds a ClusterLoadAssignment, aka EDS
// response.
type ClusterLoadAssignmentBuilder struct {
	v *v2xdspb.ClusterLoadAssignment
}

// NewClusterLoadAssignmentBuilder creates a ClusterLoadAssignmentBuilder.
func NewClusterLoadAssignmentBuilder(clusterName string, dropPercents []uint32) *ClusterLoadAssignmentBuilder {
	var drops []*v2xdspb.ClusterLoadAssignment_Policy_DropOverload
	for i, d := range dropPercents {
		drops = append(drops, &v2xdspb.ClusterLoadAssignment_Policy_DropOverload{
			Category: fmt.Sprintf("test-drop-%d", i),
			DropPercentage: &v2typepb.FractionalPercent{
				Numerator:   d,
				Denominator: v2typepb.FractionalPercent_HUNDRED,
			},
		})
	}

	return &ClusterLoadAssignmentBuilder{
		v: &v2xdspb.ClusterLoadAssignment{
			ClusterName: clusterName,
			Policy: &v2xdspb.ClusterLoadAssignment_Policy{
				DropOverloads: drops,
			},
		},
	}
}

// AddLocalityOptions contains options when adding locality to the builder.
type AddLocalityOptions struct {
	Health []v2corepb.HealthStatus
	Weight []uint32
}

// AddLocality adds a locality to the builder.
func (clab *ClusterLoadAssignmentBuilder) AddLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *AddLocalityOptions) {
	var lbEndPoints []*v2endpointpb.LbEndpoint
	for i, a := range addrsWithPort {
		host, portStr, err := net.SplitHostPort(a)
		if err != nil {
			panic("failed to split " + a)
		}
		port, err := strconv.Atoi(portStr)
		if err != nil {
			panic("failed to atoi " + portStr)
		}

		lbe := &v2endpointpb.LbEndpoint{
			HostIdentifier: &v2endpointpb.LbEndpoint_Endpoint{
				Endpoint: &v2endpointpb.Endpoint{
					Address: &v2corepb.Address{
						Address: &v2corepb.Address_SocketAddress{
							SocketAddress: &v2corepb.SocketAddress{
								Protocol: v2corepb.SocketAddress_TCP,
								Address:  host,
								PortSpecifier: &v2corepb.SocketAddress_PortValue{
									PortValue: uint32(port)}}}}}},
		}
		if opts != nil {
			if i < len(opts.Health) {
				lbe.HealthStatus = opts.Health[i]
			}
			if i < len(opts.Weight) {
				lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]}
			}
		}
		lbEndPoints = append(lbEndPoints, lbe)
	}

	var localityID *v2corepb.Locality
	if subzone != "" {
		localityID = &v2corepb.Locality{
			Region:  "",
			Zone:    "",
			SubZone: subzone,
		}
	}

	clab.v.Endpoints = append(clab.v.Endpoints, &v2endpointpb.LocalityLbEndpoints{
		Locality:            localityID,
		LbEndpoints:         lbEndPoints,
		LoadBalancingWeight: &wrapperspb.UInt32Value{Value: weight},
		Priority:            priority,
	})
}

// Build builds ClusterLoadAssignment.
func (clab *ClusterLoadAssignmentBuilder) Build() *v2xdspb.ClusterLoadAssignment {
	return clab.v
}
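// Usage sketch: building a two-priority assignment for a test; the cluster
// name and addresses below are placeholders, not values used by the real
// tests:
//
//	clab := NewClusterLoadAssignmentBuilder("test-cluster", nil)
//	clab.AddLocality("sub-zone-0", 1, 0, []string{"1.2.3.4:8080"}, nil)
//	clab.AddLocality("sub-zone-1", 1, 1, []string{"5.6.7.8:8080"}, nil)
//	cla := clab.Build() // *v2xdspb.ClusterLoadAssignment with two localities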
@ -16,7 +16,8 @@
 *
 */

// Package version defines supported xDS API versions.
// Package version defines constants to distinguish between supported xDS API
// versions.
package version

// TransportAPI refers to the API version for xDS transport protocol. This
@ -30,3 +31,19 @@ const (
	// TransportV3 refers to the v3 xDS transport protocol.
	TransportV3
)

// Resource URLs. We need to be able to accept either version of the resource
// regardless of the version of the transport protocol in use.
const (
	V2ListenerURL        = "type.googleapis.com/envoy.api.v2.Listener"
	V2RouteConfigURL     = "type.googleapis.com/envoy.api.v2.RouteConfiguration"
	V2ClusterURL         = "type.googleapis.com/envoy.api.v2.Cluster"
	V2EndpointsURL       = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"
	V2HTTPConnManagerURL = "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"

	V3ListenerURL        = "type.googleapis.com/envoy.config.listener.v3.Listener"
	V3RouteConfigURL     = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration"
	V3ClusterURL         = "type.googleapis.com/envoy.config.cluster.v3.Cluster"
	V3EndpointsURL       = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"
	V3HTTPConnManagerURL = "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
)
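// A consumer is expected to pick the URL matching its transport version; a
// minimal sketch (the helper name is hypothetical, not part of this package):
//
//	func listenerURL(v TransportAPI) string {
//		if v == TransportV3 {
//			return V3ListenerURL
//		}
//		return V2ListenerURL
//	}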
@ -24,6 +24,7 @@
package xds

import (
	_ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers.
	_ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver
	_ "google.golang.org/grpc/xds/internal/balancer"  // Register the balancers.
	_ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 xDS API client.
	_ "google.golang.org/grpc/xds/internal/resolver"  // Register the xds_resolver.
)