balancer: move Balancer and Picker to V2; delete legacy API (#3431)
Parent: b0ac601168
Commit: 4eb418e5b2

balancer.go: 391 lines deleted
balancer.go (deleted):

@@ -1,391 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"net"
-	"sync"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/naming"
-	"google.golang.org/grpc/status"
-)
-
-// Address represents a server the client connects to.
-//
-// Deprecated: please use package balancer.
-type Address struct {
-	// Addr is the server address on which a connection will be established.
-	Addr string
-	// Metadata is the information associated with Addr, which may be used
-	// to make load balancing decision.
-	Metadata interface{}
-}
-
-// BalancerConfig specifies the configurations for Balancer.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type BalancerConfig struct {
-	// DialCreds is the transport credential the Balancer implementation can
-	// use to dial to a remote load balancer server. The Balancer implementations
-	// can ignore this if it does not need to talk to another party securely.
-	DialCreds credentials.TransportCredentials
-	// Dialer is the custom dialer the Balancer implementation can use to dial
-	// to a remote load balancer server. The Balancer implementations
-	// can ignore this if it doesn't need to talk to remote balancer.
-	Dialer func(context.Context, string) (net.Conn, error)
-}
-
-// BalancerGetOptions configures a Get call.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type BalancerGetOptions struct {
-	// BlockingWait specifies whether Get should block when there is no
-	// connected address.
-	BlockingWait bool
-}
-
-// Balancer chooses network addresses for RPCs.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type Balancer interface {
-	// Start does the initialization work to bootstrap a Balancer. For example,
-	// this function may start the name resolution and watch the updates. It will
-	// be called when dialing.
-	Start(target string, config BalancerConfig) error
-	// Up informs the Balancer that gRPC has a connection to the server at
-	// addr. It returns down which is called once the connection to addr gets
-	// lost or closed.
-	// TODO: It is not clear how to construct and take advantage of the meaningful error
-	// parameter for down. Need realistic demands to guide.
-	Up(addr Address) (down func(error))
-	// Get gets the address of a server for the RPC corresponding to ctx.
-	// i) If it returns a connected address, gRPC internals issues the RPC on the
-	// connection to this address;
-	// ii) If it returns an address on which the connection is under construction
-	// (initiated by Notify(...)) but not connected, gRPC internals
-	//  * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
-	//  Shutdown state;
-	//  or
-	//  * issues RPC on the connection otherwise.
-	// iii) If it returns an address on which the connection does not exist, gRPC
-	// internals treats it as an error and will fail the corresponding RPC.
-	//
-	// Therefore, the following is the recommended rule when writing a custom Balancer.
-	// If opts.BlockingWait is true, it should return a connected address or
-	// block if there is no connected address. It should respect the timeout or
-	// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
-	// RPCs), it should return an address it has notified via Notify(...) immediately
-	// instead of blocking.
-	//
-	// The function returns put which is called once the rpc has completed or failed.
-	// put can collect and report RPC stats to a remote load balancer.
-	//
-	// This function should only return the errors Balancer cannot recover by itself.
-	// gRPC internals will fail the RPC if an error is returned.
-	Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
-	// Notify returns a channel that is used by gRPC internals to watch the addresses
-	// gRPC needs to connect. The addresses might be from a name resolver or remote
-	// load balancer. gRPC internals will compare it with the existing connected
-	// addresses. If the address Balancer notified is not in the existing connected
-	// addresses, gRPC starts to connect the address. If an address in the existing
-	// connected addresses is not in the notification list, the corresponding connection
-	// is shutdown gracefully. Otherwise, there are no operations to take. Note that
-	// the Address slice must be the full list of the Addresses which should be connected.
-	// It is NOT delta.
-	Notify() <-chan []Address
-	// Close shuts down the balancer.
-	Close() error
-}
-
-// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
-// the name resolution updates and updates the addresses available correspondingly.
-//
-// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
-func RoundRobin(r naming.Resolver) Balancer {
-	return &roundRobin{r: r}
-}
-
-type addrInfo struct {
-	addr      Address
-	connected bool
-}
-
-type roundRobin struct {
-	r      naming.Resolver
-	w      naming.Watcher
-	addrs  []*addrInfo // all the addresses the client should potentially connect
-	mu     sync.Mutex
-	addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
-	next   int            // index of the next address to return for Get()
-	waitCh chan struct{}  // the channel to block when there is no connected address available
-	done   bool           // The Balancer is closed.
-}
-
-func (rr *roundRobin) watchAddrUpdates() error {
-	updates, err := rr.w.Next()
-	if err != nil {
-		grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
-		return err
-	}
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	for _, update := range updates {
-		addr := Address{
-			Addr:     update.Addr,
-			Metadata: update.Metadata,
-		}
-		switch update.Op {
-		case naming.Add:
-			var exist bool
-			for _, v := range rr.addrs {
-				if addr == v.addr {
-					exist = true
-					grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
-					break
-				}
-			}
-			if exist {
-				continue
-			}
-			rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
-		case naming.Delete:
-			for i, v := range rr.addrs {
-				if addr == v.addr {
-					copy(rr.addrs[i:], rr.addrs[i+1:])
-					rr.addrs = rr.addrs[:len(rr.addrs)-1]
-					break
-				}
-			}
-		default:
-			grpclog.Errorln("Unknown update.Op ", update.Op)
-		}
-	}
-	// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
-	open := make([]Address, len(rr.addrs))
-	for i, v := range rr.addrs {
-		open[i] = v.addr
-	}
-	if rr.done {
-		return ErrClientConnClosing
-	}
-	select {
-	case <-rr.addrCh:
-	default:
-	}
-	rr.addrCh <- open
-	return nil
-}
-
-func (rr *roundRobin) Start(target string, config BalancerConfig) error {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	if rr.done {
-		return ErrClientConnClosing
-	}
-	if rr.r == nil {
-		// If there is no name resolver installed, it is not needed to
-		// do name resolution. In this case, target is added into rr.addrs
-		// as the only address available and rr.addrCh stays nil.
-		rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
-		return nil
-	}
-	w, err := rr.r.Resolve(target)
-	if err != nil {
-		return err
-	}
-	rr.w = w
-	rr.addrCh = make(chan []Address, 1)
-	go func() {
-		for {
-			if err := rr.watchAddrUpdates(); err != nil {
-				return
-			}
-		}
-	}()
-	return nil
-}
-
-// Up sets the connected state of addr and sends notification if there are pending
-// Get() calls.
-func (rr *roundRobin) Up(addr Address) func(error) {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	var cnt int
-	for _, a := range rr.addrs {
-		if a.addr == addr {
-			if a.connected {
-				return nil
-			}
-			a.connected = true
-		}
-		if a.connected {
-			cnt++
-		}
-	}
-	// addr is only one which is connected. Notify the Get() callers who are blocking.
-	if cnt == 1 && rr.waitCh != nil {
-		close(rr.waitCh)
-		rr.waitCh = nil
-	}
-	return func(err error) {
-		rr.down(addr, err)
-	}
-}
-
-// down unsets the connected state of addr.
-func (rr *roundRobin) down(addr Address, err error) {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	for _, a := range rr.addrs {
-		if addr == a.addr {
-			a.connected = false
-			break
-		}
-	}
-}
-
-// Get returns the next addr in the rotation.
-func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
-	var ch chan struct{}
-	rr.mu.Lock()
-	if rr.done {
-		rr.mu.Unlock()
-		err = ErrClientConnClosing
-		return
-	}
-
-	if len(rr.addrs) > 0 {
-		if rr.next >= len(rr.addrs) {
-			rr.next = 0
-		}
-		next := rr.next
-		for {
-			a := rr.addrs[next]
-			next = (next + 1) % len(rr.addrs)
-			if a.connected {
-				addr = a.addr
-				rr.next = next
-				rr.mu.Unlock()
-				return
-			}
-			if next == rr.next {
-				// Has iterated all the possible address but none is connected.
-				break
-			}
-		}
-	}
-	if !opts.BlockingWait {
-		if len(rr.addrs) == 0 {
-			rr.mu.Unlock()
-			err = status.Errorf(codes.Unavailable, "there is no address available")
-			return
-		}
-		// Returns the next addr on rr.addrs for failfast RPCs.
-		addr = rr.addrs[rr.next].addr
-		rr.next++
-		rr.mu.Unlock()
-		return
-	}
-	// Wait on rr.waitCh for non-failfast RPCs.
-	if rr.waitCh == nil {
-		ch = make(chan struct{})
-		rr.waitCh = ch
-	} else {
-		ch = rr.waitCh
-	}
-	rr.mu.Unlock()
-	for {
-		select {
-		case <-ctx.Done():
-			err = ctx.Err()
-			return
-		case <-ch:
-			rr.mu.Lock()
-			if rr.done {
-				rr.mu.Unlock()
-				err = ErrClientConnClosing
-				return
-			}
-
-			if len(rr.addrs) > 0 {
-				if rr.next >= len(rr.addrs) {
-					rr.next = 0
-				}
-				next := rr.next
-				for {
-					a := rr.addrs[next]
-					next = (next + 1) % len(rr.addrs)
-					if a.connected {
-						addr = a.addr
-						rr.next = next
-						rr.mu.Unlock()
-						return
-					}
-					if next == rr.next {
-						// Has iterated all the possible address but none is connected.
-						break
-					}
-				}
-			}
-			// The newly added addr got removed by Down() again.
-			if rr.waitCh == nil {
-				ch = make(chan struct{})
-				rr.waitCh = ch
-			} else {
-				ch = rr.waitCh
-			}
-			rr.mu.Unlock()
-		}
-	}
-}
-
-func (rr *roundRobin) Notify() <-chan []Address {
-	return rr.addrCh
-}
-
-func (rr *roundRobin) Close() error {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	if rr.done {
-		return errBalancerClosed
-	}
-	rr.done = true
-	if rr.w != nil {
-		rr.w.Close()
-	}
-	if rr.waitCh != nil {
-		close(rr.waitCh)
-		rr.waitCh = nil
-	}
-	if rr.addrCh != nil {
-		close(rr.addrCh)
-	}
-	return nil
-}
-
-// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
-// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
-// returns the only address Up by resetTransport().
-type pickFirst struct {
-	*roundRobin
-}
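For context, the v1 API deleted above was consumed through dial options. A minimal sketch of typical pre-removal usage, assuming some implementation of the (likewise deprecated) naming.Resolver interface; this only compiles against releases that predate the v1 API removal:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

// dialLegacy shows the shape of the deleted API: grpc.RoundRobin wraps the
// resolver in a v1 Balancer, and grpc.WithBalancer installs it on the
// ClientConn. Both are part of the legacy surface being removed.
func dialLegacy(res naming.Resolver) (*grpc.ClientConn, error) {
	return grpc.Dial("example.com:443",
		grpc.WithInsecure(),
		grpc.WithBalancer(grpc.RoundRobin(res)),
	)
}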
@@ -126,7 +126,7 @@ type State struct {
 	// determine the state of the ClientConn.
 	ConnectivityState connectivity.State
 	// Picker is used to choose connections (SubConns) for RPCs.
-	Picker V2Picker
+	Picker Picker
 }

 // ClientConn represents a gRPC ClientConn.
@@ -144,20 +144,11 @@ type ClientConn interface {
 	// The SubConn will be shutdown.
 	RemoveSubConn(SubConn)

-	// UpdateBalancerState is called by balancer to notify gRPC that some internal
-	// state in balancer has changed.
-	//
-	// gRPC will update the connectivity state of the ClientConn, and will call pick
-	// on the new picker to pick new SubConn.
-	//
-	// Deprecated: use UpdateState instead
-	UpdateBalancerState(s connectivity.State, p Picker)
-
 	// UpdateState notifies gRPC that the balancer's internal state has
 	// changed.
 	//
-	// gRPC will update the connectivity state of the ClientConn, and will call pick
-	// on the new picker to pick new SubConns.
+	// gRPC will update the connectivity state of the ClientConn, and will call
+	// Pick on the new Picker to pick new SubConns.
 	UpdateState(State)

 	// ResolveNow is called by balancer to notify gRPC to do a name resolving.
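With UpdateBalancerState gone, connectivity state and picker now travel together in a single UpdateState call. A sketch of how a custom balancer publishes both, assuming the google.golang.org/grpc/balancer import; myBalancer and its fields are illustrative:

// updateCC is a hypothetical helper on a custom balancer that publishes the
// aggregated connectivity state and the freshly built picker in one call.
func (b *myBalancer) updateCC() {
	b.cc.UpdateState(balancer.State{
		ConnectivityState: b.state,  // connectivity.State aggregated from SubConns
		Picker:            b.picker, // balancer.Picker built from ready SubConns
	})
}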
@@ -235,56 +226,17 @@ type DoneInfo struct {

 var (
 	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
-	// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+	// gRPC will block the RPC until a new picker is available via UpdateState().
 	ErrNoSubConnAvailable = errors.New("no SubConn is available")
 	// ErrTransientFailure indicates all SubConns are in TransientFailure.
 	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
-	ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
+	//
+	// Deprecated: return an appropriate error based on the last resolution or
+	// connection attempt instead. The behavior is the same for any non-gRPC
+	// status error.
+	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
 )

-// Picker is used by gRPC to pick a SubConn to send an RPC.
-// Balancer is expected to generate a new picker from its snapshot every time its
-// internal state has changed.
-//
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
-//
-// Deprecated: use V2Picker instead
-type Picker interface {
-	// Pick returns the SubConn to be used to send the RPC.
-	// The returned SubConn must be one returned by NewSubConn().
-	//
-	// This functions is expected to return:
-	// - a SubConn that is known to be READY;
-	// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
-	//   made (for example, some SubConn is in CONNECTING mode);
-	// - other errors if no active connecting is happening (for example, all SubConn
-	//   are in TRANSIENT_FAILURE mode).
-	//
-	// If a SubConn is returned:
-	// - If it is READY, gRPC will send the RPC on it;
-	// - If it is not ready, or becomes not ready after it's returned, gRPC will
-	//   block until UpdateBalancerState() is called and will call pick on the
-	//   new picker. The done function returned from Pick(), if not nil, will be
-	//   called with nil error, no bytes sent and no bytes received.
-	//
-	// If the returned error is not nil:
-	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
-	// - If the error is ErrTransientFailure or implements IsTransientFailure()
-	//   bool, returning true:
-	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
-	//     is called to pick again;
-	//   - Otherwise, RPC will fail with unavailable error.
-	// - Else (error is other non-nil error):
-	//   - The RPC will fail with the error's status code, or Unknown if it is
-	//     not a status error.
-	//
-	// The returned done() function will be called once the rpc has finished,
-	// with the final status of that RPC. If the SubConn returned is not a
-	// valid SubConn type, done may not be called. done may be nil if balancer
-	// doesn't care about the RPC status.
-	Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
-}
-
 // PickResult contains information related to a connection chosen for an RPC.
 type PickResult struct {
 	// SubConn is the connection to use for this pick, if its state is Ready.
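Per the new deprecation note on ErrTransientFailure, a picker in transient failure should surface the concrete error rather than the sentinel. A sketch, assuming the balancer, codes, and status packages are imported; the failingPicker type and its lastErr field are illustrative:

type failingPicker struct {
	lastErr error // last connection or resolution error observed by the balancer
}

func (p *failingPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	// Returning a status error lets gRPC fail non-wait-for-ready RPCs with
	// this exact code and message instead of a generic sentinel.
	return balancer.PickResult{}, status.Error(codes.Unavailable, p.lastErr.Error())
}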
@@ -300,24 +252,19 @@ type PickResult struct {
 	Done func(DoneInfo)
 }

-type transientFailureError struct {
-	error
-}
-
-func (e *transientFailureError) IsTransientFailure() bool { return true }
-
-// TransientFailureError wraps err in an error implementing
-// IsTransientFailure() bool, returning true.
-func TransientFailureError(err error) error {
-	return &transientFailureError{error: err}
-}
+// TransientFailureError returns e. It exists for backward compatibility and
+// will be deleted soon.
+//
+// Deprecated: no longer necessary, picker errors are treated this way by
+// default.
+func TransientFailureError(e error) error { return e }

-// V2Picker is used by gRPC to pick a SubConn to send an RPC.
+// Picker is used by gRPC to pick a SubConn to send an RPC.
 // Balancer is expected to generate a new picker from its snapshot every time its
 // internal state has changed.
 //
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
-type V2Picker interface {
+// The pickers used by gRPC can be updated by ClientConn.UpdateState().
+type Picker interface {
 	// Pick returns the connection to use for this RPC and related information.
 	//
 	// Pick should not block. If the balancer needs to do I/O or any blocking
@@ -330,14 +277,13 @@ type V2Picker interface {
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
 	//   Picker is provided by the balancer (using ClientConn.UpdateState).
 	//
-	// - If the error implements IsTransientFailure() bool, returning true,
-	//   wait for ready RPCs will wait, but non-wait for ready RPCs will be
-	//   terminated with this error's Error() string and status code
-	//   Unavailable.
+	// - If the error is a status error (implemented by the grpc/status
+	//   package), gRPC will terminate the RPC with the code and message
+	//   provided.
 	//
-	// - Any other errors terminate all RPCs with the code and message
-	//   provided. If the error is not a status error, it will be converted by
-	//   gRPC to a status error with code Unknown.
+	// - For all other errors, wait for ready RPCs will wait, but non-wait for
+	//   ready RPCs will be terminated with this error's Error() string and
+	//   status code Unavailable.
 	Pick(info PickInfo) (PickResult, error)
 }

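A minimal picker under the consolidated Picker interface, assuming the balancer hands it the current ready SubConns when it is built; the rrPicker name and fields are illustrative. Note that Pick never blocks, as the contract above requires:

type rrPicker struct {
	mu       sync.Mutex
	subConns []balancer.SubConn // ready SubConns captured when the picker was built
	next     int
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.subConns) == 0 {
		// Blocks wait-for-ready RPCs until the balancer provides a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	sc := p.subConns[p.next]
	p.next = (p.next + 1) % len(p.subConns)
	return balancer.PickResult{SubConn: sc}, nil
}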
@@ -346,34 +292,36 @@ type V2Picker interface {
 //
 // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
 //
-// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
-// to be called synchronously from the same goroutine.
-// There's no guarantee on picker.Pick, it may be called anytime.
+// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
+// guaranteed to be called synchronously from the same goroutine. There's no
+// guarantee on picker.Pick, it may be called anytime.
 type Balancer interface {
-	// HandleSubConnStateChange is called by gRPC when the connectivity state
-	// of sc has changed.
-	// Balancer is expected to aggregate all the state of SubConn and report
-	// that back to gRPC.
-	// Balancer should also generate and update Pickers when its internal state has
-	// been changed by the new state.
-	//
-	// Deprecated: if V2Balancer is implemented by the Balancer,
-	// UpdateSubConnState will be called instead.
-	HandleSubConnStateChange(sc SubConn, state connectivity.State)
-	// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
-	// balancers.
-	// Balancer can create new SubConn or remove SubConn with the addresses.
-	// An empty address slice and a non-nil error will be passed if the resolver returns
-	// non-nil error to gRPC.
-	//
-	// Deprecated: if V2Balancer is implemented by the Balancer,
-	// UpdateClientConnState will be called instead.
-	HandleResolvedAddrs([]resolver.Address, error)
+	// UpdateClientConnState is called by gRPC when the state of the ClientConn
+	// changes. If the error returned is ErrBadResolverState, the ClientConn
+	// will begin calling ResolveNow on the active name resolver with
+	// exponential backoff until a subsequent call to UpdateClientConnState
+	// returns a nil error. Any other errors are currently ignored.
+	UpdateClientConnState(ClientConnState) error
+	// ResolverError is called by gRPC when the name resolver reports an error.
+	ResolverError(error)
+	// UpdateSubConnState is called by gRPC when the state of a SubConn
+	// changes.
+	UpdateSubConnState(SubConn, SubConnState)
 	// Close closes the balancer. The balancer is not required to call
 	// ClientConn.RemoveSubConn for its existing SubConns.
 	Close()
 }

+// V2Balancer is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use Balancer directly instead.
+type V2Balancer = Balancer
+
+// V2Picker is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use Picker directly instead.
+type V2Picker = Picker
+
 // SubConnState describes the state of a SubConn.
 type SubConnState struct {
 	// ConnectivityState is the connectivity state of the SubConn.
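Because V2Balancer and V2Picker are now plain type aliases, code written against the V2 names keeps compiling unchanged; for example, both assertions below name the same interface:

var (
	_ balancer.Balancer = balancer.V2Balancer(nil) // alias: identical types
	_ balancer.Picker   = balancer.V2Picker(nil)
)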
@@ -396,27 +344,6 @@ type ClientConnState struct {
 // problem with the provided name resolver data.
 var ErrBadResolverState = errors.New("bad resolver state")

-// V2Balancer is defined for documentation purposes. If a Balancer also
-// implements V2Balancer, its UpdateClientConnState method will be called
-// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
-// instead of HandleSubConnStateChange.
-type V2Balancer interface {
-	// UpdateClientConnState is called by gRPC when the state of the ClientConn
-	// changes. If the error returned is ErrBadResolverState, the ClientConn
-	// will begin calling ResolveNow on the active name resolver with
-	// exponential backoff until a subsequent call to UpdateClientConnState
-	// returns a nil error. Any other errors are currently ignored.
-	UpdateClientConnState(ClientConnState) error
-	// ResolverError is called by gRPC when the name resolver reports an error.
-	ResolverError(error)
-	// UpdateSubConnState is called by gRPC when the state of a SubConn
-	// changes.
-	UpdateSubConnState(SubConn, SubConnState)
-	// Close closes the balancer. The balancer is not required to call
-	// ClientConn.RemoveSubConn for its existing SubConns.
-	Close()
-}
-
 // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
 // and returns one aggregated connectivity state.
 //
@@ -19,7 +19,6 @@
 package base

 import (
-	"context"
 	"errors"
 	"fmt"

@@ -30,17 +29,15 @@ import (
 )

 type baseBuilder struct {
-	name            string
-	pickerBuilder   PickerBuilder
-	v2PickerBuilder V2PickerBuilder
-	config          Config
+	name          string
+	pickerBuilder PickerBuilder
+	config        Config
 }

 func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
 	bal := &baseBalancer{
-		cc:              cc,
-		pickerBuilder:   bb.pickerBuilder,
-		v2PickerBuilder: bb.v2PickerBuilder,
+		cc:            cc,
+		pickerBuilder: bb.pickerBuilder,

 		subConns: make(map[resolver.Address]balancer.SubConn),
 		scStates: make(map[balancer.SubConn]connectivity.State),
@@ -50,11 +47,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
 	// Initialize picker to a picker that always returns
 	// ErrNoSubConnAvailable, because when state of a SubConn changes, we
 	// may call UpdateState with this picker.
-	if bb.pickerBuilder != nil {
-		bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
-	} else {
-		bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
-	}
+	bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
 	return bal
 }

@@ -62,12 +55,9 @@ func (bb *baseBuilder) Name() string {
 	return bb.name
 }

-var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
-
 type baseBalancer struct {
-	cc              balancer.ClientConn
-	pickerBuilder   PickerBuilder
-	v2PickerBuilder V2PickerBuilder
+	cc            balancer.ClientConn
+	pickerBuilder PickerBuilder

 	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
@@ -75,40 +65,31 @@ type baseBalancer struct {
 	subConns map[resolver.Address]balancer.SubConn
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
-	v2Picker balancer.V2Picker
 	config   Config

 	resolverErr error // the last error reported by the resolver; cleared on successful resolution
 	connErr     error // the last connection error; cleared upon leaving TransientFailure
 }

-func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	panic("not implemented")
-}
-
 func (b *baseBalancer) ResolverError(err error) {
 	b.resolverErr = err
 	if len(b.subConns) == 0 {
 		b.state = connectivity.TransientFailure
 	}

 	if b.state != connectivity.TransientFailure {
 		// The picker will not change since the balancer does not currently
 		// report an error.
 		return
 	}
 	b.regeneratePicker()
-	if b.picker != nil {
-		b.cc.UpdateBalancerState(b.state, b.picker)
-	} else {
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: b.state,
-			Picker:            b.v2Picker,
-		})
-	}
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.state,
+		Picker:            b.picker,
+	})
 }

 func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
-	// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
 	// TODO: handle s.ResolverState.ServiceConfig?
 	if grpclog.V(2) {
 		grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
@@ -137,7 +118,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
 			b.cc.RemoveSubConn(sc)
 			delete(b.subConns, a)
 			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
+			// The entry will be deleted in UpdateSubConnState.
 		}
 	}
 	// If resolver state contains no addresses, return an error so ClientConn
@@ -171,38 +152,18 @@ func (b *baseBalancer) mergeErrors() error {
 // - built by the pickerBuilder with all READY SubConns otherwise.
 func (b *baseBalancer) regeneratePicker() {
 	if b.state == connectivity.TransientFailure {
-		if b.pickerBuilder != nil {
-			b.picker = NewErrPicker(balancer.ErrTransientFailure)
-		} else {
-			b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
-		}
+		b.picker = NewErrPicker(b.mergeErrors())
 		return
 	}
-	if b.pickerBuilder != nil {
-		readySCs := make(map[resolver.Address]balancer.SubConn)
-
-		// Filter out all ready SCs from full subConn map.
-		for addr, sc := range b.subConns {
-			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-				readySCs[addr] = sc
-			}
-		}
-		b.picker = b.pickerBuilder.Build(readySCs)
-	} else {
-		readySCs := make(map[balancer.SubConn]SubConnInfo)
-
-		// Filter out all ready SCs from full subConn map.
-		for addr, sc := range b.subConns {
-			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-				readySCs[sc] = SubConnInfo{Address: addr}
-			}
-		}
-		b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
-	}
+	readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+	// Filter out all ready SCs from full subConn map.
+	for addr, sc := range b.subConns {
+		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+			readySCs[sc] = SubConnInfo{Address: addr}
+		}
+	}
+	b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
 }

-func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	panic("not implemented")
-}
-
 func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
@@ -247,11 +208,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
 		b.regeneratePicker()
 	}

-	if b.picker != nil {
-		b.cc.UpdateBalancerState(b.state, b.picker)
-	} else {
-		b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
-	}
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
 }

 // Close is a nop because base balancer doesn't have internal state to clean up,
@@ -259,28 +216,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
 func (b *baseBalancer) Close() {
 }

-// NewErrPicker returns a picker that always returns err on Pick().
+// NewErrPicker returns a Picker that always returns err on Pick().
 func NewErrPicker(err error) balancer.Picker {
 	return &errPicker{err: err}
 }

+// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use NewErrPicker instead.
+var NewErrPickerV2 = NewErrPicker
+
 type errPicker struct {
 	err error // Pick() always returns this err.
 }

-func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, p.err
-}
-
-// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
-func NewErrPickerV2(err error) balancer.V2Picker {
-	return &errPickerV2{err: err}
-}
-
-type errPickerV2 struct {
-	err error // Pick() always returns this err.
-}
-
-func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 	return balancer.PickResult{}, p.err
 }
@@ -37,15 +37,8 @@ import (

 // PickerBuilder creates balancer.Picker.
 type PickerBuilder interface {
-	// Build takes a slice of ready SubConns, and returns a picker that will be
-	// used by gRPC to pick a SubConn.
-	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
-}
-
-// V2PickerBuilder creates balancer.V2Picker.
-type V2PickerBuilder interface {
 	// Build returns a picker that will be used by gRPC to pick a SubConn.
-	Build(info PickerBuildInfo) balancer.V2Picker
+	Build(info PickerBuildInfo) balancer.Picker
 }

 // PickerBuildInfo contains information needed by the picker builder to
@@ -62,20 +55,14 @@ type SubConnInfo struct {
 	Address resolver.Address // the address used to create this SubConn
 }

-// NewBalancerBuilder returns a balancer builder. The balancers
-// built by this builder will use the picker builder to build pickers.
-func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
-	return NewBalancerBuilderWithConfig(name, pb, Config{})
-}
-
 // Config contains the config info about the base balancer builder.
 type Config struct {
 	// HealthCheck indicates whether health checking should be enabled for this specific balancer.
 	HealthCheck bool
 }

-// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
 	return &baseBuilder{
 		name:          name,
 		pickerBuilder: pb,
@@ -83,11 +70,13 @@ func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config)
 	}
 }

-// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
-	return &baseBuilder{
-		name:            name,
-		v2PickerBuilder: pb,
-		config:          config,
-	}
-}
+// NewBalancerBuilderV2 is temporarily defined for backward compatibility
+// reasons.
+//
+// Deprecated: use NewBalancerBuilder instead.
+var NewBalancerBuilderV2 = NewBalancerBuilder
+
+// V2PickerBuilder is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use PickerBuilder instead.
+type V2PickerBuilder = PickerBuilder
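Putting the renamed constructor together: a sketch of registering a custom policy through the base package under the consolidated API. The package, policy name, and builder type are all illustrative; the picker here is deliberately trivial:

package mybalancer

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// firstPicker always returns the single ready SubConn it was built with.
type firstPicker struct {
	sc balancer.SubConn
}

func (p *firstPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

type myPickerBuilder struct{}

func (*myPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	for sc := range info.ReadySCs { // keys are SubConns, values are base.SubConnInfo
		return &firstPicker{sc: sc}
	}
	return nil // unreachable: ReadySCs is non-empty
}

func init() {
	// NewBalancerBuilder now takes the Config directly; the old
	// NewBalancerBuilderWithConfig spelling is gone.
	balancer.Register(base.NewBalancerBuilder(
		"my_first_ready", &myPickerBuilder{}, base.Config{HealthCheck: true},
	))
}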
@@ -159,8 +159,6 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
 	return lb
 }

-var _ balancer.V2Balancer = (*lbBalancer)(nil) // Assert that we implement V2Balancer
-
 type lbBalancer struct {
 	cc     *lbCacheClientConn
 	target string
@@ -210,7 +208,7 @@ type lbBalancer struct {
 	state    connectivity.State
 	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
 	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
-	picker   balancer.V2Picker
+	picker   balancer.Picker
 	// Support fallback to resolved backend addresses if there's no response
 	// from remote balancer within fallbackTimeout.
 	remoteBalancerConnected bool
@@ -308,10 +306,6 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State {
 	return connectivity.TransientFailure
 }

-func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	panic("not used")
-}
-
 func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) {
 	s := scs.ConnectivityState
 	if grpclog.V(2) {
@@ -389,13 +383,6 @@ func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
 	lb.mu.Unlock()
 }

-// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
-// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
-// connections.
-func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	panic("not used")
-}
-
 func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) {
 	lb.mu.Lock()
 	defer lb.mu.Unlock()
@@ -192,7 +192,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback
 			lb.cc.RemoveSubConn(sc)
 			delete(lb.subConns, a)
 			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
+			// The entry will be deleted in UpdateSubConnState.
 		}
 	}

@@ -85,10 +85,10 @@ type Entry struct {
 	// X-Google-RLS-Data header for matching RPCs.
 	HeaderData string
 	// ChildPicker is a very thin wrapper around the child policy wrapper.
-	// The type is declared as a V2Picker interface since the users of
+	// The type is declared as a Picker interface since the users of
 	// the cache only care about the picker provided by the child policy, and
 	// this makes it easy for testing.
-	ChildPicker balancer.V2Picker
+	ChildPicker balancer.Picker

 	// size stores the size of this cache entry. Uses only a subset of the
 	// fields. See `entrySize` for this is computed.
@@ -31,10 +31,7 @@ import (
 	"google.golang.org/grpc/metadata"
 )

-var errRLSThrottled = balancer.TransientFailureError(errors.New("RLS call throttled at client side"))
-
-// Compile time assert to ensure we implement V2Picker.
-var _ balancer.V2Picker = (*rlsPicker)(nil)
+var errRLSThrottled = errors.New("RLS call throttled at client side")

 // RLS rlsPicker selects the subConn to be used for a particular RPC. It does
 // not manage subConns directly and usually deletegates to pickers provided by
@@ -35,7 +35,7 @@ const Name = "round_robin"

 // newBuilder creates a new roundrobin balancer builder.
 func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
 }

 func init() {
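Client code opts into a registered policy by name. One common route is the default service config, as in this sketch (the target and dial options are placeholders):

package main

import "google.golang.org/grpc"

func dialRoundRobin() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"dns:///example.com:443", // placeholder target
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
}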
@@ -44,10 +44,10 @@ func init() {

 type rrPickerBuilder struct{}

-func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
 	grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
 	if len(info.ReadySCs) == 0 {
-		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
 	}
 	var scs []balancer.SubConn
 	for sc := range info.ReadySCs {
@@ -74,11 +74,7 @@ func (ccb *ccBalancerWrapper) watcher() {
 			}
 			ccb.balancerMu.Lock()
 			su := t.(*scStateUpdate)
-			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
-			} else {
-				ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
-			}
+			ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
 			ccb.balancerMu.Unlock()
 		case <-ccb.done.Done():
 		}
@@ -123,19 +119,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
 func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
 	ccb.balancerMu.Lock()
 	defer ccb.balancerMu.Unlock()
-	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-		return ub.UpdateClientConnState(*ccs)
-	}
-	ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
-	return nil
+	return ccb.balancer.UpdateClientConnState(*ccs)
 }

 func (ccb *ccBalancerWrapper) resolverError(err error) {
-	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-		ccb.balancerMu.Lock()
-		ub.ResolverError(err)
-		ccb.balancerMu.Unlock()
-	}
+	ccb.balancerMu.Lock()
+	ccb.balancer.ResolverError(err)
+	ccb.balancerMu.Unlock()
 }

 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
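The wrapper above now forwards resolver updates straight to UpdateClientConnState. A balancer requests re-resolution by returning ErrBadResolverState, as documented on the interface; a sketch, where myBalancer is illustrative:

func (b *myBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	if len(s.ResolverState.Addresses) == 0 {
		// gRPC will call ResolveNow on the resolver with exponential backoff
		// until a later update returns nil.
		return balancer.ErrBadResolverState
	}
	// ... reconcile SubConns against s.ResolverState.Addresses ...
	return nil
}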
@@ -173,21 +163,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
 	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 }

-func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
-	// Update picker before updating state. Even though the ordering here does
-	// not matter, it can lead to multiple calls of Pick in the common start-up
-	// case where we wait for ready and then perform an RPC. If the picker is
-	// updated later, we could call the "connecting" picker when the state is
-	// updated, and then call the "ready" picker after the picker gets updated.
-	ccb.cc.blockingpicker.updatePicker(p)
-	ccb.cc.csMgr.updateState(s)
-}
-
 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
 	ccb.mu.Lock()
 	defer ccb.mu.Unlock()
@@ -199,7 +174,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
 	// case where we wait for ready and then perform an RPC. If the picker is
 	// updated later, we could call the "connecting" picker when the state is
 	// updated, and then call the "ready" picker after the picker gets updated.
-	ccb.cc.blockingpicker.updatePickerV2(s.Picker)
+	ccb.cc.blockingpicker.updatePicker(s.Picker)
 	ccb.cc.csMgr.updateState(s.ConnectivityState)
 }

@@ -25,50 +25,19 @@ import (

 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/roundrobin"
-	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/balancer/stub"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/resolver/manual"
 )

-var _ balancer.V2Balancer = &funcBalancer{}
-
-type funcBalancer struct {
-	updateClientConnState func(s balancer.ClientConnState) error
-}
-
-func (*funcBalancer) HandleSubConnStateChange(balancer.SubConn, connectivity.State) {
-	panic("unimplemented") // v1 API
-}
-func (*funcBalancer) HandleResolvedAddrs([]resolver.Address, error) {
-	panic("unimplemented") // v1 API
-}
-func (b *funcBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
-	return b.updateClientConnState(s)
-}
-func (*funcBalancer) ResolverError(error) {}
-func (*funcBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
-	panic("unimplemented") // we never have sub-conns
-}
-func (*funcBalancer) Close() {}
-
-type funcBalancerBuilder struct {
-	name     string
-	instance *funcBalancer
-}
-
-func (b *funcBalancerBuilder) Build(balancer.ClientConn, balancer.BuildOptions) balancer.Balancer {
-	return b.instance
-}
-func (b *funcBalancerBuilder) Name() string { return b.name }
-
 // TestBalancerErrorResolverPolling injects balancer errors and verifies
 // ResolveNow is called on the resolver with the appropriate backoff strategy
 // being consulted between ResolveNow calls.
 func (s) TestBalancerErrorResolverPolling(t *testing.T) {
 	// The test balancer will return ErrBadResolverState iff the
 	// ClientConnState contains no addresses.
-	fb := &funcBalancer{
-		updateClientConnState: func(s balancer.ClientConnState) error {
+	bf := stub.BalancerFuncs{
+		UpdateClientConnState: func(_ *stub.BalancerData, s balancer.ClientConnState) error {
 			if len(s.ResolverState.Addresses) == 0 {
 				return balancer.ErrBadResolverState
 			}
@ -76,7 +45,7 @@ func (s) TestBalancerErrorResolverPolling(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
const balName = "BalancerErrorResolverPolling"
|
const balName = "BalancerErrorResolverPolling"
|
||||||
balancer.Register(&funcBalancerBuilder{name: balName, instance: fb})
|
stub.Register(balName, bf)
|
||||||
|
|
||||||
testResolverErrorPolling(t,
|
testResolverErrorPolling(t,
|
||||||
func(r *manual.Resolver) {
|
func(r *manual.Resolver) {
|
||||||
|
|
|
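The internal stub package replaces one-off test balancers like the deleted funcBalancer: a test registers a name plus only the callbacks it needs, and the stub supplies no-op implementations for the rest of the V2 interface. A hedged sketch of the pattern using only the API visible in this hunk (the balancer name is illustrative):

bf := stub.BalancerFuncs{
	// Reject resolver states that carry no addresses, as the test above does.
	UpdateClientConnState: func(_ *stub.BalancerData, s balancer.ClientConnState) error {
		if len(s.ResolverState.Addresses) == 0 {
			return balancer.ErrBadResolverState
		}
		return nil
	},
}
stub.Register("my-test-balancer", bf)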
@@ -27,7 +27,6 @@ import (

 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/roundrobin"
-	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/resolver/manual"
@@ -48,9 +47,13 @@ func (b *magicalLB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) ba
 	return b
 }

-func (b *magicalLB) HandleSubConnStateChange(balancer.SubConn, connectivity.State) {}
+func (b *magicalLB) ResolverError(error) {}

-func (b *magicalLB) HandleResolvedAddrs([]resolver.Address, error) {}
+func (b *magicalLB) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {}

+func (b *magicalLB) UpdateClientConnState(balancer.ClientConnState) error {
+	return nil
+}
+
 func (b *magicalLB) Close() {}

@@ -58,6 +61,21 @@ func init() {
 	balancer.Register(&magicalLB{})
 }

+func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, func()) {
+	var servers []*server
+	for i := 0; i < numServers; i++ {
+		s := newTestServer()
+		servers = append(servers, s)
+		go s.start(t, 0, maxStreams)
+		s.wait(t, 2*time.Second)
+	}
+	return servers, func() {
+		for i := 0; i < numServers; i++ {
+			servers[i].stop()
+		}
+	}
+}
+
 func checkPickFirst(cc *ClientConn, servers []*server) error {
 	var (
 		req = "port"
@@ -133,7 +151,7 @@ func (s) TestSwitchBalancer(t *testing.T) {
 	defer rcleanup()

 	const numServers = 2
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))
@@ -165,7 +183,7 @@ func (s) TestBalancerDialOption(t *testing.T) {
 	defer rcleanup()

 	const numServers = 2
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}), WithBalancerName(roundrobin.Name))
@@ -481,7 +499,7 @@ func (s) TestSwitchBalancerGRPCLBWithGRPCLBNotRegistered(t *testing.T) {
 	defer rcleanup()

 	const numServers = 3
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))
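With this hunk magicalLB drops its v1 Handle* methods and implements the V2 surface directly; taken together, the methods it now carries are the whole contract a balancer must satisfy after this commit. Sketched as an interface for reference (my summary of the methods shown in this diff; see the balancer package for the authoritative definition):

type v2Surface interface {
	UpdateClientConnState(balancer.ClientConnState) error
	ResolverError(error)
	UpdateSubConnState(balancer.SubConn, balancer.SubConnState)
	Close()
}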
789	balancer_test.go
@@ -1,789 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"fmt"
-	"math"
-	"strconv"
-	"sync"
-	"testing"
-	"time"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/naming"
-	"google.golang.org/grpc/status"
-)
-
-func pickFirstBalancerV1(r naming.Resolver) Balancer {
-	return &pickFirst{&roundRobin{r: r}}
-}
-
-type testWatcher struct {
-	// the channel to receives name resolution updates
-	update chan *naming.Update
-	// the side channel to get to know how many updates in a batch
-	side chan int
-	// the channel to notify update injector that the update reading is done
-	readDone chan int
-}
-
-func (w *testWatcher) Next() (updates []*naming.Update, err error) {
-	n := <-w.side
-	if n == 0 {
-		return nil, fmt.Errorf("w.side is closed")
-	}
-	for i := 0; i < n; i++ {
-		u := <-w.update
-		if u != nil {
-			updates = append(updates, u)
-		}
-	}
-	w.readDone <- 0
-	return
-}
-
-func (w *testWatcher) Close() {
-	close(w.side)
-}
-
-// Inject naming resolution updates to the testWatcher.
-func (w *testWatcher) inject(updates []*naming.Update) {
-	w.side <- len(updates)
-	for _, u := range updates {
-		w.update <- u
-	}
-	<-w.readDone
-}
-
-type testNameResolver struct {
-	w    *testWatcher
-	addr string
-}
-
-func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) {
-	r.w = &testWatcher{
-		update:   make(chan *naming.Update, 1),
-		side:     make(chan int, 1),
-		readDone: make(chan int),
-	}
-	r.w.side <- 1
-	r.w.update <- &naming.Update{
-		Op:   naming.Add,
-		Addr: r.addr,
-	}
-	go func() {
-		<-r.w.readDone
-	}()
-	return r.w, nil
-}
-
-func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver, func()) {
-	var servers []*server
-	for i := 0; i < numServers; i++ {
-		s := newTestServer()
-		servers = append(servers, s)
-		go s.start(t, 0, maxStreams)
-		s.wait(t, 2*time.Second)
-	}
-	// Point to server[0]
-	addr := "localhost:" + servers[0].port
-	return servers, &testNameResolver{
-		addr: addr,
-	}, func() {
-		for i := 0; i < numServers; i++ {
-			servers[i].stop()
-		}
-	}
-}
-
-func (s) TestNameDiscovery(t *testing.T) {
-	// Start 2 servers on 2 ports.
-	numServers := 2
-	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	req := "port"
-	var reply string
-	if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port {
-		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port)
-	}
-	// Inject the name resolution change to remove servers[0] and add servers[1].
-	var updates []*naming.Update
-	updates = append(updates, &naming.Update{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[0].port,
-	})
-	updates = append(updates, &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	})
-	r.w.inject(updates)
-	// Loop until the rpcs in flight talks to servers[1].
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-}
-
-func (s) TestEmptyAddrs(t *testing.T) {
-	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	var reply string
-	if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse {
-		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, <nil>", err, reply, expectedResponse)
-	}
-	// Inject name resolution change to remove the server so that there is no address
-	// available after that.
-	u := &naming.Update{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[0].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	// Loop until the above updates apply.
-	for {
-		time.Sleep(10 * time.Millisecond)
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
-		if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply); err != nil {
-			cancel()
-			break
-		}
-		cancel()
-	}
-}
-
-func (s) TestRoundRobin(t *testing.T) {
-	// Start 3 servers on 3 ports.
-	numServers := 3
-	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	// Add servers[1] to the service discovery.
-	u := &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	req := "port"
-	var reply string
-	// Loop until servers[1] is up
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-	// Add server2[2] to the service discovery.
-	u = &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[2].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	// Loop until both servers[2] are up.
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[2].port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-	// Check the incoming RPCs served in a round-robin manner.
-	for i := 0; i < 10; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[i%numServers].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", i, err, servers[i%numServers].port)
-		}
-	}
-}
-
-func (s) TestCloseWithPendingRPC(t *testing.T) {
-	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	var reply string
-	if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil {
-		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port)
-	}
-	// Remove the server.
-	updates := []*naming.Update{{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[0].port,
-	}}
-	r.w.inject(updates)
-	// Loop until the above update applies.
-	for {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
-		if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); status.Code(err) == codes.DeadlineExceeded {
-			cancel()
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-		cancel()
-	}
-	// Issue 2 RPCs which should be completed with error status once cc is closed.
-	var wg sync.WaitGroup
-	wg.Add(2)
-	go func() {
-		defer wg.Done()
-		var reply string
-		if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil {
-			t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err)
-		}
-	}()
-	go func() {
-		defer wg.Done()
-		var reply string
-		time.Sleep(5 * time.Millisecond)
-		if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil {
-			t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err)
-		}
-	}()
-	time.Sleep(5 * time.Millisecond)
-	cc.Close()
-	wg.Wait()
-}
-
-func (s) TestGetOnWaitChannel(t *testing.T) {
-	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	// Remove all servers so that all upcoming RPCs will block on waitCh.
-	updates := []*naming.Update{{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[0].port,
-	}}
-	r.w.inject(updates)
-	for {
-		var reply string
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
-		if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); status.Code(err) == codes.DeadlineExceeded {
-			cancel()
-			break
-		}
-		cancel()
-		time.Sleep(10 * time.Millisecond)
-	}
-	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		var reply string
-		if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil {
-			t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want <nil>", err)
-		}
-	}()
-	// Add a connected server to get the above RPC through.
-	updates = []*naming.Update{{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[0].port,
-	}}
-	r.w.inject(updates)
-	// Wait until the above RPC succeeds.
-	wg.Wait()
-}
-
-func (s) TestOneServerDown(t *testing.T) {
-	// Start 2 servers.
-	numServers := 2
-	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	// Add servers[1] to the service discovery.
-	var updates []*naming.Update
-	updates = append(updates, &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	})
-	r.w.inject(updates)
-	req := "port"
-	var reply string
-	// Loop until servers[1] is up
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	var wg sync.WaitGroup
-	numRPC := 100
-	sleepDuration := 10 * time.Millisecond
-	wg.Add(1)
-	go func() {
-		time.Sleep(sleepDuration)
-		// After sleepDuration, kill server[0].
-		servers[0].stop()
-		wg.Done()
-	}()
-
-	// All non-failfast RPCs should not block because there's at least one connection available.
-	for i := 0; i < numRPC; i++ {
-		wg.Add(1)
-		go func() {
-			time.Sleep(sleepDuration)
-			// After sleepDuration, invoke RPC.
-			// server[0] is killed around the same time to make it racy between balancer and gRPC internals.
-			cc.Invoke(context.Background(), "/foo/bar", &req, &reply, WaitForReady(true))
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}
-
-func (s) TestOneAddressRemoval(t *testing.T) {
-	// Start 2 servers.
-	numServers := 2
-	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	// Add servers[1] to the service discovery.
-	var updates []*naming.Update
-	updates = append(updates, &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	})
-	r.w.inject(updates)
-	req := "port"
-	var reply string
-	// Loop until servers[1] is up
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	var wg sync.WaitGroup
-	numRPC := 100
-	sleepDuration := 10 * time.Millisecond
-	wg.Add(1)
-	go func() {
-		time.Sleep(sleepDuration)
-		// After sleepDuration, delete server[0].
-		var updates []*naming.Update
-		updates = append(updates, &naming.Update{
-			Op:   naming.Delete,
-			Addr: "localhost:" + servers[0].port,
-		})
-		r.w.inject(updates)
-		wg.Done()
-	}()
-
-	// All non-failfast RPCs should not fail because there's at least one connection available.
-	for i := 0; i < numRPC; i++ {
-		wg.Add(1)
-		go func() {
-			var reply string
-			time.Sleep(sleepDuration)
-			// After sleepDuration, invoke RPC.
-			// server[0] is removed around the same time to make it racy between balancer and gRPC internals.
-			if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil {
-				t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want nil", err)
-			}
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}
-
-func checkServerUp(t *testing.T, currentServer *server) {
-	req := "port"
-	port := currentServer.port
-	cc, err := Dial("passthrough:///localhost:"+port, WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	var reply string
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-}
-
-func (s) TestPickFirstEmptyAddrs(t *testing.T) {
-	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	var reply string
-	if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse {
-		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, <nil>", err, reply, expectedResponse)
-	}
-	// Inject name resolution change to remove the server so that there is no address
-	// available after that.
-	u := &naming.Update{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[0].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	// Loop until the above updates apply.
-	for {
-		time.Sleep(10 * time.Millisecond)
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
-		if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply); err != nil {
-			cancel()
-			break
-		}
-		cancel()
-	}
-}
-
-func (s) TestPickFirstCloseWithPendingRPC(t *testing.T) {
-	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	var reply string
-	if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil {
-		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port)
-	}
-	// Remove the server.
-	updates := []*naming.Update{{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[0].port,
-	}}
-	r.w.inject(updates)
-	// Loop until the above update applies.
-	for {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
-		if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); status.Code(err) == codes.DeadlineExceeded {
-			cancel()
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-		cancel()
-	}
-	// Issue 2 RPCs which should be completed with error status once cc is closed.
-	var wg sync.WaitGroup
-	wg.Add(2)
-	go func() {
-		defer wg.Done()
-		var reply string
-		if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil {
-			t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err)
-		}
-	}()
-	go func() {
-		defer wg.Done()
-		var reply string
-		time.Sleep(5 * time.Millisecond)
-		if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil {
-			t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err)
-		}
-	}()
-	time.Sleep(5 * time.Millisecond)
-	cc.Close()
-	wg.Wait()
-}
-
-func (s) TestPickFirstOrderAllServerUp(t *testing.T) {
-	// Start 3 servers on 3 ports.
-	numServers := 3
-	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	// Add servers[1] and [2] to the service discovery.
-	u := &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	}
-	r.w.inject([]*naming.Update{u})
-
-	u = &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[2].port,
-	}
-	r.w.inject([]*naming.Update{u})
-
-	// Loop until all 3 servers are up
-	checkServerUp(t, servers[0])
-	checkServerUp(t, servers[1])
-	checkServerUp(t, servers[2])
-
-	// Check the incoming RPCs served in server[0]
-	req := "port"
-	var reply string
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	// Delete server[0] in the balancer, the incoming RPCs served in server[1]
-	// For test addrconn, close server[0] instead
-	u = &naming.Update{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[0].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	// Loop until it changes to server[1]
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	// Add server[0] back to the balancer, the incoming RPCs served in server[1]
-	// Add is append operation, the order of Notify now is {server[1].port server[2].port server[0].port}
-	u = &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[0].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	// Delete server[1] in the balancer, the incoming RPCs served in server[2]
-	u = &naming.Update{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[1].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[2].port {
-			break
-		}
-		time.Sleep(1 * time.Second)
-	}
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[2].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	// Delete server[2] in the balancer, the incoming RPCs served in server[0]
-	u = &naming.Update{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[2].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port {
-			break
-		}
-		time.Sleep(1 * time.Second)
-	}
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-}
-
-func (s) TestPickFirstOrderOneServerDown(t *testing.T) {
-	// Start 3 servers on 3 ports.
-	numServers := 3
-	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	// Add servers[1] and [2] to the service discovery.
-	u := &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	}
-	r.w.inject([]*naming.Update{u})
-
-	u = &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[2].port,
-	}
-	r.w.inject([]*naming.Update{u})
-
-	// Loop until all 3 servers are up
-	checkServerUp(t, servers[0])
-	checkServerUp(t, servers[1])
-	checkServerUp(t, servers[2])
-
-	// Check the incoming RPCs served in server[0]
-	req := "port"
-	var reply string
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	// server[0] down, incoming RPCs served in server[1], but the order of Notify still remains
-	// {server[0] server[1] server[2]}
-	servers[0].stop()
-	// Loop until it changes to server[1]
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	// up the server[0] back, the incoming RPCs served in server[1]
-	p, _ := strconv.Atoi(servers[0].port)
-	servers[0] = newTestServer()
-	go servers[0].start(t, p, math.MaxUint32)
-	defer servers[0].stop()
-	servers[0].wait(t, 2*time.Second)
-	checkServerUp(t, servers[0])
-
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	// Delete server[1] in the balancer, the incoming RPCs served in server[0]
-	u = &naming.Update{
-		Op:   naming.Delete,
-		Addr: "localhost:" + servers[1].port,
-	}
-	r.w.inject([]*naming.Update{u})
-	for {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port {
-			break
-		}
-		time.Sleep(1 * time.Second)
-	}
-	for i := 0; i < 20; i++ {
-		if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port)
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-}
-
-func (s) TestPickFirstOneAddressRemoval(t *testing.T) {
-	// Start 2 servers.
-	numServers := 2
-	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///localhost:"+servers[0].port, WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
-	if err != nil {
-		t.Fatalf("Failed to create ClientConn: %v", err)
-	}
-	defer cc.Close()
-	// Add servers[1] to the service discovery.
-	var updates []*naming.Update
-	updates = append(updates, &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	})
-	r.w.inject(updates)
-
-	// Create a new cc to Loop until servers[1] is up
-	checkServerUp(t, servers[0])
-	checkServerUp(t, servers[1])
-
-	var wg sync.WaitGroup
-	numRPC := 100
-	sleepDuration := 10 * time.Millisecond
-	wg.Add(1)
-	go func() {
-		time.Sleep(sleepDuration)
-		// After sleepDuration, delete server[0].
-		var updates []*naming.Update
-		updates = append(updates, &naming.Update{
-			Op:   naming.Delete,
-			Addr: "localhost:" + servers[0].port,
-		})
-		r.w.inject(updates)
-		wg.Done()
-	}()
-
-	// All non-failfast RPCs should not fail because there's at least one connection available.
-	for i := 0; i < numRPC; i++ {
-		wg.Add(1)
-		go func() {
-			var reply string
-			time.Sleep(sleepDuration)
-			// After sleepDuration, invoke RPC.
-			// server[0] is removed around the same time to make it racy between balancer and gRPC internals.
-			if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil {
-				t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want nil", err)
-			}
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}
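All of the tests deleted above drove address changes through the legacy naming.Watcher/naming.Update machinery. The tests that remain push updates through resolver/manual instead; a hedged sketch of the equivalent flow (the address is illustrative, and GenerateAndRegisterManualResolver is assumed to be the helper behind the rcleanup calls seen in the surviving tests):

r, rcleanup := manual.GenerateAndRegisterManualResolver()
defer rcleanup()

cc, err := Dial(r.Scheme()+":///test.server", WithInsecure())
if err != nil {
	// handle the dial error
}
defer cc.Close()

// The counterpart of a naming.Add/naming.Delete batch: hand the client
// a complete new address list and let the balancer diff it.
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "localhost:50051"}}})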
@@ -1,334 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"sync"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-type balancerWrapperBuilder struct {
-	b Balancer // The v1 balancer.
-}
-
-func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
-	bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
-		DialCreds: opts.DialCreds,
-		Dialer:    opts.Dialer,
-	})
-	_, pickfirst := bwb.b.(*pickFirst)
-	bw := &balancerWrapper{
-		balancer:   bwb.b,
-		pickfirst:  pickfirst,
-		cc:         cc,
-		targetAddr: opts.Target.Endpoint,
-		startCh:    make(chan struct{}),
-		conns:      make(map[resolver.Address]balancer.SubConn),
-		connSt:     make(map[balancer.SubConn]*scState),
-		csEvltr:    &balancer.ConnectivityStateEvaluator{},
-		state:      connectivity.Idle,
-	}
-	cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
-	go bw.lbWatcher()
-	return bw
-}
-
-func (bwb *balancerWrapperBuilder) Name() string {
-	return "wrapper"
-}
-
-type scState struct {
-	addr Address // The v1 address type.
-	s    connectivity.State
-	down func(error)
-}
-
-type balancerWrapper struct {
-	balancer  Balancer // The v1 balancer.
-	pickfirst bool
-
-	cc         balancer.ClientConn
-	targetAddr string // Target without the scheme.
-
-	mu     sync.Mutex
-	conns  map[resolver.Address]balancer.SubConn
-	connSt map[balancer.SubConn]*scState
-	// This channel is closed when handling the first resolver result.
-	// lbWatcher blocks until this is closed, to avoid race between
-	// - NewSubConn is created, cc wants to notify balancer of state changes;
-	// - Build hasn't return, cc doesn't have access to balancer.
-	startCh chan struct{}
-
-	// To aggregate the connectivity state.
-	csEvltr *balancer.ConnectivityStateEvaluator
-	state   connectivity.State
-}
-
-// lbWatcher watches the Notify channel of the balancer and manages
-// connections accordingly.
-func (bw *balancerWrapper) lbWatcher() {
-	<-bw.startCh
-	notifyCh := bw.balancer.Notify()
-	if notifyCh == nil {
-		// There's no resolver in the balancer. Connect directly.
-		a := resolver.Address{
-			Addr: bw.targetAddr,
-			Type: resolver.Backend,
-		}
-		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-		if err != nil {
-			grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
-		} else {
-			bw.mu.Lock()
-			bw.conns[a] = sc
-			bw.connSt[sc] = &scState{
-				addr: Address{Addr: bw.targetAddr},
-				s:    connectivity.Idle,
-			}
-			bw.mu.Unlock()
-			sc.Connect()
-		}
-		return
-	}
-
-	for addrs := range notifyCh {
-		grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
-		if bw.pickfirst {
-			var (
-				oldA  resolver.Address
-				oldSC balancer.SubConn
-			)
-			bw.mu.Lock()
-			for oldA, oldSC = range bw.conns {
-				break
-			}
-			bw.mu.Unlock()
-			if len(addrs) <= 0 {
-				if oldSC != nil {
-					// Teardown old sc.
-					bw.mu.Lock()
-					delete(bw.conns, oldA)
-					delete(bw.connSt, oldSC)
-					bw.mu.Unlock()
-					bw.cc.RemoveSubConn(oldSC)
-				}
-				continue
-			}
-
-			var newAddrs []resolver.Address
-			for _, a := range addrs {
-				newAddr := resolver.Address{
-					Addr:       a.Addr,
-					Type:       resolver.Backend, // All addresses from balancer are all backends.
-					ServerName: "",
-					Metadata:   a.Metadata,
-				}
-				newAddrs = append(newAddrs, newAddr)
-			}
-			if oldSC == nil {
-				// Create new sc.
-				sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
-				if err != nil {
-					grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
-				} else {
-					bw.mu.Lock()
-					// For pickfirst, there should be only one SubConn, so the
-					// address doesn't matter. All states updating (up and down)
-					// and picking should all happen on that only SubConn.
-					bw.conns[resolver.Address{}] = sc
-					bw.connSt[sc] = &scState{
-						addr: addrs[0], // Use the first address.
-						s:    connectivity.Idle,
-					}
-					bw.mu.Unlock()
-					sc.Connect()
-				}
-			} else {
-				bw.mu.Lock()
-				bw.connSt[oldSC].addr = addrs[0]
-				bw.mu.Unlock()
-				oldSC.UpdateAddresses(newAddrs)
-			}
-		} else {
-			var (
-				add []resolver.Address // Addresses need to setup connections.
-				del []balancer.SubConn // Connections need to tear down.
-			)
-			resAddrs := make(map[resolver.Address]Address)
-			for _, a := range addrs {
-				resAddrs[resolver.Address{
-					Addr:       a.Addr,
-					Type:       resolver.Backend, // All addresses from balancer are all backends.
-					ServerName: "",
-					Metadata:   a.Metadata,
-				}] = a
-			}
-			bw.mu.Lock()
-			for a := range resAddrs {
-				if _, ok := bw.conns[a]; !ok {
-					add = append(add, a)
-				}
-			}
-			for a, c := range bw.conns {
-				if _, ok := resAddrs[a]; !ok {
-					del = append(del, c)
-					delete(bw.conns, a)
-					// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
-				}
-			}
-			bw.mu.Unlock()
-			for _, a := range add {
-				sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-				if err != nil {
-					grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
-				} else {
-					bw.mu.Lock()
-					bw.conns[a] = sc
-					bw.connSt[sc] = &scState{
-						addr: resAddrs[a],
-						s:    connectivity.Idle,
-					}
-					bw.mu.Unlock()
-					sc.Connect()
-				}
-			}
-			for _, c := range del {
-				bw.cc.RemoveSubConn(c)
-			}
-		}
-	}
-}
-
-func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	scSt, ok := bw.connSt[sc]
-	if !ok {
-		return
-	}
-	if s == connectivity.Idle {
-		sc.Connect()
-	}
-	oldS := scSt.s
-	scSt.s = s
-	if oldS != connectivity.Ready && s == connectivity.Ready {
-		scSt.down = bw.balancer.Up(scSt.addr)
-	} else if oldS == connectivity.Ready && s != connectivity.Ready {
-		if scSt.down != nil {
-			scSt.down(errConnClosing)
-		}
-	}
-	sa := bw.csEvltr.RecordTransition(oldS, s)
-	if bw.state != sa {
-		bw.state = sa
-	}
-	bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
-	if s == connectivity.Shutdown {
-		// Remove state for this sc.
-		delete(bw.connSt, sc)
-	}
-}
-
-func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	select {
-	case <-bw.startCh:
-	default:
-		close(bw.startCh)
-	}
-	// There should be a resolver inside the balancer.
-	// All updates here, if any, are ignored.
-}
-
-func (bw *balancerWrapper) Close() {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	select {
-	case <-bw.startCh:
-	default:
-		close(bw.startCh)
-	}
-	bw.balancer.Close()
-}
-
-// The picker is the balancerWrapper itself.
-// It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
-	failfast := true // Default failfast is true.
-	if ss, ok := rpcInfoFromContext(info.Ctx); ok {
-		failfast = ss.failfast
-	}
-	a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
-	if err != nil {
-		return balancer.PickResult{}, toRPCErr(err)
-	}
-	if p != nil {
-		result.Done = func(balancer.DoneInfo) { p() }
-		defer func() {
-			if err != nil {
-				p()
-			}
-		}()
-	}
-
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	if bw.pickfirst {
-		// Get the first sc in conns.
-		for _, result.SubConn = range bw.conns {
-			return result, nil
-		}
-		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-	}
-	var ok1 bool
-	result.SubConn, ok1 = bw.conns[resolver.Address{
-		Addr:       a.Addr,
-		Type:       resolver.Backend,
-		ServerName: "",
-		Metadata:   a.Metadata,
-	}]
-	s, ok2 := bw.connSt[result.SubConn]
-	if !ok1 || !ok2 {
-		// This can only happen due to a race where Get() returned an address
-		// that was subsequently removed by Notify. In this case we should
-		// retry always.
-		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-	}
-	switch s.s {
-	case connectivity.Ready, connectivity.Idle:
-		return result, nil
-	case connectivity.Shutdown, connectivity.TransientFailure:
-		// If the returned sc has been shut down or is in transient failure,
-		// return error, and this RPC will fail or wait for another picker (if
-		// non-failfast).
-		return balancer.PickResult{}, balancer.ErrTransientFailure
-	default:
-		// For other states (connecting or unknown), the v1 balancer would
-		// traditionally wait until ready and then issue the RPC. Returning
-		// ErrNoSubConnAvailable will be a slight improvement in that it will
-		// allow the balancer to choose another address in case others are
-		// connected.
-		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-	}
-}
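The deleted wrapper's Pick method doubles as a reference for the V2 picker contract this commit standardizes on: a single synchronous call returning a balancer.PickResult, or a sentinel error such as ErrNoSubConnAvailable, instead of the v1 blocking Get. A minimal custom picker against that contract might look like this (the type and its field are illustrative):

// onePicker always offers the same SubConn; a real picker would choose
// among several using the RPC information in balancer.PickInfo.
type onePicker struct {
	sc balancer.SubConn
}

func (p *onePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		// Makes wait-for-ready RPCs block until a new picker is
		// pushed via ClientConn.UpdateState.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{
		SubConn: p.sc,
		// Done is invoked when the RPC completes.
		Done: func(balancer.DoneInfo) {},
	}, nil
}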
@@ -68,8 +68,6 @@ var (
	errConnDrain = errors.New("grpc: the connection is drained")
 	// errConnClosing indicates that the connection is closing.
 	errConnClosing = errors.New("grpc: the connection is closing")
-	// errBalancerClosed indicates that the balancer is closed.
-	errBalancerClosed = errors.New("grpc: balancer is closed")
 	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
 	// service config.
 	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@@ -318,7 +316,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 			if s == connectivity.Ready {
 				break
 			} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
-				if err = cc.blockingpicker.connectionError(); err != nil {
+				if err = cc.connectionError(); err != nil {
 					terr, ok := err.(interface {
 						Temporary() bool
 					})
@@ -329,7 +327,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 			}
 			if !cc.WaitForStateChange(ctx, s) {
 				// ctx got timeout or canceled.
-				if err = cc.blockingpicker.connectionError(); err != nil && cc.dopts.returnLastError {
+				if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
 					return nil, err
 				}
 				return nil, ctx.Err()
@@ -500,6 +498,9 @@ type ClientConn struct {

 	channelzID int64 // channelz unique identification number
 	czData     *channelzData
+
+	lceMu               sync.Mutex // protects lastConnectionError
+	lastConnectionError error
 }

 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
@@ -1209,7 +1210,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
 		if firstConnErr == nil {
 			firstConnErr = err
 		}
-		ac.cc.blockingpicker.updateConnectionError(err)
+		ac.cc.updateConnectionError(err)
 	}

 	// Couldn't connect to any address.
@@ -1535,3 +1536,15 @@ func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
 	}
 	return resolver.Get(scheme)
 }
+
+func (cc *ClientConn) updateConnectionError(err error) {
+	cc.lceMu.Lock()
+	cc.lastConnectionError = err
+	cc.lceMu.Unlock()
+}
+
+func (cc *ClientConn) connectionError() error {
+	cc.lceMu.Lock()
+	defer cc.lceMu.Unlock()
+	return cc.lastConnectionError
+}
@@ -449,9 +449,9 @@ type stateRecordingBalancer struct {
 	balancer.Balancer
 }

-func (b *stateRecordingBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	b.notifier <- s
-	b.Balancer.HandleSubConnStateChange(sc, s)
+func (b *stateRecordingBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
+	b.notifier <- s.ConnectivityState
+	b.Balancer.UpdateSubConnState(sc, s)
 }

 func (b *stateRecordingBalancer) ResetNotifier(r chan<- connectivity.State) {
@ -36,21 +36,11 @@ import (
|
||||||
internalbackoff "google.golang.org/grpc/internal/backoff"
|
internalbackoff "google.golang.org/grpc/internal/backoff"
|
||||||
"google.golang.org/grpc/internal/transport"
|
"google.golang.org/grpc/internal/transport"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
"google.golang.org/grpc/naming"
|
|
||||||
"google.golang.org/grpc/resolver"
|
"google.golang.org/grpc/resolver"
|
||||||
"google.golang.org/grpc/resolver/manual"
|
"google.golang.org/grpc/resolver/manual"
|
||||||
"google.golang.org/grpc/testdata"
|
"google.golang.org/grpc/testdata"
|
||||||
)
|
)
|
||||||
|
|
||||||
func assertState(wantState connectivity.State, cc *ClientConn) (connectivity.State, bool) {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
|
||||||
defer cancel()
|
|
||||||
var state connectivity.State
|
|
||||||
for state = cc.GetState(); state != wantState && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
|
|
||||||
}
|
|
||||||
return state, state == wantState
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s) TestDialWithTimeout(t *testing.T) {
|
func (s) TestDialWithTimeout(t *testing.T) {
|
||||||
lis, err := net.Listen("tcp", "localhost:0")
|
lis, err := net.Listen("tcp", "localhost:0")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -361,42 +351,6 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) {
 }

-func (s) TestConnectivityStates(t *testing.T) {
-	servers, resolver, cleanup := startServers(t, 2, math.MaxUint32)
-	defer cleanup()
-	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(resolver)), WithInsecure())
-	if err != nil {
-		t.Fatalf("Dial(\"foo.bar.com\", WithBalancer(_)) = _, %v, want _ <nil>", err)
-	}
-	defer cc.Close()
-	wantState := connectivity.Ready
-	if state, ok := assertState(wantState, cc); !ok {
-		t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState)
-	}
-	// Send an update to delete the server connection (tearDown addrConn).
-	update := []*naming.Update{
-		{
-			Op:   naming.Delete,
-			Addr: "localhost:" + servers[0].port,
-		},
-	}
-	resolver.w.inject(update)
-	wantState = connectivity.TransientFailure
-	if state, ok := assertState(wantState, cc); !ok {
-		t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState)
-	}
-	update[0] = &naming.Update{
-		Op:   naming.Add,
-		Addr: "localhost:" + servers[1].port,
-	}
-	resolver.w.inject(update)
-	wantState = connectivity.Ready
-	if state, ok := assertState(wantState, cc); !ok {
-		t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState)
-	}
-}
-
 func (s) TestWithTimeout(t *testing.T) {
 	conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure())
 	if err == nil {
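The deleted test drove connectivity changes by injecting naming.Update deltas into the v1 resolver. Under the resolver API that the remaining tests use, the same effect is achieved by publishing a complete new address list through a manual resolver. A minimal sketch, assuming a manual resolver r from manual.GenerateAndRegisterManualResolver as in the surviving tests:

    // The resolver API replaces the whole address set instead of patching it
    // with Add/Delete operations; an address that disappears from the list
    // has its addrConn torn down, which is what the old Delete op did.
    r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "localhost:" + servers[1].port}}})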
@ -592,43 +546,6 @@ func (s) TestDialContextFailFast(t *testing.T) {
 	}
 }

-// blockingBalancer mimics the behavior of balancers whose initialization takes a long time.
-// In this test, reading from blockingBalancer.Notify() blocks forever.
-type blockingBalancer struct {
-	ch chan []Address
-}
-
-func newBlockingBalancer() Balancer {
-	return &blockingBalancer{ch: make(chan []Address)}
-}
-func (b *blockingBalancer) Start(target string, config BalancerConfig) error {
-	return nil
-}
-func (b *blockingBalancer) Up(addr Address) func(error) {
-	return nil
-}
-func (b *blockingBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
-	return Address{}, nil, nil
-}
-func (b *blockingBalancer) Notify() <-chan []Address {
-	return b.ch
-}
-func (b *blockingBalancer) Close() error {
-	close(b.ch)
-	return nil
-}
-
-func (s) TestDialWithBlockingBalancer(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	dialDone := make(chan struct{})
-	go func() {
-		DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure(), WithBalancer(newBlockingBalancer()))
-		close(dialDone)
-	}()
-	cancel()
-	<-dialDone
-}
-
 // securePerRPCCredentials always requires transport security.
 type securePerRPCCredentials struct{}
@ -724,50 +641,6 @@ func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) {
 	}
 }

-// emptyBalancer returns an empty set of servers.
-type emptyBalancer struct {
-	ch chan []Address
-}
-
-func newEmptyBalancer() Balancer {
-	return &emptyBalancer{ch: make(chan []Address, 1)}
-}
-func (b *emptyBalancer) Start(_ string, _ BalancerConfig) error {
-	b.ch <- nil
-	return nil
-}
-func (b *emptyBalancer) Up(_ Address) func(error) {
-	return nil
-}
-func (b *emptyBalancer) Get(_ context.Context, _ BalancerGetOptions) (Address, func(), error) {
-	return Address{}, nil, nil
-}
-func (b *emptyBalancer) Notify() <-chan []Address {
-	return b.ch
-}
-func (b *emptyBalancer) Close() error {
-	close(b.ch)
-	return nil
-}
-
-func (s) TestNonblockingDialWithEmptyBalancer(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	dialDone := make(chan error)
-	go func() {
-		dialDone <- func() error {
-			conn, err := DialContext(ctx, "Non-Existent.Server:80", WithInsecure(), WithBalancer(newEmptyBalancer()))
-			if err != nil {
-				return err
-			}
-			return conn.Close()
-		}()
-	}()
-	if err := <-dialDone; err != nil {
-		t.Fatalf("unexpected error dialing connection: %s", err)
-	}
-}
-
 func (s) TestResolverServiceConfigBeforeAddressNotPanic(t *testing.T) {
 	r, rcleanup := manual.GenerateAndRegisterManualResolver()
 	defer rcleanup()
@ -57,8 +57,7 @@ type dialOptions struct {
 	authority   string
 	copts       transport.ConnectOptions
 	callOptions []CallOption
-	// This is used by v1 balancer dial option WithBalancer to support v1
-	// balancer, and also by WithBalancerName dial option.
+	// This is used by WithBalancerName dial option.
 	balancerBuilder      balancer.Builder
 	channelzParentID     int64
 	disableServiceConfig bool
@ -200,19 +199,6 @@ func WithDecompressor(dc Decompressor) DialOption {
 	})
 }

-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName. Will be removed in a future 1.x release.
-func WithBalancer(b Balancer) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.balancerBuilder = &balancerWrapperBuilder{
-			b: b,
-		}
-	})
-}
-
 // WithBalancerName sets the balancer that the ClientConn will be initialized
 // with. Balancer registered with balancerName will be used. This function
 // panics if no balancer was registered by balancerName.
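A call site that previously passed WithBalancer now selects a registered balancer by name instead, per the surviving WithBalancerName documentation. A minimal sketch of the migration, assuming an insecure client and a hypothetical target URI:

    conn, err := grpc.Dial(
        "dns:///my-service.example.com:443", // hypothetical target
        grpc.WithInsecure(),
        // roundrobin.Name is "round_robin", registered as a side effect of
        // importing google.golang.org/grpc/balancer/roundrobin.
        grpc.WithBalancerName(roundrobin.Name),
    )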
@ -63,6 +63,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.30.0-dev.1 h1:UPWdABFs9zu2kdq7GrCUcfnVgCT65hSpvHmy0RiKn0M=
+google.golang.org/grpc v1.30.0-dev.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -137,25 +137,25 @@ func (s *Status) Details() []interface{} {
 }

 // Error is an alias of a status proto. It implements error and Status,
-// and a nil Error should never be returned by this package.
+// and a nil *Error should never be returned by this package.
 type Error spb.Status

-func (se *Error) Error() string {
-	p := (*spb.Status)(se)
+func (e *Error) Error() string {
+	p := (*spb.Status)(e)
 	return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
 }

 // GRPCStatus returns the Status represented by se.
-func (se *Error) GRPCStatus() *Status {
-	return FromProto((*spb.Status)(se))
+func (e *Error) GRPCStatus() *Status {
+	return FromProto((*spb.Status)(e))
 }

 // Is implements future error.Is functionality.
 // A Error is equivalent if the code and message are identical.
-func (se *Error) Is(target error) bool {
+func (e *Error) Is(target error) bool {
 	tse, ok := target.(*Error)
 	if !ok {
 		return false
 	}
-	return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
+	return proto.Equal((*spb.Status)(e), (*spb.Status)(tse))
 }
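The Is method gives *Error the standard errors.Is semantics: equivalence by value rather than by pointer identity. A standalone sketch of the same pattern with a simplified error type (myError is hypothetical; the real type above compares whole status protos via proto.Equal):

    type myError struct{ code, msg string }

    func (e *myError) Error() string { return e.code + ": " + e.msg }

    // Is makes errors.Is treat two values with equal contents as the same error.
    func (e *myError) Is(target error) bool {
        t, ok := target.(*myError)
        return ok && e.code == t.code && e.msg == t.msg
    }

    // errors.Is(fmt.Errorf("call failed: %w", &myError{"NotFound", "x"}),
    //     &myError{"NotFound", "x"}) reports true: errors.Is unwraps the
    //     chain and calls Is on each element it finds.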
@ -1,293 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net"
-	"strconv"
-	"time"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-const (
-	defaultPort = "443"
-	defaultFreq = time.Minute * 30
-)
-
-var (
-	errMissingAddr  = errors.New("missing address")
-	errWatcherClose = errors.New("watcher has been closed")
-
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
-)
-
-// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
-// create watchers that poll the DNS server using the frequency set by freq.
-func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
-	return &dnsResolver{freq: freq}, nil
-}
-
-// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
-// watchers that poll the DNS server using the default frequency defined by defaultFreq.
-func NewDNSResolver() (Resolver, error) {
-	return NewDNSResolverWithFreq(defaultFreq)
-}
-
-// dnsResolver handles name resolution for names following the DNS scheme
-type dnsResolver struct {
-	// frequency of polling the DNS server that the watchers created by this resolver will use.
-	freq time.Duration
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", false
-	}
-	if ip.To4() != nil {
-		return addr, true
-	}
-	return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
-	if target == "" {
-		return "", "", errMissingAddr
-	}
-
-	if ip := net.ParseIP(target); ip != nil {
-		// target is an IPv4 or IPv6(without brackets) address
-		return target, defaultPort, nil
-	}
-	if host, port, err := net.SplitHostPort(target); err == nil {
-		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
-		if host == "" {
-			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
-			host = "localhost"
-		}
-		if port == "" {
-			// If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
-			port = defaultPort
-		}
-		return host, port, nil
-	}
-	if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
-		// target doesn't have port
-		return host, port, nil
-	}
-	return "", "", fmt.Errorf("invalid target address %v", target)
-}
-
-// Resolve creates a watcher that watches the name resolution of the target.
-func (r *dnsResolver) Resolve(target string) (Watcher, error) {
-	host, port, err := parseTarget(target)
-	if err != nil {
-		return nil, err
-	}
-
-	if net.ParseIP(host) != nil {
-		ipWatcher := &ipWatcher{
-			updateChan: make(chan *Update, 1),
-		}
-		host, _ = formatIP(host)
-		ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
-		return ipWatcher, nil
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	return &dnsWatcher{
-		r:      r,
-		host:   host,
-		port:   port,
-		ctx:    ctx,
-		cancel: cancel,
-		t:      time.NewTimer(0),
-	}, nil
-}
-
-// dnsWatcher watches for the name resolution update for a specific target
-type dnsWatcher struct {
-	r    *dnsResolver
-	host string
-	port string
-	// The latest resolved address set
-	curAddrs map[string]*Update
-	ctx      context.Context
-	cancel   context.CancelFunc
-	t        *time.Timer
-}
-
-// ipWatcher watches for the name resolution update for an IP address.
-type ipWatcher struct {
-	updateChan chan *Update
-}
-
-// Next returns the address resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unnecessary. Therefore,
-// Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exists until watcher is closed.
-func (i *ipWatcher) Next() ([]*Update, error) {
-	u, ok := <-i.updateChan
-	if !ok {
-		return nil, errWatcherClose
-	}
-	return []*Update{u}, nil
-}
-
-// Close closes the ipWatcher.
-func (i *ipWatcher) Close() {
-	close(i.updateChan)
-}
-
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
-	// Backend indicates the server is a backend server.
-	Backend AddressType = iota
-	// GRPCLB indicates the server is a grpclb load balancer.
-	GRPCLB
-)
-
-// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
-// name resolver used by the grpclb balancer is required to provide this type of metadata in
-// its address updates.
-type AddrMetadataGRPCLB struct {
-	// AddrType is the type of server (grpc load balancer or backend).
-	AddrType AddressType
-	// ServerName is the name of the grpc load balancer. Used for authentication.
-	ServerName string
-}
-
-// compileUpdate compares the old resolved addresses and newly resolved addresses,
-// and generates an update list
-func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
-	var res []*Update
-	for a, u := range w.curAddrs {
-		if _, ok := newAddrs[a]; !ok {
-			u.Op = Delete
-			res = append(res, u)
-		}
-	}
-	for a, u := range newAddrs {
-		if _, ok := w.curAddrs[a]; !ok {
-			res = append(res, u)
-		}
-	}
-	return res
-}
-
-func (w *dnsWatcher) lookupSRV() map[string]*Update {
-	newAddrs := make(map[string]*Update)
-	_, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
-	if err != nil {
-		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, s := range srvs {
-		lbAddrs, err := lookupHost(w.ctx, s.Target)
-		if err != nil {
-			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
-			continue
-		}
-		for _, a := range lbAddrs {
-			a, ok := formatIP(a)
-			if !ok {
-				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-				continue
-			}
-			addr := a + ":" + strconv.Itoa(int(s.Port))
-			newAddrs[addr] = &Update{Addr: addr,
-				Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
-		}
-	}
-	return newAddrs
-}
-
-func (w *dnsWatcher) lookupHost() map[string]*Update {
-	newAddrs := make(map[string]*Update)
-	addrs, err := lookupHost(w.ctx, w.host)
-	if err != nil {
-		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, a := range addrs {
-		a, ok := formatIP(a)
-		if !ok {
-			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-			continue
-		}
-		addr := a + ":" + w.port
-		newAddrs[addr] = &Update{Addr: addr}
-	}
-	return newAddrs
-}
-
-func (w *dnsWatcher) lookup() []*Update {
-	newAddrs := w.lookupSRV()
-	if newAddrs == nil {
-		// If failed to get any balancer address (either no corresponding SRV for the
-		// target, or caused by failure during resolution/parsing of the balancer target),
-		// return any A record info available.
-		newAddrs = w.lookupHost()
-	}
-	result := w.compileUpdate(newAddrs)
-	w.curAddrs = newAddrs
-	return result
-}
-
-// Next returns the resolved address update(delta) for the target. If there's no
-// change, it will sleep for 30 mins and try to resolve again after that.
-func (w *dnsWatcher) Next() ([]*Update, error) {
-	for {
-		select {
-		case <-w.ctx.Done():
-			return nil, errWatcherClose
-		case <-w.t.C:
-		}
-		result := w.lookup()
-		// Next lookup should happen after an interval defined by w.r.freq.
-		w.t.Reset(w.r.freq)
-		if len(result) > 0 {
-			return result, nil
-		}
-	}
-}
-
-func (w *dnsWatcher) Close() {
-	w.cancel()
-}
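The polling Resolver/Watcher DNS implementation deleted above has a resolver-API counterpart in gRPC's "dns" scheme, so a client needs no replacement wiring of its own. A minimal sketch, assuming a hypothetical host:

    // The dns resolver looks up A records (and grpclb SRV records) for the
    // target and pushes address updates into the ClientConn directly.
    conn, err := grpc.Dial("dns:///my-service.example.com:443", grpc.WithInsecure())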
@ -1,341 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"reflect"
-	"sync"
-	"testing"
-	"time"
-
-	"google.golang.org/grpc/internal/grpctest"
-)
-
-type s struct {
-	grpctest.Tester
-}
-
-func Test(t *testing.T) {
-	grpctest.RunSubTests(t, s{})
-}
-
-func newUpdateWithMD(op Operation, addr, lb string) *Update {
-	return &Update{
-		Op:       op,
-		Addr:     addr,
-		Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: lb},
-	}
-}
-
-func toMap(u []*Update) map[string]*Update {
-	m := make(map[string]*Update)
-	for _, v := range u {
-		m[v.Addr] = v
-	}
-	return m
-}
-
-func (s) TestCompileUpdate(t *testing.T) {
-	tests := []struct {
-		oldAddrs []string
-		newAddrs []string
-		want     []*Update
-	}{
-		{
-			[]string{},
-			[]string{"1.0.0.1"},
-			[]*Update{{Op: Add, Addr: "1.0.0.1"}},
-		},
-		{
-			[]string{"1.0.0.1"},
-			[]string{"1.0.0.1"},
-			[]*Update{},
-		},
-		{
-			[]string{"1.0.0.0"},
-			[]string{"1.0.0.1"},
-			[]*Update{{Op: Delete, Addr: "1.0.0.0"}, {Op: Add, Addr: "1.0.0.1"}},
-		},
-		{
-			[]string{"1.0.0.1"},
-			[]string{"1.0.0.0"},
-			[]*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}},
-		},
-		{
-			[]string{"1.0.0.1"},
-			[]string{"1.0.0.1", "1.0.0.2", "1.0.0.3"},
-			[]*Update{{Op: Add, Addr: "1.0.0.2"}, {Op: Add, Addr: "1.0.0.3"}},
-		},
-		{
-			[]string{"1.0.0.1", "1.0.0.2", "1.0.0.3"},
-			[]string{"1.0.0.0"},
-			[]*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}, {Op: Delete, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.3"}},
-		},
-		{
-			[]string{"1.0.0.1", "1.0.0.3", "1.0.0.5"},
-			[]string{"1.0.0.2", "1.0.0.3", "1.0.0.6"},
-			[]*Update{{Op: Delete, Addr: "1.0.0.1"}, {Op: Add, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.5"}, {Op: Add, Addr: "1.0.0.6"}},
-		},
-		{
-			[]string{"1.0.0.1", "1.0.0.1", "1.0.0.2"},
-			[]string{"1.0.0.1"},
-			[]*Update{{Op: Delete, Addr: "1.0.0.2"}},
-		},
-	}
-
-	var w dnsWatcher
-	for _, c := range tests {
-		w.curAddrs = make(map[string]*Update)
-		newUpdates := make(map[string]*Update)
-		for _, a := range c.oldAddrs {
-			w.curAddrs[a] = &Update{Addr: a}
-		}
-		for _, a := range c.newAddrs {
-			newUpdates[a] = &Update{Addr: a}
-		}
-		r := w.compileUpdate(newUpdates)
-		if !reflect.DeepEqual(toMap(c.want), toMap(r)) {
-			t.Errorf("w(%+v).compileUpdate(%+v) = %+v, want %+v", c.oldAddrs, c.newAddrs, updatesToSlice(r), updatesToSlice(c.want))
-		}
-	}
-}
-
-func (s) TestResolveFunc(t *testing.T) {
-	tests := []struct {
-		addr string
-		want error
-	}{
-		// TODO(yuxuanli): More false cases?
-		{"www.google.com", nil},
-		{"foo.bar:12345", nil},
-		{"127.0.0.1", nil},
-		{"127.0.0.1:12345", nil},
-		{"[::1]:80", nil},
-		{"[2001:db8:a0b:12f0::1]:21", nil},
-		{":80", nil},
-		{"127.0.0...1:12345", nil},
-		{"[fe80::1%lo0]:80", nil},
-		{"golang.org:http", nil},
-		{"[2001:db8::1]:http", nil},
-		{":", nil},
-		{"", errMissingAddr},
-		{"[2001:db8:a0b:12f0::1", fmt.Errorf("invalid target address %v", "[2001:db8:a0b:12f0::1")},
-	}
-
-	r, err := NewDNSResolver()
-	if err != nil {
-		t.Errorf("%v", err)
-	}
-	for _, v := range tests {
-		_, err := r.Resolve(v.addr)
-		if !reflect.DeepEqual(err, v.want) {
-			t.Errorf("Resolve(%q) = %v, want %v", v.addr, err, v.want)
-		}
-	}
-}
-
-var hostLookupTbl = map[string][]string{
-	"foo.bar.com":      {"1.2.3.4", "5.6.7.8"},
-	"ipv4.single.fake": {"1.2.3.4"},
-	"ipv4.multi.fake":  {"1.2.3.4", "5.6.7.8", "9.10.11.12"},
-	"ipv6.single.fake": {"2607:f8b0:400a:801::1001"},
-	"ipv6.multi.fake":  {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"},
-}
-
-func hostLookup(host string) ([]string, error) {
-	if addrs, ok := hostLookupTbl[host]; ok {
-		return addrs, nil
-	}
-	return nil, fmt.Errorf("failed to lookup host:%s resolution in hostLookupTbl", host)
-}
-
-var srvLookupTbl = map[string][]*net.SRV{
-	"_grpclb._tcp.srv.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}},
-	"_grpclb._tcp.srv.ipv4.multi.fake":  {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}},
-	"_grpclb._tcp.srv.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}},
-	"_grpclb._tcp.srv.ipv6.multi.fake":  {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}},
-}
-
-func srvLookup(service, proto, name string) (string, []*net.SRV, error) {
-	cname := "_" + service + "._" + proto + "." + name
-	if srvs, ok := srvLookupTbl[cname]; ok {
-		return cname, srvs, nil
-	}
-	return "", nil, fmt.Errorf("failed to lookup srv record for %s in srvLookupTbl", cname)
-}
-
-func updatesToSlice(updates []*Update) []Update {
-	res := make([]Update, len(updates))
-	for i, u := range updates {
-		res[i] = *u
-	}
-	return res
-}
-
-func testResolver(t *testing.T, freq time.Duration, slp time.Duration) {
-	tests := []struct {
-		target string
-		want   []*Update
-	}{
-		{
-			"foo.bar.com",
-			[]*Update{{Op: Add, Addr: "1.2.3.4" + colonDefaultPort}, {Op: Add, Addr: "5.6.7.8" + colonDefaultPort}},
-		},
-		{
-			"foo.bar.com:1234",
-			[]*Update{{Op: Add, Addr: "1.2.3.4:1234"}, {Op: Add, Addr: "5.6.7.8:1234"}},
-		},
-		{
-			"srv.ipv4.single.fake",
-			[]*Update{newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.single.fake")},
-		},
-		{
-			"srv.ipv4.multi.fake",
-			[]*Update{
-				newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.multi.fake"),
-				newUpdateWithMD(Add, "5.6.7.8:1234", "ipv4.multi.fake"),
-				newUpdateWithMD(Add, "9.10.11.12:1234", "ipv4.multi.fake")},
-		},
-		{
-			"srv.ipv6.single.fake",
-			[]*Update{newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.single.fake")},
-		},
-		{
-			"srv.ipv6.multi.fake",
-			[]*Update{
-				newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.multi.fake"),
-				newUpdateWithMD(Add, "[2607:f8b0:400a:801::1002]:1234", "ipv6.multi.fake"),
-				newUpdateWithMD(Add, "[2607:f8b0:400a:801::1003]:1234", "ipv6.multi.fake"),
-			},
-		},
-	}
-
-	for _, a := range tests {
-		r, err := NewDNSResolverWithFreq(freq)
-		if err != nil {
-			t.Fatalf("%v\n", err)
-		}
-		w, err := r.Resolve(a.target)
-		if err != nil {
-			t.Fatalf("%v\n", err)
-		}
-		updates, err := w.Next()
-		if err != nil {
-			t.Fatalf("%v\n", err)
-		}
-		if !reflect.DeepEqual(toMap(a.want), toMap(updates)) {
-			t.Errorf("Resolve(%q) = %+v, want %+v\n", a.target, updatesToSlice(updates), updatesToSlice(a.want))
-		}
-		var wg sync.WaitGroup
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for {
-				_, err := w.Next()
-				if err != nil {
-					return
-				}
-				t.Error("Execution shouldn't reach here, since w.Next() should be blocked until close happen.")
-			}
-		}()
-		// Sleep for sometime to let watcher do more than one lookup
-		time.Sleep(slp)
-		w.Close()
-		wg.Wait()
-	}
-}
-
-func replaceNetFunc() func() {
-	oldLookupHost := lookupHost
-	oldLookupSRV := lookupSRV
-	lookupHost = func(ctx context.Context, host string) ([]string, error) {
-		return hostLookup(host)
-	}
-	lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
-		return srvLookup(service, proto, name)
-	}
-	return func() {
-		lookupHost = oldLookupHost
-		lookupSRV = oldLookupSRV
-	}
-}
-
-func (s) TestResolve(t *testing.T) {
-	defer replaceNetFunc()()
-	testResolver(t, time.Millisecond*5, time.Millisecond*10)
-}
-
-const colonDefaultPort = ":" + defaultPort
-
-func (s) TestIPWatcher(t *testing.T) {
-	tests := []struct {
-		target string
-		want   []*Update
-	}{
-		{"127.0.0.1", []*Update{{Op: Add, Addr: "127.0.0.1" + colonDefaultPort}}},
-		{"127.0.0.1:12345", []*Update{{Op: Add, Addr: "127.0.0.1:12345"}}},
-		{"::1", []*Update{{Op: Add, Addr: "[::1]" + colonDefaultPort}}},
-		{"[::1]:12345", []*Update{{Op: Add, Addr: "[::1]:12345"}}},
-		{"[::1]:", []*Update{{Op: Add, Addr: "[::1]:443"}}},
-		{"2001:db8:85a3::8a2e:370:7334", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}},
-		{"[2001:db8:85a3::8a2e:370:7334]", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}},
-		{"[2001:db8:85a3::8a2e:370:7334]:12345", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]:12345"}}},
-		{"[2001:db8::1]:http", []*Update{{Op: Add, Addr: "[2001:db8::1]:http"}}},
-		// TODO(yuxuanli): zone support?
-	}
-
-	for _, v := range tests {
-		r, err := NewDNSResolverWithFreq(time.Millisecond * 5)
-		if err != nil {
-			t.Fatalf("%v\n", err)
-		}
-		w, err := r.Resolve(v.target)
-		if err != nil {
-			t.Fatalf("%v\n", err)
-		}
-		var updates []*Update
-		var wg sync.WaitGroup
-		wg.Add(1)
-		count := 0
-		go func() {
-			defer wg.Done()
-			for {
-				u, err := w.Next()
-				if err != nil {
-					return
-				}
-				updates = u
-				count++
-			}
-		}()
-		// Sleep for sometime to let watcher do more than one lookup
-		time.Sleep(time.Millisecond * 10)
-		w.Close()
-		wg.Wait()
-		if !reflect.DeepEqual(v.want, updates) {
-			t.Errorf("Resolve(%q) = %v, want %+v\n", v.target, updatesToSlice(updates), updatesToSlice(v.want))
-		}
-		if count != 1 {
-			t.Errorf("IPWatcher Next() should return only once, not %d times\n", count)
-		}
-	}
-}
@ -1,68 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package naming defines the naming API and related data structures for gRPC.
-//
-// This package is deprecated: please use package resolver instead.
-package naming
-
-// Operation defines the corresponding operations for a name resolution change.
-//
-// Deprecated: please use package resolver.
-type Operation uint8
-
-const (
-	// Add indicates a new address is added.
-	Add Operation = iota
-	// Delete indicates an existing address is deleted.
-	Delete
-)
-
-// Update defines a name resolution update. Notice that it is not valid having both
-// empty string Addr and nil Metadata in an Update.
-//
-// Deprecated: please use package resolver.
-type Update struct {
-	// Op indicates the operation of the update.
-	Op Operation
-	// Addr is the updated address. It is empty string if there is no address update.
-	Addr string
-	// Metadata is the updated metadata. It is nil if there is no metadata update.
-	// Metadata is not required for a custom naming implementation.
-	Metadata interface{}
-}
-
-// Resolver creates a Watcher for a target to track its resolution changes.
-//
-// Deprecated: please use package resolver.
-type Resolver interface {
-	// Resolve creates a Watcher for target.
-	Resolve(target string) (Watcher, error)
-}
-
-// Watcher watches for the updates on the specified target.
-//
-// Deprecated: please use package resolver.
-type Watcher interface {
-	// Next blocks until an update or error happens. It may return one or more
-	// updates. The first call should get the full set of the results. It should
-	// return an error if and only if Watcher cannot recover.
-	Next() ([]*Update, error)
-	// Close closes the Watcher.
-	Close()
-}
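Custom implementations of the deleted Resolver/Watcher pair map onto the resolver package's Builder/Resolver: instead of handing Update deltas back from Next, a resolver pushes complete states into the ClientConn it was built with. A minimal sketch under the v1.30-era resolver API (exampleScheme and exampleAddrs are hypothetical):

    type exampleBuilder struct{}

    func (exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
        // Report the full address set once; a real implementation would watch
        // for changes and call UpdateState again for each new set.
        cc.UpdateState(resolver.State{Addresses: exampleAddrs})
        return nopResolver{}, nil
    }

    func (exampleBuilder) Scheme() string { return exampleScheme }

    type nopResolver struct{}

    func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
    func (nopResolver) Close()                                {}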
@ -20,7 +20,6 @@ package grpc

 import (
 	"context"
-	"fmt"
 	"io"
 	"sync"

@ -32,68 +31,21 @@ import (
 	"google.golang.org/grpc/status"
 )

-// v2PickerWrapper wraps a balancer.Picker while providing the
-// balancer.V2Picker API. It requires a pickerWrapper to generate errors
-// including the latest connectionError. To be deleted when balancer.Picker is
-// updated to the balancer.V2Picker API.
-type v2PickerWrapper struct {
-	picker  balancer.Picker
-	connErr *connErr
-}
-
-func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
-	sc, done, err := v.picker.Pick(info.Ctx, info)
-	if err != nil {
-		if err == balancer.ErrTransientFailure {
-			return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
-		}
-		return balancer.PickResult{}, err
-	}
-	return balancer.PickResult{SubConn: sc, Done: done}, nil
-}
-
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
 // actions and unblock when there's a picker update.
 type pickerWrapper struct {
 	mu         sync.Mutex
 	done       bool
 	blockingCh chan struct{}
-	picker     balancer.V2Picker
-
-	// The latest connection error. TODO: remove when V1 picker is deprecated;
-	// balancer should be responsible for providing the error.
-	*connErr
-}
-
-type connErr struct {
-	mu  sync.Mutex
-	err error
-}
-
-func (c *connErr) updateConnectionError(err error) {
-	c.mu.Lock()
-	c.err = err
-	c.mu.Unlock()
-}
-
-func (c *connErr) connectionError() error {
-	c.mu.Lock()
-	err := c.err
-	c.mu.Unlock()
-	return err
+	picker     balancer.Picker
 }

 func newPickerWrapper() *pickerWrapper {
-	return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
+	return &pickerWrapper{blockingCh: make(chan struct{})}
 }

 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
-	pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
-}
-
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
-func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
 	pw.mu.Lock()
 	if pw.done {
 		pw.mu.Unlock()
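With the shim gone, the only picker shape left is the former V2Picker, now named balancer.Picker: one synchronous method, no context argument, the chosen SubConn and Done callback carried on PickResult. For reference, a sketch of the interface the wrapper now stores, as defined in the balancer package (comments abbreviated):

    type Picker interface {
        // Pick selects the SubConn for the RPC described by info, or returns
        // an error explaining why no connection can be used right now.
        Pick(info PickInfo) (PickResult, error)
    }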
@ -154,8 +106,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 			var errStr string
 			if lastPickErr != nil {
 				errStr = "latest balancer error: " + lastPickErr.Error()
-			} else if connectionErr := pw.connectionError(); connectionErr != nil {
-				errStr = "latest connection error: " + connectionErr.Error()
 			} else {
 				errStr = ctx.Err().Error()
 			}
@ -180,18 +130,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 			if err == balancer.ErrNoSubConnAvailable {
 				continue
 			}
-			if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
-				if !failfast {
-					lastPickErr = err
-					continue
-				}
-				return nil, nil, status.Error(codes.Unavailable, err.Error())
-			}
 			if _, ok := status.FromError(err); ok {
+				// Status error: end the RPC unconditionally with this status.
 				return nil, nil, err
 			}
-			// err is some other error.
-			return nil, nil, status.Error(codes.Unknown, err.Error())
+			// For all other errors, wait for ready RPCs should block and other
+			// RPCs should fail with unavailable.
+			if !failfast {
+				lastPickErr = err
+				continue
+			}
+			return nil, nil, status.Error(codes.Unavailable, err.Error())
 		}

 		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
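The rewritten loop gives picker authors a two-way contract: a gRPC status error terminates the RPC with exactly that status, while any other error is treated as transient. A minimal sketch of a picker using both paths (examplePicker and its fields are hypothetical):

    type examplePicker struct {
        sc       balancer.SubConn // the ready connection, if any
        dropCall bool             // hypothetical drop policy
    }

    func (p *examplePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
        if p.dropCall {
            // A status error ends the RPC unconditionally with this status.
            return balancer.PickResult{}, status.Error(codes.ResourceExhausted, "dropped by policy")
        }
        if p.sc == nil {
            // Any other error makes wait-for-ready RPCs block until the next
            // picker update and fail-fast RPCs fail with codes.Unavailable.
            return balancer.PickResult{}, errors.New("no ready SubConn")
        }
        return balancer.PickResult{SubConn: p.sc}, nil
    }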
@ -55,14 +55,14 @@ type testingPicker struct {
 	maxCalled int64
 }

-func (p *testingPicker) Pick(ctx context.Context, info balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (p *testingPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 	if atomic.AddInt64(&p.maxCalled, -1) < 0 {
-		return nil, nil, fmt.Errorf("pick called to many times (> goroutineCount)")
+		return balancer.PickResult{}, fmt.Errorf("pick called to many times (> goroutineCount)")
 	}
 	if p.err != nil {
-		return nil, nil, p.err
+		return balancer.PickResult{}, p.err
 	}
-	return p.sc, nil, nil
+	return balancer.PickResult{SubConn: p.sc}, nil
 }

 func (s) TestBlockingPickTimeout(t *testing.T) {
36
pickfirst.go
@ -20,13 +20,11 @@ package grpc

 import (
 	"errors"
+	"fmt"

 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-	"google.golang.org/grpc/status"
 )

 // PickFirstBalancerName is the name of the pick_first balancer.
@ -52,27 +50,13 @@ type pickfirstBalancer struct {
 	sc balancer.SubConn
 }

-var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
-
-func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		b.ResolverError(err)
-		return
-	}
-	b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
-}
-
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
-}
-
 func (b *pickfirstBalancer) ResolverError(err error) {
 	switch b.state {
 	case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
 		// Set a failing picker if we don't have a good picker.
 		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
-			Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
-		)
+			Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+		})
 	}
 	if grpclog.V(2) {
 		grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
@ -93,8 +77,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e
 		}
 		b.state = connectivity.TransientFailure
 		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
-			Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
-		)
+			Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+		})
 		return balancer.ErrBadResolverState
 	}
 	b.state = connectivity.Idle
@ -109,7 +93,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e

 func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
 	if grpclog.V(2) {
-		grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+		grpclog.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
 	}
 	if b.sc != sc {
 		if grpclog.V(2) {
@ -129,15 +113,9 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S
 	case connectivity.Connecting:
 		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
 	case connectivity.TransientFailure:
-		err := balancer.ErrTransientFailure
-		// TODO: this can be unconditional after the V1 API is removed, as
-		// SubConnState will always contain a connection error.
-		if s.ConnectionError != nil {
-			err = balancer.TransientFailureError(s.ConnectionError)
-		}
 		b.cc.UpdateState(balancer.State{
 			ConnectivityState: s.ConnectivityState,
-			Picker:            &picker{err: err},
+			Picker:            &picker{err: s.ConnectionError},
 		})
 	}
 }
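Because every TransientFailure SubConnState now carries its ConnectionError, pickfirst can forward the raw error, and it surfaces to callers through the pick loop shown earlier. A hedged sketch of what a fail-fast client observes (the method name is hypothetical; req and resp stand for generated message types):

    err := cc.Invoke(ctx, "/pkg.Service/Method", req, resp)
    if status.Code(err) == codes.Unavailable {
        // The status message includes the connection error forwarded by
        // pickfirst, e.g. the dial failure text.
    }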
@ -43,7 +43,7 @@ func (s) TestOneBackendPickfirst(t *testing.T) {
 	defer rcleanup()

 	numServers := 1
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))

@ -76,7 +76,7 @@ func (s) TestBackendsPickfirst(t *testing.T) {
 	defer rcleanup()

 	numServers := 2
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))

@ -109,7 +109,7 @@ func (s) TestNewAddressWhileBlockingPickfirst(t *testing.T) {
 	defer rcleanup()

 	numServers := 1
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))

@ -145,7 +145,7 @@ func (s) TestCloseWithPendingRPCPickfirst(t *testing.T) {
 	defer rcleanup()

 	numServers := 1
-	_, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	_, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))

@ -181,7 +181,7 @@ func (s) TestOneServerDownPickfirst(t *testing.T) {
 	defer rcleanup()

 	numServers := 2
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))

@ -222,7 +222,7 @@ func (s) TestAllServersDownPickfirst(t *testing.T) {
 	defer rcleanup()

 	numServers := 2
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))

@ -265,7 +265,7 @@ func (s) TestAddressesRemovedPickfirst(t *testing.T) {
 	defer rcleanup()

 	numServers := 3
-	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	servers, scleanup := startServers(t, numServers, math.MaxInt32)
 	defer scleanup()

 	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}))
@ -29,6 +29,7 @@ import (

 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/balancer/stub"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/resolver/manual"
 	"google.golang.org/grpc/serviceconfig"
@ -130,12 +131,12 @@ const happyBalancerName = "happy balancer"
 func init() {
 	// Register a balancer that never returns an error from
 	// UpdateClientConnState, and doesn't do anything else either.
-	fb := &funcBalancer{
-		updateClientConnState: func(s balancer.ClientConnState) error {
+	bf := stub.BalancerFuncs{
+		UpdateClientConnState: func(*stub.BalancerData, balancer.ClientConnState) error {
 			return nil
 		},
 	}
-	balancer.Register(&funcBalancerBuilder{name: happyBalancerName, instance: fb})
+	stub.Register(happyBalancerName, bf)
 }

 // TestResolverErrorPolling injects resolver errors and verifies ResolveNow is
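stub.Register wraps the supplied BalancerFuncs in a builder and registers it under the given name, so a test declares only the callbacks it cares about. A sketch of registering another throwaway balancer the same way (the name and behavior are hypothetical):

    func init() {
        stub.Register("always-rejects", stub.BalancerFuncs{
            UpdateClientConnState: func(*stub.BalancerData, balancer.ClientConnState) error {
                // Reject every update so the ClientConn keeps re-resolving.
                return balancer.ErrBadResolverState
            },
        })
    }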
@ -79,7 +79,7 @@ type ServiceConfig struct {
 	serviceconfig.Config

 	// LB is the load balancer the service providers recommends. The balancer
-	// specified via grpc.WithBalancer will override this. This is deprecated;
+	// specified via grpc.WithBalancerName will override this. This is deprecated;
 	// lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
 	// will be used.
 	LB *string
@ -20,6 +20,7 @@ package test

 import (
 	"context"
+	"errors"
 	"fmt"
 	"net"
 	"reflect"
@ -30,12 +31,14 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/attributes"
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/roundrobin"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/balancer/stub"
 	"google.golang.org/grpc/internal/balancerload"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/testutils"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/resolver"
@@ -69,37 +72,43 @@ func (*testBalancer) Name() string {
 	return testBalancerName
 }
 
-func (b *testBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+func (*testBalancer) ResolverError(err error) {
+	panic("not implemented")
+}
+
+func (b *testBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
 	// Only create a subconn at the first time.
-	if err == nil && b.sc == nil {
-		b.sc, err = b.cc.NewSubConn(addrs, b.newSubConnOptions)
+	if b.sc == nil {
+		var err error
+		b.sc, err = b.cc.NewSubConn(state.ResolverState.Addresses, b.newSubConnOptions)
 		if err != nil {
 			grpclog.Errorf("testBalancer: failed to NewSubConn: %v", err)
-			return
+			return nil
 		}
 		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &picker{sc: b.sc, bal: b}})
 		b.sc.Connect()
 	}
+	return nil
 }
 
-func (b *testBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("testBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+func (b *testBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
+	grpclog.Infof("testBalancer: UpdateSubConnState: %p, %v", sc, s)
 	if b.sc != sc {
 		grpclog.Infof("testBalancer: ignored state change because sc is not recognized")
 		return
 	}
-	if s == connectivity.Shutdown {
+	if s.ConnectivityState == connectivity.Shutdown {
 		b.sc = nil
 		return
 	}
 
-	switch s {
+	switch s.ConnectivityState {
 	case connectivity.Ready, connectivity.Idle:
-		b.cc.UpdateState(balancer.State{ConnectivityState: s, Picker: &picker{sc: sc, bal: b}})
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{sc: sc, bal: b}})
 	case connectivity.Connecting:
-		b.cc.UpdateState(balancer.State{ConnectivityState: s, Picker: &picker{err: balancer.ErrNoSubConnAvailable, bal: b}})
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable, bal: b}})
 	case connectivity.TransientFailure:
-		b.cc.UpdateState(balancer.State{ConnectivityState: s, Picker: &picker{err: balancer.ErrTransientFailure, bal: b}})
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrTransientFailure, bal: b}})
 	}
 }
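
This hunk is the template for every balancer migration in this commit: HandleResolvedAddrs splits into UpdateClientConnState (which now returns an error) plus a dedicated ResolverError hook, and HandleSubConnStateChange becomes UpdateSubConnState with the connectivity state wrapped in balancer.SubConnState. Below is a minimal self-contained sketch of the resulting method set, using only signatures visible in the diff; the errPicker type, v2Shape name, and package name are illustrative, not part of the commit.

```go
package sketch

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

// errPicker is an illustrative picker that fails every pick with a
// fixed error; it is not part of the commit.
type errPicker struct{ err error }

func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{}, p.err
}

// v2Shape carries only the method set the migrated tests implement.
type v2Shape struct {
	cc balancer.ClientConn
	sc balancer.SubConn
}

// ResolverError replaces the err argument HandleResolvedAddrs used to take.
func (b *v2Shape) ResolverError(err error) {
	b.cc.UpdateState(balancer.State{
		ConnectivityState: connectivity.TransientFailure,
		Picker:            &errPicker{err: err},
	})
}

// UpdateClientConnState replaces HandleResolvedAddrs and reports
// failures through its return value instead of bare returns.
func (b *v2Shape) UpdateClientConnState(s balancer.ClientConnState) error {
	if b.sc == nil && len(s.ResolverState.Addresses) > 0 {
		sc, err := b.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{})
		if err != nil {
			return err
		}
		b.sc = sc
		b.sc.Connect()
	}
	return nil
}

// UpdateSubConnState replaces HandleSubConnStateChange; the raw
// connectivity.State now arrives wrapped in balancer.SubConnState.
func (b *v2Shape) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
	if s.ConnectivityState == connectivity.Shutdown {
		b.sc = nil
	}
}

func (b *v2Shape) Close() {}
```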
@@ -285,6 +294,10 @@ func newTestBalancerKeepAddresses() *testBalancerKeepAddresses {
 	}
 }
 
+func (testBalancerKeepAddresses) ResolverError(err error) {
+	panic("not implemented")
+}
+
 func (b *testBalancerKeepAddresses) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
 	return b
 }
@@ -293,11 +306,12 @@ func (*testBalancerKeepAddresses) Name() string {
 	return testBalancerKeepAddressesName
 }
 
-func (b *testBalancerKeepAddresses) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	b.addrsChan <- addrs
+func (b *testBalancerKeepAddresses) UpdateClientConnState(state balancer.ClientConnState) error {
+	b.addrsChan <- state.ResolverState.Addresses
+	return nil
 }
 
-func (testBalancerKeepAddresses) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (testBalancerKeepAddresses) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
 	panic("not used")
 }
@@ -472,3 +486,217 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) {
 		t.Fatalf("received attributes %v in creds, want %v", gotAttr, wantAttr)
 	}
 }
+
+// TestServersSwap creates two servers and verifies the client switches between
+// them when the name resolver reports the first and then the second.
+func (s) TestServersSwap(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// Initialize servers
+	reg := func(username string) (addr string, cleanup func()) {
+		lis, err := net.Listen("tcp", "localhost:0")
+		if err != nil {
+			t.Fatalf("Error while listening. Err: %v", err)
+		}
+		s := grpc.NewServer()
+		ts := &funcServer{
+			unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+				return &testpb.SimpleResponse{Username: username}, nil
+			},
+		}
+		testpb.RegisterTestServiceServer(s, ts)
+		go s.Serve(lis)
+		return lis.Addr().String(), s.Stop
+	}
+	const one = "1"
+	addr1, cleanup := reg(one)
+	defer cleanup()
+	const two = "2"
+	addr2, cleanup := reg(two)
+	defer cleanup()
+
+	// Initialize client
+	r, cleanup := manual.GenerateAndRegisterManualResolver()
+	defer cleanup()
+	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: addr1}}})
+	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure())
+	if err != nil {
+		t.Fatalf("Error creating client: %v", err)
+	}
+	defer cc.Close()
+	client := testpb.NewTestServiceClient(cc)
+
+	// Confirm we are connected to the first server
+	if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one {
+		t.Fatalf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one)
+	}
+
+	// Update resolver to report only the second server
+	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr2}}})
+
+	// Loop until new RPCs talk to server two.
+	for i := 0; i < 2000; i++ {
+		if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
+			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
+		} else if res.Username == two {
+			break // pass
+		}
+		time.Sleep(5 * time.Millisecond)
+	}
+}
+
+// TestEmptyAddrs verifies client behavior when a working connection is
+// removed. In pick first and round-robin, both will continue using the old
+// connections.
+func (s) TestEmptyAddrs(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// Initialize server
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("Error while listening. Err: %v", err)
+	}
+	s := grpc.NewServer()
+	defer s.Stop()
+	const one = "1"
+	ts := &funcServer{
+		unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+			return &testpb.SimpleResponse{Username: one}, nil
+		},
+	}
+	testpb.RegisterTestServiceServer(s, ts)
+	go s.Serve(lis)
+
+	// Initialize pickfirst client
+	pfr, cleanup := manual.GenerateAndRegisterManualResolver()
+	defer cleanup()
+	pfrnCalled := grpcsync.NewEvent()
+	pfr.ResolveNowCallback = func(resolver.ResolveNowOptions) {
+		pfrnCalled.Fire()
+	}
+	pfr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}})
+
+	pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithInsecure())
+	if err != nil {
+		t.Fatalf("Error creating client: %v", err)
+	}
+	defer pfcc.Close()
+	pfclient := testpb.NewTestServiceClient(pfcc)
+
+	// Confirm we are connected to the server
+	if res, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one {
+		t.Fatalf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one)
+	}
+
+	// Remove all addresses.
+	pfr.UpdateState(resolver.State{})
+	// Wait for a ResolveNow call on the pick first client's resolver.
+	<-pfrnCalled.Done()
+
+	// Initialize roundrobin client
+	rrr, cleanup := manual.GenerateAndRegisterManualResolver()
+	defer cleanup()
+	rrrnCalled := grpcsync.NewEvent()
+	rrr.ResolveNowCallback = func(resolver.ResolveNowOptions) {
+		rrrnCalled.Fire()
+	}
+	rrr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}})
+
+	rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithInsecure(),
+		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name)))
+	if err != nil {
+		t.Fatalf("Error creating client: %v", err)
+	}
+	defer rrcc.Close()
+	rrclient := testpb.NewTestServiceClient(rrcc)
+
+	// Confirm we are connected to the server
+	if res, err := rrclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one {
+		t.Fatalf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one)
+	}
+
+	// Remove all addresses.
+	rrr.UpdateState(resolver.State{})
+	// Wait for a ResolveNow call on the round robin client's resolver.
+	<-rrrnCalled.Done()
+
+	// Confirm several new RPCs succeed on pick first.
+	for i := 0; i < 10; i++ {
+		if _, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
+			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
+		}
+		time.Sleep(5 * time.Millisecond)
+	}
+
+	// Confirm several new RPCs succeed on round robin.
+	for i := 0; i < 10; i++ {
+		if _, err := rrclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
+			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
+		}
+		time.Sleep(5 * time.Millisecond)
+	}
+}
+
+func (s) TestWaitForReady(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// Initialize server
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("Error while listening. Err: %v", err)
+	}
+	s := grpc.NewServer()
+	defer s.Stop()
+	const one = "1"
+	ts := &funcServer{
+		unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+			return &testpb.SimpleResponse{Username: one}, nil
+		},
+	}
+	testpb.RegisterTestServiceServer(s, ts)
+	go s.Serve(lis)
+
+	// Initialize client
+	r, cleanup := manual.GenerateAndRegisterManualResolver()
+	defer cleanup()
+
+	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure())
+	if err != nil {
+		t.Fatalf("Error creating client: %v", err)
+	}
+	defer cc.Close()
+	client := testpb.NewTestServiceClient(cc)
+
+	// Report an error so non-WFR RPCs will give up early.
+	r.CC.ReportError(errors.New("fake resolver error"))
+
+	// Ensure the client is not connected to anything and fails non-WFR RPCs.
+	if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable {
+		t.Fatalf("UnaryCall(_) = %v, %v; want _, Code()=%v", res, err, codes.Unavailable)
+	}
+
+	errChan := make(chan error, 1)
+	go func() {
+		if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.WaitForReady(true)); err != nil || res.Username != one {
+			errChan <- fmt.Errorf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one)
+		}
+		close(errChan)
+	}()
+
+	select {
+	case err := <-errChan:
+		t.Errorf("unexpected receive from errChan before addresses provided")
+		t.Fatal(err.Error())
+	case <-time.After(5 * time.Millisecond):
+	}
+
+	// Resolve the server. The WFR RPC should unblock and use it.
+	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}})
+
+	if err := <-errChan; err != nil {
+		t.Fatal(err.Error())
+	}
+}
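
All three new tests drive the client through a manual resolver rather than DNS, seeding it with an initial address set and pushing replacement sets (or an empty set) mid-test. A sketch distilled from that pattern; dialWithManualResolver and backendAddr are illustrative names, not helpers from the commit.

```go
package sketch

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

// dialWithManualResolver registers a throwaway resolver scheme, seeds
// it with a single backend, and returns the resolver so the caller can
// later call r.UpdateState exactly as the tests above do.
func dialWithManualResolver(ctx context.Context, backendAddr string) (*grpc.ClientConn, *manual.Resolver, func(), error) {
	r, cleanup := manual.GenerateAndRegisterManualResolver()
	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backendAddr}}})

	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure())
	if err != nil {
		cleanup()
		return nil, nil, nil, err
	}
	return cc, r, cleanup, nil
}
```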
@@ -1880,7 +1880,8 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) {
 	czCleanup := channelz.NewChannelzStorage()
 	defer czCleanupWrapper(czCleanup, t)
 	e := tcpClearRREnv
-	// avoid newTest using WithBalancer, which would override service config's change of balancer below.
+	// avoid newTest using WithBalancerName, which would override service
+	// config's change of balancer below.
 	e.balancer = ""
 	te := newTest(t, e)
 	channelz.SetMaxTraceEntry(1)
@@ -75,7 +75,7 @@ func (c *testCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) {
 }
 
 func (s) TestCredsBundleBoth(t *testing.T) {
-	te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"})
+	te := newTest(t, env{name: "creds-bundle", network: "tcp", security: "empty"})
 	te.tapHandle = authHandle
 	te.customDialOptions = []grpc.DialOption{
 		grpc.WithCredentialsBundle(&testCredsBundle{t: t}),
@@ -98,7 +98,7 @@ func (s) TestCredsBundleBoth(t *testing.T) {
 }
 
 func (s) TestCredsBundleTransportCredentials(t *testing.T) {
-	te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"})
+	te := newTest(t, env{name: "creds-bundle", network: "tcp", security: "empty"})
 	te.customDialOptions = []grpc.DialOption{
 		grpc.WithCredentialsBundle(&testCredsBundle{t: t, mode: bundleTLSOnly}),
 	}
@@ -120,7 +120,7 @@ func (s) TestCredsBundleTransportCredentials(t *testing.T) {
 }
 
 func (s) TestCredsBundlePerRPCCredentials(t *testing.T) {
-	te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"})
+	te := newTest(t, env{name: "creds-bundle", network: "tcp", security: "empty"})
 	te.tapHandle = authHandle
 	te.customDialOptions = []grpc.DialOption{
 		grpc.WithCredentialsBundle(&testCredsBundle{t: t, mode: bundlePerRPCOnly}),
@@ -157,7 +157,7 @@ func (c *clientTimeoutCreds) Clone() credentials.TransportCredentials {
 }
 
 func (s) TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) {
-	te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "empty", balancer: "v1"})
+	te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "empty"})
 	te.userAgent = testAppUA
 	te.startServer(&testServer{security: te.e.security})
 	defer te.tearDown()
@@ -181,7 +181,7 @@ func (m *methodTestCreds) RequireTransportSecurity() bool { return false }
 
 func (s) TestGRPCMethodAccessibleToCredsViaContextRequestInfo(t *testing.T) {
 	const wantMethod = "/grpc.testing.TestService/EmptyCall"
-	te := newTest(t, env{name: "context-request-info", network: "tcp", balancer: "v1"})
+	te := newTest(t, env{name: "context-request-info", network: "tcp"})
 	te.userAgent = testAppUA
 	te.startServer(&testServer{security: te.e.security})
 	defer te.tearDown()
@@ -47,7 +47,6 @@ import (
 	"golang.org/x/net/http2/hpack"
 	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/balancer/roundrobin"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
@@ -397,7 +396,7 @@ type env struct {
 	network      string // The type of network such as tcp, unix, etc.
 	security     string // The security protocol such as TLS, SSH, etc.
 	httpHandler  bool   // whether to use the http.Handler ServerTransport; requires TLS
-	balancer     string // One of "round_robin", "pick_first", "v1", or "".
+	balancer     string // One of "round_robin", "pick_first", or "".
 	customDialer func(string, string, time.Duration) (net.Conn, error)
 }
@@ -416,8 +415,8 @@ func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) {
 }
 
 var (
-	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp", balancer: "v1"}
-	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls", balancer: "v1"}
+	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp"}
+	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls"}
 	tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"}
 	tcpTLSRREnv   = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"}
 	handlerEnv    = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"}
@@ -568,6 +567,7 @@ func newTest(t *testing.T, e env) *test {
 }
 
 func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener {
+	te.t.Helper()
 	te.t.Logf("Running test in %s environment...", te.e.name)
 	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)}
 	if te.maxServerMsgSize != nil {
@@ -699,6 +699,7 @@ func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper {
 // startServer starts a gRPC server exposing the provided TestService
 // implementation. Callers should defer a call to te.tearDown to clean up
 func (te *test) startServer(ts testpb.TestServiceServer) {
+	te.t.Helper()
 	te.listenAndServe(ts, net.Listen)
 }
@@ -809,11 +810,8 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) {
 	} else {
 		scheme = te.resolverScheme + ":///"
 	}
-	switch te.e.balancer {
-	case "v1":
-		opts = append(opts, grpc.WithBalancer(grpc.RoundRobin(nil)))
-	case "round_robin":
-		opts = append(opts, grpc.WithBalancerName(roundrobin.Name))
+	if te.e.balancer != "" {
+		opts = append(opts, grpc.WithBalancerName(te.e.balancer))
 	}
 	if te.clientInitialWindowSize > 0 {
 		opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize))
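
With the v1 case gone, the env's balancer field is passed straight to grpc.WithBalancerName. The two selection mechanisms that remain in this diff, sketched side by side; the function names are illustrative, while roundrobin.Name is the real "round_robin" constant.

```go
package sketch

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin"
)

// dialByBalancerName mirrors what configDial now does for any
// non-empty env.balancer value.
func dialByBalancerName(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target, grpc.WithInsecure(),
		grpc.WithBalancerName(roundrobin.Name))
}

// dialByServiceConfig is the route the new TestEmptyAddrs round-robin
// client takes: select the policy through a default service config.
func dialByServiceConfig(target string) (*grpc.ClientConn, error) {
	sc := fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name)
	return grpc.Dial(target, grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(sc))
}
```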
@@ -4712,6 +4710,48 @@ func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) {
 	}
 }
 
+type clientFailCreds struct{}
+
+func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return rawConn, nil, nil
+}
+func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return nil, nil, fmt.Errorf("client handshake fails with fatal error")
+}
+func (c *clientFailCreds) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{}
+}
+func (c *clientFailCreds) Clone() credentials.TransportCredentials {
+	return c
+}
+func (c *clientFailCreds) OverrideServerName(s string) error {
+	return nil
+}
+
+// This test makes sure that failfast RPCs fail if client handshake fails with
+// fatal errors.
+func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("Failed to listen: %v", err)
+	}
+	defer lis.Close()
+
+	cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{}))
+	if err != nil {
+		t.Fatalf("grpc.Dial(_) = %v", err)
+	}
+	defer cc.Close()
+
+	tc := testpb.NewTestServiceClient(cc)
+	// This unary call should fail, but not timeout.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
+		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
+	}
+}
+
 func (s) TestFlowControlLogicalRace(t *testing.T) {
 	// Test for a regression of https://github.com/grpc/grpc-go/issues/632,
 	// and other flow control bugs.
11 vet.sh
@@ -125,14 +125,12 @@ staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
 not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
 # Only ignore the following deprecated types/fields/functions.
 not grep -Fv '.CredsBundle
-.HandleResolvedAddrs
-.HandleSubConnStateChange
 .HeaderMap
+.Metadata is deprecated: use Attributes
 .NewAddress
 .NewServiceConfig
-.Metadata is deprecated: use Attributes
 .Type is deprecated: use Attributes
-.UpdateBalancerState
+balancer.ErrTransientFailure
 balancer.Picker
 grpc.CallCustomCodec
 grpc.Code
@@ -144,9 +142,7 @@ grpc.NewGZIPCompressor
 grpc.NewGZIPDecompressor
 grpc.RPCCompressor
 grpc.RPCDecompressor
-grpc.RoundRobin
 grpc.ServiceConfig
-grpc.WithBalancer
 grpc.WithBalancerName
 grpc.WithCompressor
 grpc.WithDecompressor
@@ -156,9 +152,6 @@ grpc.WithServiceConfig
 grpc.WithTimeout
 http.CloseNotifier
 info.SecurityVersion
-naming.Resolver
-naming.Update
-naming.Watcher
 resolver.Backend
 resolver.GRPCLB' "${SC_OUT}"
@@ -68,9 +68,6 @@ type subBalancerWithConfig struct {
 	balancer balancer.Balancer
 }
 
-func (sbc *subBalancerWithConfig) UpdateBalancerState(state connectivity.State, picker balancer.Picker) {
-}
-
 // UpdateState overrides balancer.ClientConn, to keep state and picker.
 func (sbc *subBalancerWithConfig) UpdateState(state balancer.State) {
 	sbc.mu.Lock()
@@ -97,11 +94,7 @@ func (sbc *subBalancerWithConfig) startBalancer() {
 	b := sbc.builder.Build(sbc, balancer.BuildOptions{})
 	sbc.group.logger.Infof("Created child policy %p of type %v", b, sbc.builder.Name())
 	sbc.balancer = b
-	if ub, ok := b.(balancer.V2Balancer); ok {
-		ub.UpdateClientConnState(sbc.ccState)
-		return
-	}
-	b.HandleResolvedAddrs(sbc.ccState.ResolverState.Addresses, nil)
+	b.UpdateClientConnState(sbc.ccState)
 }
 
 func (sbc *subBalancerWithConfig) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
@@ -113,11 +106,7 @@ func (sbc *subBalancerWithConfig) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
 		// happen.
 		return
 	}
-	if ub, ok := b.(balancer.V2Balancer); ok {
-		ub.UpdateSubConnState(sc, state)
-		return
-	}
-	b.HandleSubConnStateChange(sc, state.ConnectivityState)
+	b.UpdateSubConnState(sc, state)
 }
 
 func (sbc *subBalancerWithConfig) updateClientConnState(s balancer.ClientConnState) error {
@@ -134,11 +123,7 @@ func (sbc *subBalancerWithConfig) updateClientConnState(s balancer.ClientConnState) error {
 		// it's the lower priority, but it can still get address updates.
 		return nil
 	}
-	if ub, ok := b.(balancer.V2Balancer); ok {
-		return ub.UpdateClientConnState(s)
-	}
-	b.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
-	return nil
+	return b.UpdateClientConnState(s)
 }
 
 func (sbc *subBalancerWithConfig) stopBalancer() {
@@ -148,7 +133,7 @@ func (sbc *subBalancerWithConfig) stopBalancer() {
 
 type pickerState struct {
 	weight uint32
-	picker balancer.V2Picker
+	picker balancer.Picker
 	state  connectivity.State
 }
@@ -583,7 +568,7 @@ func buildPickerAndState(m map[internal.LocalityID]*pickerState) balancer.State {
 		aggregatedState = connectivity.TransientFailure
 	}
 	if aggregatedState == connectivity.TransientFailure {
-		return balancer.State{ConnectivityState: aggregatedState, Picker: base.NewErrPickerV2(balancer.ErrTransientFailure)}
+		return balancer.State{ConnectivityState: aggregatedState, Picker: base.NewErrPicker(balancer.ErrTransientFailure)}
 	}
 	return balancer.State{ConnectivityState: aggregatedState, Picker: newPickerGroup(readyPickerWithWeights)}
 }
@@ -620,7 +605,7 @@ func (pg *pickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 	if pg.length <= 0 {
 		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 	}
-	p := pg.w.Next().(balancer.V2Picker)
+	p := pg.w.Next().(balancer.Picker)
 	return p.Pick(info)
 }
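
After the rename there is a single picker contract: one synchronous Pick(balancer.PickInfo) call returning a balancer.PickResult or an error such as ErrNoSubConnAvailable. An illustrative round-robin picker against that contract (not code from the commit):

```go
package sketch

import (
	"sync"

	"google.golang.org/grpc/balancer"
)

// rrPicker cycles over a fixed set of ready SubConns.
type rrPicker struct {
	mu   sync.Mutex
	subs []balancer.SubConn
	next int
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.subs) == 0 {
		// Same sentinel pickerGroup returns when it holds no pickers.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	sc := p.subs[p.next]
	p.next = (p.next + 1) % len(p.subs)
	return balancer.PickResult{SubConn: sc}, nil
}
```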
@@ -630,13 +615,13 @@ const (
 )
 
 type loadReportPicker struct {
-	p balancer.V2Picker
+	p balancer.Picker
 
 	id        internal.LocalityID
 	loadStore lrs.Store
 }
 
-func newLoadReportPicker(p balancer.V2Picker, id internal.LocalityID, loadStore lrs.Store) *loadReportPicker {
+func newLoadReportPicker(p balancer.Picker, id internal.LocalityID, loadStore lrs.Store) *loadReportPicker {
 	return &loadReportPicker{
 		p:  p,
 		id: id,
@@ -49,7 +49,7 @@ func init() {
 	DefaultSubBalancerCloseTimeout = time.Millisecond
 }
 
-func subConnFromPicker(p balancer.V2Picker) func() balancer.SubConn {
+func subConnFromPicker(p balancer.Picker) func() balancer.SubConn {
 	return func() balancer.SubConn {
 		scst, _ := p.Pick(balancer.PickInfo{})
 		return scst.SubConn
@@ -25,7 +25,6 @@ import (
 
 	"google.golang.org/grpc/attributes"
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/internal/buffer"
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/resolver"
@@ -46,7 +45,7 @@ var (
 
 	// newEDSBalancer is a helper function to build a new edsBalancer and will be
 	// overridden in unittests.
-	newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.V2Balancer, error) {
+	newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) {
 		builder := balancer.Get(edsName)
 		if builder == nil {
 			return nil, fmt.Errorf("xds: no balancer builder with name %v", edsName)
@@ -54,7 +53,7 @@
 		// We directly pass the parent clientConn to the
 		// underlying edsBalancer because the cdsBalancer does
 		// not deal with subConns.
-		return builder.Build(cc, opts).(balancer.V2Balancer), nil
+		return builder.Build(cc, opts), nil
 	}
 )
 
@@ -151,7 +150,7 @@ type cdsBalancer struct {
 	updateCh       *buffer.Unbounded
 	client         xdsClientInterface
 	cancelWatch    func()
-	edsLB          balancer.V2Balancer
+	edsLB          balancer.Balancer
 	clusterToWatch string
 
 	logger *grpclog.PrefixLogger
@@ -354,11 +353,3 @@ func (b *cdsBalancer) isClosed() bool {
 	b.mu.Unlock()
 	return closed
 }
-
-func (b *cdsBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
-	b.logger.Errorf("UpdateSubConnState should be called instead of HandleSubConnStateChange")
-}
-
-func (b *cdsBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	b.logger.Errorf("UpdateClientConnState should be called instead of HandleResolvedAddrs")
-}
@@ -219,11 +219,11 @@ func edsCCS(service string, enableLRS bool, xdsClient interface{}) balancer.ClientConnState {
 func setup() (*cdsBalancer, *testEDSBalancer, func()) {
 	builder := cdsBB{}
 	tcc := &testClientConn{}
-	cdsB := builder.Build(tcc, balancer.BuildOptions{}).(balancer.V2Balancer)
+	cdsB := builder.Build(tcc, balancer.BuildOptions{})
 
 	edsB := newTestEDSBalancer()
 	oldEDSBalancerBuilder := newEDSBalancer
-	newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.V2Balancer, error) {
+	newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) {
 		return edsB, nil
 	}
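
setup works only because newEDSBalancer is a package-level variable, so the test can splice in a fake and restore the original afterwards. The seam in isolation, with illustrative names standing in for newEDSBalancer and the fake balancer:

```go
package sketch

// eds is an illustrative stand-in for the balancer type the real
// newEDSBalancer constructs.
type eds interface{ close() }

type realEDS struct{}

func (realEDS) close() {}

type fakeEDS struct{}

func (fakeEDS) close() {}

// newEDS is the seam: production code reads this variable, tests
// reassign it, mirroring newEDSBalancer in the hunk above.
var newEDS = func() (eds, error) { return realEDS{}, nil }

// overrideEDSForTest swaps the constructor and hands back a restore
// function, the moral equivalent of saving oldEDSBalancerBuilder and
// reassigning it when the test finishes.
func overrideEDSForTest() (restore func()) {
	old := newEDS
	newEDS = func() (eds, error) { return fakeEDS{}, nil }
	return func() { newEDS = old }
}
```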
@@ -31,7 +31,6 @@ import (
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/internal/buffer"
 	"google.golang.org/grpc/internal/grpclog"
-	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
 	"google.golang.org/grpc/xds/internal/balancer/lrs"
 	xdsclient "google.golang.org/grpc/xds/internal/client"
@@ -104,8 +103,6 @@ type edsBalancerImplInterface interface {
 	close()
 }
 
-var _ balancer.V2Balancer = (*edsBalancer)(nil) // Assert that we implement V2Balancer
-
 // edsBalancer manages xdsClient and the actual EDS balancer implementation that
 // does load balancing.
 //
@@ -210,14 +207,6 @@ type subConnStateUpdate struct {
 	state balancer.SubConnState
 }
 
-func (x *edsBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
-	x.logger.Errorf("UpdateSubConnState should be called instead of HandleSubConnStateChange")
-}
-
-func (x *edsBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	x.logger.Errorf("UpdateClientConnState should be called instead of HandleResolvedAddrs")
-}
-
 func (x *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
 	update := &subConnStateUpdate{
 		sc: sc,
@@ -396,10 +396,6 @@ type edsBalancerWrapperCC struct {
 func (ebwcc *edsBalancerWrapperCC) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
 	return ebwcc.parent.newSubConn(ebwcc.priority, addrs, opts)
 }
-
-func (ebwcc *edsBalancerWrapperCC) UpdateBalancerState(state connectivity.State, picker balancer.Picker) {
-}
-
 func (ebwcc *edsBalancerWrapperCC) UpdateState(state balancer.State) {
 	ebwcc.parent.enqueueChildBalancerStateUpdate(ebwcc.priority, state)
 }
@@ -426,11 +422,11 @@ func (edsImpl *edsBalancerImpl) close() {
 
 type dropPicker struct {
 	drops     []*dropper
-	p         balancer.V2Picker
+	p         balancer.Picker
 	loadStore lrs.Store
 }
 
-func newDropPicker(p balancer.V2Picker, drops []*dropper, loadStore lrs.Store) *dropPicker {
+func newDropPicker(p balancer.Picker, drops []*dropper, loadStore lrs.Store) *dropPicker {
 	return &dropPicker{
 		drops: drops,
 		p:     p,
@@ -46,7 +46,7 @@ func (edsImpl *edsBalancerImpl) handlePriorityChange() {
 	// Everything was removed by EDS.
 	if !edsImpl.priorityLowest.isSet() {
 		edsImpl.priorityInUse = newPriorityTypeUnset()
-		edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPickerV2(balancer.ErrTransientFailure)})
+		edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(balancer.ErrTransientFailure)})
 		return
 	}
 
@@ -73,7 +73,7 @@ func (edsImpl *edsBalancerImpl) handlePriorityChange() {
 		// We don't have an old state to send to parent, but we also don't
 		// want parent to keep using picker from old_priorityInUse. Send an
 		// update to trigger block picks until a new picker is ready.
-		edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)})
+		edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)})
 	}
 	return
 }
@@ -27,7 +27,6 @@ import (
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/roundrobin"
 	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/xds/internal"
 	"google.golang.org/grpc/xds/internal/balancer/balancergroup"
 	xdsclient "google.golang.org/grpc/xds/internal/client"
@@ -504,16 +503,21 @@ type testInlineUpdateBalancer struct {
 	cc balancer.ClientConn
 }
 
-func (tb *testInlineUpdateBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
+func (tb *testInlineUpdateBalancer) ResolverError(error) {
+	panic("not implemented")
+}
+
+func (tb *testInlineUpdateBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
 }
 
 var errTestInlineStateUpdate = fmt.Errorf("don't like addresses, empty or not")
 
-func (tb *testInlineUpdateBalancer) HandleResolvedAddrs(a []resolver.Address, err error) {
+func (tb *testInlineUpdateBalancer) UpdateClientConnState(balancer.ClientConnState) error {
 	tb.cc.UpdateState(balancer.State{
 		ConnectivityState: connectivity.Ready,
 		Picker:            &testutils.TestConstPicker{Err: errTestInlineStateUpdate},
 	})
+	return nil
 }
 
 func (*testInlineUpdateBalancer) Close() {
@@ -55,7 +55,7 @@ func init() {
 	}
 }
 
-func subConnFromPicker(p balancer.V2Picker) func() balancer.SubConn {
+func subConnFromPicker(p balancer.Picker) func() balancer.SubConn {
 	return func() balancer.SubConn {
 		scst, _ := p.Pick(balancer.PickInfo{})
 		return scst.SubConn
@@ -288,11 +288,15 @@ type fakeBalancer struct {
 	cc balancer.ClientConn
 }
 
-func (b *fakeBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+func (b *fakeBalancer) ResolverError(error) {
 	panic("implement me")
 }
 
-func (b *fakeBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
+func (b *fakeBalancer) UpdateClientConnState(balancer.ClientConnState) error {
+	panic("implement me")
+}
+
+func (b *fakeBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
 	panic("implement me")
 }
@@ -70,7 +70,7 @@ type TestClientConn struct {
 	NewSubConnCh    chan balancer.SubConn // the last 10 subconn created.
 	RemoveSubConnCh chan balancer.SubConn // the last 10 subconn removed.
 
-	NewPickerCh chan balancer.V2Picker  // the last picker updated.
+	NewPickerCh chan balancer.Picker    // the last picker updated.
 	NewStateCh  chan connectivity.State // the last state.
 
 	subConnIdx int
@@ -85,7 +85,7 @@ func NewTestClientConn(t *testing.T) *TestClientConn {
 		NewSubConnCh:    make(chan balancer.SubConn, 10),
 		RemoveSubConnCh: make(chan balancer.SubConn, 10),
 
-		NewPickerCh: make(chan balancer.V2Picker, 1),
+		NewPickerCh: make(chan balancer.Picker, 1),
 		NewStateCh:  make(chan connectivity.State, 1),
 	}
 }
@@ -285,15 +285,20 @@ type testConstBalancer struct {
 	cc balancer.ClientConn
 }
 
-func (tb *testConstBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
+func (tb *testConstBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
 	tb.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &TestConstPicker{Err: ErrTestConstPicker}})
 }
 
-func (tb *testConstBalancer) HandleResolvedAddrs(a []resolver.Address, err error) {
-	if len(a) == 0 {
-		return
-	}
-	tb.cc.NewSubConn(a, balancer.NewSubConnOptions{})
+func (tb *testConstBalancer) ResolverError(error) {
+	panic("not implemented")
+}
+
+func (tb *testConstBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+	if len(s.ResolverState.Addresses) == 0 {
+		return nil
+	}
+	tb.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{})
+	return nil
 }
 
 func (*testConstBalancer) Close() {