Mirror of https://github.com/docker/compose.git

Merge pull request #8 from rumpl/feat-client-connection

Configure client connection backoff

Commit 2a99ecdeec
@@ -36,6 +36,7 @@ import (
 	v1 "github.com/docker/api/backend/v1"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
 )

 // NewContext is a context that is canceled when a signal is
@@ -53,7 +54,20 @@ func NewContext() (context.Context, func()) {

 // New returns a GRPC client
 func New(address string, timeout time.Duration) (*Client, error) {
-	conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(timeout))
+	backoffConfig := backoff.DefaultConfig
+	backoffConfig.MaxDelay = 3 * time.Second
+	backoffConfig.BaseDelay = 10 * time.Millisecond
+	connParams := grpc.ConnectParams{
+		Backoff: backoffConfig,
+	}
+	opts := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithConnectParams(connParams),
+		grpc.WithBlock(),
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+	conn, err := grpc.DialContext(ctx, address, opts...)
 	if err != nil {
 		return nil, err
 	}
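The hunk above swaps the deprecated grpc.WithTimeout option for a context deadline and an explicit reconnection backoff. Below is a self-contained sketch of the same dial pattern; the address and timeout are illustrative values, not taken from this repository.

```go
// Minimal sketch of the dial pattern introduced in this commit.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func dial(address string, timeout time.Duration) (*grpc.ClientConn, error) {
	// Start from the library defaults and tighten the retry window: retry
	// quickly (10ms base delay) and never wait more than 3s between attempts.
	backoffConfig := backoff.DefaultConfig
	backoffConfig.BaseDelay = 10 * time.Millisecond
	backoffConfig.MaxDelay = 3 * time.Second

	opts := []grpc.DialOption{
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoffConfig}),
		grpc.WithBlock(), // block until connected or the context expires
	}

	// The overall deadline now comes from the context passed to DialContext
	// instead of the deprecated grpc.WithTimeout option.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return grpc.DialContext(ctx, address, opts...)
}

func main() {
	conn, err := dial("127.0.0.1:7654", 2*time.Second)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	log.Println("connected:", conn.Target())
}
```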
@@ -71,7 +71,7 @@ var exampleCommand = cli.Command{
 // factor out this into a context store package
 func current(ctx context.Context) context.Context {
 	// test backend address
-	return context.WithValue(ctx, backendAddressKey{}, "127.0.0.1:7654")
+	return context.WithValue(ctx, backendAddressKey{}, "/tmp/backend.sock")
 }

 func connect(ctx context.Context) (*client.Client, error) {
@@ -79,7 +79,7 @@ func connect(ctx context.Context) (*client.Client, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "no backend address")
 	}
-	c, err := client.New(address, 500*time.Millisecond)
+	c, err := client.New("unix://"+address, 500*time.Millisecond)
 	if err != nil {
 		if err != context.DeadlineExceeded {
 			return nil, errors.Wrap(err, "connect to backend")
@@ -91,7 +91,8 @@ func connect(ctx context.Context) (*client.Client, error) {
 		if err := cmd.Start(); err != nil {
 			return nil, errors.Wrap(err, "start backend")
 		}
-		return client.New(address, 2*time.Second)
+		cl, e := client.New("unix://"+address, 10*time.Second)
+		return cl, e
 	}
 	return c, nil
 }
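The connect helper tries a fast dial, starts the backend process if the deadline expires, then retries with a longer timeout, now over a unix socket. A sketch of that shape using plain grpc-go follows; the socket path, backend binary name, and dial helper are assumptions rather than the repository's client API, and a custom dialer is used since older grpc-go versions do not understand unix:// targets.

```go
// Sketch of the connect-then-spawn-then-retry pattern used by the CLI.
package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"os/exec"
	"time"

	"google.golang.org/grpc"
)

// dialUnix connects to a gRPC server on a unix socket via a custom dialer.
func dialUnix(socket string, timeout time.Duration) (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return grpc.DialContext(ctx, socket,
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", addr)
		}),
	)
}

func connect(socket, backendBinary string) (*grpc.ClientConn, error) {
	// Fast path: the backend is already running.
	if conn, err := dialUnix(socket, 500*time.Millisecond); err == nil {
		return conn, nil
	} else if err != context.DeadlineExceeded {
		return nil, fmt.Errorf("connect to backend: %w", err)
	}
	// Slow path: start the backend, then retry with a generous deadline.
	if err := exec.Command(backendBinary, "--address", socket).Start(); err != nil {
		return nil, fmt.Errorf("start backend: %w", err)
	}
	return dialUnix(socket, 10*time.Second)
}

func main() {
	conn, err := connect("/tmp/backend.sock", "backend-example")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```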
@@ -74,9 +74,9 @@ func main() {
 	s := server.New()

 	// listen on a socket to accept connects
-	l, err := net.Listen("tcp", clix.GlobalString("address"))
+	l, err := net.Listen("unix", clix.GlobalString("address"))
 	if err != nil {
-		return errors.Wrap(err, "listen tcp")
+		return errors.Wrap(err, "listen unix socket")
 	}
 	defer l.Close()
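On the server side, the backend now listens on a unix socket instead of TCP. A minimal sketch of serving gRPC on a unix socket is shown below; the socket path is illustrative and service registration is omitted.

```go
// Minimal sketch: a gRPC server listening on a unix socket.
package main

import (
	"log"
	"net"
	"os"

	"google.golang.org/grpc"
)

func main() {
	const socket = "/tmp/backend.sock"
	// Remove a stale socket file from a previous run; unlike a TCP port,
	// a leftover unix socket makes Listen fail with "address already in use".
	_ = os.Remove(socket)

	l, err := net.Listen("unix", socket)
	if err != nil {
		log.Fatalf("listen unix socket: %v", err)
	}
	defer l.Close()

	s := grpc.NewServer()
	// Register the backend service here (omitted in this sketch).
	if err := s.Serve(l); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```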
@@ -5,7 +5,7 @@ github.com/pkg/errors 7f95ac13edff643b8ce5398b6ccab125f8a20c1a
 github.com/sirupsen/logrus 67a7fdcf741f4d5cee82cb9800994ccfd4393ad0
 golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb
 golang.org/x/sys cf1e2d57716972ea102acf35e82471062c8906a1
-google.golang.org/grpc 39e8a7b072a67ca2a75f57fa2e0d50995f5b22f6
+google.golang.org/grpc v1.28.1
 github.com/golang/protobuf ed6926b37a637426117ccab59282c3839528a700
 google.golang.org/genproto 83cc0476cb11ea0da33dacd4c6354ab192de6fe6
 golang.org/x/net d06c31c94caefa2de32f9a8bcc857498fd9c1232
@@ -0,0 +1,37 @@
# Protocol Buffers for Go with Gadgets
#
# Copyright (c) 2013, The GoGo Authors. All rights reserved.
# http://github.com/gogo/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

regenerate:
	go install github.com/gogo/protobuf/protoc-gen-gogo
	protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto

restore:
	cp gogo.pb.golden gogo.pb.go

preserve:
	cp gogo.pb.go gogo.pb.golden
45  vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden  (generated, vendored, new file)
@@ -0,0 +1,45 @@
// Code generated by protoc-gen-go.
// source: gogo.proto
// DO NOT EDIT!

package gogoproto

import proto "github.com/gogo/protobuf/proto"
import json "encoding/json"
import math "math"
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

// Reference proto, json, and math imports to suppress error if they are not otherwise used.
var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf

var E_Nullable = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         51235,
	Name:          "gogoproto.nullable",
	Tag:           "varint,51235,opt,name=nullable",
}

var E_Embed = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         51236,
	Name:          "gogoproto.embed",
	Tag:           "varint,51236,opt,name=embed",
}

var E_Customtype = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         51237,
	Name:          "gogoproto.customtype",
	Tag:           "bytes,51237,opt,name=customtype",
}

func init() {
	proto.RegisterExtension(E_Nullable)
	proto.RegisterExtension(E_Embed)
	proto.RegisterExtension(E_Customtype)
}
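The golden file above registers gogoproto's nullable, embed, and customtype extensions on descriptor.FieldOptions. A short sketch of reading one of those extensions back with the gogo proto API is shown below, assuming the regular gogoproto package and an illustrative field; it is not code from this repository.

```go
// Sketch: reading the gogoproto.nullable extension off a field's options.
package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func isNullable(field *descriptor.FieldDescriptorProto) bool {
	opts := field.GetOptions()
	if opts == nil || !proto.HasExtension(opts, gogoproto.E_Nullable) {
		return true // gogoproto treats fields as nullable unless told otherwise
	}
	v, err := proto.GetExtension(opts, gogoproto.E_Nullable)
	if err != nil {
		return true
	}
	return *(v.(*bool))
}

func main() {
	// Illustrative field; normally this comes from a parsed FileDescriptorSet.
	field := &descriptor.FieldDescriptorProto{
		Name:    proto.String("example"),
		Options: &descriptor.FieldOptions{},
	}
	if err := proto.SetExtension(field.Options, gogoproto.E_Nullable, proto.Bool(false)); err != nil {
		panic(err)
	}
	fmt.Println("nullable:", isNullable(field)) // prints "nullable: false"
}
```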
118  vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go  (generated, vendored)
@@ -1,118 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Package descriptor provides functions for obtaining protocol buffer
|
||||
// descriptors for generated Go types.
|
||||
//
|
||||
// These functions cannot go in package proto because they depend on the
|
||||
// generated protobuf descriptor messages, which themselves depend on proto.
|
||||
package descriptor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
|
||||
func extractFile(gz []byte) (*FileDescriptorProto, error) {
|
||||
r, err := gzip.NewReader(bytes.NewReader(gz))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open gzip reader: %v", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
|
||||
}
|
||||
|
||||
fd := new(FileDescriptorProto)
|
||||
if err := proto.Unmarshal(b, fd); err != nil {
|
||||
return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
|
||||
}
|
||||
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
// Message is a proto.Message with a method to return its descriptor.
|
||||
//
|
||||
// Message types generated by the protocol compiler always satisfy
|
||||
// the Message interface.
|
||||
type Message interface {
|
||||
proto.Message
|
||||
Descriptor() ([]byte, []int)
|
||||
}
|
||||
|
||||
// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
|
||||
// describing the given message.
|
||||
func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
|
||||
gz, path := msg.Descriptor()
|
||||
fd, err := extractFile(gz)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
|
||||
}
|
||||
|
||||
md = fd.MessageType[path[0]]
|
||||
for _, i := range path[1:] {
|
||||
md = md.NestedType[i]
|
||||
}
|
||||
return fd, md
|
||||
}
|
||||
|
||||
// Is this field a scalar numeric type?
|
||||
func (field *FieldDescriptorProto) IsScalar() bool {
|
||||
if field.Type == nil {
|
||||
return false
|
||||
}
|
||||
switch *field.Type {
|
||||
case FieldDescriptorProto_TYPE_DOUBLE,
|
||||
FieldDescriptorProto_TYPE_FLOAT,
|
||||
FieldDescriptorProto_TYPE_INT64,
|
||||
FieldDescriptorProto_TYPE_UINT64,
|
||||
FieldDescriptorProto_TYPE_INT32,
|
||||
FieldDescriptorProto_TYPE_FIXED64,
|
||||
FieldDescriptorProto_TYPE_FIXED32,
|
||||
FieldDescriptorProto_TYPE_BOOL,
|
||||
FieldDescriptorProto_TYPE_UINT32,
|
||||
FieldDescriptorProto_TYPE_ENUM,
|
||||
FieldDescriptorProto_TYPE_SFIXED32,
|
||||
FieldDescriptorProto_TYPE_SFIXED64,
|
||||
FieldDescriptorProto_TYPE_SINT32,
|
||||
FieldDescriptorProto_TYPE_SINT64:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
2865  vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go  (generated, vendored)
Diff not shown because the file is too large.

752  vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go  (generated, vendored)
@@ -1,752 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: descriptor.proto
|
||||
|
||||
package descriptor
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
sort "sort"
|
||||
strconv "strconv"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
func (this *FileDescriptorSet) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.FileDescriptorSet{")
|
||||
if this.File != nil {
|
||||
s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FileDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 16)
|
||||
s = append(s, "&descriptor.FileDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Package != nil {
|
||||
s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
|
||||
}
|
||||
if this.Dependency != nil {
|
||||
s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
|
||||
}
|
||||
if this.PublicDependency != nil {
|
||||
s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
|
||||
}
|
||||
if this.WeakDependency != nil {
|
||||
s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
|
||||
}
|
||||
if this.MessageType != nil {
|
||||
s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
|
||||
}
|
||||
if this.EnumType != nil {
|
||||
s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
|
||||
}
|
||||
if this.Service != nil {
|
||||
s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
|
||||
}
|
||||
if this.Extension != nil {
|
||||
s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.SourceCodeInfo != nil {
|
||||
s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
|
||||
}
|
||||
if this.Syntax != nil {
|
||||
s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *DescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 14)
|
||||
s = append(s, "&descriptor.DescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Field != nil {
|
||||
s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
|
||||
}
|
||||
if this.Extension != nil {
|
||||
s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
|
||||
}
|
||||
if this.NestedType != nil {
|
||||
s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
|
||||
}
|
||||
if this.EnumType != nil {
|
||||
s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
|
||||
}
|
||||
if this.ExtensionRange != nil {
|
||||
s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
|
||||
}
|
||||
if this.OneofDecl != nil {
|
||||
s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ReservedRange != nil {
|
||||
s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
|
||||
}
|
||||
if this.ReservedName != nil {
|
||||
s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *DescriptorProto_ExtensionRange) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
|
||||
if this.Start != nil {
|
||||
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *DescriptorProto_ReservedRange) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
|
||||
if this.Start != nil {
|
||||
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ExtensionRangeOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.ExtensionRangeOptions{")
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FieldDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 14)
|
||||
s = append(s, "&descriptor.FieldDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Number != nil {
|
||||
s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
|
||||
}
|
||||
if this.Label != nil {
|
||||
s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
|
||||
}
|
||||
if this.Type != nil {
|
||||
s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
|
||||
}
|
||||
if this.TypeName != nil {
|
||||
s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
|
||||
}
|
||||
if this.Extendee != nil {
|
||||
s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
|
||||
}
|
||||
if this.DefaultValue != nil {
|
||||
s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
|
||||
}
|
||||
if this.OneofIndex != nil {
|
||||
s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
|
||||
}
|
||||
if this.JsonName != nil {
|
||||
s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *OneofDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.OneofDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.EnumDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Value != nil {
|
||||
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ReservedRange != nil {
|
||||
s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
|
||||
}
|
||||
if this.ReservedName != nil {
|
||||
s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
|
||||
if this.Start != nil {
|
||||
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumValueDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.EnumValueDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Number != nil {
|
||||
s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ServiceDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.ServiceDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Method != nil {
|
||||
s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MethodDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 10)
|
||||
s = append(s, "&descriptor.MethodDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.InputType != nil {
|
||||
s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
|
||||
}
|
||||
if this.OutputType != nil {
|
||||
s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ClientStreaming != nil {
|
||||
s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
|
||||
}
|
||||
if this.ServerStreaming != nil {
|
||||
s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FileOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 25)
|
||||
s = append(s, "&descriptor.FileOptions{")
|
||||
if this.JavaPackage != nil {
|
||||
s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
|
||||
}
|
||||
if this.JavaOuterClassname != nil {
|
||||
s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
|
||||
}
|
||||
if this.JavaMultipleFiles != nil {
|
||||
s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
|
||||
}
|
||||
if this.JavaGenerateEqualsAndHash != nil {
|
||||
s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
|
||||
}
|
||||
if this.JavaStringCheckUtf8 != nil {
|
||||
s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
|
||||
}
|
||||
if this.OptimizeFor != nil {
|
||||
s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
|
||||
}
|
||||
if this.GoPackage != nil {
|
||||
s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
|
||||
}
|
||||
if this.CcGenericServices != nil {
|
||||
s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.JavaGenericServices != nil {
|
||||
s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.PyGenericServices != nil {
|
||||
s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.PhpGenericServices != nil {
|
||||
s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.CcEnableArenas != nil {
|
||||
s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
|
||||
}
|
||||
if this.ObjcClassPrefix != nil {
|
||||
s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
|
||||
}
|
||||
if this.CsharpNamespace != nil {
|
||||
s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
|
||||
}
|
||||
if this.SwiftPrefix != nil {
|
||||
s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
|
||||
}
|
||||
if this.PhpClassPrefix != nil {
|
||||
s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
|
||||
}
|
||||
if this.PhpNamespace != nil {
|
||||
s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
|
||||
}
|
||||
if this.PhpMetadataNamespace != nil {
|
||||
s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n")
|
||||
}
|
||||
if this.RubyPackage != nil {
|
||||
s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MessageOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.MessageOptions{")
|
||||
if this.MessageSetWireFormat != nil {
|
||||
s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
|
||||
}
|
||||
if this.NoStandardDescriptorAccessor != nil {
|
||||
s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.MapEntry != nil {
|
||||
s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FieldOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 11)
|
||||
s = append(s, "&descriptor.FieldOptions{")
|
||||
if this.Ctype != nil {
|
||||
s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
|
||||
}
|
||||
if this.Packed != nil {
|
||||
s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
|
||||
}
|
||||
if this.Jstype != nil {
|
||||
s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
|
||||
}
|
||||
if this.Lazy != nil {
|
||||
s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.Weak != nil {
|
||||
s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *OneofOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.OneofOptions{")
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.EnumOptions{")
|
||||
if this.AllowAlias != nil {
|
||||
s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumValueOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.EnumValueOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ServiceOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.ServiceOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MethodOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.MethodOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.IdempotencyLevel != nil {
|
||||
s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *UninterpretedOption) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 11)
|
||||
s = append(s, "&descriptor.UninterpretedOption{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
|
||||
}
|
||||
if this.IdentifierValue != nil {
|
||||
s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
|
||||
}
|
||||
if this.PositiveIntValue != nil {
|
||||
s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
|
||||
}
|
||||
if this.NegativeIntValue != nil {
|
||||
s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
|
||||
}
|
||||
if this.DoubleValue != nil {
|
||||
s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
|
||||
}
|
||||
if this.StringValue != nil {
|
||||
s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
|
||||
}
|
||||
if this.AggregateValue != nil {
|
||||
s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *UninterpretedOption_NamePart) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.UninterpretedOption_NamePart{")
|
||||
if this.NamePart != nil {
|
||||
s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
|
||||
}
|
||||
if this.IsExtension != nil {
|
||||
s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *SourceCodeInfo) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.SourceCodeInfo{")
|
||||
if this.Location != nil {
|
||||
s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *SourceCodeInfo_Location) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.SourceCodeInfo_Location{")
|
||||
if this.Path != nil {
|
||||
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
|
||||
}
|
||||
if this.Span != nil {
|
||||
s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
|
||||
}
|
||||
if this.LeadingComments != nil {
|
||||
s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
|
||||
}
|
||||
if this.TrailingComments != nil {
|
||||
s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
|
||||
}
|
||||
if this.LeadingDetachedComments != nil {
|
||||
s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *GeneratedCodeInfo) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.GeneratedCodeInfo{")
|
||||
if this.Annotation != nil {
|
||||
s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *GeneratedCodeInfo_Annotation) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 8)
|
||||
s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
|
||||
if this.Path != nil {
|
||||
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
|
||||
}
|
||||
if this.SourceFile != nil {
|
||||
s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
|
||||
}
|
||||
if this.Begin != nil {
|
||||
s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringDescriptor(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
390  vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go  (generated, vendored)
@@ -1,390 +0,0 @@
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package descriptor
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
|
||||
if !msg.GetOptions().GetMapEntry() {
|
||||
return nil, nil
|
||||
}
|
||||
return msg.GetField()[0], msg.GetField()[1]
|
||||
}
|
||||
|
||||
func dotToUnderscore(r rune) rune {
|
||||
if r == '.' {
|
||||
return '_'
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) WireType() (wire int) {
|
||||
switch *field.Type {
|
||||
case FieldDescriptorProto_TYPE_DOUBLE:
|
||||
return 1
|
||||
case FieldDescriptorProto_TYPE_FLOAT:
|
||||
return 5
|
||||
case FieldDescriptorProto_TYPE_INT64:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_UINT64:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_INT32:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_UINT32:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_FIXED64:
|
||||
return 1
|
||||
case FieldDescriptorProto_TYPE_FIXED32:
|
||||
return 5
|
||||
case FieldDescriptorProto_TYPE_BOOL:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_STRING:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_GROUP:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_MESSAGE:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_BYTES:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_ENUM:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_SFIXED32:
|
||||
return 5
|
||||
case FieldDescriptorProto_TYPE_SFIXED64:
|
||||
return 1
|
||||
case FieldDescriptorProto_TYPE_SINT32:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_SINT64:
|
||||
return 0
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
|
||||
packed := field.IsPacked()
|
||||
wireType := field.WireType()
|
||||
fieldNumber := field.GetNumber()
|
||||
if packed {
|
||||
wireType = 2
|
||||
}
|
||||
x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
|
||||
return x
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
|
||||
packed := field.IsPacked3()
|
||||
wireType := field.WireType()
|
||||
fieldNumber := field.GetNumber()
|
||||
if packed {
|
||||
wireType = 2
|
||||
}
|
||||
x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
|
||||
return x
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKey() []byte {
|
||||
x := field.GetKeyUint64()
|
||||
i := 0
|
||||
keybuf := make([]byte, 0)
|
||||
for i = 0; x > 127; i++ {
|
||||
keybuf = append(keybuf, 0x80|uint8(x&0x7F))
|
||||
x >>= 7
|
||||
}
|
||||
keybuf = append(keybuf, uint8(x))
|
||||
return keybuf
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKey3() []byte {
|
||||
x := field.GetKey3Uint64()
|
||||
i := 0
|
||||
keybuf := make([]byte, 0)
|
||||
for i = 0; x > 127; i++ {
|
||||
keybuf = append(keybuf, 0x80|uint8(x&0x7F))
|
||||
x >>= 7
|
||||
}
|
||||
keybuf = append(keybuf, uint8(x))
|
||||
return keybuf
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
|
||||
msg := desc.GetMessage(packageName, messageName)
|
||||
if msg == nil {
|
||||
return nil
|
||||
}
|
||||
for _, field := range msg.GetField() {
|
||||
if field.GetName() == fieldName {
|
||||
return field
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
|
||||
for _, msg := range file.GetMessageType() {
|
||||
if msg.GetName() == typeName {
|
||||
return msg
|
||||
}
|
||||
nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
|
||||
if nes != nil {
|
||||
return nes
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
|
||||
for _, nes := range msg.GetNestedType() {
|
||||
if nes.GetName() == typeName {
|
||||
return nes
|
||||
}
|
||||
res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
|
||||
if res != nil {
|
||||
return res
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
|
||||
for _, file := range desc.GetFile() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
|
||||
continue
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
if msg.GetName() == typeName {
|
||||
return msg
|
||||
}
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
for _, nes := range msg.GetNestedType() {
|
||||
if nes.GetName() == typeName {
|
||||
return nes
|
||||
}
|
||||
if msg.GetName()+"."+nes.GetName() == typeName {
|
||||
return nes
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
|
||||
for _, file := range desc.GetFile() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
|
||||
continue
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
if msg.GetName() == typeName {
|
||||
return file.GetSyntax() == "proto3"
|
||||
}
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
for _, nes := range msg.GetNestedType() {
|
||||
if nes.GetName() == typeName {
|
||||
return file.GetSyntax() == "proto3"
|
||||
}
|
||||
if msg.GetName()+"."+nes.GetName() == typeName {
|
||||
return file.GetSyntax() == "proto3"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (msg *DescriptorProto) IsExtendable() bool {
|
||||
return len(msg.GetExtensionRange()) > 0
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
|
||||
parent := desc.GetMessage(packageName, typeName)
|
||||
if parent == nil {
|
||||
return "", nil
|
||||
}
|
||||
if !parent.IsExtendable() {
|
||||
return "", nil
|
||||
}
|
||||
extendee := "." + packageName + "." + typeName
|
||||
for _, file := range desc.GetFile() {
|
||||
for _, ext := range file.GetExtension() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
|
||||
if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if ext.GetExtendee() != extendee {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if ext.GetName() == fieldName {
|
||||
return file.GetPackage(), ext
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
|
||||
parent := desc.GetMessage(packageName, typeName)
|
||||
if parent == nil {
|
||||
return "", nil
|
||||
}
|
||||
if !parent.IsExtendable() {
|
||||
return "", nil
|
||||
}
|
||||
extendee := "." + packageName + "." + typeName
|
||||
for _, file := range desc.GetFile() {
|
||||
for _, ext := range file.GetExtension() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
|
||||
if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if ext.GetExtendee() != extendee {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if ext.GetNumber() == fieldNum {
|
||||
return file.GetPackage(), ext
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
|
||||
parent := desc.GetMessage(packageName, typeName)
|
||||
if parent == nil {
|
||||
return "", ""
|
||||
}
|
||||
field := parent.GetFieldDescriptor(fieldName)
|
||||
if field == nil {
|
||||
var extPackageName string
|
||||
extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
|
||||
if field == nil {
|
||||
return "", ""
|
||||
}
|
||||
packageName = extPackageName
|
||||
}
|
||||
typeNames := strings.Split(field.GetTypeName(), ".")
|
||||
if len(typeNames) == 1 {
|
||||
msg := desc.GetMessage(packageName, typeName)
|
||||
if msg == nil {
|
||||
return "", ""
|
||||
}
|
||||
return packageName, msg.GetName()
|
||||
}
|
||||
if len(typeNames) > 2 {
|
||||
for i := 1; i < len(typeNames)-1; i++ {
|
||||
packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
|
||||
typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
|
||||
msg := desc.GetMessage(packageName, typeName)
|
||||
if msg != nil {
|
||||
typeNames := strings.Split(msg.GetName(), ".")
|
||||
if len(typeNames) == 1 {
|
||||
return packageName, msg.GetName()
|
||||
}
|
||||
return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
|
||||
for _, field := range msg.GetField() {
|
||||
if field.GetName() == fieldName {
|
||||
return field
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
|
||||
for _, file := range desc.GetFile() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
|
||||
continue
|
||||
}
|
||||
for _, enum := range file.GetEnumType() {
|
||||
if enum.GetName() == typeName {
|
||||
return enum
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsEnum() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_ENUM
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsMessage() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsBytes() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_BYTES
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsRepeated() bool {
|
||||
return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsString() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_STRING
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsBool() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_BOOL
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsRequired() bool {
|
||||
return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsPacked() bool {
|
||||
return f.Options != nil && f.GetOptions().GetPacked()
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsPacked3() bool {
|
||||
if f.IsRepeated() && f.IsScalar() {
|
||||
if f.Options == nil || f.GetOptions().Packed == nil {
|
||||
return true
|
||||
}
|
||||
return f.Options != nil && f.GetOptions().GetPacked()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *DescriptorProto) HasExtension() bool {
|
||||
return len(m.ExtensionRange) > 0
|
||||
}
|
|
@@ -93,6 +93,22 @@ To build Go code, there are several options:

#### Compiling error, undefined: grpc.SupportPackageIsVersion

##### If you are using Go modules:

Please ensure your gRPC-Go version is `require`d at the appropriate version in
the same module containing the generated `.pb.go` files. For example,
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:

```
module <your module name>

require (
    google.golang.org/grpc v1.27.0
)
```

##### If you are *not* using Go modules:

Please update proto package, gRPC package and rebuild the proto files:
- `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- `go get -u google.golang.org/grpc`
@ -0,0 +1,70 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package attributes defines a generic key/value store used in various gRPC
|
||||
// components.
|
||||
//
|
||||
// All APIs in this package are EXPERIMENTAL.
|
||||
package attributes
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Attributes is an immutable struct for storing and retrieving generic
|
||||
// key/value pairs. Keys must be hashable, and users should define their own
|
||||
// types for keys.
|
||||
type Attributes struct {
|
||||
m map[interface{}]interface{}
|
||||
}
|
||||
|
||||
// New returns a new Attributes containing all key/value pairs in kvs. If the
|
||||
// same key appears multiple times, the last value overwrites all previous
|
||||
// values for that key. Panics if len(kvs) is not even.
|
||||
func New(kvs ...interface{}) *Attributes {
|
||||
if len(kvs)%2 != 0 {
|
||||
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
|
||||
}
|
||||
a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
|
||||
for i := 0; i < len(kvs)/2; i++ {
|
||||
a.m[kvs[i*2]] = kvs[i*2+1]
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// WithValues returns a new Attributes containing all key/value pairs in a and
|
||||
// kvs. Panics if len(kvs) is not even. If the same key appears multiple
|
||||
// times, the last value overwrites all previous values for that key. To
|
||||
// remove an existing key, use a nil value.
|
||||
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
|
||||
if len(kvs)%2 != 0 {
|
||||
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
|
||||
}
|
||||
n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
|
||||
for k, v := range a.m {
|
||||
n.m[k] = v
|
||||
}
|
||||
for i := 0; i < len(kvs)/2; i++ {
|
||||
n.m[kvs[i*2]] = kvs[i*2+1]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Value returns the value associated with these attributes for key, or nil if
|
||||
// no value is associated with key.
|
||||
func (a *Attributes) Value(key interface{}) interface{} {
|
||||
return a.m[key]
|
||||
}
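A minimal usage sketch may help; the `regionKey` type and the region strings below are invented for illustration and are not part of the package:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Keys should be unexported custom types so that different packages cannot
// collide on the same key.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east-1")
	// WithValues returns a new, independent Attributes; a is left unchanged.
	b := a.WithValues(regionKey{}, "eu-west-1")

	fmt.Println(a.Value(regionKey{})) // us-east-1
	fmt.Println(b.Value(regionKey{})) // eu-west-1
}
```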
|
|
@ -23,16 +23,36 @@ package grpc
|
|||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/backoff"
|
||||
)
|
||||
|
||||
// DefaultBackoffConfig uses values specified for backoff in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
|
||||
var DefaultBackoffConfig = BackoffConfig{
|
||||
MaxDelay: 120 * time.Second,
|
||||
}
|
||||
|
||||
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
|
||||
//
|
||||
// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
|
||||
type BackoffConfig struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
}
|
||||
|
||||
// ConnectParams defines the parameters for connecting and retrying. Users are
|
||||
// encouraged to use this instead of the BackoffConfig type defined above. See
|
||||
// here for more details:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type ConnectParams struct {
|
||||
// Backoff specifies the configuration options for connection backoff.
|
||||
Backoff backoff.Config
|
||||
// MinConnectTimeout is the minimum amount of time we are willing to give a
|
||||
// connection to complete.
|
||||
MinConnectTimeout time.Duration
|
||||
}
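As a rough sketch of how a caller might wire these options into a dial (the address and durations are placeholders, not recommendations):

```go
package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

// dialWithBackoff dials with a customized reconnect policy: the default
// backoff curve with a lower cap, plus a minimum connect timeout.
func dialWithBackoff(address string) (*grpc.ClientConn, error) {
	bc := backoff.DefaultConfig
	bc.MaxDelay = 5 * time.Second // cap the delay between reconnect attempts

	return grpc.Dial(address,
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 20 * time.Second,
		}),
	)
}
```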
|
||||
|
|
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package backoff provides configuration options for backoff.
|
||||
//
|
||||
// More details can be found at:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
// Config defines the configuration options for backoff.
|
||||
type Config struct {
|
||||
// BaseDelay is the amount of time to backoff after the first failure.
|
||||
BaseDelay time.Duration
|
||||
// Multiplier is the factor with which to multiply backoffs after a
|
||||
// failed retry. Should ideally be greater than 1.
|
||||
Multiplier float64
|
||||
// Jitter is the factor with which backoffs are randomized.
|
||||
Jitter float64
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
}
|
||||
|
||||
// DefaultConfig is a backoff configuration with the default values specified
|
||||
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// This should be useful for callers who want to configure backoff with
|
||||
// non-default values only for a subset of the options.
|
||||
var DefaultConfig = Config{
|
||||
BaseDelay: 1.0 * time.Second,
|
||||
Multiplier: 1.6,
|
||||
Jitter: 0.2,
|
||||
MaxDelay: 120 * time.Second,
|
||||
}
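The spec linked above combines these fields roughly as sketched below; this is an editorial approximation for orientation, not the code gRPC ships (which lives in its internal backoff package):

```go
package example

import (
	"time"

	"google.golang.org/grpc/backoff"
)

// delayFor approximates the delay before the retries-th reconnect attempt:
// BaseDelay grows by Multiplier per consecutive failure, is capped at
// MaxDelay, and the result is randomized by +/-Jitter.
func delayFor(c backoff.Config, retries int, rnd func() float64) time.Duration {
	if retries == 0 {
		return c.BaseDelay
	}
	d := float64(c.BaseDelay)
	for i := 0; i < retries; i++ {
		d *= c.Multiplier
		if d >= float64(c.MaxDelay) {
			d = float64(c.MaxDelay)
			break
		}
	}
	// rnd is expected to return a value in [-1, 1).
	return time.Duration(d * (1 + c.Jitter*rnd()))
}
```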
|
|
@ -117,6 +117,15 @@ type NewSubConnOptions struct {
|
|||
HealthCheckEnabled bool
|
||||
}
|
||||
|
||||
// State contains the balancer's state relevant to the gRPC ClientConn.
|
||||
type State struct {
|
||||
// State contains the connectivity state of the balancer, which is used to
|
||||
// determine the state of the ClientConn.
|
||||
ConnectivityState connectivity.State
|
||||
// Picker is used to choose connections (SubConns) for RPCs.
|
||||
Picker V2Picker
|
||||
}
|
||||
|
||||
// ClientConn represents a gRPC ClientConn.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
|
@ -137,10 +146,19 @@ type ClientConn interface {
|
|||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
||||
// on the new picker to pick new SubConn.
|
||||
//
|
||||
// Deprecated: use UpdateState instead
|
||||
UpdateBalancerState(s connectivity.State, p Picker)
|
||||
|
||||
// UpdateState notifies gRPC that the balancer's internal state has
|
||||
// changed.
|
||||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
||||
// on the new picker to pick new SubConns.
|
||||
UpdateState(State)
|
||||
|
||||
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
||||
ResolveNow(resolver.ResolveNowOption)
|
||||
ResolveNow(resolver.ResolveNowOptions)
|
||||
|
||||
// Target returns the dial target for this ClientConn.
|
||||
//
|
||||
|
@ -185,11 +203,14 @@ type ConfigParser interface {
|
|||
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
|
||||
}
|
||||
|
||||
// PickOptions contains additional information for the Pick operation.
|
||||
type PickOptions struct {
|
||||
// PickInfo contains additional information for the Pick operation.
|
||||
type PickInfo struct {
|
||||
// FullMethodName is the method name that NewClientStream() is called
|
||||
// with. The canonical format is /service/Method.
|
||||
FullMethodName string
|
||||
// Ctx is the RPC's context, and may contain relevant RPC-level information
|
||||
// like the outgoing header metadata.
|
||||
Ctx context.Context
|
||||
}
|
||||
|
||||
// DoneInfo contains additional information for done.
|
||||
|
@ -215,7 +236,7 @@ var (
|
|||
ErrNoSubConnAvailable = errors.New("no SubConn is available")
|
||||
// ErrTransientFailure indicates all SubConns are in TransientFailure.
|
||||
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
|
||||
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
|
||||
ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
|
||||
)
|
||||
|
||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
|
@ -223,6 +244,8 @@ var (
|
|||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||
//
|
||||
// Deprecated: use V2Picker instead
|
||||
type Picker interface {
|
||||
// Pick returns the SubConn to be used to send the RPC.
|
||||
// The returned SubConn must be one returned by NewSubConn().
|
||||
|
@ -243,18 +266,76 @@ type Picker interface {
|
|||
//
|
||||
// If the returned error is not nil:
|
||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
|
||||
// - If the error is ErrTransientFailure:
|
||||
// - If the error is ErrTransientFailure or implements IsTransientFailure()
|
||||
// bool, returning true:
|
||||
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
|
||||
// is called to pick again;
|
||||
// - Otherwise, RPC will fail with unavailable error.
|
||||
// - Else (error is other non-nil error):
|
||||
// - The RPC will fail with unavailable error.
|
||||
// - The RPC will fail with the error's status code, or Unknown if it is
|
||||
// not a status error.
|
||||
//
|
||||
// The returned done() function will be called once the rpc has finished,
|
||||
// with the final status of that RPC. If the SubConn returned is not a
|
||||
// valid SubConn type, done may not be called. done may be nil if balancer
|
||||
// doesn't care about the RPC status.
|
||||
Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
|
||||
Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
|
||||
}
|
||||
|
||||
// PickResult contains information related to a connection chosen for an RPC.
|
||||
type PickResult struct {
|
||||
// SubConn is the connection to use for this pick, if its state is Ready.
|
||||
// If the state is not Ready, gRPC will block the RPC until a new Picker is
|
||||
// provided by the balancer (using ClientConn.UpdateState). The SubConn
|
||||
// must be one returned by ClientConn.NewSubConn.
|
||||
SubConn SubConn
|
||||
|
||||
// Done is called when the RPC is completed. If the SubConn is not ready,
|
||||
// this will be called with a nil parameter. If the SubConn is not a valid
|
||||
// type, Done may not be called. May be nil if the balancer does not wish
|
||||
// to be notified when the RPC completes.
|
||||
Done func(DoneInfo)
|
||||
}
|
||||
|
||||
type transientFailureError struct {
|
||||
error
|
||||
}
|
||||
|
||||
func (e *transientFailureError) IsTransientFailure() bool { return true }
|
||||
|
||||
// TransientFailureError wraps err in an error implementing
|
||||
// IsTransientFailure() bool, returning true.
|
||||
func TransientFailureError(err error) error {
|
||||
return &transientFailureError{error: err}
|
||||
}
|
||||
|
||||
// V2Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||
type V2Picker interface {
|
||||
// Pick returns the connection to use for this RPC and related information.
|
||||
//
|
||||
// Pick should not block. If the balancer needs to do I/O or any blocking
|
||||
// or time-consuming work to service this call, it should return
|
||||
// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
|
||||
// the Picker is updated (using ClientConn.UpdateState).
|
||||
//
|
||||
// If an error is returned:
|
||||
//
|
||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
|
||||
// Picker is provided by the balancer (using ClientConn.UpdateState).
|
||||
//
|
||||
// - If the error implements IsTransientFailure() bool, returning true,
|
||||
// wait for ready RPCs will wait, but non-wait for ready RPCs will be
|
||||
// terminated with this error's Error() string and status code
|
||||
// Unavailable.
|
||||
//
|
||||
// - Any other errors terminate all RPCs with the code and message
|
||||
// provided. If the error is not a status error, it will be converted by
|
||||
// gRPC to a status error with code Unknown.
|
||||
Pick(info PickInfo) (PickResult, error)
|
||||
}
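To make the contract concrete, here is a hypothetical V2Picker that serves every RPC from a single SubConn; it is an illustration of the interface above, not code from this package:

```go
package example

import "google.golang.org/grpc/balancer"

// singlePicker returns the one SubConn it was built with, or asks gRPC to
// wait for the next picker update if none is ready yet.
type singlePicker struct {
	sc balancer.SubConn
}

func (p *singlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{
		SubConn: p.sc,
		// Done is optional; it could be used to record per-RPC results.
		Done: func(di balancer.DoneInfo) { _ = di.Err },
	}, nil
}
```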
|
||||
|
||||
// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
|
||||
|
@ -292,8 +373,11 @@ type Balancer interface {
|
|||
|
||||
// SubConnState describes the state of a SubConn.
|
||||
type SubConnState struct {
|
||||
// ConnectivityState is the connectivity state of the SubConn.
|
||||
ConnectivityState connectivity.State
|
||||
// TODO: add last connection error
|
||||
// ConnectionError is set if the ConnectivityState is TransientFailure,
|
||||
// describing the reason the SubConn failed. Otherwise, it is nil.
|
||||
ConnectionError error
|
||||
}
|
||||
|
||||
// ClientConnState describes the state of a ClientConn relevant to the
|
||||
|
@ -305,14 +389,23 @@ type ClientConnState struct {
|
|||
BalancerConfig serviceconfig.LoadBalancingConfig
|
||||
}
|
||||
|
||||
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
|
||||
// problem with the provided name resolver data.
|
||||
var ErrBadResolverState = errors.New("bad resolver state")
|
||||
|
||||
// V2Balancer is defined for documentation purposes. If a Balancer also
|
||||
// implements V2Balancer, its UpdateClientConnState method will be called
|
||||
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
|
||||
// instead of HandleSubConnStateChange.
|
||||
type V2Balancer interface {
|
||||
// UpdateClientConnState is called by gRPC when the state of the ClientConn
|
||||
// changes.
|
||||
UpdateClientConnState(ClientConnState)
|
||||
// changes. If the error returned is ErrBadResolverState, the ClientConn
|
||||
// will begin calling ResolveNow on the active name resolver with
|
||||
// exponential backoff until a subsequent call to UpdateClientConnState
|
||||
// returns a nil error. Any other errors are currently ignored.
|
||||
UpdateClientConnState(ClientConnState) error
|
||||
// ResolverError is called by gRPC when the name resolver reports an error.
|
||||
ResolverError(error)
|
||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||
// changes.
|
||||
UpdateSubConnState(SubConn, SubConnState)
|
||||
|
@ -326,9 +419,8 @@ type V2Balancer interface {
|
|||
//
|
||||
// It's not thread safe.
|
||||
type ConnectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
numTransientFailure uint64 // Number of addrConns in transientFailure.
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
}
|
||||
|
||||
// RecordTransition records state change happening in subConn and based on that
|
||||
|
@ -348,8 +440,6 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
|
|||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
case connectivity.TransientFailure:
|
||||
cse.numTransientFailure += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,6 +20,8 @@ package base
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
|
@ -28,34 +30,44 @@ import (
|
|||
)
|
||||
|
||||
type baseBuilder struct {
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
config Config
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
v2PickerBuilder V2PickerBuilder
|
||||
config Config
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
return &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
bal := &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
v2PickerBuilder: bb.v2PickerBuilder,
|
||||
|
||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
// Initialize picker to a picker that always returns
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateBalancerState with this picker.
|
||||
picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
|
||||
config: bb.config,
|
||||
config: bb.config,
|
||||
}
|
||||
// Initialize picker to a picker that always returns
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateState with this picker.
|
||||
if bb.pickerBuilder != nil {
|
||||
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||
} else {
|
||||
bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
|
||||
}
|
||||
return bal
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Name() string {
|
||||
return bb.name
|
||||
}
|
||||
|
||||
var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
|
||||
|
||||
type baseBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
v2PickerBuilder V2PickerBuilder
|
||||
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
|
@ -63,19 +75,46 @@ type baseBalancer struct {
|
|||
subConns map[resolver.Address]balancer.SubConn
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
v2Picker balancer.V2Picker
|
||||
config Config
|
||||
|
||||
resolverErr error // the last error reported by the resolver; cleared on successful resolution
|
||||
connErr error // the last connection error; cleared upon leaving TransientFailure
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
|
||||
func (b *baseBalancer) ResolverError(err error) {
|
||||
b.resolverErr = err
|
||||
if len(b.subConns) == 0 {
|
||||
b.state = connectivity.TransientFailure
|
||||
}
|
||||
if b.state != connectivity.TransientFailure {
|
||||
// The picker will not change since the balancer does not currently
|
||||
// report an error.
|
||||
return
|
||||
}
|
||||
b.regeneratePicker()
|
||||
if b.picker != nil {
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
} else {
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: b.state,
|
||||
Picker: b.v2Picker,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
|
||||
// TODO: handle s.ResolverState.ServiceConfig?
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
|
||||
}
|
||||
// Successful resolution; clear resolver error and ensure we return nil.
|
||||
b.resolverErr = nil
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
for _, a := range s.ResolverState.Addresses {
|
||||
|
@ -101,26 +140,65 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
|
|||
// The entry will be deleted in HandleSubConnStateChange.
|
||||
}
|
||||
}
|
||||
// If resolver state contains no addresses, return an error so ClientConn
|
||||
// will trigger re-resolve. Also records this as a resolver error, so when
|
||||
// the overall state turns transient failure, the error message will have
|
||||
// the zero address information.
|
||||
if len(s.ResolverState.Addresses) == 0 {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mergeErrors builds an error from the last connection error and the last
|
||||
// resolver error. Must only be called if b.state is TransientFailure.
|
||||
func (b *baseBalancer) mergeErrors() error {
|
||||
// connErr must always be non-nil unless there are no SubConns, in which
|
||||
// case resolverErr must be non-nil.
|
||||
if b.connErr == nil {
|
||||
return fmt.Errorf("last resolver error: %v", b.resolverErr)
|
||||
}
|
||||
if b.resolverErr == nil {
|
||||
return fmt.Errorf("last connection error: %v", b.connErr)
|
||||
}
|
||||
return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
|
||||
}
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||||
// from it. The picker is
|
||||
// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
|
||||
// - errPicker if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
||||
if b.pickerBuilder != nil {
|
||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
||||
} else {
|
||||
b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
|
||||
}
|
||||
return
|
||||
}
|
||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
||||
if b.pickerBuilder != nil {
|
||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[addr] = sc
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[addr] = sc
|
||||
}
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(readySCs)
|
||||
} else {
|
||||
readySCs := make(map[balancer.SubConn]SubConnInfo)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[sc] = SubConnInfo{Address: addr}
|
||||
}
|
||||
}
|
||||
b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(readySCs)
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
|
@ -139,6 +217,12 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
|||
}
|
||||
return
|
||||
}
|
||||
if oldS == connectivity.TransientFailure && s == connectivity.Connecting {
|
||||
// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent
|
||||
// CONNECTING transitions to prevent the aggregated state from being
|
||||
// always CONNECTING when many backends exist but are all down.
|
||||
return
|
||||
}
|
||||
b.scStates[sc] = s
|
||||
switch s {
|
||||
case connectivity.Idle:
|
||||
|
@ -147,22 +231,27 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
|||
// When an address was removed by resolver, b called RemoveSubConn but
|
||||
// kept the sc's state in scStates. Remove state for this sc here.
|
||||
delete(b.scStates, sc)
|
||||
case connectivity.TransientFailure:
|
||||
// Save error to be reported via picker.
|
||||
b.connErr = state.ConnectionError
|
||||
}
|
||||
|
||||
oldAggrState := b.state
|
||||
b.state = b.csEvltr.RecordTransition(oldS, s)
|
||||
|
||||
// Regenerate picker when one of the following happens:
|
||||
// - this sc became ready from not-ready
|
||||
// - this sc became not-ready from ready
|
||||
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
|
||||
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
|
||||
// - this sc entered or left ready
|
||||
// - the aggregated state of balancer is TransientFailure
|
||||
// (may need to update error message)
|
||||
if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
|
||||
(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
|
||||
b.state == connectivity.TransientFailure {
|
||||
b.regeneratePicker()
|
||||
}
|
||||
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
if b.picker != nil {
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
} else {
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
|
||||
}
|
||||
}
|
||||
|
||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||
|
@ -179,6 +268,19 @@ type errPicker struct {
|
|||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
return nil, nil, p.err
|
||||
}
|
||||
|
||||
// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
|
||||
func NewErrPickerV2(err error) balancer.V2Picker {
|
||||
return &errPickerV2{err: err}
|
||||
}
|
||||
|
||||
type errPickerV2 struct {
|
||||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
return balancer.PickResult{}, p.err
|
||||
}
|
||||
|
|
|
@ -42,6 +42,26 @@ type PickerBuilder interface {
|
|||
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
|
||||
}
|
||||
|
||||
// V2PickerBuilder creates balancer.V2Picker.
|
||||
type V2PickerBuilder interface {
|
||||
// Build returns a picker that will be used by gRPC to pick a SubConn.
|
||||
Build(info PickerBuildInfo) balancer.V2Picker
|
||||
}
|
||||
|
||||
// PickerBuildInfo contains information needed by the picker builder to
|
||||
// construct a picker.
|
||||
type PickerBuildInfo struct {
|
||||
// ReadySCs is a map from all ready SubConns to the Addresses used to
|
||||
// create them.
|
||||
ReadySCs map[balancer.SubConn]SubConnInfo
|
||||
}
|
||||
|
||||
// SubConnInfo contains information about a SubConn created by the base
|
||||
// balancer.
|
||||
type SubConnInfo struct {
|
||||
Address resolver.Address // the address used to create this SubConn
|
||||
}
|
||||
|
||||
// NewBalancerBuilder returns a balancer builder. The balancers
|
||||
// built by this builder will use the picker builder to build pickers.
|
||||
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
|
||||
|
@ -62,3 +82,12 @@ func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config)
|
|||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
|
||||
func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
|
||||
return &baseBuilder{
|
||||
name: name,
|
||||
v2PickerBuilder: pb,
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,14 +22,12 @@
|
|||
package roundrobin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// Name is the name of round_robin balancer.
|
||||
|
@ -37,7 +35,7 @@ const Name = "round_robin"
|
|||
|
||||
// newBuilder creates a new roundrobin balancer builder.
|
||||
func newBuilder() balancer.Builder {
|
||||
return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
||||
return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -46,13 +44,13 @@ func init() {
|
|||
|
||||
type rrPickerBuilder struct{}
|
||||
|
||||
func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
|
||||
grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
|
||||
if len(readySCs) == 0 {
|
||||
return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
|
||||
grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
|
||||
if len(info.ReadySCs) == 0 {
|
||||
return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
|
||||
}
|
||||
var scs []balancer.SubConn
|
||||
for _, sc := range readySCs {
|
||||
for sc := range info.ReadySCs {
|
||||
scs = append(scs, sc)
|
||||
}
|
||||
return &rrPicker{
|
||||
|
@ -74,10 +72,10 @@ type rrPicker struct {
|
|||
next int
|
||||
}
|
||||
|
||||
func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||
p.mu.Lock()
|
||||
sc := p.subConns[p.next]
|
||||
p.next = (p.next + 1) % len(p.subConns)
|
||||
p.mu.Unlock()
|
||||
return sc, nil, nil
|
||||
return balancer.PickResult{SubConn: sc}, nil
|
||||
}
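Applications do not build this picker directly; they opt into the policy when dialing. A hedged sketch follows (the target is a placeholder, and selecting the policy via the default service config is one of several possible ways to enable round_robin):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
)

func main() {
	conn, err := grpc.Dial("dns:///my-service:443", // placeholder target
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```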
|
||||
|
|
|
@ -24,7 +24,9 @@ import (
|
|||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
|
@ -32,64 +34,17 @@ import (
|
|||
type scStateUpdate struct {
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
}
|
||||
|
||||
// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple.
|
||||
// TODO make a general purpose buffer that uses interface{}.
|
||||
type scStateUpdateBuffer struct {
|
||||
c chan *scStateUpdate
|
||||
mu sync.Mutex
|
||||
backlog []*scStateUpdate
|
||||
}
|
||||
|
||||
func newSCStateUpdateBuffer() *scStateUpdateBuffer {
|
||||
return &scStateUpdateBuffer{
|
||||
c: make(chan *scStateUpdate, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
}
|
||||
|
||||
func (b *scStateUpdateBuffer) load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get returns the channel that the scStateUpdate will be sent to.
|
||||
//
|
||||
// Upon receiving, the caller should call load to send another
|
||||
// scStateChangeTuple onto the channel if there is any.
|
||||
func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
|
||||
return b.c
|
||||
err error
|
||||
}
|
||||
|
||||
// ccBalancerWrapper is a wrapper on top of cc for balancers.
|
||||
// It implements balancer.ClientConn interface.
|
||||
type ccBalancerWrapper struct {
|
||||
cc *ClientConn
|
||||
balancer balancer.Balancer
|
||||
stateChangeQueue *scStateUpdateBuffer
|
||||
ccUpdateCh chan *balancer.ClientConnState
|
||||
done chan struct{}
|
||||
cc *ClientConn
|
||||
balancerMu sync.Mutex // synchronizes calls to the balancer
|
||||
balancer balancer.Balancer
|
||||
scBuffer *buffer.Unbounded
|
||||
done *grpcsync.Event
|
||||
|
||||
mu sync.Mutex
|
||||
subConns map[*acBalancerWrapper]struct{}
|
||||
|
@ -97,11 +52,10 @@ type ccBalancerWrapper struct {
|
|||
|
||||
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
ccb := &ccBalancerWrapper{
|
||||
cc: cc,
|
||||
stateChangeQueue: newSCStateUpdateBuffer(),
|
||||
ccUpdateCh: make(chan *balancer.ClientConnState, 1),
|
||||
done: make(chan struct{}),
|
||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
||||
cc: cc,
|
||||
scBuffer: buffer.NewUnbounded(),
|
||||
done: grpcsync.NewEvent(),
|
||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
||||
}
|
||||
go ccb.watcher()
|
||||
ccb.balancer = b.Build(ccb, bopts)
|
||||
|
@ -113,36 +67,23 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
|
|||
func (ccb *ccBalancerWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case t := <-ccb.stateChangeQueue.get():
|
||||
ccb.stateChangeQueue.load()
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
case t := <-ccb.scBuffer.Get():
|
||||
ccb.scBuffer.Load()
|
||||
if ccb.done.HasFired() {
|
||||
break
|
||||
}
|
||||
ccb.balancerMu.Lock()
|
||||
su := t.(*scStateUpdate)
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
|
||||
ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
|
||||
} else {
|
||||
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
|
||||
ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
|
||||
}
|
||||
case s := <-ccb.ccUpdateCh:
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
}
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ub.UpdateClientConnState(*s)
|
||||
} else {
|
||||
ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
|
||||
}
|
||||
case <-ccb.done:
|
||||
ccb.balancerMu.Unlock()
|
||||
case <-ccb.done.Done():
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ccb.done:
|
||||
if ccb.done.HasFired() {
|
||||
ccb.balancer.Close()
|
||||
ccb.mu.Lock()
|
||||
scs := ccb.subConns
|
||||
|
@ -151,19 +92,17 @@ func (ccb *ccBalancerWrapper) watcher() {
|
|||
for acbw := range scs {
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
ccb.UpdateBalancerState(connectivity.Connecting, nil)
|
||||
ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
|
||||
return
|
||||
default:
|
||||
}
|
||||
ccb.cc.firstResolveEvent.Fire()
|
||||
}
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) close() {
|
||||
close(ccb.done)
|
||||
ccb.done.Fire()
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
// When updating addresses for a SubConn, if the address in use is not in
|
||||
// the new addresses, the old ac will be tearDown() and a new ac will be
|
||||
// created. tearDown() generates a state change with Shutdown state, we
|
||||
|
@ -174,30 +113,29 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
|
|||
if sc == nil {
|
||||
return
|
||||
}
|
||||
ccb.stateChangeQueue.put(&scStateUpdate{
|
||||
ccb.scBuffer.Put(&scStateUpdate{
|
||||
sc: sc,
|
||||
state: s,
|
||||
err: err,
|
||||
})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
|
||||
if ccb.cc.curBalancerName != grpclbName {
|
||||
// Filter any grpclb addresses since we don't have the grpclb balancer.
|
||||
s := &ccs.ResolverState
|
||||
for i := 0; i < len(s.Addresses); {
|
||||
if s.Addresses[i].Type == resolver.GRPCLB {
|
||||
copy(s.Addresses[i:], s.Addresses[i+1:])
|
||||
s.Addresses = s.Addresses[:len(s.Addresses)-1]
|
||||
continue
|
||||
}
|
||||
i++
|
||||
}
|
||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||
ccb.balancerMu.Lock()
|
||||
defer ccb.balancerMu.Unlock()
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
return ub.UpdateClientConnState(*ccs)
|
||||
}
|
||||
select {
|
||||
case <-ccb.ccUpdateCh:
|
||||
default:
|
||||
ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ccb.balancerMu.Lock()
|
||||
ub.ResolverError(err)
|
||||
ccb.balancerMu.Unlock()
|
||||
}
|
||||
ccb.ccUpdateCh <- ccs
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
|
@ -250,7 +188,22 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc
|
|||
ccb.cc.csMgr.updateState(s)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
|
||||
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return
|
||||
}
|
||||
// Update picker before updating state. Even though the ordering here does
|
||||
// not matter, it can lead to multiple calls of Pick in the common start-up
|
||||
// case where we wait for ready and then perform an RPC. If the picker is
|
||||
// updated later, we could call the "connecting" picker when the state is
|
||||
// updated, and then call the "ready" picker after the picker gets updated.
|
||||
ccb.cc.blockingpicker.updatePickerV2(s.Picker)
|
||||
ccb.cc.csMgr.updateState(s.ConnectivityState)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
|
||||
ccb.cc.resolveNow(o)
|
||||
}
|
||||
|
||||
|
@ -292,7 +245,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
|||
|
||||
ac, err := cc.newAddrConn(addrs, opts)
|
||||
if err != nil {
|
||||
grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
return
|
||||
}
|
||||
acbw.ac = ac
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
|
@ -49,7 +48,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
|
|||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
state: connectivity.Idle,
|
||||
}
|
||||
cc.UpdateBalancerState(connectivity.Idle, bw)
|
||||
cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
|
||||
go bw.lbWatcher()
|
||||
return bw
|
||||
}
|
||||
|
@ -243,7 +242,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
|
|||
if bw.state != sa {
|
||||
bw.state = sa
|
||||
}
|
||||
bw.cc.UpdateBalancerState(bw.state, bw)
|
||||
bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
|
||||
if s == connectivity.Shutdown {
|
||||
// Remove state for this sc.
|
||||
delete(bw.connSt, sc)
|
||||
|
@ -275,17 +274,17 @@ func (bw *balancerWrapper) Close() {
|
|||
|
||||
// The picker is the balancerWrapper itself.
|
||||
// It either blocks or returns error, consistent with v1 balancer Get().
|
||||
func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
|
||||
func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
|
||||
failfast := true // Default failfast is true.
|
||||
if ss, ok := rpcInfoFromContext(ctx); ok {
|
||||
if ss, ok := rpcInfoFromContext(info.Ctx); ok {
|
||||
failfast = ss.failfast
|
||||
}
|
||||
a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
|
||||
a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return balancer.PickResult{}, toRPCErr(err)
|
||||
}
|
||||
if p != nil {
|
||||
done = func(balancer.DoneInfo) { p() }
|
||||
result.Done = func(balancer.DoneInfo) { p() }
|
||||
defer func() {
|
||||
if err != nil {
|
||||
p()
|
||||
|
@ -297,38 +296,39 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
|
|||
defer bw.mu.Unlock()
|
||||
if bw.pickfirst {
|
||||
// Get the first sc in conns.
|
||||
for _, sc := range bw.conns {
|
||||
return sc, done, nil
|
||||
for _, result.SubConn = range bw.conns {
|
||||
return result, nil
|
||||
}
|
||||
return nil, nil, balancer.ErrNoSubConnAvailable
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
sc, ok1 := bw.conns[resolver.Address{
|
||||
var ok1 bool
|
||||
result.SubConn, ok1 = bw.conns[resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend,
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}]
|
||||
s, ok2 := bw.connSt[sc]
|
||||
s, ok2 := bw.connSt[result.SubConn]
|
||||
if !ok1 || !ok2 {
|
||||
// This can only happen due to a race where Get() returned an address
|
||||
// that was subsequently removed by Notify. In this case we should
|
||||
// retry always.
|
||||
return nil, nil, balancer.ErrNoSubConnAvailable
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
switch s.s {
|
||||
case connectivity.Ready, connectivity.Idle:
|
||||
return sc, done, nil
|
||||
return result, nil
|
||||
case connectivity.Shutdown, connectivity.TransientFailure:
|
||||
// If the returned sc has been shut down or is in transient failure,
|
||||
// return error, and this RPC will fail or wait for another picker (if
|
||||
// non-failfast).
|
||||
return nil, nil, balancer.ErrTransientFailure
|
||||
return balancer.PickResult{}, balancer.ErrTransientFailure
|
||||
default:
|
||||
// For other states (connecting or unknown), the v1 balancer would
|
||||
// traditionally wait until ready and then issue the RPC. Returning
|
||||
// ErrNoSubConnAvailable will be a slight improvement in that it will
|
||||
// allow the balancer to choose another address in case others are
|
||||
// connected.
|
||||
return nil, nil, balancer.ErrNoSubConnAvailable
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,21 +31,23 @@ import (
|
|||
"time"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/resolver"
|
||||
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
|
||||
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
||||
_ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
|
||||
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -149,7 +151,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
if channelz.IsOn() {
|
||||
if cc.dopts.channelzParentID != 0 {
|
||||
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
|
||||
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
|
||||
channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{
|
||||
Desc: "Channel Created",
|
||||
Severity: channelz.CtINFO,
|
||||
Parent: &channelz.TraceEventDesc{
|
||||
|
@ -159,10 +161,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
})
|
||||
} else {
|
||||
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
|
||||
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: "Channel Created",
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
channelz.Info(cc.channelzID, "Channel Created")
|
||||
}
|
||||
cc.csMgr.channelzID = cc.channelzID
|
||||
}
|
||||
|
@ -186,11 +185,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
|
||||
if cc.dopts.defaultServiceConfigRawJSON != nil {
|
||||
sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
|
||||
scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
|
||||
if scpr.Err != nil {
|
||||
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
|
||||
}
|
||||
cc.dopts.defaultServiceConfig = sc
|
||||
cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
|
||||
}
|
||||
cc.mkp = cc.dopts.copts.KeepaliveParams
|
||||
|
||||
|
@ -235,29 +234,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
}
|
||||
if cc.dopts.bs == nil {
|
||||
cc.dopts.bs = backoff.Exponential{
|
||||
MaxDelay: DefaultBackoffConfig.MaxDelay,
|
||||
cc.dopts.bs = backoff.DefaultExponential
|
||||
}
|
||||
|
||||
// Determine the resolver to use.
|
||||
cc.parsedTarget = grpcutil.ParseTarget(cc.target)
|
||||
channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
|
||||
resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
|
||||
if resolverBuilder == nil {
|
||||
// If resolver builder is still nil, the parsed target's scheme is
|
||||
// not registered. Fallback to default resolver and set Endpoint to
|
||||
// the original target.
|
||||
channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
|
||||
cc.parsedTarget = resolver.Target{
|
||||
Scheme: resolver.GetDefaultScheme(),
|
||||
Endpoint: target,
|
||||
}
|
||||
resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
|
||||
if resolverBuilder == nil {
|
||||
return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
|
||||
}
|
||||
}
|
||||
if cc.dopts.resolverBuilder == nil {
|
||||
// Only try to parse target when resolver builder is not already set.
|
||||
cc.parsedTarget = parseTarget(cc.target)
|
||||
grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
|
||||
cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
|
||||
if cc.dopts.resolverBuilder == nil {
|
||||
// If resolver builder is still nil, the parsed target's scheme is
|
||||
// not registered. Fallback to default resolver and set Endpoint to
|
||||
// the original target.
|
||||
grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
|
||||
cc.parsedTarget = resolver.Target{
|
||||
Scheme: resolver.GetDefaultScheme(),
|
||||
Endpoint: target,
|
||||
}
|
||||
cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
|
||||
}
|
||||
} else {
|
||||
cc.parsedTarget = resolver.Target{Endpoint: target}
|
||||
}
|
||||
|
||||
creds := cc.dopts.copts.TransportCredentials
|
||||
if creds != nil && creds.Info().ServerName != "" {
|
||||
cc.authority = creds.Info().ServerName
|
||||
|
@ -297,14 +295,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
|
||||
// Build the resolver.
|
||||
rWrapper, err := newCCResolverWrapper(cc)
|
||||
rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build resolver: %v", err)
|
||||
}
|
||||
|
||||
cc.mu.Lock()
|
||||
cc.resolverWrapper = rWrapper
|
||||
cc.mu.Unlock()
|
||||
|
||||
// A blocking dial blocks until the clientConn is ready.
|
||||
if cc.dopts.block {
|
||||
for {
|
||||
|
@ -415,12 +413,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
|
|||
return
|
||||
}
|
||||
csm.state = state
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Channel Connectivity change to %v", state),
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state)
|
||||
if csm.notifyChan != nil {
|
||||
// There are other goroutines waiting on this channel.
|
||||
close(csm.notifyChan)
|
||||
|
@ -443,7 +436,32 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
|
|||
return csm.notifyChan
|
||||
}
|
||||
|
||||
// ClientConn represents a client connection to an RPC server.
|
||||
// ClientConnInterface defines the functions clients need to perform unary and
|
||||
// streaming RPCs. It is implemented by *ClientConn, and is only intended to
|
||||
// be referenced by generated code.
|
||||
type ClientConnInterface interface {
|
||||
// Invoke performs a unary RPC and returns after the response is received
|
||||
// into reply.
|
||||
Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
|
||||
// NewStream begins a streaming RPC.
|
||||
NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
|
||||
}
|
||||
|
||||
// Assert *ClientConn implements ClientConnInterface.
|
||||
var _ ClientConnInterface = (*ClientConn)(nil)
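For orientation, a hand-written client in the style of generated code depends only on this interface, which also makes it easy to substitute a fake in tests; the method name below is invented for illustration:

```go
package example

import (
	"context"

	"google.golang.org/grpc"
)

// pingClient mimics what generated clients do: it holds a ClientConnInterface
// rather than a concrete *grpc.ClientConn.
type pingClient struct {
	cc grpc.ClientConnInterface
}

func newPingClient(cc grpc.ClientConnInterface) *pingClient {
	return &pingClient{cc: cc}
}

// Ping issues a unary RPC. Real generated clients pass proto messages here;
// the empty-interface parameters keep this sketch self-contained.
func (c *pingClient) Ping(ctx context.Context, req, resp interface{}) error {
	return c.cc.Invoke(ctx, "/example.Pinger/Ping", req, resp)
}
```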
|
||||
|
||||
// ClientConn represents a virtual connection to a conceptual endpoint, to
|
||||
// perform RPCs.
|
||||
//
|
||||
// A ClientConn is free to have zero or more actual connections to the endpoint
|
||||
// based on configuration, load, etc. It is also free to determine which actual
|
||||
// endpoints to use and may change it every RPC, permitting client-side load
|
||||
// balancing.
|
||||
//
|
||||
// A ClientConn encapsulates a range of functionality including name
|
||||
// resolution, TCP connection establishment (with retries and backoff) and TLS
|
||||
// handshakes. It also handles errors on established connections by
|
||||
// re-resolving the name and reconnecting.
|
||||
type ClientConn struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
@ -532,58 +550,104 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
|
|||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) updateResolverState(s resolver.State) error {
|
||||
var emptyServiceConfig *ServiceConfig
|
||||
|
||||
func init() {
|
||||
cfg := parseServiceConfig("{}")
|
||||
if cfg.Err != nil {
|
||||
panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
|
||||
}
|
||||
emptyServiceConfig = cfg.Config.(*ServiceConfig)
|
||||
}
|
||||
|
||||
func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
|
||||
if cc.sc != nil {
|
||||
cc.applyServiceConfigAndBalancer(cc.sc, addrs)
|
||||
return
|
||||
}
|
||||
if cc.dopts.defaultServiceConfig != nil {
|
||||
cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
|
||||
} else {
|
||||
cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
|
||||
defer cc.firstResolveEvent.Fire()
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
// Check if the ClientConn is already closed. Some fields (e.g.
|
||||
// balancerWrapper) are set to nil when closing the ClientConn, and could
|
||||
// cause nil pointer panic if we don't have this check.
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
return nil
}

if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
if err != nil {
// May need to apply the initial service config in case the resolver
// doesn't support service configs, or doesn't provide a service config
// with the new addresses.
cc.maybeApplyDefaultServiceConfig(nil)

if cc.balancerWrapper != nil {
cc.balancerWrapper.resolverError(err)
}

// No addresses are valid with err set; return early.
cc.mu.Unlock()
return balancer.ErrBadResolverState
}

var ret error
if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
cc.maybeApplyDefaultServiceConfig(s.Addresses)
// TODO: do we need to apply a failing LB policy if there is no
// default, per the error handling design?
} else {
if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
cc.applyServiceConfigAndBalancer(sc, s.Addresses)
} else {
ret = balancer.ErrBadResolverState
if cc.balancerWrapper == nil {
var err error
if s.ServiceConfig.Err != nil {
err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
} else {
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
}
cc.blockingpicker.updatePicker(base.NewErrPicker(err))
cc.csMgr.updateState(connectivity.TransientFailure)
cc.mu.Unlock()
return ret
}
}
} else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
cc.applyServiceConfig(sc)
}

var balCfg serviceconfig.LoadBalancingConfig
if cc.dopts.balancerBuilder == nil {
// Only look at balancer types and switch balancer if balancer dial
// option is not set.
var newBalancerName string
if cc.sc != nil && cc.sc.lbConfig != nil {
newBalancerName = cc.sc.lbConfig.name
balCfg = cc.sc.lbConfig.cfg
} else {
var isGRPCLB bool
for _, a := range s.Addresses {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
} else {
newBalancerName = PickFirstBalancerName
}
}
cc.switchBalancer(newBalancerName)
} else if cc.balancerWrapper == nil {
// Balancer dial option was set, and this is the first time handling
// resolved addresses. Build a balancer with dopts.balancerBuilder.
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
balCfg = cc.sc.lbConfig.cfg
}

cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
return nil
cbn := cc.curBalancerName
bw := cc.balancerWrapper
cc.mu.Unlock()
if cbn != grpclbName {
// Filter any grpclb addresses since we don't have the grpclb balancer.
for i := 0; i < len(s.Addresses); {
if s.Addresses[i].Type == resolver.GRPCLB {
copy(s.Addresses[i:], s.Addresses[i+1:])
s.Addresses = s.Addresses[:len(s.Addresses)-1]
continue
}
i++
}
}
uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
if ret == nil {
ret = uccsErr // prefer ErrBadResolver state since any other error is
// currently meaningless to the caller.
}
return ret
}

// switchBalancer starts the switching from current balancer to the balancer
@ -599,9 +663,9 @@ func (cc *ClientConn) switchBalancer(name string) {
return
}

grpclog.Infof("ClientConn switching balancer to %q", name)
channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name)
if cc.dopts.balancerBuilder != nil {
grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
return
}
if cc.balancerWrapper != nil {

@ -609,29 +673,19 @@ func (cc *ClientConn) switchBalancer(name string) {
}

builder := balancer.Get(name)
if channelz.IsOn() {
if builder == nil {
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
Severity: channelz.CtWarning,
})
} else {
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Channel switches to new LB policy %q", name),
Severity: channelz.CtINFO,
})
}
}
if builder == nil {
grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
builder = newPickfirstBuilder()
} else {
channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name)
}

cc.curBalancerName = builder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
}
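Editorial note, not part of this diff: the balancer name chosen above normally comes from the service config, so an application using this vendored client library could request a policy such as round_robin through a default service config instead of a balancer dial option. A minimal sketch under that assumption; the target address and config JSON are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// switchBalancer picks up the policy named here once the resolver reports addresses.
	conn, err := grpc.Dial(
		"dns:///example.com:443",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}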
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
cc.mu.Lock()
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
|
@ -639,7 +693,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
|
|||
}
|
||||
// TODO(bar switching) send updates to all balancer wrappers when balancer
|
||||
// gracefully switching is supported.
|
||||
cc.balancerWrapper.handleSubConnStateChange(sc, s)
|
||||
cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
|
||||
cc.mu.Unlock()
|
||||
}
|
||||
|
||||
|
@ -648,6 +702,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
|
|||
// Caller needs to make sure len(addrs) > 0.
|
||||
func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
|
||||
ac := &addrConn{
|
||||
state: connectivity.Idle,
|
||||
cc: cc,
|
||||
addrs: addrs,
|
||||
scopts: opts,
|
||||
|
@ -664,7 +719,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
|
|||
}
|
||||
if channelz.IsOn() {
|
||||
ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
|
||||
Desc: "Subchannel Created",
|
||||
Severity: channelz.CtINFO,
|
||||
Parent: &channelz.TraceEventDesc{
|
||||
|
@ -736,7 +791,7 @@ func (ac *addrConn) connect() error {
|
|||
}
|
||||
// Update connectivity state within the lock to prevent subsequent or
|
||||
// concurrent calls from resetting the transport more than once.
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
ac.mu.Unlock()
|
||||
|
||||
// Start a goroutine connecting to the server asynchronously.
|
||||
|
@ -762,7 +817,7 @@ func (ac *addrConn) connect() error {
|
|||
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
||||
channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
||||
if ac.state == connectivity.Shutdown ||
|
||||
ac.state == connectivity.TransientFailure ||
|
||||
ac.state == connectivity.Idle {
|
||||
|
@ -782,7 +837,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
|||
break
|
||||
}
|
||||
}
|
||||
grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
|
||||
channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
|
||||
if curAddrFound {
|
||||
ac.addrs = addrs
|
||||
}
|
||||
|
@ -822,7 +877,8 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
|
|||
}
|
||||
|
||||
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
|
||||
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
|
||||
Ctx: ctx,
|
||||
FullMethodName: method,
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -831,10 +887,10 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
|
|||
return t, done, nil
|
||||
}
|
||||
|
||||
func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
|
||||
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
|
||||
if sc == nil {
|
||||
// should never reach here.
|
||||
return fmt.Errorf("got nil pointer for service config")
|
||||
return
|
||||
}
|
||||
cc.sc = sc
|
||||
|
||||
|
@ -850,10 +906,38 @@ func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
|
|||
cc.retryThrottler.Store((*retryThrottler)(nil))
|
||||
}
|
||||
|
||||
return nil
|
||||
if cc.dopts.balancerBuilder == nil {
|
||||
// Only look at balancer types and switch balancer if balancer dial
|
||||
// option is not set.
|
||||
var newBalancerName string
|
||||
if cc.sc != nil && cc.sc.lbConfig != nil {
|
||||
newBalancerName = cc.sc.lbConfig.name
|
||||
} else {
|
||||
var isGRPCLB bool
|
||||
for _, a := range addrs {
|
||||
if a.Type == resolver.GRPCLB {
|
||||
isGRPCLB = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if isGRPCLB {
|
||||
newBalancerName = grpclbName
|
||||
} else if cc.sc != nil && cc.sc.LB != nil {
|
||||
newBalancerName = *cc.sc.LB
|
||||
} else {
|
||||
newBalancerName = PickFirstBalancerName
|
||||
}
|
||||
}
|
||||
cc.switchBalancer(newBalancerName)
|
||||
} else if cc.balancerWrapper == nil {
|
||||
// Balancer dial option was set, and this is the first time handling
|
||||
// resolved addresses. Build a balancer with dopts.balancerBuilder.
|
||||
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
|
||||
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
|
||||
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
|
||||
cc.mu.RLock()
|
||||
r := cc.resolverWrapper
|
||||
cc.mu.RUnlock()
|
||||
|
@ -875,8 +959,9 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
// This API is EXPERIMENTAL.
func (cc *ClientConn) ResetConnectBackoff() {
cc.mu.Lock()
defer cc.mu.Unlock()
for ac := range cc.conns {
conns := cc.conns
cc.mu.Unlock()
for ac := range conns {
ac.resetConnectBackoff()
}
}
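Editorial note, not part of this diff: ResetConnectBackoff is the experimental hook an application can call when it has an out-of-band signal that connectivity is back, so subchannels retry immediately instead of finishing their current backoff sleep. A hedged sketch; the networkUp channel is a hypothetical signal source the application is assumed to own.

package example

import "google.golang.org/grpc"

// resetOnNetworkChange asks every subchannel to abandon its backoff interval
// whenever the (hypothetical) networkUp channel fires.
func resetOnNetworkChange(conn *grpc.ClientConn, networkUp <-chan struct{}) {
	for range networkUp {
		conn.ResetConnectBackoff()
	}
}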
@ -923,7 +1008,7 @@ func (cc *ClientConn) Close() error {
|
|||
Severity: channelz.CtINFO,
|
||||
}
|
||||
}
|
||||
channelz.AddTraceEvent(cc.channelzID, ted)
|
||||
channelz.AddTraceEvent(cc.channelzID, 0, ted)
|
||||
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
|
||||
// the entity being deleted, and thus prevent it from being deleted right away.
|
||||
channelz.RemoveEntry(cc.channelzID)
|
||||
|
@ -962,20 +1047,13 @@ type addrConn struct {
|
|||
}
|
||||
|
||||
// Note: this requires a lock on ac.mu.
|
||||
func (ac *addrConn) updateConnectivityState(s connectivity.State) {
|
||||
func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
|
||||
if ac.state == s {
|
||||
return
|
||||
}
|
||||
|
||||
updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s)
|
||||
ac.state = s
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: updateMsg,
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
ac.cc.handleSubConnStateChange(ac.acbw, s)
|
||||
channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s)
|
||||
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
|
||||
}
|
||||
|
||||
// adjustParams updates parameters used to create transports upon
|
||||
|
@ -995,7 +1073,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
|
|||
func (ac *addrConn) resetTransport() {
|
||||
for i := 0; ; i++ {
|
||||
if i > 0 {
|
||||
ac.cc.resolveNow(resolver.ResolveNowOption{})
|
||||
ac.cc.resolveNow(resolver.ResolveNowOptions{})
|
||||
}
|
||||
|
||||
ac.mu.Lock()
|
||||
|
@ -1024,7 +1102,7 @@ func (ac *addrConn) resetTransport() {
|
|||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
|
||||
connectDeadline := time.Now().Add(dialDuration)
|
||||
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
ac.transport = nil
|
||||
ac.mu.Unlock()
|
||||
|
||||
|
@ -1037,7 +1115,7 @@ func (ac *addrConn) resetTransport() {
|
|||
ac.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ac.updateConnectivityState(connectivity.TransientFailure)
|
||||
ac.updateConnectivityState(connectivity.TransientFailure, err)
|
||||
|
||||
// Backoff.
|
||||
b := ac.resetBackoff
|
||||
|
@ -1093,6 +1171,7 @@ func (ac *addrConn) resetTransport() {
|
|||
// first successful one. It returns the transport, the address and a Event in
|
||||
// the successful case. The Event fires when the returned transport disconnects.
|
||||
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
|
||||
var firstConnErr error
|
||||
for _, addr := range addrs {
|
||||
ac.mu.Lock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
|
@ -1110,22 +1189,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
|||
}
|
||||
ac.mu.Unlock()
|
||||
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
|
||||
|
||||
newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
|
||||
if err == nil {
|
||||
return newTr, addr, reconnect, nil
|
||||
}
|
||||
if firstConnErr == nil {
|
||||
firstConnErr = err
|
||||
}
|
||||
ac.cc.blockingpicker.updateConnectionError(err)
|
||||
}
|
||||
|
||||
// Couldn't connect to any address.
|
||||
return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
|
||||
return nil, resolver.Address{}, nil, firstConnErr
|
||||
}
|
||||
|
||||
// createTransport creates a connection to addr. It returns the transport and a
|
||||
|
@ -1136,10 +1213,16 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
onCloseCalled := make(chan struct{})
|
||||
reconnect := grpcsync.NewEvent()
|
||||
|
||||
authority := ac.cc.authority
|
||||
// addr.ServerName takes precedent over ClientConn authority, if present.
|
||||
if addr.ServerName != "" {
|
||||
authority = addr.ServerName
|
||||
}
|
||||
|
||||
target := transport.TargetInfo{
|
||||
Addr: addr.Addr,
|
||||
Metadata: addr.Metadata,
|
||||
Authority: ac.cc.authority,
|
||||
Authority: authority,
|
||||
}
|
||||
|
||||
once := sync.Once{}
|
||||
|
@ -1152,7 +1235,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
// state to Connecting.
|
||||
//
|
||||
// TODO: this should be Idle when grpc-go properly supports it.
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
}
|
||||
})
|
||||
ac.mu.Unlock()
|
||||
|
@ -1167,7 +1250,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
// state to Connecting.
|
||||
//
|
||||
// TODO: this should be Idle when grpc-go properly supports it.
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
}
|
||||
})
|
||||
ac.mu.Unlock()
|
||||
|
@ -1188,15 +1271,15 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
|
||||
if err != nil {
|
||||
// newTr is either nil, or closed.
|
||||
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
|
||||
channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(connectDeadline.Sub(time.Now())):
|
||||
case <-time.After(time.Until(connectDeadline)):
|
||||
// We didn't get the preface in time.
|
||||
newTr.Close()
|
||||
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
|
||||
channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
|
||||
return nil, nil, errors.New("timed out waiting for server handshake")
|
||||
case <-prefaceReceived:
|
||||
// We got the preface - huzzah! things are good.
|
||||
|
@ -1224,7 +1307,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|||
var healthcheckManagingState bool
|
||||
defer func() {
|
||||
if !healthcheckManagingState {
|
||||
ac.updateConnectivityState(connectivity.Ready)
|
||||
ac.updateConnectivityState(connectivity.Ready, nil)
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -1243,7 +1326,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|||
// The health package is not imported to set health check function.
|
||||
//
|
||||
// TODO: add a link to the health check doc in the error message.
|
||||
grpclog.Error("Health check is requested but health check function is not set.")
|
||||
channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.")
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1260,28 +1343,22 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|||
ac.mu.Unlock()
|
||||
return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
|
||||
}
|
||||
setConnectivityState := func(s connectivity.State) {
|
||||
setConnectivityState := func(s connectivity.State, lastErr error) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
if ac.transport != currentTr {
|
||||
return
|
||||
}
|
||||
ac.updateConnectivityState(s)
|
||||
ac.updateConnectivityState(s, lastErr)
|
||||
}
|
||||
// Start the health checking stream.
|
||||
go func() {
|
||||
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Unimplemented {
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
|
||||
Severity: channelz.CtError,
|
||||
})
|
||||
}
|
||||
grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
|
||||
channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
|
||||
} else {
|
||||
grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
|
||||
channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -1331,8 +1408,8 @@ func (ac *addrConn) tearDown(err error) {
|
|||
curTr := ac.transport
|
||||
ac.transport = nil
|
||||
// We have to set the state to Shutdown before anything else to prevent races
|
||||
// between setting the state and logic that waits on context cancelation / etc.
|
||||
ac.updateConnectivityState(connectivity.Shutdown)
|
||||
// between setting the state and logic that waits on context cancellation / etc.
|
||||
ac.updateConnectivityState(connectivity.Shutdown, nil)
|
||||
ac.cancel()
|
||||
ac.curAddr = resolver.Address{}
|
||||
if err == errConnDrain && curTr != nil {
|
||||
|
@ -1346,7 +1423,7 @@ func (ac *addrConn) tearDown(err error) {
|
|||
ac.mu.Lock()
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
|
||||
Desc: "Subchannel Deleted",
|
||||
Severity: channelz.CtINFO,
|
||||
Parent: &channelz.TraceEventDesc{
|
||||
|
@ -1355,7 +1432,7 @@ func (ac *addrConn) tearDown(err error) {
|
|||
},
|
||||
})
|
||||
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
|
||||
// the entity beng deleted, and thus prevent it from being deleted right away.
|
||||
// the entity being deleted, and thus prevent it from being deleted right away.
|
||||
channelz.RemoveEntry(ac.channelzID)
|
||||
}
|
||||
ac.mu.Unlock()
|
||||
|
@ -1445,3 +1522,12 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
|
|||
// Deprecated: This error is never returned by grpc and should not be
|
||||
// referenced by users.
|
||||
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
|
||||
|
||||
func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
|
||||
for _, rb := range cc.dopts.resolvers {
|
||||
if cc.parsedTarget.Scheme == rb.Scheme() {
|
||||
return rb
|
||||
}
|
||||
}
|
||||
return resolver.Get(cc.parsedTarget.Scheme)
|
||||
}
|
||||
|
|
|
@ -24,16 +24,12 @@ package credentials // import "google.golang.org/grpc/credentials"
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/credentials/internal"
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
|
@ -45,7 +41,8 @@ type PerRPCCredentials interface {
|
|||
// context. If a status code is returned, it will be used as the status
|
||||
// for the RPC. uri is the URI of the entry point for the request.
|
||||
// When supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation.
|
||||
// timeout and cancellation. Additionally, RequestInfo data will be
|
||||
// available via ctx to this call.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||
|
@ -54,6 +51,48 @@ type PerRPCCredentials interface {
|
|||
RequireTransportSecurity() bool
|
||||
}
|
||||
|
||||
// SecurityLevel defines the protection level on an established connection.
//
// This API is experimental.
type SecurityLevel int

const (
// NoSecurity indicates a connection is insecure.
// The zero SecurityLevel value is invalid for backward compatibility.
NoSecurity SecurityLevel = iota + 1
// IntegrityOnly indicates a connection only provides integrity protection.
IntegrityOnly
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
PrivacyAndIntegrity
)

// String returns SecurityLevel in a string format.
func (s SecurityLevel) String() string {
switch s {
case NoSecurity:
return "NoSecurity"
case IntegrityOnly:
return "IntegrityOnly"
case PrivacyAndIntegrity:
return "PrivacyAndIntegrity"
}
return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
}

// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
// It should be embedded in a struct implementing AuthInfo to provide additional information
// about the credentials.
//
// This API is experimental.
type CommonAuthInfo struct {
SecurityLevel SecurityLevel
}

// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
return c
}
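Editorial note, not part of this diff: SecurityLevel and CommonAuthInfo feed the CheckSecurityLevel/RequestInfo helpers added further down in this file, whose typical consumer is a PerRPCCredentials implementation that refuses to attach a token over an unprotected transport. A minimal sketch under that assumption; the tokenCreds type, token value, and metadata key are hypothetical placeholders.

package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
)

// tokenCreds is a hypothetical PerRPCCredentials implementation.
type tokenCreds struct{ token string }

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	// Only send the bearer token if the connection provides privacy and integrity.
	if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil {
		return nil, fmt.Errorf("unable to transfer token: %v", err)
	}
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c tokenCreds) RequireTransportSecurity() bool { return true }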
||||
// ProtocolInfo provides information regarding the gRPC wire protocol version,
|
||||
// security protocol, security protocol version in use, server name, etc.
|
||||
type ProtocolInfo struct {
|
||||
|
@ -61,13 +100,19 @@ type ProtocolInfo struct {
|
|||
ProtocolVersion string
|
||||
// SecurityProtocol is the security protocol in use.
|
||||
SecurityProtocol string
|
||||
// SecurityVersion is the security protocol version.
|
||||
// SecurityVersion is the security protocol version. It is a static version string from the
|
||||
// credentials, not a value that reflects per-connection protocol negotiation. To retrieve
|
||||
// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
|
||||
//
|
||||
// Deprecated: please use Peer.AuthInfo.
|
||||
SecurityVersion string
|
||||
// ServerName is the user-configured server name.
|
||||
ServerName string
|
||||
}
|
||||
|
||||
// AuthInfo defines the common interface for the auth information the users are interested in.
|
||||
// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
|
||||
// information about the credentials in it.
|
||||
type AuthInfo interface {
|
||||
AuthType() string
|
||||
}
|
||||
|
@ -82,7 +127,8 @@ type TransportCredentials interface {
|
|||
// ClientHandshake does the authentication handshake specified by the corresponding
|
||||
// authentication protocol on rawConn for clients. It returns the authenticated
|
||||
// connection and the corresponding auth information about the connection.
|
||||
// Implementations must use the provided context to implement timely cancellation.
|
||||
// The auth information should embed CommonAuthInfo to return additional information about
|
||||
// the credentials. Implementations must use the provided context to implement timely cancellation.
|
||||
// gRPC will try to reconnect if the error returned is a temporary error
|
||||
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
|
||||
// If the returned error is a wrapper error, implementations should make sure that
|
||||
|
@ -92,7 +138,8 @@ type TransportCredentials interface {
|
|||
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||
// ServerHandshake does the authentication handshake for servers. It returns
|
||||
// the authenticated connection and the corresponding auth information about
|
||||
// the connection.
|
||||
// the connection. The auth information should embed CommonAuthInfo to return additional information
|
||||
// about the credentials.
|
||||
//
|
||||
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
||||
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
|
||||
|
@ -125,145 +172,63 @@ type Bundle interface {
|
|||
NewWithMode(mode string) (Bundle, error)
|
||||
}
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
|
||||
//
|
||||
// This API is experimental.
|
||||
type RequestInfo struct {
|
||||
// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
|
||||
Method string
|
||||
// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
|
||||
AuthInfo AuthInfo
|
||||
}
|
||||
|
||||
// AuthType returns the type of TLSInfo as a string.
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
|
||||
type requestInfoKey struct{}
|
||||
|
||||
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
|
||||
//
|
||||
// This API is experimental.
|
||||
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
|
||||
ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
|
||||
return
|
||||
}
|
||||
|
||||
// GetSecurityValue returns security info requested by channelz.
|
||||
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
|
||||
v := &TLSChannelzSecurityValue{
|
||||
StandardName: cipherSuiteLookup[t.State.CipherSuite],
|
||||
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
|
||||
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
|
||||
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
|
||||
//
|
||||
// This API is experimental.
|
||||
func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
|
||||
type internalInfo interface {
|
||||
GetCommonAuthInfo() *CommonAuthInfo
|
||||
}
|
||||
// Currently there's no way to get LocalCertificate info from tls package.
|
||||
if len(t.State.PeerCertificates) > 0 {
|
||||
v.RemoteCertificate = t.State.PeerCertificates[0].Raw
|
||||
ri, _ := RequestInfoFromContext(ctx)
|
||||
if ri.AuthInfo == nil {
|
||||
return errors.New("unable to obtain SecurityLevel from context")
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config *tls.Config
|
||||
}
|
||||
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
ServerName: c.config.ServerName,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||
cfg := cloneTLSConfig(c.config)
|
||||
if cfg.ServerName == "" {
|
||||
colonPos := strings.LastIndex(authority, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(authority)
|
||||
if ci, ok := ri.AuthInfo.(internalInfo); ok {
|
||||
// CommonAuthInfo.SecurityLevel has an invalid value.
|
||||
if ci.GetCommonAuthInfo().SecurityLevel == 0 {
|
||||
return nil
|
||||
}
|
||||
cfg.ServerName = authority[:colonPos]
|
||||
}
|
||||
conn := tls.Client(rawConn, cfg)
|
||||
errChannel := make(chan error, 1)
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
}()
|
||||
select {
|
||||
case err := <-errChannel:
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
if ci.GetCommonAuthInfo().SecurityLevel < level {
|
||||
return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) Clone() TransportCredentials {
|
||||
return NewTLS(c.config)
|
||||
}
|
||||
|
||||
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
|
||||
c.config.ServerName = serverNameOverride
|
||||
// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
|
||||
return nil
|
||||
}
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
func appendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
func init() {
|
||||
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
|
||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||
func NewTLS(c *tls.Config) TransportCredentials {
|
||||
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
|
||||
return tc
|
||||
}
|
||||
|
||||
// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
|
||||
}
|
||||
|
||||
// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||
}
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
|
||||
}
|
||||
|
||||
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
|
||||
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||
}
|
||||
|
||||
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
|
||||
// file for server.
|
||||
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
}
|
||||
|
||||
// ChannelzSecurityInfo defines the interface that security protocols should implement
|
||||
// in order to provide security info to channelz.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ChannelzSecurityInfo interface {
|
||||
GetSecurityValue() ChannelzSecurityValue
|
||||
}
|
||||
|
@ -271,66 +236,20 @@ type ChannelzSecurityInfo interface {
|
|||
// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
|
||||
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
|
||||
// and *OtherChannelzSecurityValue.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ChannelzSecurityValue interface {
|
||||
isChannelzSecurityValue()
|
||||
}
|
||||
|
||||
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||||
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||||
type TLSChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
StandardName string
|
||||
LocalCertificate []byte
|
||||
RemoteCertificate []byte
|
||||
}
|
||||
|
||||
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
|
||||
// from GetSecurityValue(), which contains protocol specific security info. Note
|
||||
// the Value field will be sent to users of channelz requesting channel info, and
|
||||
// thus sensitive info should better be avoided.
|
||||
//
|
||||
// This API is experimental.
|
||||
type OtherChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
Name string
|
||||
Value proto.Message
|
||||
}
|
||||
|
||||
var cipherSuiteLookup = map[uint16]string{
|
||||
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
}
|
||||
|
||||
// cloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
|
||||
|
|
0
vendor/google.golang.org/grpc/credentials/tls13.go → vendor/google.golang.org/grpc/credentials/go12.go
generated
vendored
|
@ -0,0 +1,225 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc/credentials/internal"
|
||||
)
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
CommonAuthInfo
|
||||
}
|
||||
|
||||
// AuthType returns the type of TLSInfo as a string.
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
}
|
||||
|
||||
// GetSecurityValue returns security info requested by channelz.
|
||||
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
|
||||
v := &TLSChannelzSecurityValue{
|
||||
StandardName: cipherSuiteLookup[t.State.CipherSuite],
|
||||
}
|
||||
// Currently there's no way to get LocalCertificate info from tls package.
|
||||
if len(t.State.PeerCertificates) > 0 {
|
||||
v.RemoteCertificate = t.State.PeerCertificates[0].Raw
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config *tls.Config
|
||||
}
|
||||
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
ServerName: c.config.ServerName,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||
cfg := cloneTLSConfig(c.config)
|
||||
if cfg.ServerName == "" {
|
||||
serverName, _, err := net.SplitHostPort(authority)
|
||||
if err != nil {
|
||||
// If the authority had no host port or if the authority cannot be parsed, use it as-is.
|
||||
serverName = authority
|
||||
}
|
||||
cfg.ServerName = serverName
|
||||
}
|
||||
conn := tls.Client(rawConn, cfg)
|
||||
errChannel := make(chan error, 1)
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
close(errChannel)
|
||||
}()
|
||||
select {
|
||||
case err := <-errChannel:
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
conn.Close()
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
conn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) Clone() TransportCredentials {
|
||||
return NewTLS(c.config)
|
||||
}
|
||||
|
||||
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
|
||||
c.config.ServerName = serverNameOverride
|
||||
return nil
|
||||
}
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
func appendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||
func NewTLS(c *tls.Config) TransportCredentials {
|
||||
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
|
||||
return tc
|
||||
}
|
||||
|
||||
// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
|
||||
}
|
||||
|
||||
// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||
}
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
|
||||
}
|
||||
|
||||
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
|
||||
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||
}
|
||||
|
||||
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
|
||||
// file for server.
|
||||
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
}
|
||||
|
||||
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||||
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type TLSChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
StandardName string
|
||||
LocalCertificate []byte
|
||||
RemoteCertificate []byte
|
||||
}
|
||||
|
||||
var cipherSuiteLookup = map[uint16]string{
|
||||
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
}
|
||||
|
||||
// cloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
|
|
@ -24,11 +24,12 @@ import (
"net"
"time"

"google.golang.org/grpc/backoff"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/backoff"
internalbackoff "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
|
@ -47,7 +48,7 @@ type dialOptions struct {
|
|||
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
bs backoff.Strategy
|
||||
bs internalbackoff.Strategy
|
||||
block bool
|
||||
insecure bool
|
||||
timeout time.Duration
|
||||
|
@ -57,9 +58,7 @@ type dialOptions struct {
|
|||
callOptions []CallOption
|
||||
// This is used by v1 balancer dial option WithBalancer to support v1
|
||||
// balancer, and also by WithBalancerName dial option.
|
||||
balancerBuilder balancer.Builder
|
||||
// This is to support grpclb.
|
||||
resolverBuilder resolver.Builder
|
||||
balancerBuilder balancer.Builder
|
||||
channelzParentID int64
|
||||
disableServiceConfig bool
|
||||
disableRetry bool
|
||||
|
@ -68,6 +67,11 @@ type dialOptions struct {
|
|||
minConnectTimeout func() time.Duration
|
||||
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
||||
defaultServiceConfigRawJSON *string
|
||||
// This is used by ccResolverWrapper to backoff between successive calls to
|
||||
// resolver.ResolveNow(). The user will have no need to configure this, but
|
||||
// we need to be able to configure this in tests.
|
||||
resolveNowBackoff func(int) time.Duration
|
||||
resolvers []resolver.Builder
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
|
@ -226,13 +230,6 @@ func WithBalancerName(balancerName string) DialOption {
|
|||
})
|
||||
}
|
||||
|
||||
// withResolverBuilder is only for grpclb.
|
||||
func withResolverBuilder(b resolver.Builder) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.resolverBuilder = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithServiceConfig returns a DialOption which has a channel to read the
|
||||
// service configuration.
|
||||
//
|
||||
|
@ -246,8 +243,28 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
})
}

// WithConnectParams configures the dialer to use the provided ConnectParams.
//
// The backoff configuration specified as part of the ConnectParams overrides
// all defaults specified in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
// using the backoff.DefaultConfig as a base, in cases where you want to
// override only a subset of the backoff configuration.
//
// This API is EXPERIMENTAL.
func WithConnectParams(p ConnectParams) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.bs = internalbackoff.Exponential{Config: p.Backoff}
o.minConnectTimeout = func() time.Duration {
return p.MinConnectTimeout
}
})
}

// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
// when backing off after failed connection attempts.
//
// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
func WithBackoffMaxDelay(md time.Duration) DialOption {
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
}

@ -255,19 +272,18 @@ func WithBackoffMaxDelay(md time.Duration) DialOption {
// WithBackoffConfig configures the dialer to use the provided backoff
// parameters after connection failures.
//
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
// for use.
// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
func WithBackoffConfig(b BackoffConfig) DialOption {
return withBackoff(backoff.Exponential{
MaxDelay: b.MaxDelay,
})
bc := backoff.DefaultConfig
bc.MaxDelay = b.MaxDelay
return withBackoff(internalbackoff.Exponential{Config: bc})
}

// withBackoff sets the backoff strategy used for connectRetryNum after a failed
// connection attempt.
//
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
func withBackoff(bs backoff.Strategy) DialOption {
func withBackoff(bs internalbackoff.Strategy) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.bs = bs
})
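Editorial note, not part of this diff: WithConnectParams is the option this update pulls in to make the client's connection backoff configurable. A minimal sketch of a caller using it, assuming a placeholder target address and illustrative delay values.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the documented defaults and override only what is needed.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}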
|
@ -322,8 +338,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
|
|||
// WithTimeout returns a DialOption that configures a timeout for dialing a
|
||||
// ClientConn initially. This is valid if and only if WithBlock() is present.
|
||||
//
|
||||
// Deprecated: use DialContext and context.WithTimeout instead. Will be
|
||||
// supported throughout 1.x.
|
||||
// Deprecated: use DialContext instead of Dial and context.WithTimeout
|
||||
// instead. Will be supported throughout 1.x.
|
||||
func WithTimeout(d time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.timeout = d
|
||||
|
@ -341,7 +357,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
|
|||
}
|
||||
|
||||
func init() {
|
||||
internal.WithResolverBuilder = withResolverBuilder
|
||||
internal.WithHealthCheckFunc = withHealthCheckFunc
|
||||
}
|
||||
|
||||
|
@ -455,6 +470,8 @@ func WithAuthority(a string) DialOption {
|
|||
// WithChannelzParentID returns a DialOption that specifies the channelz ID of
|
||||
// current ClientConn's parent. This function is used in nested channel creation
|
||||
// (e.g. grpclb dial).
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithChannelzParentID(id int64) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.channelzParentID = id
|
||||
|
@ -539,6 +556,7 @@ func defaultDialOptions() dialOptions {
|
|||
WriteBufferSize: defaultWriteBufSize,
|
||||
ReadBufferSize: defaultReadBufSize,
|
||||
},
|
||||
resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -552,3 +570,25 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
o.minConnectTimeout = f
})
}

// withResolveNowBackoff specifies the function that clientconn uses to backoff
// between successive calls to resolver.ResolveNow().
//
// For testing purpose only.
func withResolveNowBackoff(f func(int) time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.resolveNowBackoff = f
})
}

// WithResolvers allows a list of resolver implementations to be registered
// locally with the ClientConn without needing to be globally registered via
// resolver.Register. They will be matched against the scheme used for the
// current Dial only, and will take precedence over the global registry.
//
// This API is EXPERIMENTAL.
func WithResolvers(rs ...resolver.Builder) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.resolvers = append(o.resolvers, rs...)
})
}
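Editorial note, not part of this diff: WithResolvers accepts resolver.Builder implementations that apply to a single Dial. A hedged sketch of a trivial static resolver wired in locally; the "static" scheme, the address list, and the type names are assumptions for illustration only.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
)

// staticBuilder reports a fixed address list for every target it builds.
type staticBuilder struct{ addrs []string }

func (b *staticBuilder) Scheme() string { return "static" }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	state := resolver.State{}
	for _, a := range b.addrs {
		state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
	}
	cc.UpdateState(state)
	return nopResolver{}, nil
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func main() {
	b := &staticBuilder{addrs: []string{"10.0.0.1:50051", "10.0.0.2:50051"}}
	// The builder is consulted only for this Dial, because it is registered locally.
	conn, err := grpc.Dial("static:///cluster", grpc.WithInsecure(), grpc.WithResolvers(b))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}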
|
|
|
@ -46,6 +46,10 @@ type Compressor interface {
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
// EXPERIMENTAL: if a Compressor implements
// DecompressedSize(compressedBytes []byte) int, gRPC will call it
// to determine the size of the buffer allocated for the result of decompression.
// Return -1 to indicate unknown size.
}

var registeredCompressor = make(map[string]Compressor)
|
|
|
@ -1,19 +1,16 @@
module google.golang.org/grpc

go 1.11

require (
cloud.google.com/go v0.26.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/client9/misspell v0.3.4
github.com/envoyproxy/go-control-plane v0.9.4
github.com/envoyproxy/protoc-gen-validate v0.1.0
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.1.1
github.com/golang/protobuf v1.2.0
github.com/golang/protobuf v1.3.3
github.com/google/go-cmp v0.2.0
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
google.golang.org/appengine v1.1.0 // indirect
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
)
|
|
|
@ -26,72 +26,78 @@
|
|||
// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
|
||||
package grpclog // import "google.golang.org/grpc/grpclog"
|
||||
|
||||
import "os"
|
||||
import (
|
||||
"os"
|
||||
|
||||
var logger = newLoggerV2()
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
SetLoggerV2(newLoggerV2())
|
||||
}
|
||||
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
func V(l int) bool {
|
||||
return logger.V(l)
|
||||
return grpclog.Logger.V(l)
|
||||
}
|
||||
|
||||
// Info logs to the INFO log.
|
||||
func Info(args ...interface{}) {
|
||||
logger.Info(args...)
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
logger.Infof(format, args...)
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
func Infoln(args ...interface{}) {
|
||||
logger.Infoln(args...)
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warning logs to the WARNING log.
|
||||
func Warning(args ...interface{}) {
|
||||
logger.Warning(args...)
|
||||
grpclog.Logger.Warning(args...)
|
||||
}
|
||||
|
||||
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
logger.Warningf(format, args...)
|
||||
grpclog.Logger.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
func Warningln(args ...interface{}) {
|
||||
logger.Warningln(args...)
|
||||
grpclog.Logger.Warningln(args...)
|
||||
}
|
||||
|
||||
// Error logs to the ERROR log.
|
||||
func Error(args ...interface{}) {
|
||||
logger.Error(args...)
|
||||
grpclog.Logger.Error(args...)
|
||||
}
|
||||
|
||||
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
logger.Errorf(format, args...)
|
||||
grpclog.Logger.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
func Errorln(args ...interface{}) {
|
||||
logger.Errorln(args...)
|
||||
grpclog.Logger.Errorln(args...)
|
||||
}
|
||||
|
||||
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatal(args ...interface{}) {
|
||||
logger.Fatal(args...)
|
||||
grpclog.Logger.Fatal(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
|
||||
// It calles os.Exit() with exit code 1.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
logger.Fatalf(format, args...)
|
||||
grpclog.Logger.Fatalf(format, args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) {
|
|||
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalln(args ...interface{}) {
|
||||
logger.Fatalln(args...)
|
||||
grpclog.Logger.Fatalln(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) {
|
|||
//
|
||||
// Deprecated: use Info.
|
||||
func Print(args ...interface{}) {
|
||||
logger.Info(args...)
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||
//
|
||||
// Deprecated: use Infof.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
logger.Infof(format, args...)
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||
//
|
||||
// Deprecated: use Infoln.
|
||||
func Println(args ...interface{}) {
|
||||
logger.Infoln(args...)
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
|
||||
package grpclog
|
||||
|
||||
import "google.golang.org/grpc/internal/grpclog"
|
||||
|
||||
// Logger mimics golang's standard Logger as an interface.
|
||||
//
|
||||
// Deprecated: use LoggerV2.
|
||||
|
@ -35,7 +37,7 @@ type Logger interface {
|
|||
//
|
||||
// Deprecated: use SetLoggerV2.
|
||||
func SetLogger(l Logger) {
|
||||
logger = &loggerWrapper{Logger: l}
|
||||
grpclog.Logger = &loggerWrapper{Logger: l}
|
||||
}
|
||||
|
||||
// loggerWrapper wraps Logger into a LoggerV2.
|
||||
|
|
|
@ -24,6 +24,8 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
// LoggerV2 does underlying logging work for grpclog.
|
||||
|
@ -65,7 +67,8 @@ type LoggerV2 interface {
|
|||
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
|
||||
// Not mutex-protected, should be called before any gRPC functions.
|
||||
func SetLoggerV2(l LoggerV2) {
|
||||
logger = l
|
||||
grpclog.Logger = l
|
||||
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -193,3 +196,19 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) {
|
|||
func (g *loggerT) V(l int) bool {
|
||||
return l <= g.v
|
||||
}
|
||||
|
||||
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
|
||||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||||
// depth set for trivial functions the logger may ignore.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type DepthLoggerV2 interface {
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
}
|
||||
|
|
|
@ -33,20 +33,20 @@ import (
|
|||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const maxDelay = 120 * time.Second
|
||||
|
||||
var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
|
||||
var backoffFunc = func(ctx context.Context, retries int) bool {
|
||||
d := backoffStrategy.Backoff(retries)
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-timer.C:
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return false
|
||||
var (
|
||||
backoffStrategy = backoff.DefaultExponential
|
||||
backoffFunc = func(ctx context.Context, retries int) bool {
|
||||
d := backoffStrategy.Backoff(retries)
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-timer.C:
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.HealthCheckFunc = clientHealthCheck
|
||||
|
@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch"
|
|||
|
||||
// This function implements the protocol defined at:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
|
||||
func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
|
||||
func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error {
|
||||
tryCnt := 0
|
||||
|
||||
retryConnection:
|
||||
|
@ -70,7 +70,7 @@ retryConnection:
|
|||
if ctx.Err() != nil {
|
||||
return nil
|
||||
}
|
||||
setConnectivityState(connectivity.Connecting)
|
||||
setConnectivityState(connectivity.Connecting, nil)
|
||||
rawS, err := newStream(healthCheckMethod)
|
||||
if err != nil {
|
||||
continue retryConnection
|
||||
|
@ -79,7 +79,7 @@ retryConnection:
|
|||
s, ok := rawS.(grpc.ClientStream)
|
||||
// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
|
||||
if !ok {
|
||||
setConnectivityState(connectivity.Ready)
|
||||
setConnectivityState(connectivity.Ready, nil)
|
||||
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
|
||||
}
|
||||
|
||||
|
@ -95,22 +95,22 @@ retryConnection:
|
|||
|
||||
// Reports healthy for the LBing purposes if health check is not implemented in the server.
|
||||
if status.Code(err) == codes.Unimplemented {
|
||||
setConnectivityState(connectivity.Ready)
|
||||
setConnectivityState(connectivity.Ready, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
|
||||
if err != nil {
|
||||
setConnectivityState(connectivity.TransientFailure)
|
||||
setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err))
|
||||
continue retryConnection
|
||||
}
|
||||
|
||||
// As a message has been received, removes the need for backoff for the next retry by reseting the try count.
|
||||
// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
|
||||
tryCnt = 0
|
||||
if resp.Status == healthpb.HealthCheckResponse_SERVING {
|
||||
setConnectivityState(connectivity.Ready)
|
||||
setConnectivityState(connectivity.Ready, nil)
|
||||
} else {
|
||||
setConnectivityState(connectivity.TransientFailure)
|
||||
setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
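For context, clientHealthCheck above is only installed when the health package is linked in (its init sets internal.HealthCheckFunc), and a service opts in through the service config. The following sketch shows one plausible wiring; the target and service name are illustrative, not taken from this commit.

package main

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/health" // init() registers clientHealthCheck via internal.HealthCheckFunc
)

func main() {
	// Enable client-side health checking for a hypothetical service.
	conn, err := grpc.Dial(
		"dns:///backend.example.com:443",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{
			"healthCheckConfig": {"serviceName": "backend.v1.Backend"},
			"loadBalancingPolicy": "round_robin"
		}`),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}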
|
@ -1,15 +1,16 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
package grpc_health_v1
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@ -21,7 +22,7 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type HealthCheckResponse_ServingStatus int32
|
||||
|
||||
|
@ -38,6 +39,7 @@ var HealthCheckResponse_ServingStatus_name = map[int32]string{
|
|||
2: "NOT_SERVING",
|
||||
3: "SERVICE_UNKNOWN",
|
||||
}
|
||||
|
||||
var HealthCheckResponse_ServingStatus_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"SERVING": 1,
|
||||
|
@ -48,8 +50,9 @@ var HealthCheckResponse_ServingStatus_value = map[string]int32{
|
|||
func (x HealthCheckResponse_ServingStatus) String() string {
|
||||
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
|
||||
}
|
||||
|
||||
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
|
||||
return fileDescriptor_e265fd9d4e077217, []int{1, 0}
|
||||
}
|
||||
|
||||
type HealthCheckRequest struct {
|
||||
|
@ -63,16 +66,17 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
|
|||
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*HealthCheckRequest) ProtoMessage() {}
|
||||
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
|
||||
return fileDescriptor_e265fd9d4e077217, []int{0}
|
||||
}
|
||||
|
||||
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
|
||||
func (m *HealthCheckRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckRequest.Merge(m, src)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckRequest.Size(m)
|
||||
|
@ -101,16 +105,17 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
|
|||
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*HealthCheckResponse) ProtoMessage() {}
|
||||
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
|
||||
return fileDescriptor_e265fd9d4e077217, []int{1}
|
||||
}
|
||||
|
||||
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
|
||||
func (m *HealthCheckResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckResponse.Merge(m, src)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckResponse.Size(m)
|
||||
|
@ -129,18 +134,43 @@ func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
|
|||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
|
||||
proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
|
||||
proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse")
|
||||
proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) }
|
||||
|
||||
var fileDescriptor_e265fd9d4e077217 = []byte{
|
||||
// 297 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
||||
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
|
||||
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
|
||||
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
|
||||
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
|
||||
0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
|
||||
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
|
||||
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
|
||||
0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
|
||||
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
|
||||
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
|
||||
0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
|
||||
0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
|
||||
0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
|
||||
0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
|
||||
0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
|
||||
0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
|
||||
0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
|
||||
0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// HealthClient is the client API for Health service.
|
||||
//
|
||||
|
@ -168,10 +198,10 @@ type HealthClient interface {
|
|||
}
|
||||
|
||||
type healthClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewHealthClient(cc *grpc.ClientConn) HealthClient {
|
||||
func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
|
||||
return &healthClient{cc}
|
||||
}
|
||||
|
||||
|
@ -239,6 +269,17 @@ type HealthServer interface {
|
|||
Watch(*HealthCheckRequest, Health_WatchServer) error
|
||||
}
|
||||
|
||||
// UnimplementedHealthServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedHealthServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
|
||||
}
|
||||
func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
|
||||
}
|
||||
|
||||
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
|
||||
s.RegisterService(&_Health_serviceDesc, srv)
|
||||
}
|
||||
|
@ -300,28 +341,3 @@ var _Health_serviceDesc = grpc.ServiceDesc{
|
|||
},
|
||||
Metadata: "grpc/health/v1/health.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
|
||||
|
||||
var fileDescriptor_health_6b1a06aa67f91efd = []byte{
|
||||
// 297 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
||||
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
|
||||
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
|
||||
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
|
||||
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
|
||||
0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
|
||||
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
|
||||
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
|
||||
0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
|
||||
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
|
||||
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
|
||||
0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
|
||||
0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
|
||||
0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
|
||||
0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
|
||||
0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
|
||||
0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
|
||||
0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
|
||||
0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ import (
|
|||
|
||||
// Server implements `service Health`.
|
||||
type Server struct {
|
||||
mu sync.Mutex
|
||||
mu sync.RWMutex
|
||||
// If shutdown is true, it's expected all serving status is NOT_SERVING, and
|
||||
// will stay in NOT_SERVING.
|
||||
shutdown bool
|
||||
|
@ -54,8 +54,8 @@ func NewServer() *Server {
|
|||
|
||||
// Check implements `service Health`.
|
||||
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if servingStatus, ok := s.statusMap[in.Service]; ok {
|
||||
return &healthpb.HealthCheckResponse{
|
||||
Status: servingStatus,
|
||||
|
@ -139,7 +139,7 @@ func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.H
|
|||
// Shutdown sets all serving status to NOT_SERVING, and configures the server to
|
||||
// ignore all future status changes.
|
||||
//
|
||||
// This changes serving status for all services. To set status for a perticular
|
||||
// This changes serving status for all services. To set status for a particular
|
||||
// services, call SetServingStatus().
|
||||
func (s *Server) Shutdown() {
|
||||
s.mu.Lock()
|
||||
|
@ -153,7 +153,7 @@ func (s *Server) Shutdown() {
|
|||
// Resume sets all serving status to SERVING, and configures the server to
|
||||
// accept all future status changes.
|
||||
//
|
||||
// This changes serving status for all services. To set status for a perticular
|
||||
// This changes serving status for all services. To set status for a particular
|
||||
// services, call SetServingStatus().
|
||||
func (s *Server) Resume() {
|
||||
s.mu.Lock()
|
||||
|
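The Server type above is the standard implementation of the health service; a backend typically registers it alongside its own services and flips statuses as it starts up or drains. A minimal sketch, assuming an illustrative listen address and service name:

package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // illustrative address
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	hs := health.NewServer()
	healthpb.RegisterHealthServer(s, hs)
	// Mark one (hypothetical) service as serving; Shutdown()/Resume() above
	// flip the status of every registered service at once.
	hs.SetServingStatus("backend.v1.Backend", healthpb.HealthCheckResponse_SERVING)
	if err := s.Serve(lis); err != nil {
		panic(err)
	}
}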
|
|
@ -25,44 +25,39 @@ package backoff
|
|||
import (
|
||||
"time"
|
||||
|
||||
grpcbackoff "google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
)
|
||||
|
||||
// Strategy defines the methodology for backing off after a grpc connection
|
||||
// failure.
|
||||
//
|
||||
type Strategy interface {
|
||||
// Backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
Backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
const (
|
||||
// baseDelay is the amount of time to wait before retrying after the first
|
||||
// failure.
|
||||
baseDelay = 1.0 * time.Second
|
||||
// factor is applied to the backoff after each retry.
|
||||
factor = 1.6
|
||||
// jitter provides a range to randomize backoff delays.
|
||||
jitter = 0.2
|
||||
)
|
||||
// DefaultExponential is an exponential backoff implementation using the
|
||||
// default values for all the configurable knobs defined in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
|
||||
|
||||
// Exponential implements exponential backoff algorithm as defined in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
type Exponential struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
// Config contains all options to configure the backoff algorithm.
|
||||
Config grpcbackoff.Config
|
||||
}
|
||||
|
||||
// Backoff returns the amount of time to wait before the next retry given the
|
||||
// number of retries.
|
||||
func (bc Exponential) Backoff(retries int) time.Duration {
|
||||
if retries == 0 {
|
||||
return baseDelay
|
||||
return bc.Config.BaseDelay
|
||||
}
|
||||
backoff, max := float64(baseDelay), float64(bc.MaxDelay)
|
||||
backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= factor
|
||||
backoff *= bc.Config.Multiplier
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
|
@ -70,7 +65,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
|
|||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
|
||||
backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
|
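To make the numbers concrete, here is a standalone sketch (not library code) of how the delays produced by Exponential grow with the default knobs from the grpc/backoff package (BaseDelay 1s, Multiplier 1.6, MaxDelay 120s); jitter is left out so the output stays deterministic.

package main

import (
	"fmt"
	"time"
)

// delay mirrors the loop in Exponential.Backoff above, without jitter.
func delay(retries int) time.Duration {
	const (
		base       = float64(time.Second)
		multiplier = 1.6
		maxDelay   = float64(120 * time.Second)
	)
	if retries == 0 {
		return time.Duration(base)
	}
	d := base
	for d < maxDelay && retries > 0 {
		d *= multiplier
		retries--
	}
	if d > maxDelay {
		d = maxDelay
	}
	return time.Duration(d)
}

func main() {
	for i := 0; i <= 5; i++ {
		fmt.Printf("retry %d -> %v\n", i, delay(i)) // 1s, 1.6s, 2.56s, 4.096s, ...
	}
}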
|
|
@ -34,7 +34,7 @@ type Logger interface {
|
|||
}
|
||||
|
||||
// binLogger is the global binary logger for the binary. One of this should be
|
||||
// built at init time from the configuration (environment varialbe or flags).
|
||||
// built at init time from the configuration (environment variable or flags).
|
||||
//
|
||||
// It is used to get a methodLogger for each individual method.
|
||||
var binLogger Logger
|
||||
|
@ -98,7 +98,7 @@ func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
|
|||
// New methodLogger with same service overrides the old one.
|
||||
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
|
||||
if _, ok := l.services[service]; ok {
|
||||
return fmt.Errorf("conflicting rules for service %v found", service)
|
||||
return fmt.Errorf("conflicting service rules for service %v found", service)
|
||||
}
|
||||
if l.services == nil {
|
||||
l.services = make(map[string]*methodLoggerConfig)
|
||||
|
@ -112,10 +112,10 @@ func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig)
|
|||
// New methodLogger with same method overrides the old one.
|
||||
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
|
||||
if _, ok := l.blacklist[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
||||
}
|
||||
if _, ok := l.methods[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting method rules for method %v found", method)
|
||||
}
|
||||
if l.methods == nil {
|
||||
l.methods = make(map[string]*methodLoggerConfig)
|
||||
|
@ -127,10 +127,10 @@ func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) er
|
|||
// Set blacklist method for "-service/method".
|
||||
func (l *logger) setBlacklist(method string) error {
|
||||
if _, ok := l.blacklist[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
||||
}
|
||||
if _, ok := l.methods[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting method rules for method %v found", method)
|
||||
}
|
||||
if l.blacklist == nil {
|
||||
l.blacklist = make(map[string]struct{})
|
||||
|
|
|
@ -43,7 +43,7 @@ import (
|
|||
// Foo.
|
||||
//
|
||||
// If two configs exist for one certain method or service, the one specified
|
||||
// later overrides the privous config.
|
||||
// later overrides the previous config.
|
||||
func NewLoggerFromConfigString(s string) Logger {
|
||||
if s == "" {
|
||||
return nil
|
||||
|
@ -74,7 +74,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
|
|||
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||
}
|
||||
if m == "*" {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
|
||||
}
|
||||
if suffix != "" {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
|
||||
|
|
|
@ -63,7 +63,7 @@ func (ns *noopSink) Close() error { return nil }
|
|||
|
||||
// newWriterSink creates a binary log sink with the given writer.
|
||||
//
|
||||
// Write() marshalls the proto message and writes it to the given writer. Each
|
||||
// Write() marshals the proto message and writes it to the given writer. Each
|
||||
// message is prefixed with a 4 byte big endian unsigned integer as the length.
|
||||
//
|
||||
// No buffer is done, Close() doesn't try to close the writer.
|
||||
|
|
85 vendor/google.golang.org/grpc/internal/buffer/unbounded.go (generated, vendored, new file)
|
@ -0,0 +1,85 @@
|
|||
/*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package buffer provides an implementation of an unbounded buffer.
|
||||
package buffer
|
||||
|
||||
import "sync"
|
||||
|
||||
// Unbounded is an implementation of an unbounded buffer which does not use
|
||||
// extra goroutines. This is typically used for passing updates from one entity
|
||||
// to another within gRPC.
|
||||
//
|
||||
// All methods on this type are thread-safe and don't block on anything except
|
||||
// the underlying mutex used for synchronization.
|
||||
//
|
||||
// Unbounded supports values of any type to be stored in it by using a channel
|
||||
// of `interface{}`. This means that a call to Put() incurs an extra memory
|
||||
// allocation, and also that users need a type assertion while reading. For
|
||||
// performance critical code paths, using Unbounded is strongly discouraged and
|
||||
// defining a new type specific implementation of this buffer is preferred. See
|
||||
// internal/transport/transport.go for an example of this.
|
||||
type Unbounded struct {
|
||||
c chan interface{}
|
||||
mu sync.Mutex
|
||||
backlog []interface{}
|
||||
}
|
||||
|
||||
// NewUnbounded returns a new instance of Unbounded.
|
||||
func NewUnbounded() *Unbounded {
|
||||
return &Unbounded{c: make(chan interface{}, 1)}
|
||||
}
|
||||
|
||||
// Put adds t to the unbounded buffer.
|
||||
func (b *Unbounded) Put(t interface{}) {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Load sends the earliest buffered data, if any, onto the read channel
|
||||
// returned by Get(). Users are expected to call this every time they read a
|
||||
// value from the read channel.
|
||||
func (b *Unbounded) Load() {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Get returns a read channel on which values added to the buffer, via Put(),
|
||||
// are sent on.
|
||||
//
|
||||
// Upon reading a value from this channel, users are expected to call Load() to
|
||||
// send the next buffered value onto the channel if there is any.
|
||||
func (b *Unbounded) Get() <-chan interface{} {
|
||||
return b.c
|
||||
}
|
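A short usage sketch of the buffer added above (illustrative only; being under internal/, the package can only be used from inside the grpc module, for example from a test next to it): every value read from Get() should be followed by a Load() so the next backlogged item, if any, is moved onto the channel.

package buffer

import "fmt"

// ExampleUnbounded sketches the intended Put/Get/Load pattern.
func ExampleUnbounded() {
	b := NewUnbounded()
	for i := 0; i < 3; i++ {
		b.Put(fmt.Sprintf("update-%d", i)) // Put never blocks
	}
	for i := 0; i < 3; i++ {
		v := <-b.Get() // read one buffered value
		b.Load()       // move the next backlogged value (if any) onto the channel
		fmt.Println(v.(string))
	}
	// Output:
	// update-0
	// update-1
	// update-2
}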
|
@ -30,7 +30,7 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
|||
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
|
||||
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a SubChannel's parent id cannot be 0")
|
||||
grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
|
@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 {
|
|||
// this listen socket.
|
||||
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a ListenSocket's parent id cannot be 0")
|
||||
grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
|
@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
|||
// this normal socket.
|
||||
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a NormalSocket's parent id cannot be 0")
|
||||
grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
|
@ -294,7 +294,19 @@ type TraceEventDesc struct {
|
|||
}
|
||||
|
||||
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
|
||||
func AddTraceEvent(id int64, desc *TraceEventDesc) {
|
||||
func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
|
||||
for d := desc; d != nil; d = d.Parent {
|
||||
switch d.Severity {
|
||||
case CtUNKNOWN:
|
||||
grpclog.InfoDepth(depth+1, d.Desc)
|
||||
case CtINFO:
|
||||
grpclog.InfoDepth(depth+1, d.Desc)
|
||||
case CtWarning:
|
||||
grpclog.WarningDepth(depth+1, d.Desc)
|
||||
case CtError:
|
||||
grpclog.ErrorDepth(depth+1, d.Desc)
|
||||
}
|
||||
}
|
||||
if getMaxTraceEntry() == 0 {
|
||||
return
|
||||
}
|
||||
|
|
100 vendor/google.golang.org/grpc/internal/channelz/logging.go (generated, vendored, new file)
|
@ -0,0 +1,100 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
// Info logs through grpclog.Info and adds a trace event if channelz is on.
|
||||
func Info(id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtINFO,
|
||||
})
|
||||
} else {
|
||||
grpclog.InfoDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Infof logs through grpclog.Infof and adds a trace event if channelz is on.
|
||||
func Infof(id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtINFO,
|
||||
})
|
||||
} else {
|
||||
grpclog.InfoDepth(1, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Warning logs through grpclog.Warning and adds a trace event if channelz is on.
|
||||
func Warning(id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
grpclog.WarningDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on.
|
||||
func Warningf(id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
grpclog.WarningDepth(1, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs through grpclog.Error and adds a trace event if channelz is on.
|
||||
func Error(id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
grpclog.ErrorDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
|
||||
func Errorf(id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
grpclog.ErrorDepth(1, msg)
|
||||
}
|
||||
}
|
|
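The helpers in this new file only attach trace events when channelz is on. One hedged way to make that data visible (names and port are illustrative, not part of this change) is to expose the channelz query service on the same server so external tooling can inspect channels, subchannels and sockets:

package main

import (
	"net"

	"google.golang.org/grpc"
	channelzservice "google.golang.org/grpc/channelz/service"
)

func main() {
	s := grpc.NewServer()
	// Serves the grpc.channelz.v1.Channelz query API.
	channelzservice.RegisterChannelzServiceToServer(s)
	lis, err := net.Listen("tcp", ":50052") // illustrative port
	if err != nil {
		panic(err)
	}
	if err := s.Serve(lis); err != nil {
		panic(err)
	}
}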
@ -25,11 +25,14 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
|
||||
)
|
||||
|
||||
var (
|
||||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
|
||||
)
|
||||
|
|
118 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go (generated, vendored, new file)
|
@ -0,0 +1,118 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpclog (internal) defines depth logging for grpc.
|
||||
package grpclog
|
||||
|
||||
// Logger is the logger used for the non-depth log functions.
|
||||
var Logger LoggerV2
|
||||
|
||||
// DepthLogger is the logger used for the depth log functions.
|
||||
var DepthLogger DepthLoggerV2
|
||||
|
||||
// InfoDepth logs to the INFO log at the specified depth.
|
||||
func InfoDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.InfoDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Info(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// WarningDepth logs to the WARNING log at the specified depth.
|
||||
func WarningDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.WarningDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Warning(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorDepth logs to the ERROR log at the specified depth.
|
||||
func ErrorDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.ErrorDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Error(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// FatalDepth logs to the FATAL log at the specified depth.
|
||||
func FatalDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.FatalDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Fatal(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// LoggerV2 does underlying logging work for grpclog.
|
||||
// This is a copy of the LoggerV2 defined in the external grpclog package. It
|
||||
// is defined here to avoid a circular dependency.
|
||||
type LoggerV2 interface {
|
||||
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
|
||||
Info(args ...interface{})
|
||||
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
Infoln(args ...interface{})
|
||||
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
Infof(format string, args ...interface{})
|
||||
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
|
||||
Warning(args ...interface{})
|
||||
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
Warningln(args ...interface{})
|
||||
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
Warningf(format string, args ...interface{})
|
||||
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
Error(args ...interface{})
|
||||
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
Errorln(args ...interface{})
|
||||
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
Errorf(format string, args ...interface{})
|
||||
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatal(args ...interface{})
|
||||
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalln(args ...interface{})
|
||||
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalf(format string, args ...interface{})
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
V(l int) bool
|
||||
}
|
||||
|
||||
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
|
||||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||||
// depth set for trivial functions the logger may ignore.
|
||||
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
||||
// It is defined here to avoid a circular dependency.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type DepthLoggerV2 interface {
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
}
|
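SetLoggerV2 in the public package (see the earlier hunk that adds grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)) installs a depth-aware logger only when the supplied value also implements this interface. A hedged sketch of such a logger, built on the standard library; the offsets and prefixes are assumptions for illustration:

package main

import (
	"fmt"
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

// depthLogger embeds a stock LoggerV2 for the ordinary methods and adds the
// four *Depth methods, so the type assertion in SetLoggerV2 also picks it up
// as a DepthLoggerV2.
type depthLogger struct {
	grpclog.LoggerV2
	std *log.Logger
}

func (d *depthLogger) InfoDepth(depth int, args ...interface{}) {
	d.std.Output(depth+2, "INFO: "+fmt.Sprint(args...))
}

func (d *depthLogger) WarningDepth(depth int, args ...interface{}) {
	d.std.Output(depth+2, "WARNING: "+fmt.Sprint(args...))
}

func (d *depthLogger) ErrorDepth(depth int, args ...interface{}) {
	d.std.Output(depth+2, "ERROR: "+fmt.Sprint(args...))
}

func (d *depthLogger) FatalDepth(depth int, args ...interface{}) {
	d.std.Output(depth+2, "FATAL: "+fmt.Sprint(args...))
	os.Exit(1)
}

func main() {
	grpclog.SetLoggerV2(&depthLogger{
		LoggerV2: grpclog.NewLoggerV2(os.Stdout, os.Stdout, os.Stderr),
		std:      log.New(os.Stderr, "", log.Lshortfile),
	})
	grpclog.Info("depth-aware logger installed") // illustrative log line
}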
63 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go (generated, vendored, new file)
|
@ -0,0 +1,63 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclog
|
||||
|
||||
// PrefixLogger does logging with a prefix.
|
||||
//
|
||||
// Logging method on a nil logs without any prefix.
|
||||
type PrefixLogger struct {
|
||||
prefix string
|
||||
}
|
||||
|
||||
// Infof does info logging.
|
||||
func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
}
|
||||
Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warningf does warning logging.
|
||||
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
}
|
||||
Logger.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf does error logging.
|
||||
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
}
|
||||
Logger.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Debugf does info logging at verbose level 2.
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
if Logger.V(2) {
|
||||
pl.Infof(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// NewPrefixLogger creates a prefix logger with the given prefix.
|
||||
func NewPrefixLogger(prefix string) *PrefixLogger {
|
||||
return &PrefixLogger{prefix: prefix}
|
||||
}
|
55 vendor/google.golang.org/grpc/internal/grpcutil/target.go (generated, vendored, new file)
|
@ -0,0 +1,55 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcutil provides a bunch of utility functions to be used across the
|
||||
// gRPC codebase.
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// split2 returns the values from strings.SplitN(s, sep, 2).
|
||||
// If sep is not found, it returns ("", "", false) instead.
|
||||
func split2(s, sep string) (string, string, bool) {
|
||||
spl := strings.SplitN(s, sep, 2)
|
||||
if len(spl) < 2 {
|
||||
return "", "", false
|
||||
}
|
||||
return spl[0], spl[1], true
|
||||
}
|
||||
|
||||
// ParseTarget splits target into a resolver.Target struct containing scheme,
|
||||
// authority and endpoint.
|
||||
//
|
||||
// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
|
||||
// target}.
|
||||
func ParseTarget(target string) (ret resolver.Target) {
|
||||
var ok bool
|
||||
ret.Scheme, ret.Endpoint, ok = split2(target, "://")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
return ret
|
||||
}
|
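Since grpcutil is an internal package, the following standalone sketch re-implements the same split purely to illustrate the expected decomposition; the targets shown are examples, not values from this change.

package main

import (
	"fmt"
	"strings"
)

type target struct{ Scheme, Authority, Endpoint string }

// parseTarget mirrors ParseTarget above: scheme://authority/endpoint, with a
// fallback to treating the whole string as the endpoint.
func parseTarget(s string) target {
	scheme, rest, ok := split2(s, "://")
	if !ok {
		return target{Endpoint: s}
	}
	authority, endpoint, ok := split2(rest, "/")
	if !ok {
		return target{Endpoint: s}
	}
	return target{Scheme: scheme, Authority: authority, Endpoint: endpoint}
}

func split2(s, sep string) (string, string, bool) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", "", false
	}
	return spl[0], spl[1], true
}

func main() {
	fmt.Printf("%+v\n", parseTarget("dns://8.8.8.8/backend.example.com:443"))
	// {Scheme:dns Authority:8.8.8.8 Endpoint:backend.example.com:443}
	fmt.Printf("%+v\n", parseTarget("backend.example.com:443"))
	// {Scheme: Authority: Endpoint:backend.example.com:443} since there is no
	// scheme, the whole string becomes the endpoint.
}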
|
@ -28,9 +28,7 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
// WithResolverBuilder is exported by dialoptions.go
|
||||
WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
|
||||
// WithHealthCheckFunc is not exported by dialoptions.go
|
||||
// WithHealthCheckFunc is set by dialoptions.go
|
||||
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
|
||||
// HealthCheckFunc is used to provide client-side LB channel health checking
|
||||
HealthCheckFunc HealthChecker
|
||||
|
@ -39,14 +37,17 @@ var (
|
|||
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
|
||||
// default, but tests may wish to set it lower for convenience.
|
||||
KeepaliveMinPingTime = 10 * time.Second
|
||||
// ParseServiceConfig is a function to parse JSON service configs into
|
||||
// opaque data structures.
|
||||
ParseServiceConfig func(sc string) (interface{}, error)
|
||||
// StatusRawProto is exported by status/status.go. This func returns a
|
||||
// pointer to the wrapped Status proto for a given status.Status without a
|
||||
// call to proto.Clone(). The returned Status proto should not be mutated by
|
||||
// the caller.
|
||||
StatusRawProto interface{} // func (*status.Status) *spb.Status
|
||||
// NewRequestInfoContext creates a new context based on the argument context attaching
|
||||
// the passed in RequestInfo to the new context.
|
||||
NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
|
||||
// ParseServiceConfigForTesting is for creating a fake
|
||||
// ClientConn for resolver testing only
|
||||
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
|
||||
)
|
||||
|
||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||
|
@ -57,7 +58,7 @@ var (
|
|||
//
|
||||
// The health checking protocol is defined at:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
|
||||
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
|
||||
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
|
||||
|
||||
const (
|
||||
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
|
||||
|
|
218 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go → vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go (generated, vendored)
|
@ -33,18 +33,22 @@ import (
|
|||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
|
||||
// addresses from SRV records. Must not be changed after init time.
|
||||
var EnableSRVLookups = false
|
||||
|
||||
func init() {
|
||||
resolver.Register(NewBuilder())
|
||||
}
|
||||
|
||||
const (
|
||||
defaultPort = "443"
|
||||
defaultFreq = time.Minute * 30
|
||||
defaultDNSSvrPort = "53"
|
||||
golang = "GO"
|
||||
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
|
||||
|
@ -94,47 +98,33 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
|
|||
|
||||
// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
|
||||
func NewBuilder() resolver.Builder {
|
||||
return &dnsBuilder{minFreq: defaultFreq}
|
||||
return &dnsBuilder{}
|
||||
}
|
||||
|
||||
type dnsBuilder struct {
|
||||
// minimum frequency of polling the DNS server.
|
||||
minFreq time.Duration
|
||||
}
|
||||
type dnsBuilder struct{}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
host, port, err := parseTarget(target.Endpoint, defaultPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// IP address.
|
||||
if net.ParseIP(host) != nil {
|
||||
host, _ = formatIP(host)
|
||||
addr := []resolver.Address{{Addr: host + ":" + port}}
|
||||
i := &ipResolver{
|
||||
cc: cc,
|
||||
ip: addr,
|
||||
rn: make(chan struct{}, 1),
|
||||
q: make(chan struct{}),
|
||||
}
|
||||
cc.NewAddress(addr)
|
||||
go i.watcher()
|
||||
return i, nil
|
||||
if ipAddr, ok := formatIP(host); ok {
|
||||
addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
|
||||
cc.UpdateState(resolver.State{Addresses: addr})
|
||||
return deadResolver{}, nil
|
||||
}
|
||||
|
||||
// DNS address (non-IP).
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
d := &dnsResolver{
|
||||
freq: b.minFreq,
|
||||
backoff: backoff.Exponential{MaxDelay: b.minFreq},
|
||||
host: host,
|
||||
port: port,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cc: cc,
|
||||
t: time.NewTimer(0),
|
||||
rn: make(chan struct{}, 1),
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
|
@ -150,6 +140,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
|||
|
||||
d.wg.Add(1)
|
||||
go d.watcher()
|
||||
d.ResolveNow(resolver.ResolveNowOptions{})
|
||||
return d, nil
|
||||
}
|
||||
|
||||
|
@ -164,53 +155,23 @@ type netResolver interface {
|
|||
LookupTXT(ctx context.Context, name string) (txts []string, err error)
|
||||
}
|
||||
|
||||
// ipResolver watches for the name resolution update for an IP address.
|
||||
type ipResolver struct {
|
||||
cc resolver.ClientConn
|
||||
ip []resolver.Address
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
q chan struct{}
|
||||
}
|
||||
// deadResolver is a resolver that does nothing.
|
||||
type deadResolver struct{}
|
||||
|
||||
// ResolveNow resend the address it stores, no resolution is needed.
|
||||
func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
select {
|
||||
case i.rn <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
|
||||
|
||||
// Close closes the ipResolver.
|
||||
func (i *ipResolver) Close() {
|
||||
close(i.q)
|
||||
}
|
||||
|
||||
func (i *ipResolver) watcher() {
|
||||
for {
|
||||
select {
|
||||
case <-i.rn:
|
||||
i.cc.NewAddress(i.ip)
|
||||
case <-i.q:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
func (deadResolver) Close() {}
|
||||
|
||||
// dnsResolver watches for the name resolution update for a non-IP target.
|
||||
type dnsResolver struct {
|
||||
freq time.Duration
|
||||
backoff backoff.Exponential
|
||||
retryCount int
|
||||
host string
|
||||
port string
|
||||
resolver netResolver
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
host string
|
||||
port string
|
||||
resolver netResolver
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
t *time.Timer
|
||||
// wg is used to enforce Close() to return after the watcher() goroutine has finished.
|
||||
// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
|
||||
// replace the real lookup functions with mocked ones to facilitate testing.
|
||||
|
@ -222,7 +183,7 @@ type dnsResolver struct {
|
|||
}
|
||||
|
||||
// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
|
||||
func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
|
||||
select {
|
||||
case d.rn <- struct{}{}:
|
||||
default:
|
||||
|
@ -233,7 +194,6 @@ func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
|||
func (d *dnsResolver) Close() {
|
||||
d.cancel()
|
||||
d.wg.Wait()
|
||||
d.t.Stop()
|
||||
}
|
||||
|
||||
func (d *dnsResolver) watcher() {
|
||||
|
@ -242,27 +202,15 @@ func (d *dnsResolver) watcher() {
|
|||
select {
|
||||
case <-d.ctx.Done():
|
||||
return
|
||||
case <-d.t.C:
|
||||
case <-d.rn:
|
||||
if !d.t.Stop() {
|
||||
// Before resetting a timer, it should be stopped to prevent racing with
|
||||
// reads on it's channel.
|
||||
<-d.t.C
|
||||
}
|
||||
}
|
||||
|
||||
result, sc := d.lookup()
|
||||
// Next lookup should happen within an interval defined by d.freq. It may be
|
||||
// more often due to exponential retry on empty address list.
|
||||
if len(result) == 0 {
|
||||
d.retryCount++
|
||||
d.t.Reset(d.backoff.Backoff(d.retryCount))
|
||||
state, err := d.lookup()
|
||||
if err != nil {
|
||||
d.cc.ReportError(err)
|
||||
} else {
|
||||
d.retryCount = 0
|
||||
d.t.Reset(d.freq)
|
||||
d.cc.UpdateState(*state)
|
||||
}
|
||||
d.cc.NewServiceConfig(sc)
|
||||
d.cc.NewAddress(result)
|
||||
|
||||
// Sleep to prevent excessive re-resolutions. Incoming resolution requests
|
||||
// will be queued in d.rn.
|
||||
|
@ -276,37 +224,68 @@ func (d *dnsResolver) watcher() {
|
|||
}
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupSRV() []resolver.Address {
|
||||
func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||
if !EnableSRVLookups {
|
||||
return nil, nil
|
||||
}
|
||||
var newAddrs []resolver.Address
|
||||
_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
|
||||
return nil
|
||||
err = handleDNSError(err, "SRV") // may become nil
|
||||
return nil, err
|
||||
}
|
||||
for _, s := range srvs {
|
||||
lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
for _, a := range lbAddrs {
|
||||
a, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
err = handleDNSError(err, "A") // may become nil
|
||||
if err == nil {
|
||||
// If there are other SRV records, look them up and ignore this
|
||||
// one that does not exist.
|
||||
continue
|
||||
}
|
||||
addr := a + ":" + strconv.Itoa(int(s.Port))
|
||||
return nil, err
|
||||
}
|
||||
for _, a := range lbAddrs {
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||
}
|
||||
addr := ip + ":" + strconv.Itoa(int(s.Port))
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
|
||||
}
|
||||
}
|
||||
return newAddrs
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() string {
|
||||
var filterError = func(err error) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
// Timeouts and temporary errors should be communicated to gRPC to
|
||||
// attempt another DNS query (with backoff). Other errors should be
|
||||
// suppressed (they may represent the absence of a TXT record).
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
err = filterError(err)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
|
||||
grpclog.Infoln(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
|
||||
return ""
|
||||
if envconfig.TXTErrIgnore {
|
||||
return nil
|
||||
}
|
||||
if err = handleDNSError(err, "TXT"); err != nil {
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
var res string
|
||||
for _, s := range ss {
|
||||
|
@ -315,40 +294,45 @@ func (d *dnsResolver) lookupTXT() string {
|
|||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
return ""
|
||||
grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
// This is not an error; it is the equivalent of not having a service config.
|
||||
return nil
|
||||
}
|
||||
return strings.TrimPrefix(res, txtAttribute)
|
||||
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
|
||||
return d.cc.ParseServiceConfig(sc)
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() []resolver.Address {
|
||||
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
|
||||
var newAddrs []resolver.Address
|
||||
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
|
||||
return nil
|
||||
err = handleDNSError(err, "A")
|
||||
return nil, err
|
||||
}
|
||||
for _, a := range addrs {
|
||||
a, ok := formatIP(a)
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||
}
|
||||
addr := a + ":" + d.port
|
||||
addr := ip + ":" + d.port
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
|
||||
}
|
||||
return newAddrs
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookup() ([]resolver.Address, string) {
|
||||
newAddrs := d.lookupSRV()
|
||||
// Support fallback to non-balancer address.
|
||||
newAddrs = append(newAddrs, d.lookupHost()...)
|
||||
if d.disableServiceConfig {
|
||||
return newAddrs, ""
|
||||
func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||
srv, srvErr := d.lookupSRV()
|
||||
addrs, hostErr := d.lookupHost()
|
||||
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
|
||||
return nil, hostErr
|
||||
}
|
||||
sc := d.lookupTXT()
|
||||
return newAddrs, canaryingSC(sc)
|
||||
state := &resolver.State{
|
||||
Addresses: append(addrs, srv...),
|
||||
}
|
||||
if !d.disableServiceConfig {
|
||||
state.ServiceConfig = d.lookupTXT()
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
|
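The lookupSRV and lookupHost hunks above build "ip:port" strings by running every resolved address through formatIP, which has to bracket IPv6 literals before a port can be appended. A minimal stdlib-only sketch of that step (formatIPAddr is an illustrative name, not the vendored helper); net.JoinHostPort would do the same bracketing for you.

package main

import (
	"fmt"
	"net"
)

// formatIPAddr mirrors the idea behind the resolver's formatIP helper:
// IPv4 literals pass through unchanged, IPv6 literals get bracketed so
// that appending ":port" stays parseable, and anything else is rejected.
func formatIPAddr(addr string) (string, bool) {
	ip := net.ParseIP(addr)
	if ip == nil {
		return "", false
	}
	if ip.To4() != nil {
		return addr, true
	}
	return "[" + addr + "]", true
}

func main() {
	for _, a := range []string{"10.0.0.1", "2001:db8::1", "not-an-ip"} {
		ip, ok := formatIPAddr(a)
		if !ok {
			fmt.Printf("dns: error parsing A record IP address %v\n", a)
			continue
		}
		fmt.Println(ip + ":443") // 10.0.0.1:443 and [2001:db8::1]:443
	}
}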
@ -434,12 +418,12 @@ func canaryingSC(js string) string {
|
|||
var rcs []rawChoice
|
||||
err := json.Unmarshal([]byte(js), &rcs)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
|
||||
grpclog.Warningf("dns: error parsing service config json: %v", err)
|
||||
return ""
|
||||
}
|
||||
cliHostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
|
||||
grpclog.Warningf("dns: error getting client hostname: %v", err)
|
||||
return ""
|
||||
}
|
||||
var sc string
|
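canaryingSC, patched in the hunk above, unmarshals the TXT payload into a slice of rawChoice values and matches them against os.Hostname() to decide which service config this client should use. A rough stdlib-only sketch of that selection, assuming hypothetical field names (clientHostName, serviceConfig) since the real rawChoice definition is outside this diff:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// choice is a stand-in for the resolver's rawChoice type; the field
// names here are illustrative only.
type choice struct {
	ClientHostName []string        `json:"clientHostName"`
	ServiceConfig  json.RawMessage `json:"serviceConfig"`
}

// pickServiceConfig returns the first choice whose host list is empty
// or contains this client's hostname, mimicking the canarying idea.
func pickServiceConfig(js string) (string, error) {
	var choices []choice
	if err := json.Unmarshal([]byte(js), &choices); err != nil {
		return "", fmt.Errorf("dns: error parsing service config json: %v", err)
	}
	hostname, err := os.Hostname()
	if err != nil {
		return "", fmt.Errorf("dns: error getting client hostname: %v", err)
	}
	for _, c := range choices {
		if len(c.ClientHostName) == 0 {
			return string(c.ServiceConfig), nil
		}
		for _, h := range c.ClientHostName {
			if h == hostname {
				return string(c.ServiceConfig), nil
			}
		}
	}
	return "", nil
}

func main() {
	js := `[{"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]`
	sc, err := pickServiceConfig(js)
	fmt.Println(sc, err)
}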
vendor/google.golang.org/grpc/internal/resolver/dns/go113.go (33 lines, generated, vendored, new file)
|
@ -0,0 +1,33 @@
// +build go1.13

/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package dns

import "net"

func init() {
	filterError = func(err error) error {
		if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
			// The name does not exist; not an error.
			return nil
		}
		return err
	}
}
|
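go113.go above swaps in a filterError that treats a missing record as a non-error, while handleDNSError in dns_resolver.go wraps and logs whatever survives the filter. A small sketch of that classification, assuming go1.13+ so *net.DNSError carries IsNotFound:

package main

import (
	"errors"
	"fmt"
	"net"
)

// classify follows the go1.13 filterError: a record that simply does not
// exist is not an error, while timeouts and other failures are wrapped and
// returned so the caller can report them and retry with backoff.
func classify(err error, lookupType string) error {
	if err == nil {
		return nil
	}
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) && dnsErr.IsNotFound {
		// The name does not exist; not an error.
		return nil
	}
	return fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
}

func main() {
	_, err := net.LookupTXT("_grpc_config.name-that-does-not-exist.invalid")
	// Usually prints <nil>: the TXT record is merely absent.
	fmt.Println(classify(err, "TXT"))
}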
|
@ -26,7 +26,7 @@ const scheme = "passthrough"
|
|||
|
||||
type passthroughBuilder struct{}
|
||||
|
||||
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
r := &passthroughResolver{
|
||||
target: target,
|
||||
cc: cc,
|
||||
|
@ -48,7 +48,7 @@ func (r *passthroughResolver) start() {
|
|||
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
|
||||
}
|
||||
|
||||
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
|
||||
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
|
||||
|
||||
func (*passthroughResolver) Close() {}
|
||||
|
|
@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false }
|
|||
type headerFrame struct {
|
||||
streamID uint32
|
||||
hf []hpack.HeaderField
|
||||
endStream bool // Valid on server side.
|
||||
initStream func(uint32) (bool, error) // Used only on the client side.
|
||||
endStream bool // Valid on server side.
|
||||
initStream func(uint32) error // Used only on the client side.
|
||||
onWrite func()
|
||||
wq *writeQuota // write quota for the stream created.
|
||||
cleanup *cleanupStream // Valid on the server side.
|
||||
|
@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
|||
|
||||
func (l *loopyWriter) originateStream(str *outStream) error {
|
||||
hdr := str.itl.dequeue().(*headerFrame)
|
||||
sendPing, err := hdr.initStream(str.id)
|
||||
if err != nil {
|
||||
if err := hdr.initStream(str.id); err != nil {
|
||||
if err == ErrConnClosing {
|
||||
return err
|
||||
}
|
||||
// Other errors(errStreamDrain) need not close transport.
|
||||
return nil
|
||||
}
|
||||
if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
l.estdStreams[str.id] = str
|
||||
if sendPing {
|
||||
return l.pingHandler(&ping{data: [8]byte{}})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -227,7 +227,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
|||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
}
|
||||
ht.Close()
|
||||
|
@ -289,7 +291,9 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
|||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
})
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
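The two stats hunks above start attaching real header and trailer metadata to stats.OutHeader and stats.OutTrailer, and they pass md.Copy() rather than the live map so stats handlers cannot observe later mutations. A tiny sketch of why the copy matters; google.golang.org/grpc/metadata is the only non-stdlib import assumed:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("content-type", "application/grpc")

	// Hand a snapshot to anything that may read it later or concurrently,
	// such as a stats handler; mutations of the live MD stay invisible.
	snapshot := md.Copy()
	md["grpc-trace-bin"] = append(md["grpc-trace-bin"], "abc")

	fmt.Println(len(snapshot["grpc-trace-bin"])) // 0: the snapshot is isolated
	fmt.Println(len(md["grpc-trace-bin"]))       // 1
}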
@ -334,7 +338,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
|||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
|
|
|
@ -35,6 +35,7 @@ import (
|
|||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
|
@ -44,8 +45,14 @@ import (
|
|||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// clientConnectionCounter counts the number of connections a client has
|
||||
// initiated (equal to the number of http2Clients created). Must be accessed
|
||||
// atomically.
|
||||
var clientConnectionCounter uint64
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
type http2Client struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
|
@ -62,8 +69,6 @@ type http2Client struct {
|
|||
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
|
||||
// that the server sent GoAway on this transport.
|
||||
goAway chan struct{}
|
||||
// awakenKeepalive is used to wake up keepalive when after it has gone dormant.
|
||||
awakenKeepalive chan struct{}
|
||||
|
||||
framer *framer
|
||||
// controlBuf delivers all the control related tasks (e.g., window
|
||||
|
@ -77,9 +82,6 @@ type http2Client struct {
|
|||
|
||||
perRPCCreds []credentials.PerRPCCredentials
|
||||
|
||||
// Boolean to keep track of reading activity on transport.
|
||||
// 1 is true and 0 is false.
|
||||
activity uint32 // Accessed atomically.
|
||||
kp keepalive.ClientParameters
|
||||
keepaliveEnabled bool
|
||||
|
||||
|
@ -110,6 +112,16 @@ type http2Client struct {
|
|||
// goAwayReason records the http2.ErrCode and debug data received with the
|
||||
// GoAway frame.
|
||||
goAwayReason GoAwayReason
|
||||
// A condition variable used to signal when the keepalive goroutine should
|
||||
// go dormant. The condition for dormancy is based on the number of active
|
||||
// streams and the `PermitWithoutStream` keepalive client parameter. And
|
||||
// since the number of active streams is guarded by the above mutex, we use
|
||||
// the same for this condition variable as well.
|
||||
kpDormancyCond *sync.Cond
|
||||
// A boolean to track whether the keepalive goroutine is dormant or not.
|
||||
// This is checked before attempting to signal the above condition
|
||||
// variable.
|
||||
kpDormant bool
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID int64 // channelz unique identification number
|
||||
|
@ -119,6 +131,8 @@ type http2Client struct {
|
|||
onClose func()
|
||||
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
}
|
||||
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
|
||||
|
@ -232,7 +246,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
|||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
goAway: make(chan struct{}),
|
||||
awakenKeepalive: make(chan struct{}, 1),
|
||||
framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
scheme: scheme,
|
||||
|
@ -264,9 +277,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
|||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
// Make sure awakenKeepalive can't be written upon.
|
||||
// keepalive routine will make it writable, if need be.
|
||||
t.awakenKeepalive <- struct{}{}
|
||||
if t.statsHandler != nil {
|
||||
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
|
@ -281,6 +291,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
|||
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
}
|
||||
if t.keepaliveEnabled {
|
||||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||||
go t.keepalive()
|
||||
}
|
||||
// Start the reader goroutine for incoming message. Each transport has
|
||||
|
@ -325,6 +336,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
|||
}
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
|
||||
|
||||
if err := t.framer.writer.Flush(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -347,6 +360,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
|||
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
|
||||
s := &Stream{
|
||||
ct: t,
|
||||
done: make(chan struct{}),
|
||||
method: callHdr.Method,
|
||||
sendCompress: callHdr.SendCompress,
|
||||
|
@ -380,23 +394,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
|||
}
|
||||
|
||||
func (t *http2Client) getPeer() *peer.Peer {
|
||||
pr := &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo,
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
return pr
|
||||
}
|
||||
|
||||
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
|
||||
aud := t.createAudience(callHdr)
|
||||
authData, err := t.getTrAuthData(ctx, aud)
|
||||
ri := credentials.RequestInfo{
|
||||
Method: callHdr.Method,
|
||||
AuthInfo: t.authInfo,
|
||||
}
|
||||
ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
|
||||
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
|
||||
callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -419,6 +434,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
|||
|
||||
if callHdr.SendCompress != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
|
||||
}
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||||
|
@ -564,7 +580,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
hdr := &headerFrame{
|
||||
hf: headerFields,
|
||||
endStream: false,
|
||||
initStream: func(id uint32) (bool, error) {
|
||||
initStream: func(id uint32) error {
|
||||
t.mu.Lock()
|
||||
if state := t.state; state != reachable {
|
||||
t.mu.Unlock()
|
||||
|
@ -574,29 +590,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
err = ErrConnClosing
|
||||
}
|
||||
cleanup(err)
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
t.activeStreams[id] = s
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||||
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
|
||||
}
|
||||
var sendPing bool
|
||||
// If the number of active streams change from 0 to 1, then check if keepalive
|
||||
// has gone dormant. If so, wake it up.
|
||||
if len(t.activeStreams) == 1 && t.keepaliveEnabled {
|
||||
select {
|
||||
case t.awakenKeepalive <- struct{}{}:
|
||||
sendPing = true
|
||||
// Fill the awakenKeepalive channel again as this channel must be
|
||||
// kept non-writable except at the point that the keepalive()
|
||||
// goroutine is waiting either to be awaken or shutdown.
|
||||
t.awakenKeepalive <- struct{}{}
|
||||
default:
|
||||
}
|
||||
// If the keepalive goroutine has gone dormant, wake it up.
|
||||
if t.kpDormant {
|
||||
t.kpDormancyCond.Signal()
|
||||
}
|
||||
t.mu.Unlock()
|
||||
return sendPing, nil
|
||||
return nil
|
||||
},
|
||||
onOrphaned: cleanup,
|
||||
wq: s.wq,
|
||||
|
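In the initStream hunk above the awakenKeepalive channel dance is gone: NewStream simply signals kpDormancyCond when the keepalive goroutine is dormant and the first stream appears. A stripped-down sketch of that wake-up pattern with illustrative names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type transport struct {
	mu             sync.Mutex
	kpDormancyCond *sync.Cond
	kpDormant      bool
	activeStreams  int
}

// keepaliveLoop goes dormant while there are no active streams and is
// woken by Signal() when a stream is created.
func (t *transport) keepaliveLoop() {
	t.mu.Lock()
	for t.activeStreams == 0 {
		t.kpDormant = true
		t.kpDormancyCond.Wait() // releases t.mu while blocked
	}
	t.kpDormant = false
	t.mu.Unlock()
	fmt.Println("keepalive: awake, would send a ping now")
}

func (t *transport) newStream() {
	t.mu.Lock()
	t.activeStreams++
	if t.kpDormant {
		t.kpDormancyCond.Signal()
	}
	t.mu.Unlock()
}

func main() {
	t := &transport{}
	t.kpDormancyCond = sync.NewCond(&t.mu)
	go t.keepaliveLoop()
	time.Sleep(50 * time.Millisecond) // let the loop go dormant
	t.newStream()
	time.Sleep(50 * time.Millisecond)
}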
@ -674,12 +680,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
header, ok := metadata.FromOutgoingContext(ctx)
|
||||
if ok {
|
||||
header.Set("user-agent", t.userAgent)
|
||||
} else {
|
||||
header = metadata.Pairs("user-agent", t.userAgent)
|
||||
}
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
|
@ -778,6 +791,11 @@ func (t *http2Client) Close() error {
|
|||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
if t.kpDormant {
|
||||
// If the keepalive goroutine is blocked on this condition variable, we
|
||||
// should unblock it so that the goroutine eventually exits.
|
||||
t.kpDormancyCond.Signal()
|
||||
}
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
t.cancel()
|
||||
|
@ -853,11 +871,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
|||
return t.controlBuf.put(df)
|
||||
}
|
||||
|
||||
func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
|
||||
func (t *http2Client) getStream(f http2.Frame) *Stream {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
s, ok := t.activeStreams[f.Header().StreamID]
|
||||
return s, ok
|
||||
s := t.activeStreams[f.Header().StreamID]
|
||||
t.mu.Unlock()
|
||||
return s
|
||||
}
|
||||
|
||||
// adjustWindow sends out extra window update over the initial window size
|
||||
|
@ -937,8 +955,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
|||
t.controlBuf.put(bdpPing)
|
||||
}
|
||||
// Select the right stream to dispatch.
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
s := t.getStream(f)
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
if size > 0 {
|
||||
|
@ -969,8 +987,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
|||
}
|
||||
|
||||
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
s := t.getStream(f)
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeRefusedStream {
|
||||
|
@ -1147,8 +1165,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
|||
|
||||
// operateHeaders takes action on the decoded headers.
|
||||
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
s, ok := t.getStream(frame)
|
||||
if !ok {
|
||||
s := t.getStream(frame)
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
endStream := frame.StreamEnded()
|
||||
|
@ -1177,12 +1195,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
|||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: s.header.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: s.trailer.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
|
@ -1191,6 +1211,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
|||
|
||||
// If headerChan hasn't been closed yet
|
||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||
s.headerValid = true
|
||||
if !endStream {
|
||||
// HEADERS frame block carries a Response-Headers.
|
||||
isHeader = true
|
||||
|
@ -1233,7 +1254,7 @@ func (t *http2Client) reader() {
|
|||
}
|
||||
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
|
@ -1248,7 +1269,7 @@ func (t *http2Client) reader() {
|
|||
t.controlBuf.throttle()
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
if t.keepaliveEnabled {
|
||||
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
if err != nil {
|
||||
// Abort an active stream if the http2.Framer returns a
|
||||
|
@ -1292,56 +1313,83 @@ func (t *http2Client) reader() {
|
|||
}
|
||||
}
|
||||
|
||||
func minTime(a, b time.Duration) time.Duration {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
|
||||
func (t *http2Client) keepalive() {
|
||||
p := &ping{data: [8]byte{}}
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
outstandingPing := false
|
||||
// Amount of time remaining before which we should receive an ACK for the
|
||||
// last sent ping.
|
||||
timeoutLeft := time.Duration(0)
|
||||
// Records the last value of t.lastRead before we go block on the timer.
|
||||
// This is required to check for read activity since then.
|
||||
prevNano := time.Now().UnixNano()
|
||||
timer := time.NewTimer(t.kp.Time)
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
timer.Reset(t.kp.Time)
|
||||
lastRead := atomic.LoadInt64(&t.lastRead)
|
||||
if lastRead > prevNano {
|
||||
// There has been read activity since the last time we were here.
|
||||
outstandingPing = false
|
||||
// Next timer should fire at kp.Time seconds from lastRead time.
|
||||
timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
|
||||
prevNano = lastRead
|
||||
continue
|
||||
}
|
||||
// Check if keepalive should go dormant.
|
||||
if outstandingPing && timeoutLeft <= 0 {
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
t.mu.Lock()
|
||||
if t.state == closing {
|
||||
// If the transport is closing, we should exit from the
|
||||
// keepalive goroutine here. If not, we could have a race
|
||||
// between the call to Signal() from Close() and the call to
|
||||
// Wait() here, whereby the keepalive goroutine ends up
|
||||
// blocking on the condition variable which will never be
|
||||
// signalled again.
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
|
||||
// Make awakenKeepalive writable.
|
||||
<-t.awakenKeepalive
|
||||
t.mu.Unlock()
|
||||
select {
|
||||
case <-t.awakenKeepalive:
|
||||
// If the control gets here a ping has been sent
|
||||
// need to reset the timer with keepalive.Timeout.
|
||||
case <-t.ctx.Done():
|
||||
return
|
||||
}
|
||||
} else {
|
||||
t.mu.Unlock()
|
||||
// If a ping was sent out previously (because there were active
|
||||
// streams at that point) which wasn't acked and its timeout
|
||||
// hadn't fired, but we got here and are about to go dormant,
|
||||
// we should make sure that we unconditionally send a ping once
|
||||
// we awaken.
|
||||
outstandingPing = false
|
||||
t.kpDormant = true
|
||||
t.kpDormancyCond.Wait()
|
||||
}
|
||||
t.kpDormant = false
|
||||
t.mu.Unlock()
|
||||
|
||||
// We get here either because we were dormant and a new stream was
|
||||
// created which unblocked the Wait() call, or because the
|
||||
// keepalive timer expired. In both cases, we need to send a ping.
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
}
|
||||
// Send ping.
|
||||
t.controlBuf.put(p)
|
||||
timeoutLeft = t.kp.Timeout
|
||||
outstandingPing = true
|
||||
}
|
||||
|
||||
// By the time control gets here a ping has been sent one way or the other.
|
||||
timer.Reset(t.kp.Timeout)
|
||||
select {
|
||||
case <-timer.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
timer.Reset(t.kp.Time)
|
||||
continue
|
||||
}
|
||||
infof("transport: closing client transport due to idleness.")
|
||||
t.Close()
|
||||
return
|
||||
case <-t.ctx.Done():
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
return
|
||||
}
|
||||
// The amount of time to sleep here is the minimum of kp.Time and
|
||||
// timeoutLeft. This will ensure that we wait only for kp.Time
|
||||
// before sending out the next ping (for cases where the ping is
|
||||
// acked).
|
||||
sleepDuration := minTime(t.kp.Time, timeoutLeft)
|
||||
timeoutLeft -= sleepDuration
|
||||
timer.Reset(sleepDuration)
|
||||
case <-t.ctx.Done():
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
|
|
|
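The rewritten client keepalive above drops the activity flag in favor of an atomically updated lastRead timestamp and sleeps for min(kp.Time, timeoutLeft) between checks, so an unacked ping can never extend the timeout. A compact stdlib-only sketch of that accounting, with hard-coded intervals standing in for keepalive.ClientParameters:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func minDuration(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func main() {
	const (
		kpTime    = 200 * time.Millisecond // ping if no reads for this long
		kpTimeout = 100 * time.Millisecond // close if a ping is unacked for this long
	)
	var lastRead int64 // unix nanos of the last read, updated by the reader

	// Simulated reader: record read activity for a while, then go quiet.
	go func() {
		for i := 0; i < 3; i++ {
			atomic.StoreInt64(&lastRead, time.Now().UnixNano())
			time.Sleep(100 * time.Millisecond)
		}
	}()

	outstandingPing := false
	timeoutLeft := time.Duration(0)
	prevNano := time.Now().UnixNano()
	timer := time.NewTimer(kpTime)
	for {
		<-timer.C
		if n := atomic.LoadInt64(&lastRead); n > prevNano {
			// Read activity since the last check: no ping needed yet,
			// fire again kpTime after the most recent read.
			outstandingPing = false
			timer.Reset(time.Duration(n) + kpTime - time.Duration(time.Now().UnixNano()))
			prevNano = n
			continue
		}
		if outstandingPing && timeoutLeft <= 0 {
			fmt.Println("no read activity and ping unacked: closing transport")
			return
		}
		if !outstandingPing {
			fmt.Println("sending keepalive ping")
			outstandingPing = true
			timeoutLeft = kpTimeout
		}
		// Wait for the smaller of kpTime and the remaining ack window.
		sleep := minDuration(kpTime, timeoutLeft)
		timeoutLeft -= sleep
		timer.Reset(sleep)
	}
}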
@ -62,11 +62,15 @@ var (
|
|||
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
|
||||
)
|
||||
|
||||
// serverConnectionCounter counts the number of connections a server has seen
|
||||
// (equal to the number of http2Servers created). Must be accessed atomically.
|
||||
var serverConnectionCounter uint64
|
||||
|
||||
// http2Server implements the ServerTransport interface with HTTP2.
|
||||
type http2Server struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
ctxDone <-chan struct{} // Cache the context.Done() chan
|
||||
cancel context.CancelFunc
|
||||
done chan struct{}
|
||||
conn net.Conn
|
||||
loopy *loopyWriter
|
||||
readerDone chan struct{} // sync point to enable testing.
|
||||
|
@ -84,12 +88,8 @@ type http2Server struct {
|
|||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
stats stats.Handler
|
||||
// Flag to keep track of reading activity on transport.
|
||||
// 1 is true and 0 is false.
|
||||
activity uint32 // Accessed atomically.
|
||||
// Keepalive and max-age parameters for the server.
|
||||
kp keepalive.ServerParameters
|
||||
|
||||
// Keepalive enforcement policy.
|
||||
kep keepalive.EnforcementPolicy
|
||||
// The time instance last ping was received.
|
||||
|
@ -125,6 +125,8 @@ type http2Server struct {
|
|||
channelzID int64 // channelz unique identification number
|
||||
czData *channelzData
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
}
|
||||
|
||||
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
|
||||
|
@ -175,6 +177,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
|||
Val: *config.MaxHeaderListSize,
|
||||
})
|
||||
}
|
||||
if config.HeaderTableSize != nil {
|
||||
isettings = append(isettings, http2.Setting{
|
||||
ID: http2.SettingHeaderTableSize,
|
||||
Val: *config.HeaderTableSize,
|
||||
})
|
||||
}
|
||||
if err := framer.fr.WriteSettings(isettings...); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: %v", err)
|
||||
}
|
||||
|
@ -206,11 +214,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
|||
if kep.MinTime == 0 {
|
||||
kep.MinTime = defaultKeepalivePolicyMinTime
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
done := make(chan struct{})
|
||||
t := &http2Server{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
ctxDone: ctx.Done(),
|
||||
ctx: context.Background(),
|
||||
done: done,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
|
@ -231,7 +238,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
|||
czData: new(channelzData),
|
||||
bufferPool: newBufferPool(),
|
||||
}
|
||||
t.controlBuf = newControlBuffer(t.ctxDone)
|
||||
t.controlBuf = newControlBuffer(t.done)
|
||||
if dynamicWindow {
|
||||
t.bdpEst = &bdpEstimator{
|
||||
bdp: initialWindowSize,
|
||||
|
@ -249,6 +256,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
|||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||
|
||||
t.framer.writer.Flush()
|
||||
|
||||
defer func() {
|
||||
|
@ -273,7 +283,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
|||
if err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
|
||||
}
|
||||
atomic.StoreUint32(&t.activity, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
|
||||
|
@ -362,12 +372,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
rstCode: http2.ErrCodeRefusedStream,
|
||||
onWrite: func() {},
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
}
|
||||
}
|
||||
t.mu.Lock()
|
||||
if t.state != reachable {
|
||||
t.mu.Unlock()
|
||||
s.cancel()
|
||||
return false
|
||||
}
|
||||
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||||
|
@ -378,12 +390,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
rstCode: http2.ErrCodeRefusedStream,
|
||||
onWrite: func() {},
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
}
|
||||
if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||||
t.mu.Unlock()
|
||||
// illegal gRPC stream id.
|
||||
errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
|
||||
s.cancel()
|
||||
return true
|
||||
}
|
||||
t.maxStreamID = streamID
|
||||
|
@ -408,6 +422,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
LocalAddr: t.localAddr,
|
||||
Compression: s.recvCompress,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(state.data.mdata).Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
|
@ -441,7 +456,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
|||
for {
|
||||
t.controlBuf.throttle()
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
atomic.StoreUint32(&t.activity, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
||||
|
@ -749,7 +764,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// WriteHeader sends the header metedata md back to the client.
|
||||
// WriteHeader sends the header metadata md back to the client.
|
||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if s.updateHeaderSent() || s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
|
@ -800,7 +815,9 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
|||
if t.stats != nil {
|
||||
// Note: WireLength is not set in outHeader.
|
||||
// TODO(mmukhi): Revisit this later, if needed.
|
||||
outHeader := &stats.OutHeader{}
|
||||
outHeader := &stats.OutHeader{
|
||||
Header: s.header.Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.Context(), outHeader)
|
||||
}
|
||||
return nil
|
||||
|
@ -863,7 +880,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
|||
rst := s.getState() == streamActive
|
||||
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
|
||||
if t.stats != nil {
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -885,7 +904,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
|||
// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
|
||||
s.cancel()
|
||||
select {
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
}
|
||||
|
@ -907,7 +926,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
|||
}
|
||||
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
|
||||
select {
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
}
|
||||
|
@ -924,32 +943,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
|||
// after an additional duration of keepalive.Timeout.
|
||||
func (t *http2Server) keepalive() {
|
||||
p := &ping{}
|
||||
var pingSent bool
|
||||
maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
|
||||
maxAge := time.NewTimer(t.kp.MaxConnectionAge)
|
||||
keepalive := time.NewTimer(t.kp.Time)
|
||||
// NOTE: All exit paths of this function should reset their
|
||||
// respective timers. A failure to do so will cause the
|
||||
// following clean-up to deadlock and eventually leak.
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
outstandingPing := false
|
||||
// Amount of time remaining before which we should receive an ACK for the
|
||||
// last sent ping.
|
||||
kpTimeoutLeft := time.Duration(0)
|
||||
// Records the last value of t.lastRead before we go block on the timer.
|
||||
// This is required to check for read activity since then.
|
||||
prevNano := time.Now().UnixNano()
|
||||
// Initialize the different timers to their default values.
|
||||
idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
|
||||
ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
|
||||
kpTimer := time.NewTimer(t.kp.Time)
|
||||
defer func() {
|
||||
if !maxIdle.Stop() {
|
||||
<-maxIdle.C
|
||||
}
|
||||
if !maxAge.Stop() {
|
||||
<-maxAge.C
|
||||
}
|
||||
if !keepalive.Stop() {
|
||||
<-keepalive.C
|
||||
}
|
||||
// We need to drain the underlying channel in these timers after a call
|
||||
// to Stop(), only if we are interested in resetting them. Clearly we
|
||||
// are not interested in resetting them here.
|
||||
idleTimer.Stop()
|
||||
ageTimer.Stop()
|
||||
kpTimer.Stop()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-maxIdle.C:
|
||||
case <-idleTimer.C:
|
||||
t.mu.Lock()
|
||||
idle := t.idle
|
||||
if idle.IsZero() { // The connection is non-idle.
|
||||
t.mu.Unlock()
|
||||
maxIdle.Reset(t.kp.MaxConnectionIdle)
|
||||
idleTimer.Reset(t.kp.MaxConnectionIdle)
|
||||
continue
|
||||
}
|
||||
val := t.kp.MaxConnectionIdle - time.Since(idle)
|
||||
|
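The deferred cleanup above relies on the rule that a timer's channel only needs draining after Stop() when the timer will be reset and reused; a timer that is being abandoned can just be stopped. A minimal stdlib illustration of both cases:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond) // let the timer fire so a value is buffered

	// Reusing the timer: Stop() returns false because it already fired,
	// so drain the buffered value before Reset, otherwise the next
	// receive would see the stale expiry.
	if !t.Stop() {
		<-t.C
	}
	t.Reset(10 * time.Millisecond)
	fmt.Println("reset cleanly:", (<-t.C).Before(time.Now()))

	// Abandoning the timer (as in the keepalive cleanup): Stop() alone is
	// enough; nothing will ever receive from t.C again.
	t.Reset(time.Hour)
	t.Stop()
}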
@ -958,44 +980,52 @@ func (t *http2Server) keepalive() {
|
|||
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
|
||||
// Gracefully close the connection.
|
||||
t.drain(http2.ErrCodeNo, []byte{})
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
maxIdle.Reset(infinity)
|
||||
return
|
||||
}
|
||||
maxIdle.Reset(val)
|
||||
case <-maxAge.C:
|
||||
idleTimer.Reset(val)
|
||||
case <-ageTimer.C:
|
||||
t.drain(http2.ErrCodeNo, []byte{})
|
||||
maxAge.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
select {
|
||||
case <-maxAge.C:
|
||||
case <-ageTimer.C:
|
||||
// Close the connection after grace period.
|
||||
infof("transport: closing server transport due to maximum connection age.")
|
||||
t.Close()
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
maxAge.Reset(infinity)
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
}
|
||||
return
|
||||
case <-keepalive.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
pingSent = false
|
||||
keepalive.Reset(t.kp.Time)
|
||||
case <-kpTimer.C:
|
||||
lastRead := atomic.LoadInt64(&t.lastRead)
|
||||
if lastRead > prevNano {
|
||||
// There has been read activity since the last time we were
|
||||
// here. Setup the timer to fire at kp.Time seconds from
|
||||
// lastRead time and continue.
|
||||
outstandingPing = false
|
||||
kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
|
||||
prevNano = lastRead
|
||||
continue
|
||||
}
|
||||
if pingSent {
|
||||
if outstandingPing && kpTimeoutLeft <= 0 {
|
||||
infof("transport: closing server transport due to idleness.")
|
||||
t.Close()
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
keepalive.Reset(infinity)
|
||||
return
|
||||
}
|
||||
pingSent = true
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
kpTimeoutLeft = t.kp.Timeout
|
||||
outstandingPing = true
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
keepalive.Reset(t.kp.Timeout)
|
||||
case <-t.ctx.Done():
|
||||
// The amount of time to sleep here is the minimum of kp.Time and
|
||||
// timeoutLeft. This will ensure that we wait only for kp.Time
|
||||
// before sending out the next ping (for cases where the ping is
|
||||
// acked).
|
||||
sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
|
||||
kpTimeoutLeft -= sleepDuration
|
||||
kpTimer.Reset(sleepDuration)
|
||||
case <-t.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -1015,7 +1045,7 @@ func (t *http2Server) Close() error {
|
|||
t.activeStreams = nil
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
t.cancel()
|
||||
close(t.done)
|
||||
err := t.conn.Close()
|
||||
if channelz.IsOn() {
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
|
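The Close() hunk above, like the rest of the server changes, replaces ctx/ctxDone with a plain done channel that is closed exactly once; every select that used <-t.ctxDone now waits on <-t.done. A tiny sketch of that shutdown pattern (the type and method names are made up for illustration):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errConnClosing = errors.New("transport is closing")

type serverTransport struct {
	done      chan struct{}
	closeOnce sync.Once
}

// write refuses new work once the transport is closed, the same way
// Write and wq.get select on <-t.done before queueing frames.
func (s *serverTransport) write(msg string) error {
	select {
	case <-s.done:
		return errConnClosing
	default:
	}
	fmt.Println("queued:", msg)
	return nil
}

// closeTransport is idempotent in this sketch; closing the channel
// unblocks every goroutine selecting on <-s.done at once.
func (s *serverTransport) closeTransport() {
	s.closeOnce.Do(func() { close(s.done) })
}

func main() {
	s := &serverTransport{done: make(chan struct{})}
	fmt.Println(s.write("hello")) // queued: hello, then <nil>
	s.closeTransport()
	s.closeTransport()           // safe: the sync.Once guards the close
	fmt.Println(s.write("late")) // transport is closing
}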
@ -1155,7 +1185,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|||
select {
|
||||
case <-t.drainChan:
|
||||
case <-timer.C:
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
return
|
||||
}
|
||||
t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
|
||||
|
@ -1205,7 +1235,7 @@ func (t *http2Server) getOutFlowWindow() int64 {
|
|||
select {
|
||||
case sz := <-resp:
|
||||
return int64(sz)
|
||||
case <-t.ctxDone:
|
||||
case <-t.done:
|
||||
return -1
|
||||
case <-timer.C:
|
||||
return -2
|
||||
|
|
|
@ -73,10 +73,11 @@ type recvMsg struct {
|
|||
}
|
||||
|
||||
// recvBuffer is an unbounded channel of recvMsg structs.
|
||||
// Note recvBuffer differs from controlBuffer only in that recvBuffer
|
||||
// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
|
||||
// recvBuffer is written to much more often than
|
||||
// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
|
||||
//
|
||||
// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
|
||||
// holds a channel of recvMsg structs instead of objects implementing "item"
|
||||
// interface. recvBuffer is written to much more often and using strict recvMsg
|
||||
// structs helps avoid allocation in "recvBuffer.put"
|
||||
type recvBuffer struct {
|
||||
c chan recvMsg
|
||||
mu sync.Mutex
|
||||
|
@ -233,6 +234,7 @@ const (
|
|||
type Stream struct {
|
||||
id uint32
|
||||
st ServerTransport // nil for client side Stream
|
||||
ct *http2Client // nil for server side Stream
|
||||
ctx context.Context // the associated context of the stream
|
||||
cancel context.CancelFunc // always nil for client side Stream
|
||||
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
|
||||
|
@ -251,6 +253,10 @@ type Stream struct {
|
|||
|
||||
headerChan chan struct{} // closed to indicate the end of header metadata.
|
||||
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||
// headerValid indicates whether a valid header was received. Only
|
||||
// meaningful after headerChan is closed (always call waitOnHeader() before
|
||||
// reading its value). Not valid on server side.
|
||||
headerValid bool
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
|
@ -303,34 +309,28 @@ func (s *Stream) getState() streamState {
|
|||
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
|
||||
}
|
||||
|
||||
func (s *Stream) waitOnHeader() error {
|
||||
func (s *Stream) waitOnHeader() {
|
||||
if s.headerChan == nil {
|
||||
// On the server headerChan is always nil since a stream originates
|
||||
// only after having received headers.
|
||||
return nil
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
// We prefer success over failure when reading messages because we delay
|
||||
// context error in stream.Read(). To keep behavior consistent, we also
|
||||
// prefer success here.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
return ContextErr(s.ctx.Err())
|
||||
// Close the stream to prevent headers/trailers from changing after
|
||||
// this function returns.
|
||||
s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
|
||||
// headerChan could possibly not be closed yet if closeStream raced
|
||||
// with operateHeaders; wait until it is closed explicitly here.
|
||||
<-s.headerChan
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is empty string if there is no compression applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
if err := s.waitOnHeader(); err != nil {
|
||||
return ""
|
||||
}
|
||||
s.waitOnHeader()
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
|
@ -351,36 +351,27 @@ func (s *Stream) Done() <-chan struct{} {
|
|||
// available. It blocks until i) the metadata is ready or ii) there is no header
|
||||
// metadata or iii) the stream is canceled/expired.
|
||||
//
|
||||
// On server side, it returns the out header after t.WriteHeader is called.
|
||||
// On server side, it returns the out header after t.WriteHeader is called. It
|
||||
// does not block and must not be called until after WriteHeader.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
if s.headerChan == nil && s.header != nil {
|
||||
if s.headerChan == nil {
|
||||
// On server side, return the header in stream. It will be the out
|
||||
// header after t.WriteHeader is called.
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
err := s.waitOnHeader()
|
||||
// Even if the stream is closed, header is returned if available.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
if s.header == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return s.header.Copy(), nil
|
||||
default:
|
||||
s.waitOnHeader()
|
||||
if !s.headerValid {
|
||||
return nil, s.status.Err()
|
||||
}
|
||||
return nil, err
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
|
||||
// TrailersOnly blocks until a header or trailers-only frame is received and
|
||||
// then returns true if the stream was trailers-only. If the stream ends
|
||||
// before headers are received, returns true, nil. If a context error happens
|
||||
// first, returns it as a status error. Client-side only.
|
||||
func (s *Stream) TrailersOnly() (bool, error) {
|
||||
err := s.waitOnHeader()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return s.noHeaders, nil
|
||||
// before headers are received, returns true, nil. Client-side only.
|
||||
func (s *Stream) TrailersOnly() bool {
|
||||
s.waitOnHeader()
|
||||
return s.noHeaders
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metedata. Note that if it is not called
|
||||
|
@ -534,6 +525,7 @@ type ServerConfig struct {
|
|||
ReadBufferSize int
|
||||
ChannelzParentID int64
|
||||
MaxHeaderListSize *uint32
|
||||
HeaderTableSize *uint32
|
||||
}
|
||||
|
||||
// NewServerTransport creates a ServerTransport with conn or non-nil error
|
||||
|
|
|
@ -20,6 +20,7 @@ package grpc
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
|
@ -31,49 +32,78 @@ import (
|
|||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// v2PickerWrapper wraps a balancer.Picker while providing the
|
||||
// balancer.V2Picker API. It requires a pickerWrapper to generate errors
|
||||
// including the latest connectionError. To be deleted when balancer.Picker is
|
||||
// updated to the balancer.V2Picker API.
|
||||
type v2PickerWrapper struct {
|
||||
picker balancer.Picker
|
||||
connErr *connErr
|
||||
}
|
||||
|
||||
func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
sc, done, err := v.picker.Pick(info.Ctx, info)
|
||||
if err != nil {
|
||||
if err == balancer.ErrTransientFailure {
|
||||
return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
|
||||
}
|
||||
return balancer.PickResult{}, err
|
||||
}
|
||||
return balancer.PickResult{SubConn: sc, Done: done}, nil
|
||||
}
|
||||
|
||||
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
||||
// actions and unblock when there's a picker update.
|
||||
type pickerWrapper struct {
|
||||
mu sync.Mutex
|
||||
done bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
picker balancer.V2Picker
|
||||
|
||||
// The latest connection happened.
|
||||
connErrMu sync.Mutex
|
||||
connErr error
|
||||
// The latest connection error. TODO: remove when V1 picker is deprecated;
|
||||
// balancer should be responsible for providing the error.
|
||||
*connErr
|
||||
}
|
||||
|
||||
func newPickerWrapper() *pickerWrapper {
|
||||
bp := &pickerWrapper{blockingCh: make(chan struct{})}
|
||||
return bp
|
||||
type connErr struct {
|
||||
mu sync.Mutex
|
||||
err error
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) updateConnectionError(err error) {
|
||||
bp.connErrMu.Lock()
|
||||
bp.connErr = err
|
||||
bp.connErrMu.Unlock()
|
||||
func (c *connErr) updateConnectionError(err error) {
|
||||
c.mu.Lock()
|
||||
c.err = err
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) connectionError() error {
|
||||
bp.connErrMu.Lock()
|
||||
err := bp.connErr
|
||||
bp.connErrMu.Unlock()
|
||||
func (c *connErr) connectionError() error {
|
||||
c.mu.Lock()
|
||||
err := c.err
|
||||
c.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func newPickerWrapper() *pickerWrapper {
|
||||
return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
||||
func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
bp.mu.Lock()
|
||||
if bp.done {
|
||||
bp.mu.Unlock()
|
||||
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
||||
func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
pw.mu.Unlock()
|
||||
return
|
||||
}
|
||||
bp.picker = p
|
||||
// bp.blockingCh should never be nil.
|
||||
close(bp.blockingCh)
|
||||
bp.blockingCh = make(chan struct{})
|
||||
bp.mu.Unlock()
|
||||
pw.picker = p
|
||||
// pw.blockingCh should never be nil.
|
||||
close(pw.blockingCh)
|
||||
pw.blockingCh = make(chan struct{})
|
||||
pw.mu.Unlock()
|
||||
}
|
||||
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
||||
|
@ -100,83 +130,85 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
|
|||
// - the current picker returns other errors and failfast is false.
|
||||
// - the subConn returned by the current picker is not READY
|
||||
// When one of these situations happens, pick blocks until the picker gets updated.
|
||||
func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
var ch chan struct{}
|
||||
|
||||
var lastPickErr error
|
||||
for {
|
||||
bp.mu.Lock()
|
||||
if bp.done {
|
||||
bp.mu.Unlock()
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
pw.mu.Unlock()
|
||||
return nil, nil, ErrClientConnClosing
|
||||
}
|
||||
|
||||
if bp.picker == nil {
|
||||
ch = bp.blockingCh
|
||||
if pw.picker == nil {
|
||||
ch = pw.blockingCh
|
||||
}
|
||||
if ch == bp.blockingCh {
|
||||
if ch == pw.blockingCh {
|
||||
// This could happen when either:
|
||||
// - bp.picker is nil (the previous if condition), or
|
||||
// - pw.picker is nil (the previous if condition), or
|
||||
// - has called pick on the current picker.
|
||||
bp.mu.Unlock()
|
||||
pw.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if connectionErr := bp.connectionError(); connectionErr != nil {
|
||||
switch ctx.Err() {
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
|
||||
}
|
||||
var errStr string
|
||||
if lastPickErr != nil {
|
||||
errStr = "latest balancer error: " + lastPickErr.Error()
|
||||
} else if connectionErr := pw.connectionError(); connectionErr != nil {
|
||||
errStr = "latest connection error: " + connectionErr.Error()
|
||||
} else {
|
||||
errStr = ctx.Err().Error()
|
||||
}
|
||||
switch ctx.Err() {
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Error(codes.Canceled, errStr)
|
||||
}
|
||||
return nil, nil, ctx.Err()
|
||||
case <-ch:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch = bp.blockingCh
|
||||
p := bp.picker
|
||||
bp.mu.Unlock()
|
||||
ch = pw.blockingCh
|
||||
p := pw.picker
|
||||
pw.mu.Unlock()
|
||||
|
||||
subConn, done, err := p.Pick(ctx, opts)
|
||||
pickResult, err := p.Pick(info)
|
||||
|
||||
if err != nil {
|
||||
switch err {
|
||||
case balancer.ErrNoSubConnAvailable:
|
||||
if err == balancer.ErrNoSubConnAvailable {
|
||||
continue
|
||||
case balancer.ErrTransientFailure:
|
||||
}
|
||||
if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
|
||||
if !failfast {
|
||||
lastPickErr = err
|
||||
continue
|
||||
}
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Error(codes.Canceled, err.Error())
|
||||
default:
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return nil, nil, err
|
||||
}
|
||||
// err is some other error.
|
||||
return nil, nil, status.Error(codes.Unknown, err.Error())
|
||||
return nil, nil, status.Error(codes.Unavailable, err.Error())
|
||||
}
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return nil, nil, err
|
||||
}
|
||||
// err is some other error.
|
||||
return nil, nil, status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
acw, ok := subConn.(*acBalancerWrapper)
|
||||
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
|
||||
continue
|
||||
}
|
||||
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
|
||||
if channelz.IsOn() {
|
||||
return t, doneChannelzWrapper(acw, done), nil
|
||||
return t, doneChannelzWrapper(acw, pickResult.Done), nil
|
||||
}
|
||||
return t, done, nil
|
||||
return t, pickResult.Done, nil
|
||||
}
|
||||
if done != nil {
|
||||
if pickResult.Done != nil {
|
||||
// Calling done with nil error, no bytes sent and no bytes received.
|
||||
// DoneInfo with default value works.
|
||||
done(balancer.DoneInfo{})
|
||||
pickResult.Done(balancer.DoneInfo{})
|
||||
}
|
||||
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
|
||||
// If ok == false, ac.state is not READY.
|
||||
|
@ -186,12 +218,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
|
|||
}
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) close() {
|
||||
bp.mu.Lock()
|
||||
defer bp.mu.Unlock()
|
||||
if bp.done {
|
||||
func (pw *pickerWrapper) close() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
bp.done = true
|
||||
close(bp.blockingCh)
|
||||
pw.done = true
|
||||
close(pw.blockingCh)
|
||||
}
|
||||
|
|
|
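pickerWrapper above parks callers on blockingCh and, on each picker update, closes the current channel and installs a fresh one so every blocked pick retries against the new picker. A stdlib-only sketch of that publish-and-park mechanism, with the picker reduced to a string for brevity:

package main

import (
	"fmt"
	"sync"
	"time"
)

type wrapper struct {
	mu         sync.Mutex
	blockingCh chan struct{}
	picker     string // stands in for balancer.V2Picker
}

func newWrapper() *wrapper {
	return &wrapper{blockingCh: make(chan struct{})}
}

// updatePicker installs a new picker and unblocks everyone parked on
// the old channel by closing it and swapping in a fresh one.
func (w *wrapper) updatePicker(p string) {
	w.mu.Lock()
	w.picker = p
	close(w.blockingCh)
	w.blockingCh = make(chan struct{})
	w.mu.Unlock()
}

// pick parks until a picker is available, re-reading the channel each
// time it is woken, just like pickerWrapper.pick's for loop.
func (w *wrapper) pick() string {
	for {
		w.mu.Lock()
		ch, p := w.blockingCh, w.picker
		w.mu.Unlock()
		if p != "" {
			return p
		}
		<-ch // wait for the next updatePicker call
	}
}

func main() {
	w := newWrapper()
	go func() {
		time.Sleep(50 * time.Millisecond)
		w.updatePicker("round_robin picker #1")
	}()
	fmt.Println(w.pick()) // blocks until the update arrives
}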
@ -19,12 +19,14 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||
|
@ -45,35 +47,67 @@ func (*pickfirstBuilder) Name() string {
|
|||
}
|
||||
|
||||
type pickfirstBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
cc balancer.ClientConn
|
||||
sc balancer.SubConn
|
||||
}
|
||||
|
||||
var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
|
||||
|
||||
func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
if err != nil {
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
|
||||
}
|
||||
b.ResolverError(err)
|
||||
return
|
||||
}
|
||||
if b.sc == nil {
|
||||
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
//TODO(yuxuanli): why not change the cc state to Idle?
|
||||
if grpclog.V(2) {
|
||||
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
|
||||
b.sc.Connect()
|
||||
} else {
|
||||
b.sc.UpdateAddresses(addrs)
|
||||
b.sc.Connect()
|
||||
}
|
||||
b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||
switch b.state {
|
||||
case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
|
||||
// Set a failing picker if we don't have a good picker.
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
|
||||
Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
|
||||
)
|
||||
}
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
|
||||
if len(cs.ResolverState.Addresses) == 0 {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
if b.sc == nil {
|
||||
var err error
|
||||
b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
if grpclog.V(2) {
|
||||
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||
}
|
||||
b.state = connectivity.TransientFailure
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
|
||||
Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
|
||||
)
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
b.state = connectivity.Idle
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
|
||||
b.sc.Connect()
|
||||
} else {
|
||||
b.sc.UpdateAddresses(cs.ResolverState.Addresses)
|
||||
b.sc.Connect()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
|
||||
}
|
||||
|
@ -83,18 +117,28 @@ func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s conn
|
|||
}
|
||||
return
|
||||
}
|
||||
if s == connectivity.Shutdown {
|
||||
b.state = s.ConnectivityState
|
||||
if s.ConnectivityState == connectivity.Shutdown {
|
||||
b.sc = nil
|
||||
return
|
||||
}
|
||||
|
||||
switch s {
|
||||
switch s.ConnectivityState {
|
||||
case connectivity.Ready, connectivity.Idle:
|
||||
b.cc.UpdateBalancerState(s, &picker{sc: sc})
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
|
||||
case connectivity.Connecting:
|
||||
b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
|
||||
case connectivity.TransientFailure:
|
||||
b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
|
||||
err := balancer.ErrTransientFailure
|
||||
// TODO: this can be unconditional after the V1 API is removed, as
|
||||
// SubConnState will always contain a connection error.
|
||||
if s.ConnectionError != nil {
|
||||
err = balancer.TransientFailureError(s.ConnectionError)
|
||||
}
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: s.ConnectivityState,
|
||||
Picker: &picker{err: err},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -102,15 +146,12 @@ func (b *pickfirstBalancer) Close() {
|
|||
}
|
||||
|
||||
type picker struct {
|
||||
err error
|
||||
sc balancer.SubConn
|
||||
result balancer.PickResult
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
if p.err != nil {
|
||||
return nil, nil, p.err
|
||||
}
|
||||
return p.sc, nil, nil
|
||||
func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
return p.result, p.err
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
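pickfirst now tracks its own connectivity.State, pushes balancer.State through cc.UpdateState, and consumes input via UpdateClientConnState/UpdateSubConnState; the old Handle* methods merely forward to them. A compressed hedged sketch of that v2 surface for a single SubConn, with the same kind of static picker as above (all names are illustrative):

package sketch

import (
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/connectivity"
)

type staticPicker struct {
    result balancer.PickResult
    err    error
}

func (p *staticPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    return p.result, p.err
}

// onePickBalancer mirrors the shape pickfirst takes after this diff.
type onePickBalancer struct {
    cc balancer.ClientConn
    sc balancer.SubConn
}

var _ balancer.V2Balancer = (*onePickBalancer)(nil) // the v2 interface asserted above

func (b *onePickBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
    if len(s.ResolverState.Addresses) == 0 {
        // ErrBadResolverState makes the ClientConn ask the resolver to
        // re-resolve with backoff (see ccResolverWrapper.poll below).
        return balancer.ErrBadResolverState
    }
    if b.sc == nil {
        sc, err := b.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{})
        if err != nil {
            return balancer.ErrBadResolverState
        }
        b.sc = sc
    } else {
        b.sc.UpdateAddresses(s.ResolverState.Addresses)
    }
    b.sc.Connect()
    return nil
}

func (b *onePickBalancer) ResolverError(err error) {
    b.cc.UpdateState(balancer.State{
        ConnectivityState: connectivity.TransientFailure,
        Picker:            &staticPicker{err: balancer.TransientFailureError(err)},
    })
}

func (b *onePickBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
    switch s.ConnectivityState {
    case connectivity.Ready, connectivity.Idle:
        b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &staticPicker{result: balancer.PickResult{SubConn: sc}}})
    case connectivity.Connecting:
        b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &staticPicker{err: balancer.ErrNoSubConnAvailable}})
    }
}

func (b *onePickBalancer) Close() {}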
@ -21,6 +21,11 @@
|
|||
package resolver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
|
@ -69,12 +74,18 @@ func GetDefaultScheme() string {
|
|||
}
|
||||
|
||||
// AddressType indicates the address type returned by name resolution.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
type AddressType uint8
|
||||
|
||||
const (
|
||||
// Backend indicates the address is for a backend server.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
Backend AddressType = iota
|
||||
// GRPCLB indicates the address is for a grpclb load balancer.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
GRPCLB
|
||||
)
|
||||
|
||||
|
@ -83,33 +94,75 @@ const (
|
|||
type Address struct {
|
||||
// Addr is the server address on which a connection will be established.
|
||||
Addr string
|
||||
// Type is the type of this address.
|
||||
Type AddressType
|
||||
|
||||
// ServerName is the name of this address.
|
||||
// If non-empty, the ServerName is used as the transport certification authority for
|
||||
// the address, instead of the hostname from the Dial target string. In most cases,
|
||||
// this should not be set.
|
||||
//
|
||||
// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
|
||||
// If Type is GRPCLB, ServerName should be the name of the remote load
|
||||
// balancer, not the name of the backend.
|
||||
//
|
||||
// WARNING: ServerName must only be populated with trusted values. It
|
||||
// is insecure to populate it with data from untrusted inputs since untrusted
|
||||
// values could be used to bypass the authority checks performed by TLS.
|
||||
ServerName string
|
||||
|
||||
// Attributes contains arbitrary data about this address intended for
|
||||
// consumption by the load balancing policy.
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
// Type is the type of this address.
|
||||
//
|
||||
// Deprecated: use Attributes instead.
|
||||
Type AddressType
|
||||
|
||||
// Metadata is the information associated with Addr, which may be used
|
||||
// to make load balancing decision.
|
||||
//
|
||||
// Deprecated: use Attributes instead.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// BuildOption includes additional information for the builder to create
|
||||
// BuildOptions includes additional information for the builder to create
|
||||
// the resolver.
|
||||
type BuildOption struct {
|
||||
// DisableServiceConfig indicates whether resolver should fetch service config data.
|
||||
type BuildOptions struct {
|
||||
// DisableServiceConfig indicates whether a resolver implementation should
|
||||
// fetch service config data.
|
||||
DisableServiceConfig bool
|
||||
// DialCreds is the transport credentials used by the ClientConn for
|
||||
// communicating with the target gRPC service (set via
|
||||
// WithTransportCredentials). In cases where a name resolution service
|
||||
// requires the same credentials, the resolver may use this field. In most
|
||||
// cases though, it is not appropriate, and this field may be ignored.
|
||||
DialCreds credentials.TransportCredentials
|
||||
// CredsBundle is the credentials bundle used by the ClientConn for
|
||||
// communicating with the target gRPC service (set via
|
||||
// WithCredentialsBundle). In cases where a name resolution service
|
||||
// requires the same credentials, the resolver may use this field. In most
|
||||
// cases though, it is not appropriate, and this field may be ignored.
|
||||
CredsBundle credentials.Bundle
|
||||
// Dialer is the custom dialer used by the ClientConn for dialling the
|
||||
// target gRPC service (set via WithDialer). In cases where a name
|
||||
// resolution service requires the same dialer, the resolver may use this
|
||||
// field. In most cases though, it is not appropriate, and this field may
|
||||
// be ignored.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
}
|
||||
|
||||
// State contains the current Resolver state relevant to the ClientConn.
|
||||
type State struct {
|
||||
Addresses []Address // Resolved addresses for the target
|
||||
// ServiceConfig is the parsed service config; obtained from
|
||||
// serviceconfig.Parse.
|
||||
ServiceConfig serviceconfig.Config
|
||||
// Addresses is the latest set of resolved addresses for the target.
|
||||
Addresses []Address
|
||||
|
||||
// TODO: add Err error
|
||||
// ServiceConfig contains the result from parsing the latest service
|
||||
// config. If it is nil, it indicates no service config is present or the
|
||||
// resolver does not provide service configs.
|
||||
ServiceConfig *serviceconfig.ParseResult
|
||||
|
||||
// Attributes contains arbitrary data about the resolver intended for
|
||||
// consumption by the load balancing policy.
|
||||
Attributes *attributes.Attributes
|
||||
}
|
||||
|
||||
// ClientConn contains the callbacks for resolver to notify any updates
|
||||
|
@ -122,6 +175,10 @@ type State struct {
|
|||
type ClientConn interface {
|
||||
// UpdateState updates the state of the ClientConn appropriately.
|
||||
UpdateState(State)
|
||||
// ReportError notifies the ClientConn that the Resolver encountered an
|
||||
// error. The ClientConn will notify the load balancer and begin calling
|
||||
// ResolveNow on the Resolver with exponential backoff.
|
||||
ReportError(error)
|
||||
// NewAddress is called by resolver to notify ClientConn a new list
|
||||
// of resolved addresses.
|
||||
// The address list should be the complete list of resolved addresses.
|
||||
|
@ -133,6 +190,9 @@ type ClientConn interface {
|
|||
//
|
||||
// Deprecated: Use UpdateState instead.
|
||||
NewServiceConfig(serviceConfig string)
|
||||
// ParseServiceConfig parses the provided service config and returns an
|
||||
// object that provides the parsed config.
|
||||
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
|
||||
}
|
||||
|
||||
// Target represents a target for gRPC, as specified in:
|
||||
|
@ -164,14 +224,14 @@ type Builder interface {
|
|||
//
|
||||
// gRPC dial calls Build synchronously, and fails if the returned error is
|
||||
// not nil.
|
||||
Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
|
||||
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
|
||||
// Scheme returns the scheme supported by this resolver.
|
||||
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
Scheme() string
|
||||
}
|
||||
|
||||
// ResolveNowOption includes additional information for ResolveNow.
|
||||
type ResolveNowOption struct{}
|
||||
// ResolveNowOptions includes additional information for ResolveNow.
|
||||
type ResolveNowOptions struct{}
|
||||
|
||||
// Resolver watches for the updates on the specified target.
|
||||
// Updates include address updates and service config updates.
|
||||
|
@ -180,7 +240,7 @@ type Resolver interface {
|
|||
// again. It's just a hint, resolver can ignore this if it's not necessary.
|
||||
//
|
||||
// It could be called multiple times concurrently.
|
||||
ResolveNow(ResolveNowOption)
|
||||
ResolveNow(ResolveNowOptions)
|
||||
// Close closes the resolver.
|
||||
Close()
|
||||
}
|
||||
|
|
|
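Beyond the renames (BuildOption to BuildOptions, ResolveNowOption to ResolveNowOptions), resolver.State now carries the service config as a *serviceconfig.ParseResult plus optional Attributes, and ClientConn gains ReportError and ParseServiceConfig. A minimal hedged sketch of a resolver against that surface; the "static" scheme and field names are illustrative assumptions:

package sketch

import "google.golang.org/grpc/resolver"

type staticBuilder struct {
    addrs []string
}

func (b *staticBuilder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
    var st resolver.State
    for _, a := range b.addrs {
        st.Addresses = append(st.Addresses, resolver.Address{Addr: a})
    }
    cc.UpdateState(st) // replaces the deprecated NewAddress callback
    return nopResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

Registering the builder with resolver.Register would make the scheme dialable as "static:///whatever"; that registration call is outside this diff and mentioned only as a usage hint.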
@ -21,138 +21,192 @@ package grpc
|
|||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// ccResolverWrapper is a wrapper on top of cc for resolvers.
|
||||
// It implements resolver.ClientConnection interface.
|
||||
// It implements resolver.ClientConn interface.
|
||||
type ccResolverWrapper struct {
|
||||
cc *ClientConn
|
||||
resolver resolver.Resolver
|
||||
addrCh chan []resolver.Address
|
||||
scCh chan string
|
||||
done uint32 // accessed atomically; set to 1 when closed.
|
||||
curState resolver.State
|
||||
cc *ClientConn
|
||||
resolverMu sync.Mutex
|
||||
resolver resolver.Resolver
|
||||
done *grpcsync.Event
|
||||
curState resolver.State
|
||||
|
||||
pollingMu sync.Mutex
|
||||
polling chan struct{}
|
||||
}
|
||||
|
||||
// split2 returns the values from strings.SplitN(s, sep, 2).
|
||||
// If sep is not found, it returns ("", "", false) instead.
|
||||
func split2(s, sep string) (string, string, bool) {
|
||||
spl := strings.SplitN(s, sep, 2)
|
||||
if len(spl) < 2 {
|
||||
return "", "", false
|
||||
}
|
||||
return spl[0], spl[1], true
|
||||
}
|
||||
|
||||
// parseTarget splits target into a struct containing scheme, authority and
|
||||
// endpoint.
|
||||
//
|
||||
// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
|
||||
// target}.
|
||||
func parseTarget(target string) (ret resolver.Target) {
|
||||
var ok bool
|
||||
ret.Scheme, ret.Endpoint, ok = split2(target, "://")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// newCCResolverWrapper parses cc.target for scheme and gets the resolver
|
||||
// builder for this scheme and builds the resolver. The monitoring goroutine
|
||||
// for it is not started yet and can be created by calling start().
|
||||
//
|
||||
// If withResolverBuilder dial option is set, the specified resolver will be
|
||||
// used instead.
|
||||
func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
|
||||
rb := cc.dopts.resolverBuilder
|
||||
if rb == nil {
|
||||
return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
|
||||
}
|
||||
|
||||
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
|
||||
// returns a ccResolverWrapper object which wraps the newly built resolver.
|
||||
func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
|
||||
ccr := &ccResolverWrapper{
|
||||
cc: cc,
|
||||
addrCh: make(chan []resolver.Address, 1),
|
||||
scCh: make(chan string, 1),
|
||||
cc: cc,
|
||||
done: grpcsync.NewEvent(),
|
||||
}
|
||||
|
||||
var credsClone credentials.TransportCredentials
|
||||
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
|
||||
credsClone = creds.Clone()
|
||||
}
|
||||
rbo := resolver.BuildOptions{
|
||||
DisableServiceConfig: cc.dopts.disableServiceConfig,
|
||||
DialCreds: credsClone,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
}
|
||||
|
||||
var err error
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
|
||||
// We need to hold the lock here while we assign to the ccr.resolver field
|
||||
// to guard against a data race caused by the following code path,
|
||||
// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
|
||||
// accessing ccr.resolver which is being assigned here.
|
||||
ccr.resolverMu.Lock()
|
||||
defer ccr.resolverMu.Unlock()
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ccr, nil
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
|
||||
ccr.resolverMu.Lock()
|
||||
if !ccr.done.HasFired() {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
}
|
||||
ccr.resolverMu.Unlock()
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) close() {
|
||||
ccr.resolverMu.Lock()
|
||||
ccr.resolver.Close()
|
||||
atomic.StoreUint32(&ccr.done, 1)
|
||||
ccr.done.Fire()
|
||||
ccr.resolverMu.Unlock()
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) isDone() bool {
|
||||
return atomic.LoadUint32(&ccr.done) == 1
|
||||
// poll begins or ends asynchronous polling of the resolver based on whether
|
||||
// err is ErrBadResolverState.
|
||||
func (ccr *ccResolverWrapper) poll(err error) {
|
||||
ccr.pollingMu.Lock()
|
||||
defer ccr.pollingMu.Unlock()
|
||||
if err != balancer.ErrBadResolverState {
|
||||
// stop polling
|
||||
if ccr.polling != nil {
|
||||
close(ccr.polling)
|
||||
ccr.polling = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
if ccr.polling != nil {
|
||||
// already polling
|
||||
return
|
||||
}
|
||||
p := make(chan struct{})
|
||||
ccr.polling = p
|
||||
go func() {
|
||||
for i := 0; ; i++ {
|
||||
ccr.resolveNow(resolver.ResolveNowOptions{})
|
||||
t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
|
||||
select {
|
||||
case <-p:
|
||||
t.Stop()
|
||||
return
|
||||
case <-ccr.done.Done():
|
||||
// Resolver has been closed.
|
||||
t.Stop()
|
||||
return
|
||||
case <-t.C:
|
||||
select {
|
||||
case <-p:
|
||||
return
|
||||
default:
|
||||
}
|
||||
// Timer expired; re-resolve.
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
|
||||
if ccr.isDone() {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
|
||||
channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
|
||||
if channelz.IsOn() {
|
||||
ccr.addChannelzTraceEvent(s)
|
||||
}
|
||||
ccr.cc.updateResolverState(s)
|
||||
ccr.curState = s
|
||||
ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) ReportError(err error) {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
|
||||
ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
|
||||
}
|
||||
|
||||
// NewAddress is called by the resolver implementation to send addresses to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
if ccr.isDone() {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
||||
channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
||||
if channelz.IsOn() {
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||
}
|
||||
ccr.curState.Addresses = addrs
|
||||
ccr.cc.updateResolverState(ccr.curState)
|
||||
ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
|
||||
}
|
||||
|
||||
// NewServiceConfig is called by the resolver implementation to send service
|
||||
// configs to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||
if ccr.isDone() {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
|
||||
c, err := parseServiceConfig(sc)
|
||||
if err != nil {
|
||||
channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
|
||||
if ccr.cc.dopts.disableServiceConfig {
|
||||
channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
|
||||
return
|
||||
}
|
||||
scpr := parseServiceConfig(sc)
|
||||
if scpr.Err != nil {
|
||||
channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||
ccr.poll(balancer.ErrBadResolverState)
|
||||
return
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
||||
}
|
||||
ccr.curState.ServiceConfig = c
|
||||
ccr.cc.updateResolverState(ccr.curState)
|
||||
ccr.curState.ServiceConfig = scpr
|
||||
ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
|
||||
return parseServiceConfig(scJSON)
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||
var updates []string
|
||||
oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
|
||||
newSC, newOK := s.ServiceConfig.(*ServiceConfig)
|
||||
var oldSC, newSC *ServiceConfig
|
||||
var oldOK, newOK bool
|
||||
if ccr.curState.ServiceConfig != nil {
|
||||
oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if s.ServiceConfig != nil {
|
||||
newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
|
||||
updates = append(updates, "service config updated")
|
||||
}
|
||||
|
@ -161,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
|||
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
||||
updates = append(updates, "resolver returned new addresses")
|
||||
}
|
||||
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
|
||||
channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
|
|
|
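The rewritten ccResolverWrapper funnels every update through poll: when updateResolverState returns balancer.ErrBadResolverState, it keeps calling ResolveNow with a backoff until a good update arrives or the resolver is closed. A standalone hedged sketch of that loop; backoffFor and stop stand in for cc.dopts.resolveNowBackoff and the polling/done channels:

package sketch

import (
    "time"

    "google.golang.org/grpc/resolver"
)

// pollResolver keeps nudging the resolver until stop is closed, waiting
// backoffFor(i) between attempts -- the same shape as ccResolverWrapper.poll.
func pollResolver(r resolver.Resolver, stop <-chan struct{}, backoffFor func(retries int) time.Duration) {
    for i := 0; ; i++ {
        r.ResolveNow(resolver.ResolveNowOptions{})
        t := time.NewTimer(backoffFor(i))
        select {
        case <-stop: // a good update arrived or the wrapper was closed
            t.Stop()
            return
        case <-t.C:
            // timer fired; loop and re-resolve
        }
    }
}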
@ -287,13 +287,14 @@ func (o FailFastCallOption) before(c *callInfo) error {
|
|||
}
|
||||
func (o FailFastCallOption) after(c *callInfo) {}
|
||||
|
||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
|
||||
func MaxCallRecvMsgSize(s int) CallOption {
|
||||
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
|
||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
|
||||
// in bytes the client can receive.
|
||||
func MaxCallRecvMsgSize(bytes int) CallOption {
|
||||
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
|
||||
}
|
||||
|
||||
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size the client can receive.
|
||||
// size in bytes the client can receive.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type MaxRecvMsgSizeCallOption struct {
|
||||
MaxRecvMsgSize int
|
||||
|
@ -305,13 +306,14 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
|
|||
}
|
||||
func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
|
||||
|
||||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
|
||||
func MaxCallSendMsgSize(s int) CallOption {
|
||||
return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
|
||||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
|
||||
// in bytes the client can send.
|
||||
func MaxCallSendMsgSize(bytes int) CallOption {
|
||||
return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
|
||||
}
|
||||
|
||||
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size the client can send.
|
||||
// size in bytes the client can send.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type MaxSendMsgSizeCallOption struct {
|
||||
MaxSendMsgSize int
|
||||
|
@ -648,35 +650,58 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
|||
return nil, st.Err()
|
||||
}
|
||||
|
||||
var size int
|
||||
if pf == compressionMade {
|
||||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||
// use this decompressor as the default.
|
||||
if dc != nil {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
size = len(d)
|
||||
} else {
|
||||
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
// Read from LimitReader with limit max+1. So if the underlying
|
||||
// reader is over limit, the result will be bigger than max.
|
||||
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
} else {
|
||||
size = len(d)
|
||||
}
|
||||
if len(d) > maxReceiveMessageSize {
|
||||
if size > maxReceiveMessageSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
// implementation.
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Using compressor, decompress d, returning data and size.
|
||||
// Optionally, if data will be over maxReceiveMessageSize, just return the size.
|
||||
func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
|
||||
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if sizer, ok := compressor.(interface {
|
||||
DecompressedSize(compressedBytes []byte) int
|
||||
}); ok {
|
||||
if size := sizer.DecompressedSize(d); size >= 0 {
|
||||
if size > maxReceiveMessageSize {
|
||||
return nil, size, nil
|
||||
}
|
||||
// size is used as an estimate to size the buffer, but we
|
||||
// will read more data if available.
|
||||
// +MinRead so ReadFrom will not reallocate if size is correct.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
|
||||
bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
return buf.Bytes(), int(bytesRead), err
|
||||
}
|
||||
}
|
||||
// Read from LimitReader with limit max+1. So if the underlying
|
||||
// reader is over limit, the result will be bigger than max.
|
||||
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
return d, len(d), err
|
||||
}
|
||||
|
||||
// For the two compressor parameters, both should not be set, but if they are,
|
||||
// dc takes precedence over compressor.
|
||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||
|
@ -848,7 +873,7 @@ type channelzData struct {
|
|||
|
||||
// The SupportPackageIsVersion variables are referenced from generated protocol
|
||||
// buffer files to ensure compatibility with the gRPC version used. The latest
|
||||
// support package version is 5.
|
||||
// support package version is 6.
|
||||
//
|
||||
// Older versions are kept for compatibility. They may be removed if
|
||||
// compatibility cannot be maintained.
|
||||
|
@ -858,6 +883,7 @@ const (
|
|||
SupportPackageIsVersion3 = true
|
||||
SupportPackageIsVersion4 = true
|
||||
SupportPackageIsVersion5 = true
|
||||
SupportPackageIsVersion6 = true
|
||||
)
|
||||
|
||||
const grpcUA = "grpc-go/" + Version
|
||||
|
|
|
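recvAndDecompress now delegates to the new decompress helper, which asks the registered compressor for an optional DecompressedSize hint so oversized messages can be rejected before inflating and the read buffer can be pre-sized. A hedged sketch of a compressor exposing that hint for gzip streams, whose ISIZE trailer records the uncompressed length mod 2^32; the wrapper type is illustrative:

package sketch

import (
    "encoding/binary"

    "google.golang.org/grpc/encoding"
)

// sizedGzip wraps an already registered gzip compressor, e.g. the value
// returned by encoding.GetCompressor("gzip"), and adds the size hint.
type sizedGzip struct {
    encoding.Compressor
}

// DecompressedSize returns the uncompressed length recorded in the gzip
// ISIZE trailer, or -1 if it cannot tell (the helper then falls back to
// the LimitReader path shown above).
func (sizedGzip) DecompressedSize(compressed []byte) int {
    if len(compressed) < 4 {
        return -1
    }
    return int(binary.LittleEndian.Uint32(compressed[len(compressed)-4:]))
}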
@ -116,6 +116,8 @@ type serverOptions struct {
|
|||
dc Decompressor
|
||||
unaryInt UnaryServerInterceptor
|
||||
streamInt StreamServerInterceptor
|
||||
chainUnaryInts []UnaryServerInterceptor
|
||||
chainStreamInts []StreamServerInterceptor
|
||||
inTapHandle tap.ServerInHandle
|
||||
statsHandler stats.Handler
|
||||
maxConcurrentStreams uint32
|
||||
|
@ -130,6 +132,7 @@ type serverOptions struct {
|
|||
readBufferSize int
|
||||
connectionTimeout time.Duration
|
||||
maxHeaderListSize *uint32
|
||||
headerTableSize *uint32
|
||||
}
|
||||
|
||||
var defaultServerOptions = serverOptions{
|
||||
|
@ -310,6 +313,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
|
|||
})
|
||||
}
|
||||
|
||||
// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
|
||||
// for unary RPCs. The first interceptor will be the outer most,
|
||||
// while the last interceptor will be the inner most wrapper around the real call.
|
||||
// All unary interceptors added by this method will be chained.
|
||||
func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
|
||||
})
|
||||
}
|
||||
|
||||
// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
|
||||
// server. Only one stream interceptor can be installed.
|
||||
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
|
||||
|
@ -321,6 +334,16 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
|
|||
})
|
||||
}
|
||||
|
||||
// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
|
||||
// for stream RPCs. The first interceptor will be the outer most,
|
||||
// while the last interceptor will be the inner most wrapper around the real call.
|
||||
// All stream interceptors added by this method will be chained.
|
||||
func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.chainStreamInts = append(o.chainStreamInts, interceptors...)
|
||||
})
|
||||
}
|
||||
|
||||
// InTapHandle returns a ServerOption that sets the tap handle for all the server
|
||||
// transport to be created. Only one can be installed.
|
||||
func InTapHandle(h tap.ServerInHandle) ServerOption {
|
||||
|
@ -343,8 +366,8 @@ func StatsHandler(h stats.Handler) ServerOption {
|
|||
// unknown service handler. The provided method is a bidi-streaming RPC service
|
||||
// handler that will be invoked instead of returning the "unimplemented" gRPC
|
||||
// error whenever a request is received for an unregistered service or method.
|
||||
// The handling function has full access to the Context of the request and the
|
||||
// stream, and the invocation bypasses interceptors.
|
||||
// The handling function and stream interceptor (if set) have full access to
|
||||
// the ServerStream, including its Context.
|
||||
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.unknownStreamDesc = &StreamDesc{
|
||||
|
@ -377,6 +400,16 @@ func MaxHeaderListSize(s uint32) ServerOption {
|
|||
})
|
||||
}
|
||||
|
||||
// HeaderTableSize returns a ServerOption that sets the size of dynamic
|
||||
// header table for stream.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func HeaderTableSize(s uint32) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.headerTableSize = &s
|
||||
})
|
||||
}
|
||||
|
||||
// NewServer creates a gRPC server which has no service registered and has not
|
||||
// started to accept requests yet.
|
||||
func NewServer(opt ...ServerOption) *Server {
|
||||
|
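The ServerOptions added above are additive: ChainUnaryInterceptor and ChainStreamInterceptor append to an interceptor chain instead of the single-slot UnaryInterceptor/StreamInterceptor options, and HeaderTableSize tunes the HTTP/2 dynamic header table. A hedged usage sketch; the logging interceptor and the 4096-byte table size are illustrative values:

package main

import (
    "context"
    "log"

    "google.golang.org/grpc"
)

func logging(name string) grpc.UnaryServerInterceptor {
    return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
        log.Printf("[%s] %s", name, info.FullMethod)
        return handler(ctx, req)
    }
}

func main() {
    srv := grpc.NewServer(
        // "outer" runs first, then "inner", then the method handler.
        grpc.ChainUnaryInterceptor(logging("outer"), logging("inner")),
        grpc.HeaderTableSize(4096), // experimental option added above
    )
    _ = srv
}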
@ -393,6 +426,8 @@ func NewServer(opt ...ServerOption) *Server {
|
|||
done: grpcsync.NewEvent(),
|
||||
czData: new(channelzData),
|
||||
}
|
||||
chainUnaryServerInterceptors(s)
|
||||
chainStreamServerInterceptors(s)
|
||||
s.cv = sync.NewCond(&s.mu)
|
||||
if EnableTracing {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
|
@ -647,7 +682,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
|
|||
s.mu.Lock()
|
||||
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
|
||||
channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
|
||||
rawConn.Close()
|
||||
}
|
||||
rawConn.SetDeadline(time.Time{})
|
||||
|
@ -686,6 +721,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
|||
ReadBufferSize: s.opts.readBufferSize,
|
||||
ChannelzParentID: s.channelzID,
|
||||
MaxHeaderListSize: s.opts.maxHeaderListSize,
|
||||
HeaderTableSize: s.opts.headerTableSize,
|
||||
}
|
||||
st, err := transport.NewServerTransport("http2", c, config)
|
||||
if err != nil {
|
||||
|
@ -693,7 +729,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
|||
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -832,12 +868,12 @@ func (s *Server) incrCallsFailed() {
|
|||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
|
||||
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
|
||||
if err != nil {
|
||||
grpclog.Errorln("grpc: server failed to encode response: ", err)
|
||||
channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err)
|
||||
return err
|
||||
}
|
||||
compData, err := compress(data, cp, comp)
|
||||
if err != nil {
|
||||
grpclog.Errorln("grpc: server failed to compress response: ", err)
|
||||
channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err)
|
||||
return err
|
||||
}
|
||||
hdr, payload := msgHeader(data, compData)
|
||||
|
@ -852,42 +888,93 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
|
|||
return err
|
||||
}
|
||||
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}()
|
||||
// chainUnaryServerInterceptors chains all unary server interceptors into one.
|
||||
func chainUnaryServerInterceptors(s *Server) {
|
||||
// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
|
||||
// be executed before any other chained interceptors.
|
||||
interceptors := s.opts.chainUnaryInts
|
||||
if s.opts.unaryInt != nil {
|
||||
interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
|
||||
}
|
||||
sh := s.opts.statsHandler
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
begin := &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
|
||||
var chainedInt UnaryServerInterceptor
|
||||
if len(interceptors) == 0 {
|
||||
chainedInt = nil
|
||||
} else if len(interceptors) == 1 {
|
||||
chainedInt = interceptors[0]
|
||||
} else {
|
||||
chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
|
||||
return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
BeginTime: beginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}()
|
||||
}
|
||||
if trInfo != nil {
|
||||
defer trInfo.tr.Finish()
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
|
||||
s.opts.unaryInt = chainedInt
|
||||
}
|
||||
|
||||
// getChainUnaryHandler recursively generate the chained UnaryHandler
|
||||
func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
|
||||
if curr == len(interceptors)-1 {
|
||||
return finalHandler
|
||||
}
|
||||
|
||||
return func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||
sh := s.opts.statsHandler
|
||||
if sh != nil || trInfo != nil || channelz.IsOn() {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
}
|
||||
var statsBegin *stats.Begin
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
statsBegin = &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), statsBegin)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
}
|
||||
// The deferred error handling for tracing, stats handler and channelz are
|
||||
// combined into one function to reduce stack usage -- a defer takes ~56-64
|
||||
// bytes on the stack, so overflowing the stack will require a stack
|
||||
// re-allocation, which is expensive.
|
||||
//
|
||||
// To maintain behavior similar to separate deferred statements, statements
|
||||
// should be executed in the reverse order. That is, tracing first, stats
|
||||
// handler second, and channelz last. Note that panics *within* defers will
|
||||
// lead to different behavior, but that's an acceptable compromise; that
|
||||
// would be undefined behavior territory anyway.
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
if trInfo != nil {
|
||||
if err != nil && err != io.EOF {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
|
||||
if sh != nil {
|
||||
end := &stats.End{
|
||||
BeginTime: statsBegin.BeginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}
|
||||
|
||||
if channelz.IsOn() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
@ -960,7 +1047,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
if err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
if e := t.WriteStatus(stream, st); e != nil {
|
||||
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
|
||||
channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
@ -1005,7 +1092,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
trInfo.tr.SetError()
|
||||
}
|
||||
if e := t.WriteStatus(stream, appStatus); e != nil {
|
||||
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
}
|
||||
if binlog != nil {
|
||||
if h, _ := stream.Header(); h.Len() > 0 {
|
||||
|
@ -1032,9 +1119,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
// The entire stream is done (for unary RPC only).
|
||||
return err
|
||||
}
|
||||
if s, ok := status.FromError(err); ok {
|
||||
if e := t.WriteStatus(stream, s); e != nil {
|
||||
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
if sts, ok := status.FromError(err); ok {
|
||||
if e := t.WriteStatus(stream, sts); e != nil {
|
||||
channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
}
|
||||
} else {
|
||||
switch st := err.(type) {
|
||||
|
@ -1084,34 +1171,52 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
return err
|
||||
}
|
||||
|
||||
// chainStreamServerInterceptors chains all stream server interceptors into one.
|
||||
func chainStreamServerInterceptors(s *Server) {
|
||||
// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
|
||||
// be executed before any other chained interceptors.
|
||||
interceptors := s.opts.chainStreamInts
|
||||
if s.opts.streamInt != nil {
|
||||
interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
|
||||
}
|
||||
|
||||
var chainedInt StreamServerInterceptor
|
||||
if len(interceptors) == 0 {
|
||||
chainedInt = nil
|
||||
} else if len(interceptors) == 1 {
|
||||
chainedInt = interceptors[0]
|
||||
} else {
|
||||
chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
|
||||
return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
|
||||
}
|
||||
}
|
||||
|
||||
s.opts.streamInt = chainedInt
|
||||
}
|
||||
|
||||
// getChainStreamHandler recursively generate the chained StreamHandler
|
||||
func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
|
||||
if curr == len(interceptors)-1 {
|
||||
return finalHandler
|
||||
}
|
||||
|
||||
return func(srv interface{}, ss ServerStream) error {
|
||||
return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}()
|
||||
}
|
||||
sh := s.opts.statsHandler
|
||||
var statsBegin *stats.Begin
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
begin := &stats.Begin{
|
||||
statsBegin = &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
BeginTime: beginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}()
|
||||
sh.HandleRPC(stream.Context(), statsBegin)
|
||||
}
|
||||
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||
ss := &serverStream{
|
||||
|
@ -1126,6 +1231,41 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
statsHandler: sh,
|
||||
}
|
||||
|
||||
if sh != nil || trInfo != nil || channelz.IsOn() {
|
||||
// See comment in processUnaryRPC on defers.
|
||||
defer func() {
|
||||
if trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if err != nil && err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
ss.trInfo.tr.Finish()
|
||||
ss.trInfo.tr = nil
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
|
||||
if sh != nil {
|
||||
end := &stats.End{
|
||||
BeginTime: statsBegin.BeginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}
|
||||
|
||||
if channelz.IsOn() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
ss.binlog = binarylog.GetMethodLogger(stream.Method())
|
||||
if ss.binlog != nil {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
|
@ -1179,16 +1319,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
ss.mu.Lock()
|
||||
if err != nil && err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
ss.trInfo.tr.Finish()
|
||||
ss.trInfo.tr = nil
|
||||
ss.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
var appErr error
|
||||
var server interface{}
|
||||
|
@ -1259,7 +1389,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
|||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
|
@ -1300,7 +1430,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
|||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
|
|
|
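chainUnaryServerInterceptors and getChainUnaryHandler (and their stream counterparts above) build the chain recursively: interceptor i invokes interceptor i+1 through a freshly built handler, so the first registered interceptor is outermost. A standalone hedged sketch of the same recursion and the call order it produces; the chain and tag helpers are illustrative, not gRPC internals:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc"
)

func chain(ints []grpc.UnaryServerInterceptor, curr int, info *grpc.UnaryServerInfo, final grpc.UnaryHandler) grpc.UnaryHandler {
    if curr == len(ints)-1 {
        return final
    }
    return func(ctx context.Context, req interface{}) (interface{}, error) {
        return ints[curr+1](ctx, req, info, chain(ints, curr+1, info, final))
    }
}

func tag(name string) grpc.UnaryServerInterceptor {
    return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h grpc.UnaryHandler) (interface{}, error) {
        fmt.Println("enter", name)
        defer fmt.Println("leave", name)
        return h(ctx, req)
    }
}

func main() {
    ints := []grpc.UnaryServerInterceptor{tag("outer"), tag("inner")}
    info := &grpc.UnaryServerInfo{FullMethod: "/demo.Svc/Call"}
    handler := func(ctx context.Context, req interface{}) (interface{}, error) { return "ok", nil }
    // Prints: enter outer, enter inner, leave inner, leave outer.
    _, _ = ints[0](context.Background(), nil, info, chain(ints, 0, info, handler))
}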
@ -136,9 +136,9 @@ type retryPolicy struct {
|
|||
maxAttempts int
|
||||
|
||||
// Exponential backoff parameters. The initial retry attempt will occur at
|
||||
// random(0, initialBackoffMS). In general, the nth attempt will occur at
|
||||
// random(0, initialBackoff). In general, the nth attempt will occur at
|
||||
// random(0,
|
||||
// min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
|
||||
// min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
|
||||
//
|
||||
// These fields are required and must be greater than zero.
|
||||
initialBackoff time.Duration
|
||||
|
@ -261,20 +261,17 @@ type jsonSC struct {
|
|||
}
|
||||
|
||||
func init() {
|
||||
internal.ParseServiceConfig = func(sc string) (interface{}, error) {
|
||||
return parseServiceConfig(sc)
|
||||
}
|
||||
internal.ParseServiceConfigForTesting = parseServiceConfig
|
||||
}
|
||||
|
||||
func parseServiceConfig(js string) (*ServiceConfig, error) {
|
||||
func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
if len(js) == 0 {
|
||||
return nil, fmt.Errorf("no JSON service config provided")
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
|
||||
}
|
||||
var rsc jsonSC
|
||||
err := json.Unmarshal([]byte(js), &rsc)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
sc := ServiceConfig{
|
||||
LB: rsc.LoadBalancingPolicy,
|
||||
|
@ -288,7 +285,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
|||
if len(lbcfg) != 1 {
|
||||
err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
|
||||
grpclog.Warningf(err.Error())
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
var name string
|
||||
var jsonCfg json.RawMessage
|
||||
|
@ -303,17 +300,25 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
|||
var err error
|
||||
sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
|
||||
}
|
||||
} else if string(jsonCfg) != "{}" {
|
||||
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
|
||||
}
|
||||
break
|
||||
}
|
||||
if sc.lbConfig == nil {
|
||||
// We had a loadBalancingConfig field but did not encounter a
|
||||
// supported policy. The config is considered invalid in this
|
||||
// case.
|
||||
err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
|
||||
grpclog.Warningf(err.Error())
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
}
|
||||
|
||||
if rsc.MethodConfig == nil {
|
||||
return &sc, nil
|
||||
return &serviceconfig.ParseResult{Config: &sc}
|
||||
}
|
||||
for _, m := range *rsc.MethodConfig {
|
||||
if m.Name == nil {
|
||||
|
@ -322,7 +327,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
|||
d, err := parseDuration(m.Timeout)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
mc := MethodConfig{
|
||||
|
@ -331,7 +336,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
|||
}
|
||||
if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
if m.MaxRequestMessageBytes != nil {
|
||||
if *m.MaxRequestMessageBytes > int64(maxInt) {
|
||||
|
@ -356,13 +361,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
|||
|
||||
if sc.retryThrottling != nil {
|
||||
if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
|
||||
return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
|
||||
}
|
||||
if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
|
||||
return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)}
|
||||
}
|
||||
}
|
||||
return &sc, nil
|
||||
return &serviceconfig.ParseResult{Config: &sc}
|
||||
}
|
||||
|
||||
func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
|
||||
|
|
|
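parseServiceConfig no longer returns (*ServiceConfig, error); every outcome is wrapped in a *serviceconfig.ParseResult, and resolvers obtain that result through ClientConn.ParseServiceConfig and attach it to the State they push. A hedged sketch of that flow inside a resolver; the JSON literal and function name are illustrative:

package sketch

import "google.golang.org/grpc/resolver"

// pushWithConfig sends addresses plus a parsed service config to the
// ClientConn using the resolver surface shown earlier in this diff.
func pushWithConfig(cc resolver.ClientConn, addrs []resolver.Address) {
    const scJSON = `{"loadBalancingConfig": [{"round_robin": {}}]}`
    pr := cc.ParseServiceConfig(scJSON)
    if pr.Err != nil {
        // The ClientConn notifies the balancer and re-resolves with backoff.
        cc.ReportError(pr.Err)
        return
    }
    cc.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: pr})
}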
@ -22,27 +22,20 @@
|
|||
// This package is EXPERIMENTAL.
|
||||
package serviceconfig
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
// Config represents an opaque data structure holding a service config.
|
||||
type Config interface {
|
||||
isConfig()
|
||||
isServiceConfig()
|
||||
}
|
||||
|
||||
// LoadBalancingConfig represents an opaque data structure holding a load
|
||||
// balancer config.
|
||||
// balancing config.
|
||||
type LoadBalancingConfig interface {
|
||||
isLoadBalancingConfig()
|
||||
}
|
||||
|
||||
// Parse parses the JSON service config provided into an internal form or
|
||||
// returns an error if the config is invalid.
|
||||
func Parse(ServiceConfigJSON string) (Config, error) {
|
||||
c, err := internal.ParseServiceConfig(ServiceConfigJSON)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.(Config), err
|
||||
// ParseResult contains a service config or an error. Exactly one must be
|
||||
// non-nil.
|
||||
type ParseResult struct {
|
||||
Config Config
|
||||
Err error
|
||||
}
|
||||
|
|
|
@ -91,6 +91,8 @@ type InHeader struct {
|
|||
LocalAddr net.Addr
|
||||
// Compression is the compression algorithm used for the RPC.
|
||||
Compression string
|
||||
// Header contains the header metadata received.
|
||||
Header metadata.MD
|
||||
}
|
||||
|
||||
// IsClient indicates if the stats information is from client side.
|
||||
|
@ -104,6 +106,9 @@ type InTrailer struct {
|
|||
Client bool
|
||||
// WireLength is the wire length of trailer.
|
||||
WireLength int
|
||||
// Trailer contains the trailer metadata received from the server. This
|
||||
// field is only valid if this InTrailer is from the client side.
|
||||
Trailer metadata.MD
|
||||
}
|
||||
|
||||
// IsClient indicates if the stats information is from client side.
|
||||
|
@ -146,6 +151,8 @@ type OutHeader struct {
|
|||
LocalAddr net.Addr
|
||||
// Compression is the compression algorithm used for the RPC.
|
||||
Compression string
|
||||
// Header contains the header metadata sent.
|
||||
Header metadata.MD
|
||||
}
|
||||
|
||||
// IsClient indicates if this stats information is from client side.
|
||||
|
@ -159,6 +166,9 @@ type OutTrailer struct {
|
|||
Client bool
|
||||
// WireLength is the wire length of trailer.
|
||||
WireLength int
|
||||
// Trailer contains the trailer metadata sent to the client. This
|
||||
// field is only valid if this OutTrailer is from the server side.
|
||||
Trailer metadata.MD
|
||||
}
|
||||
|
||||
// IsClient indicates if this stats information is from client side.
|
||||
|
@ -176,6 +186,7 @@ type End struct {
|
|||
EndTime time.Time
|
||||
// Trailer contains the trailer metadata received from the server. This
|
||||
// field is only valid if this End is from the client side.
|
||||
// Deprecated: use Trailer in InTrailer instead.
|
||||
Trailer metadata.MD
|
||||
// Error is the error the RPC ended with. It is an error generated from
|
||||
// status.Status and can be converted back to status.Status using
|
||||
|
|
|
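stats.InHeader and OutHeader now carry the negotiated Compression and the header metadata, and the trailer events carry the Trailer metadata itself. A hedged sketch of a stats.Handler that reads the new fields; it could be installed on a client with grpc.WithStatsHandler, and the mdLogger name is illustrative:

package sketch

import (
    "context"
    "log"

    "google.golang.org/grpc/stats"
)

type mdLogger struct{}

func (mdLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (mdLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (mdLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (mdLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
    switch v := s.(type) {
    case *stats.InHeader:
        log.Printf("compression=%q header=%v", v.Compression, v.Header)
    case *stats.InTrailer:
        log.Printf("trailer=%v", v.Trailer) // populated on the client side
    }
}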
@ -31,7 +31,6 @@ import (
|
|||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/balancerload"
|
||||
"google.golang.org/grpc/internal/binarylog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
|
@ -488,7 +487,7 @@ func (cs *clientStream) shouldRetry(err error) error {
|
|||
pushback := 0
|
||||
hasPushback := false
|
||||
if cs.attempt.s != nil {
|
||||
if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
|
||||
if !cs.attempt.s.TrailersOnly() {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -498,13 +497,13 @@ func (cs *clientStream) shouldRetry(err error) error {
|
|||
if len(sps) == 1 {
|
||||
var e error
|
||||
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
|
||||
grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
|
||||
channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
|
||||
cs.retryThrottler.throttle() // This counts as a failure for throttling.
|
||||
return err
|
||||
}
|
||||
hasPushback = true
|
||||
} else if len(sps) > 1 {
|
||||
grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
|
||||
channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
|
||||
cs.retryThrottler.throttle() // This counts as a failure for throttling.
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -41,9 +41,6 @@ func methodFamily(m string) string {
|
|||
if i := strings.Index(m, "/"); i >= 0 {
|
||||
m = m[:i] // remove everything from second slash
|
||||
}
|
||||
if i := strings.LastIndex(m, "."); i >= 0 {
|
||||
m = m[i+1:] // cut down to last dotted component
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
|
|
|
@ -19,4 +19,4 @@
|
|||
package grpc
|
||||
|
||||
// Version is the current grpc version.
|
||||
const Version = "1.23.1"
|
||||
const Version = "1.28.1"
|
||||
|
|