Optimize retry policy.
Parent: c038e31ca3
Commit: 13ed164567
@@ -221,9 +221,27 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
 					considerSecondary = false
 					action = "Retry: Secondary URL returned 404"
 				case err != nil:
-					// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation
-					if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) {
-						action = "Retry: net.Error and Temporary() or Timeout()"
+					// NOTE: Protocol Responder returns non-nil if the REST API returns an invalid status code for the invoked operation.
+					// Use ServiceCode to verify whether the error originated on the storage service side;
+					// ServiceCode is set only when a storage-service error occurred.
+					if stErr, ok := err.(StorageError); ok {
+						if stErr.Temporary() {
+							action = "Retry: StorageError with error service code and Temporary()"
+						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove it once the protocol layer fixes the issue where a net.Error is wrapped as a StorageError
+							action = "Retry: StorageError with success status code"
+						} else {
+							action = "NoRetry: StorageError not Temporary() and without retriable status code"
+						}
+					} else if netErr, ok := err.(net.Error); ok {
+						// Use a non-retriable net.Error list rather than a retriable list:
+						// some errors that should be retried, such as 'connection reset by peer' and
+						// 'transport connection broken', do not implement Temporary().
+						// So the SDK retries in most cases, unless the error is known to be non-retriable.
+						if !isNotRetriable(netErr) {
+							action = "Retry: net.Error and not in the non-retriable list"
+						} else {
+							action = "NoRetry: net.Error and in the non-retriable list"
+						}
+					} else {
+						action = "NoRetry: unrecognized error"
+					}
 					} else {
 						action = "NoRetry: unrecognized error"
 					}
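
Why the list-based check matters: an error like 'connection reset by peer' typically reaches the caller as a net.Error whose Temporary() and Timeout() both report false, so the old Temporary()/Timeout() gate above would give up on it, while the new path keeps it retriable because its inner error is not in the non-retriable type list. A minimal standalone illustration (not part of the commit):

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	// A broken connection as it often surfaces: a *net.OpError whose inner error
	// implements neither Temporary() nor Timeout().
	var err net.Error = &net.OpError{Op: "read", Net: "tcp", Err: errors.New("connection reset by peer")}

	// Old gate: both report false, so the request would not have been retried.
	fmt.Println(err.Temporary(), err.Timeout()) // false false

	// New gate: the inner error is not one of the non-retriable types
	// (*net.DNSError, *net.AddrError, ...), so isNotRetriable (added below)
	// returns false and the SDK retries the request.
}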
@@ -237,11 +255,18 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
 				if err != nil {
 					tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
 				} else {
-					// TODO: Right now, we've decided to leak the per-try Context until the user's Context is canceled.
-					// Another option is that we wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
+					// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
 					// So, when the user closes the Body, the our per-try context gets closed too.
 					// Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context)
-					_ = tryCancel // So, for now, we don't call cancel: cancel()
+					if response == nil || response.Response() == nil {
+						// We panic in the case that response or response.Response() is nil,
+						// as for the client, the response should not be nil if the request was sent and the operation executed successfully.
+						// Another option is to execute the cancel function when response or response.Response() is nil,
+						// as in this case the current per-try context has nothing left to do.
+						panic("invalid state, response should not be nil when the operation is executed successfully")
+					}
+
+					response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
 				}
 				break // Don't retry
 			}
@@ -259,6 +284,72 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
 	})
 }
 
+// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
+type contextCancelReadCloser struct {
+	cf   context.CancelFunc
+	body io.ReadCloser
+}
+
+func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
+	return rc.body.Read(p)
+}
+
+func (rc *contextCancelReadCloser) Close() error {
+	err := rc.body.Close()
+	if rc.cf != nil {
+		rc.cf()
+	}
+	return err
+}
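
To make the hand-off concrete: closing the response body is what finally releases the last per-try context. Below is a self-contained sketch of that behavior; cancelOnCloseBody is a local stand-in that mirrors contextCancelReadCloser, not SDK code.

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// cancelOnCloseBody forwards reads and invokes the context's cancel func on Close,
// mirroring the contextCancelReadCloser type added above.
type cancelOnCloseBody struct {
	cf   context.CancelFunc
	body io.ReadCloser
}

func (b *cancelOnCloseBody) Read(p []byte) (int, error) { return b.body.Read(p) }

func (b *cancelOnCloseBody) Close() error {
	err := b.body.Close()
	if b.cf != nil {
		b.cf()
	}
	return err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	resp := &http.Response{Body: io.NopCloser(strings.NewReader("payload"))}
	resp.Body = &cancelOnCloseBody{cf: cancel, body: resp.Body}

	data, _ := io.ReadAll(resp.Body) // reads pass straight through to the inner body
	resp.Body.Close()                // Close also cancels the per-try context

	<-ctx.Done()                         // returns immediately now that Close has run
	fmt.Println(string(data), ctx.Err()) // payload context canceled
}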
+
+// isNotRetriable checks if the provided net.Error isn't retriable.
+func isNotRetriable(errToParse net.Error) bool {
+	// A nil error has nothing to retry, so treat it as NOT retriable.
+	if errToParse == nil {
+		return true
+	}
+
+	// The error is either temporary or a timeout, so it IS retriable (i.e. not "not retriable").
+	if errToParse.Temporary() || errToParse.Timeout() {
+		return false
+	}
+
+	genericErr := error(errToParse)
+
+	// From here on, the error is neither Temporary() nor Timeout().
+	switch err := errToParse.(type) {
+	case *net.OpError:
+		// The net.Error is a *net.OpError whose inner error is nil, so this is not retriable.
+		if err.Err == nil {
+			return true
+		}
+		genericErr = err.Err
+	}
+
+	switch genericErr.(type) {
+	case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
+		// If the error is one of the ones listed, then it is NOT retriable.
+		return true
+	}
+
+	// Assume the error is retriable.
+	return false
+}
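
A quick illustration of the classification, written as if it sat next to isNotRetriable (for example in a package test); it assumes the errors, fmt and net imports. A DNS failure falls into the non-retriable type list, while a *net.OpError wrapping a plain 'connection reset by peer' error does not and therefore stays retriable:

func ExampleIsNotRetriableClassification() {
	// In the non-retriable type list: a DNS failure that is neither Temporary() nor Timeout().
	dnsErr := &net.DNSError{Err: "no such host", Name: "example.invalid"}

	// Not in the list: a *net.OpError wrapping a plain error, also neither Temporary()
	// nor Timeout() -- exactly the case the list-based approach keeps retriable.
	opErr := &net.OpError{Op: "read", Net: "tcp", Err: errors.New("connection reset by peer")}

	fmt.Println(isNotRetriable(dnsErr)) // true  -> "NoRetry"
	fmt.Println(isNotRetriable(opErr))  // false -> "Retry"
}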
+
+var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
+
+func isSuccessStatusCode(resp *http.Response) bool {
+	if resp == nil {
+		return false
+	}
+	for _, i := range successStatusCodes {
+		if i == resp.StatusCode {
+			return true
+		}
+	}
+	return false
+}
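
For reference, ranged downloads normally answer with 206 Partial Content, which is why that code is on the list. A tiny same-package illustration (assumes the fmt and net/http imports):

func ExampleIsSuccessStatusCode() {
	fmt.Println(isSuccessStatusCode(&http.Response{StatusCode: http.StatusPartialContent}))     // true: 206 from a ranged GET
	fmt.Println(isSuccessStatusCode(&http.Response{StatusCode: http.StatusServiceUnavailable})) // false: 503 is not a success
	fmt.Println(isSuccessStatusCode(nil))                                                       // false: no response at all
}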
+
 // According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
 var logf = func(format string, a ...interface{}) {}
 
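
Since logf is a package-level variable, it can be pointed at a real sink from inside the package while debugging, for example in a test file; the default empty function leaves the call sites free to be optimized away, as the comment above notes. Illustrative only (assumes fmt and os are imported):

func init() {
	// Redirect the package's no-op logger to stderr while investigating retries.
	logf = func(format string, a ...interface{}) {
		fmt.Fprintf(os.Stderr, format, a...)
	}
}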
@@ -106,7 +106,7 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
 			return n, err // All retries exhausted
 		}
 
-		if netErr, ok := err.(net.Error); ok && (netErr.Timeout() || netErr.Temporary()) {
+		if _, ok := err.(net.Error); ok {
 			continue
 			// Loop around and try to get and read from new stream.
 		}
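
The retryReader change above widens the retry condition from temporary/timeout net.Errors to any net.Error. The surrounding loop follows the usual re-request-and-continue pattern, roughly sketched below; readWithRetries and getStream are hypothetical names, not the SDK's retryReader API (assumes the bytes, io and net imports):

// readWithRetries keeps re-requesting the remainder of a stream whenever a
// network error interrupts a read, up to maxRetries additional attempts.
func readWithRetries(getStream func(offset int64) (io.ReadCloser, error), maxRetries int) ([]byte, error) {
	var buf bytes.Buffer
	var offset int64
	for try := 0; ; try++ {
		stream, err := getStream(offset) // re-request starting at the current offset
		if err == nil {
			var n int64
			n, err = io.Copy(&buf, stream)
			offset += n
			stream.Close()
			if err == nil {
				return buf.Bytes(), nil // finished cleanly
			}
		}
		if try >= maxRetries {
			return buf.Bytes(), err // all retries exhausted
		}
		if _, ok := err.(net.Error); ok {
			continue // loop around and read from a new stream
		}
		return buf.Bytes(), err // not a network error: don't retry
	}
}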