* bring in goldpinger tests

Co-authored-by: Ramiro Gamarra <ragamarr@microsoft.com>

* add integration build tags

* yaml formatting

* remove unnecessary logging line

Co-authored-by: Ramiro Gamarra <ragamarr@microsoft.com>
Mathew Merrick 2020-10-28 23:39:27 -07:00 committed by GitHub
Parent f5dff95fe3
Commit fd16fcc6a7
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
43 changed files with 5468 additions and 3 deletions


@@ -120,8 +120,6 @@ stages:
bash <(curl -s https://codecov.io/bash)
gocov convert coverage.out > coverage.json
gocov-xml < coverage.json > coverage.xml
echo listing cluster definitions
ls $(modulePath)/test/e2e/kubernetes/*
workingDirectory: "$(modulePath)"
name: "Coverage"
displayName: "Generate Coverage Reports"
@@ -181,7 +179,7 @@ stages:
pathtoPublish: "$(Build.ArtifactStagingDirectory)"
condition: succeeded()
- publish: $(modulePath)/test/e2e/kubernetes/
- publish: $(modulePath)/test/apimodels/
artifact: clusterdefinitions
- task: AzureCLI@1

1
go.sum

@@ -85,6 +85,7 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libnetwork v0.5.6 h1:hnGiypBsZR6PW1I8lqaBHh06U6LCJbI3IhOvfsZiymY=
github.com/docker/libnetwork v0.5.6/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=


@@ -0,0 +1,36 @@
// +build integration
package goldpinger
import (
"context"
"encoding/json"
"fmt"
"net/http"
)
type Client struct {
Host string
}
func (c *Client) CheckAll(ctx context.Context) (CheckAllJSON, error) {
endpoint := fmt.Sprintf("%s/check_all", c.Host)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
if err != nil {
return CheckAllJSON{}, err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return CheckAllJSON{}, err
}
defer res.Body.Close()
var jsonResp CheckAllJSON
if err := json.NewDecoder(res.Body).Decode(&jsonResp); err != nil {
return CheckAllJSON{}, err
}
return jsonResp, nil
}


@@ -0,0 +1,38 @@
// +build integration
package goldpinger
import "time"
type CheckAllJSON struct {
DNSResults map[string]interface{} `json:"dnsResults"`
Hosts []HostJSON `json:"hosts"`
Responses map[string]PodStatusJSON `json:"responses"`
}
type HostJSON struct {
HostIP string `json:"hostIP"`
PodIP string `json:"podIP"`
PodName string `json:"podName"`
}
type PodStatusJSON struct {
HostIP string `json:"HostIP"`
OK bool `json:"OK"`
PodIP string `json:"PodIP"`
Response ResponseJSON `json:"response"`
}
type ResponseJSON struct {
DNSResults map[string]interface{} `json:"dnsResults"`
PodResults map[string]PingResultJSON `json:"podResults"`
}
type PingResultJSON struct {
HostIP string `json:"HostIP"`
OK bool `json:"OK"`
PodIP string `json:"PodIP"`
PingTime time.Time `json:"PingTime"`
ResponseTimeMilliseconds int `json:"response-time-ms"`
StatusCode int `json:"status-code"`
}


@@ -0,0 +1,71 @@
// +build integration
package goldpinger
import "fmt"
type ClusterStats CheckAllJSON
func (c ClusterStats) AllPodsHealthy() bool {
if len(c.Responses) == 0 {
return false
}
for _, podStatus := range c.Responses {
if !podStatus.OK {
return false
}
}
return true
}
func (c ClusterStats) AllPingsHealthy() bool {
if !c.AllPodsHealthy() {
return false
}
for _, podStatus := range c.Responses {
for _, pingResult := range podStatus.Response.PodResults {
if !pingResult.OK {
return false
}
}
}
return true
}
type stringSet map[string]struct{}
func (set stringSet) add(s string) {
set[s] = struct{}{}
}
func (c ClusterStats) PrintStats() {
podCount := len(c.Hosts)
nodes := make(stringSet)
healthyPods := make(stringSet)
pingCount := 0
healthyPingCount := 0
for _, podStatus := range c.Responses {
nodes.add(podStatus.HostIP)
if podStatus.OK {
healthyPods.add(podStatus.PodIP)
}
for _, pingStatus := range podStatus.Response.PodResults {
pingCount++
if pingStatus.OK {
healthyPingCount++
}
}
}
format := "cluster stats - " +
"nodes in use: %d, " +
"pod count: %d, " +
"pod health percentage: %2.2f, " +
"ping health percentage: %2.2f\n"
podHealthPct := (float64(len(healthyPods)) / float64(podCount)) * 100
pingHealthPct := (float64(healthyPingCount) / float64(pingCount)) * 100
fmt.Printf(format, len(nodes), podCount, podHealthPct, pingHealthPct)
}


@@ -0,0 +1,213 @@
// +build integration
package k8s
import (
"context"
//"dnc/test/integration/goldpinger"
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/Azure/azure-container-networking/test/integration/goldpinger"
"github.com/Azure/azure-container-networking/test/integration/retry"
v1 "k8s.io/client-go/kubernetes/typed/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/homedir"
)
var (
defaultRetrier = retry.Retrier{Attempts: 15, Delay: time.Second}
kubeconfig = flag.String("test-kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "(optional) absolute path to the kubeconfig file")
delegatedSubnetID = flag.String("delegated-subnet-id", "", "delegated subnet id for node labeling")
delegatedSubnetName = flag.String("subnet-name", "", "subnet name for node labeling")
)
const (
subnetIDNodeLabelEnvVar = "DELEGATED_SUBNET_ID_NODE_LABEL"
subnetNameNodeLabelEnvVar = "SUBNET_NAME_NODE_LABEL"
)
func shouldLabelNodes() bool {
if *delegatedSubnetID == "" {
*delegatedSubnetID = os.Getenv(subnetIDNodeLabelEnvVar)
}
if *delegatedSubnetName == "" {
*delegatedSubnetName = os.Getenv(subnetNameNodeLabelEnvVar)
}
return *delegatedSubnetID != "" && *delegatedSubnetName != ""
}
/*
In order to run the tests below, you need a k8s cluster and its kubeconfig.
If no kubeconfig is passed, the test will attempt to find one in the default location for kubectl config.
The test will also attempt to label the nodes if the appropriate flags or environment variables are set.
Run the tests as follows (the integration build tag is required for these files to be compiled):
go test -v -tags integration . [-args -test-kubeconfig=<...> -delegated-subnet-id=<...> -subnet-name=<...>]
todo: consider adding the following scenarios
- [x] All pods should be assigned an IP.
- [ ] All pod IPs should belong to the delegated subnet and not overlap host subnet.
- [x] All pods should be able to ping each other.
- [ ] All pods should be able to ping nodes. Daemonset with hostnetworking?
- [ ] All pods should be able to reach public internet. Enable hosts to ping in goldpinger deployment.
- [x] All scenarios above should be valid during deployment scale up
- [x] All scenarios above should be valid during deployment scale down
- [ ] All scenarios above should be valid during node scale up (i.e more nodes == more NNCs)
- [ ] All scenarios above should be valid during node scale down
todo:
- Need hook for `az aks scale -g <resource group> -n <cluster name> --node-count <prev++> --nodepool-name <np name>`
- Need hook for pubsub client to verify that no secondary CAs are leaked
- Check CNS ipam pool?
- Check NNC in apiserver?
*/
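// Illustrative example, not part of the original change: instead of the flags above, the
// node-label values can be supplied through the environment variables read by
// shouldLabelNodes, e.g.
//
//   DELEGATED_SUBNET_ID_NODE_LABEL=<...> SUBNET_NAME_NODE_LABEL=<...> \
//     go test -v -tags integration .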
func TestPodScaling(t *testing.T) {
clientset := mustGetClientset(t)
restConfig := mustGetRestConfig(t)
deployment := mustParseDeployment(t, "testdata/goldpinger/deployment.yaml")
ctx := context.Background()
if shouldLabelNodes() {
mustLabelSwiftNodes(t, ctx, clientset, *delegatedSubnetID, *delegatedSubnetName)
} else {
t.Log("swift node labels not passed or set. skipping labeling")
}
rbacCleanUpFn := mustSetUpRBAC(t, ctx, clientset)
deploymentsClient := clientset.AppsV1().Deployments(deployment.Namespace)
mustCreateDeployment(t, ctx, deploymentsClient, deployment)
t.Cleanup(func() {
t.Log("cleaning up resources")
rbacCleanUpFn(t)
if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}
})
counts := []int{10, 20, 50, 10}
for _, c := range counts {
count := c
t.Run(fmt.Sprintf("replica count %d", count), func(t *testing.T) {
replicaCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
if err := updateReplicaCount(t, replicaCtx, deploymentsClient, deployment.Name, count); err != nil {
t.Fatalf("could not scale deployment: %v", err)
}
t.Run("all pods have IPs assigned", func(t *testing.T) {
podsClient := clientset.CoreV1().Pods(deployment.Namespace)
checkPodIPsFn := func() error {
podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"})
if err != nil {
return err
}
if len(podList.Items) == 0 {
return errors.New("no pods scheduled")
}
for _, pod := range podList.Items {
if pod.Status.Phase == apiv1.PodPending {
return errors.New("some pods still pending")
}
}
for _, pod := range podList.Items {
if pod.Status.PodIP == "" {
return errors.New("a pod has not been allocated an IP")
}
}
return nil
}
err := defaultRetrier.Do(ctx, checkPodIPsFn)
if err != nil {
t.Fatalf("not all pods were allocated IPs: %v", err)
}
t.Log("all pods have been allocated IPs")
})
t.Run("all pods can ping each other", func(t *testing.T) {
pf, err := NewPortForwarder(restConfig)
if err != nil {
t.Fatal(err)
}
portForwardCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
var streamHandle PortForwardStreamHandle
portForwardFn := func() error {
t.Log("attempting port forward")
handle, err := pf.Forward(ctx, "default", "app=goldpinger", 8080, 8080)
if err != nil {
return err
}
streamHandle = handle
return nil
}
if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil {
t.Fatalf("could not start port forward within 30s: %v", err)
}
defer streamHandle.Stop()
gpClient := goldpinger.Client{Host: streamHandle.Url()}
clusterCheckCtx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
clusterCheckFn := func() error {
clusterState, err := gpClient.CheckAll(clusterCheckCtx)
if err != nil {
return err
}
stats := goldpinger.ClusterStats(clusterState)
stats.PrintStats()
if stats.AllPingsHealthy() {
return nil
}
return errors.New("not all pings are healthy")
}
retrier := retry.Retrier{Attempts: 10, Delay: 5 * time.Second}
if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
t.Fatalf("cluster could not reach healthy state: %v", err)
}
t.Log("all pings successful!")
})
})
}
}
func updateReplicaCount(t *testing.T, ctx context.Context, deployments v1.DeploymentInterface, name string, replicas int) error {
return defaultRetrier.Do(ctx, func() error {
res, err := deployments.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return err
}
t.Logf("setting deployment %s to %d replicas", name, replicas)
res.Spec.Replicas = int32ptr(int32(replicas))
_, err = deployments.Update(ctx, res, metav1.UpdateOptions{})
return err
})
}

33
test/integration/label.go Normal file

@@ -0,0 +1,33 @@
// +build integration
package k8s
import (
"context"
"encoding/json"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
type mergePayload struct {
Metadata metadataMerge `json:"metadata"`
}
type metadataMerge struct {
Labels map[string]string `json:"labels"`
}
// AddNodeLabels adds or replaces labels on a node.
func AddNodeLabels(ctx context.Context, nodes corev1.NodeInterface, nodeName string, labels map[string]string) (*apiv1.Node, error) {
mergeData := mergePayload{Metadata: metadataMerge{Labels: labels}}
bs, err := json.Marshal(mergeData)
if err != nil {
return nil, err
}
return nodes.Patch(ctx, nodeName, types.MergePatchType, bs, metav1.PatchOptions{})
}
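// Illustrative usage sketch only; ctx, clientset, nodeName, and subnetID are assumed to
// exist in the caller, and the label key mirrors DelegatedSubnetIDLabel from the test
// helpers:
//
//   nodes := clientset.CoreV1().Nodes()
//   _, err := AddNodeLabels(ctx, nodes, nodeName, map[string]string{
//       "kubernetes.azure.com/podnetwork-delegationguid": subnetID,
//   })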


@@ -0,0 +1,108 @@
// +build integration
package k8s
import (
"context"
"fmt"
"io/ioutil"
"net/http"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/transport/spdy"
)
type PortForwarder struct {
clientset *kubernetes.Clientset
transport http.RoundTripper
upgrader spdy.Upgrader
}
type PortForwardStreamHandle struct {
url string
stopChan chan struct{}
}
func (p *PortForwardStreamHandle) Stop() {
p.stopChan <- struct{}{}
}
func (p *PortForwardStreamHandle) Url() string {
return p.url
}
func NewPortForwarder(restConfig *rest.Config) (*PortForwarder, error) {
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, fmt.Errorf("could not create clientset: %v", err)
}
transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil {
return nil, fmt.Errorf("could not create spdy roundtripper: %v", err)
}
return &PortForwarder{
clientset: clientset,
transport: transport,
upgrader: upgrader,
}, nil
}
// todo: can be made more flexible to allow a service to be specified
func (p *PortForwarder) Forward(ctx context.Context, namespace, labelSelector string, localPort, destPort int) (PortForwardStreamHandle, error) {
pods, err := p.clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
return PortForwardStreamHandle{}, fmt.Errorf("could not list pods in %q with label %q: %v", namespace, labelSelector, err)
}
if len(pods.Items) < 1 {
return PortForwardStreamHandle{}, fmt.Errorf("no pods found in %q with label %q", namespace, labelSelector)
}
podName := pods.Items[0].Name
portForwardURL := p.clientset.CoreV1().RESTClient().Post().
Resource("pods").
Namespace(namespace).
Name(podName).
SubResource("portforward").URL()
stopChan := make(chan struct{}, 1)
errChan := make(chan error, 1)
readyChan := make(chan struct{}, 1)
dialer := spdy.NewDialer(p.upgrader, &http.Client{Transport: p.transport}, http.MethodPost, portForwardURL)
ports := []string{fmt.Sprintf("%d:%d", localPort, destPort)}
pf, err := portforward.New(dialer, ports, stopChan, readyChan, ioutil.Discard, ioutil.Discard)
if err != nil {
return PortForwardStreamHandle{}, fmt.Errorf("could not create portforwarder: %v", err)
}
go func() {
errChan <- pf.ForwardPorts()
}()
var portForwardPort int
select {
case <-ctx.Done():
return PortForwardStreamHandle{}, ctx.Err()
case err := <-errChan:
return PortForwardStreamHandle{}, fmt.Errorf("portforward failed: %v", err)
case <-pf.Ready:
ports, err := pf.GetPorts()
if err != nil {
return PortForwardStreamHandle{}, fmt.Errorf("get portforward port: %v", err)
}
for _, port := range ports {
portForwardPort = int(port.Local)
break
}
if portForwardPort < 1 {
return PortForwardStreamHandle{}, fmt.Errorf("invalid port returned: %d", portForwardPort)
}
}
return PortForwardStreamHandle{
url: fmt.Sprintf("http://localhost:%d", portForwardPort),
stopChan: stopChan,
}, nil
}


@@ -0,0 +1,41 @@
// +build integration
//todo: there are more robust retry packages out there, discuss with team
package retry
import (
"context"
"time"
)
// a Retrier can attempt an operation multiple times, based on some thresholds
type Retrier struct {
Attempts int
Delay time.Duration
ExpBackoff bool
}
func (r Retrier) Do(ctx context.Context, f func() error) error {
done := make(chan struct{})
var err error
go func() {
defer func() { done <- struct{}{} }()
for i := 0; i < r.Attempts; i++ {
err = f()
if err == nil {
break
}
time.Sleep(r.Delay)
if r.ExpBackoff {
r.Delay *= 2
}
}
}()
select {
case <-ctx.Done():
return ctx.Err()
case <-done:
return err
}
}
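// Illustrative usage sketch only; op stands for any func() error supplied by the caller:
//
//   r := Retrier{Attempts: 3, Delay: time.Second, ExpBackoff: true}
//   if err := r.Do(ctx, op); err != nil {
//       // every attempt failed, or ctx was cancelled first
//   }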

12
test/integration/testdata/goldpinger/cluster-role-binding.yaml vendored Normal file

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: goldpinger-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: goldpinger-clusterrole
subjects:
- kind: ServiceAccount
name: goldpinger-serviceaccount
namespace: default

11
test/integration/testdata/goldpinger/cluster-role.yaml vendored Normal file

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: goldpinger-clusterrole
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- list

74
test/integration/testdata/goldpinger/deployment.yaml vendored Normal file

@@ -0,0 +1,74 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: goldpinger-deploy
namespace: default
spec:
replicas: 10
selector:
matchLabels:
app: goldpinger
template:
metadata:
labels:
app: goldpinger
spec:
serviceAccount: "goldpinger-serviceaccount"
securityContext:
runAsNonRoot: true
runAsUser: 1000
fsGroup: 2000
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- goldpinger
topologyKey: "kubernetes.io/hostname"
containers:
- name: goldpinger
env:
- name: HOST
value: "0.0.0.0"
- name: PORT
value: "8080"
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
# - name: HOSTS_TO_RESOLVE
# value: "1.1.1.1 8.8.8.8 www.bing.com"
image: "docker.io/bloomberg/goldpinger:v3.0.0"
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
resources:
limits:
memory: 80Mi
requests:
cpu: 1m
memory: 40Mi
ports:
- containerPort: 8080
name: http
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5

5
test/integration/testdata/goldpinger/service-account.yaml vendored Normal file

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: goldpinger-serviceaccount
namespace: default

15
test/integration/testdata/goldpinger/service.yaml vendored Normal file

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: goldpinger
namespace: default
labels:
app: goldpinger
spec:
type: NodePort
ports:
- port: 8080
nodePort: 30080
name: http
selector:
app: goldpinger


@@ -0,0 +1,182 @@
// +build integration
package k8s
import (
"context"
//crd "dnc/requestcontroller/kubernetes"
"os"
"testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes"
typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
typedrbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
DelegatedSubnetIDLabel = "kubernetes.azure.com/podnetwork-delegationguid"
SubnetNameLabel = "kubernetes.azure.com/podnetwork-subnet"
)
func mustGetClientset(t *testing.T) *kubernetes.Clientset {
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
t.Fatal(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
return clientset
}
func mustGetRestConfig(t *testing.T) *rest.Config {
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
t.Fatal(err)
}
return config
}
func mustParseResource(t *testing.T, path string, out interface{}) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer func() { _ = f.Close() }()
if err := yaml.NewYAMLOrJSONDecoder(f, 0).Decode(out); err != nil {
t.Fatal(err)
}
}
func mustParseDeployment(t *testing.T, path string) appsv1.Deployment {
var depl appsv1.Deployment
mustParseResource(t, path, &depl)
return depl
}
func mustParseServiceAccount(t *testing.T, path string) corev1.ServiceAccount {
var svcAcct corev1.ServiceAccount
mustParseResource(t, path, &svcAcct)
return svcAcct
}
func mustParseClusterRole(t *testing.T, path string) rbacv1.ClusterRole {
var cr rbacv1.ClusterRole
mustParseResource(t, path, &cr)
return cr
}
func mustParseClusterRoleBinding(t *testing.T, path string) rbacv1.ClusterRoleBinding {
var crb rbacv1.ClusterRoleBinding
mustParseResource(t, path, &crb)
return crb
}
func mustCreateDeployment(t *testing.T, ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) {
if err := deployments.Delete(ctx, d.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
t.Fatal(err)
}
}
if _, err := deployments.Create(ctx, &d, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}
func mustCreateServiceAccount(t *testing.T, ctx context.Context, svcAccounts typedcorev1.ServiceAccountInterface, s corev1.ServiceAccount) {
if err := svcAccounts.Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
t.Fatal(err)
}
}
if _, err := svcAccounts.Create(ctx, &s, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}
func mustCreateClusterRole(t *testing.T, ctx context.Context, clusterRoles typedrbacv1.ClusterRoleInterface, cr rbacv1.ClusterRole) {
if err := clusterRoles.Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
t.Fatal(err)
}
}
if _, err := clusterRoles.Create(ctx, &cr, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}
func mustCreateClusterRoleBinding(t *testing.T, ctx context.Context, crBindings typedrbacv1.ClusterRoleBindingInterface, crb rbacv1.ClusterRoleBinding) {
if err := crBindings.Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
t.Fatal(err)
}
}
if _, err := crBindings.Create(ctx, &crb, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}
func mustLabelSwiftNodes(t *testing.T, ctx context.Context, clientset *kubernetes.Clientset, delegatedSubnetID, delegatedSubnetName string) {
swiftNodeLabels := map[string]string{
DelegatedSubnetIDLabel: delegatedSubnetID,
SubnetNameLabel: delegatedSubnetName,
}
res, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("could not list nodes: %v", err)
}
for _, node := range res.Items {
_, err := AddNodeLabels(ctx, clientset.CoreV1().Nodes(), node.Name, swiftNodeLabels)
if err != nil {
t.Fatalf("could not add labels to node: %v", err)
}
t.Logf("labels added to node %s", node.Name)
}
}
func mustSetUpRBAC(t *testing.T, ctx context.Context, clientset *kubernetes.Clientset) (cleanUpFunc func(t *testing.T)) {
clusterRole := mustParseClusterRole(t, "testdata/goldpinger/cluster-role.yaml")
clusterRoleBinding := mustParseClusterRoleBinding(t, "testdata/goldpinger/cluster-role-binding.yaml")
serviceAccount := mustParseServiceAccount(t, "testdata/goldpinger/service-account.yaml")
clusterRoles := clientset.RbacV1().ClusterRoles()
clusterRoleBindings := clientset.RbacV1().ClusterRoleBindings()
serviceAccounts := clientset.CoreV1().ServiceAccounts(serviceAccount.Namespace)
mustCreateServiceAccount(t, ctx, serviceAccounts, serviceAccount)
mustCreateClusterRole(t, ctx, clusterRoles, clusterRole)
mustCreateClusterRoleBinding(t, ctx, clusterRoleBindings, clusterRoleBinding)
t.Log("rbac set up")
return func(t *testing.T) {
t.Log("cleaning up rbac")
if err := serviceAccounts.Delete(ctx, serviceAccount.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}
if err := clusterRoleBindings.Delete(ctx, clusterRoleBinding.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}
if err := clusterRoles.Delete(ctx, clusterRole.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}
t.Log("rbac cleaned up")
}
}
func int32ptr(i int32) *int32 { return &i }

13
vendor/github.com/docker/spdystream/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,13 @@
# Contributing to SpdyStream
Want to hack on spdystream? Awesome! Here are instructions to get you
started.
SpdyStream is a part of the [Docker](https://docker.io) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read
[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
Happy hacking!

191
vendor/github.com/docker/spdystream/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

425
vendor/github.com/docker/spdystream/LICENSE.docs generated vendored Normal file

@@ -0,0 +1,425 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public licenses.
Notwithstanding, Creative Commons may elect to apply one of its public
licenses to material it publishes and in those instances will be
considered the "Licensor." Except for the limited purpose of indicating
that material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.

28
vendor/github.com/docker/spdystream/MAINTAINERS generated vendored Normal file

@@ -0,0 +1,28 @@
# Spdystream maintainers file
#
# This file describes who runs the docker/spdystream project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"dmcgowan",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@docker.com"
GitHub = "dmcgowan"

77
vendor/github.com/docker/spdystream/README.md generated vendored Normal file

@@ -0,0 +1,77 @@
# SpdyStream
A multiplexed stream library using spdy
## Usage
Client example (connecting to mirroring server without auth)
```go
package main
import (
"fmt"
"github.com/docker/spdystream"
"net"
"net/http"
)
func main() {
conn, err := net.Dial("tcp", "localhost:8080")
if err != nil {
panic(err)
}
spdyConn, err := spdystream.NewConnection(conn, false)
if err != nil {
panic(err)
}
go spdyConn.Serve(spdystream.NoOpStreamHandler)
stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
if err != nil {
panic(err)
}
stream.Wait()
fmt.Fprint(stream, "Writing to stream")
buf := make([]byte, 25)
stream.Read(buf)
fmt.Println(string(buf))
stream.Close()
}
```
Server example (mirroring server without auth)
```go
package main
import (
"github.com/docker/spdystream"
"net"
)
func main() {
listener, err := net.Listen("tcp", "localhost:8080")
if err != nil {
panic(err)
}
for {
conn, err := listener.Accept()
if err != nil {
panic(err)
}
spdyConn, err := spdystream.NewConnection(conn, true)
if err != nil {
panic(err)
}
go spdyConn.Serve(spdystream.MirrorStreamHandler)
}
}
```
## Copyright and license
Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.

958
vendor/github.com/docker/spdystream/connection.go generated vendored Normal file

@@ -0,0 +1,958 @@
package spdystream
import (
"errors"
"fmt"
"io"
"net"
"net/http"
"sync"
"time"
"github.com/docker/spdystream/spdy"
)
var (
ErrInvalidStreamId = errors.New("Invalid stream id")
ErrTimeout = errors.New("Timeout occured")
ErrReset = errors.New("Stream reset")
ErrWriteClosedStream = errors.New("Write on closed stream")
)
const (
FRAME_WORKERS = 5
QUEUE_SIZE = 50
)
type StreamHandler func(stream *Stream)
type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
type idleAwareFramer struct {
f *spdy.Framer
conn *Connection
writeLock sync.Mutex
resetChan chan struct{}
setTimeoutLock sync.Mutex
setTimeoutChan chan time.Duration
timeout time.Duration
}
func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
iaf := &idleAwareFramer{
f: framer,
resetChan: make(chan struct{}, 2),
// setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
// the same time the connection is being closed
setTimeoutChan: make(chan time.Duration, 1),
}
return iaf
}
func (i *idleAwareFramer) monitor() {
var (
timer *time.Timer
expired <-chan time.Time
resetChan = i.resetChan
setTimeoutChan = i.setTimeoutChan
)
Loop:
for {
select {
case timeout := <-i.setTimeoutChan:
i.timeout = timeout
if timeout == 0 {
if timer != nil {
timer.Stop()
}
} else {
if timer == nil {
timer = time.NewTimer(timeout)
expired = timer.C
} else {
timer.Reset(timeout)
}
}
case <-resetChan:
if timer != nil && i.timeout > 0 {
timer.Reset(i.timeout)
}
case <-expired:
i.conn.streamCond.L.Lock()
streams := i.conn.streams
i.conn.streams = make(map[spdy.StreamId]*Stream)
i.conn.streamCond.Broadcast()
i.conn.streamCond.L.Unlock()
go func() {
for _, stream := range streams {
stream.resetStream()
}
i.conn.Close()
}()
case <-i.conn.closeChan:
if timer != nil {
timer.Stop()
}
// Start a goroutine to drain resetChan. This is needed because we've seen
// some unit tests with large numbers of goroutines get into a situation
// where resetChan fills up, at least 1 call to Write() is still trying to
// send to resetChan, the connection gets closed, and this case statement
// attempts to grab the write lock that Write() already has, causing a
// deadlock.
//
// See https://github.com/docker/spdystream/issues/49 for more details.
go func() {
for _ = range resetChan {
}
}()
go func() {
for _ = range setTimeoutChan {
}
}()
i.writeLock.Lock()
close(resetChan)
i.resetChan = nil
i.writeLock.Unlock()
i.setTimeoutLock.Lock()
close(i.setTimeoutChan)
i.setTimeoutChan = nil
i.setTimeoutLock.Unlock()
break Loop
}
}
// Drain resetChan
for _ = range resetChan {
}
}
func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
i.writeLock.Lock()
defer i.writeLock.Unlock()
if i.resetChan == nil {
return io.EOF
}
err := i.f.WriteFrame(frame)
if err != nil {
return err
}
i.resetChan <- struct{}{}
return nil
}
func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
frame, err := i.f.ReadFrame()
if err != nil {
return nil, err
}
// resetChan should never be closed since it is only closed
// when the connection has closed its closeChan. This closure
// only occurs after all Reads have finished
// TODO (dmcgowan): refactor relationship into connection
i.resetChan <- struct{}{}
return frame, nil
}
func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
i.setTimeoutLock.Lock()
defer i.setTimeoutLock.Unlock()
if i.setTimeoutChan == nil {
return
}
i.setTimeoutChan <- timeout
}
type Connection struct {
conn net.Conn
framer *idleAwareFramer
closeChan chan bool
goneAway bool
lastStreamChan chan<- *Stream
goAwayTimeout time.Duration
closeTimeout time.Duration
streamLock *sync.RWMutex
streamCond *sync.Cond
streams map[spdy.StreamId]*Stream
nextIdLock sync.Mutex
receiveIdLock sync.Mutex
nextStreamId spdy.StreamId
receivedStreamId spdy.StreamId
pingIdLock sync.Mutex
pingId uint32
pingChans map[uint32]chan error
shutdownLock sync.Mutex
shutdownChan chan error
hasShutdown bool
// for testing https://github.com/docker/spdystream/pull/56
dataFrameHandler func(*spdy.DataFrame) error
}
// NewConnection creates a new spdy connection from an existing
// network connection.
func NewConnection(conn net.Conn, server bool) (*Connection, error) {
framer, framerErr := spdy.NewFramer(conn, conn)
if framerErr != nil {
return nil, framerErr
}
idleAwareFramer := newIdleAwareFramer(framer)
var sid spdy.StreamId
var rid spdy.StreamId
var pid uint32
if server {
sid = 2
rid = 1
pid = 2
} else {
sid = 1
rid = 2
pid = 1
}
streamLock := new(sync.RWMutex)
streamCond := sync.NewCond(streamLock)
session := &Connection{
conn: conn,
framer: idleAwareFramer,
closeChan: make(chan bool),
goAwayTimeout: time.Duration(0),
closeTimeout: time.Duration(0),
streamLock: streamLock,
streamCond: streamCond,
streams: make(map[spdy.StreamId]*Stream),
nextStreamId: sid,
receivedStreamId: rid,
pingId: pid,
pingChans: make(map[uint32]chan error),
shutdownChan: make(chan error),
}
session.dataFrameHandler = session.handleDataFrame
idleAwareFramer.conn = session
go idleAwareFramer.monitor()
return session, nil
}
// Ping sends a ping frame across the connection and
// returns the response time
func (s *Connection) Ping() (time.Duration, error) {
pid := s.pingId
s.pingIdLock.Lock()
if s.pingId > 0x7ffffffe {
s.pingId = s.pingId - 0x7ffffffe
} else {
s.pingId = s.pingId + 2
}
s.pingIdLock.Unlock()
pingChan := make(chan error)
s.pingChans[pid] = pingChan
defer delete(s.pingChans, pid)
frame := &spdy.PingFrame{Id: pid}
startTime := time.Now()
writeErr := s.framer.WriteFrame(frame)
if writeErr != nil {
return time.Duration(0), writeErr
}
select {
case <-s.closeChan:
return time.Duration(0), errors.New("connection closed")
case err, ok := <-pingChan:
if ok && err != nil {
return time.Duration(0), err
}
break
}
return time.Now().Sub(startTime), nil
}
// Serve handles frames sent from the remote side, including reply frames
// which are needed to fully initiate connections. Both clients and servers
// should call Serve in a separate goroutine before creating streams.
func (s *Connection) Serve(newHandler StreamHandler) {
// use a WaitGroup to wait for all frames to be drained after receiving
// go-away.
var wg sync.WaitGroup
	// Partition queues to ensure stream frames are handled
// by the same worker, ensuring order is maintained
frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
for i := 0; i < FRAME_WORKERS; i++ {
frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
// Ensure frame queue is drained when connection is closed
go func(frameQueue *PriorityFrameQueue) {
<-s.closeChan
frameQueue.Drain()
}(frameQueues[i])
wg.Add(1)
go func(frameQueue *PriorityFrameQueue) {
// let the WaitGroup know this worker is done
defer wg.Done()
s.frameHandler(frameQueue, newHandler)
}(frameQueues[i])
}
var (
partitionRoundRobin int
goAwayFrame *spdy.GoAwayFrame
)
Loop:
for {
readFrame, err := s.framer.ReadFrame()
if err != nil {
if err != io.EOF {
fmt.Errorf("frame read error: %s", err)
} else {
debugMessage("(%p) EOF received", s)
}
break
}
var priority uint8
var partition int
switch frame := readFrame.(type) {
case *spdy.SynStreamFrame:
if s.checkStreamFrame(frame) {
priority = frame.Priority
partition = int(frame.StreamId % FRAME_WORKERS)
debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
s.addStreamFrame(frame)
} else {
debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
continue
}
case *spdy.SynReplyFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.DataFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.RstStreamFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.HeadersFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.PingFrame:
priority = 0
partition = partitionRoundRobin
partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
case *spdy.GoAwayFrame:
// hold on to the go away frame and exit the loop
goAwayFrame = frame
break Loop
default:
priority = 7
partition = partitionRoundRobin
partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
}
frameQueues[partition].Push(readFrame, priority)
}
close(s.closeChan)
// wait for all frame handler workers to indicate they've drained their queues
// before handling the go away frame
wg.Wait()
if goAwayFrame != nil {
s.handleGoAwayFrame(goAwayFrame)
}
// now it's safe to close remote channels and empty s.streams
s.streamCond.L.Lock()
// notify streams that they're now closed, which will
// unblock any stream Read() calls
for _, stream := range s.streams {
stream.closeRemoteChannels()
}
s.streams = make(map[spdy.StreamId]*Stream)
s.streamCond.Broadcast()
s.streamCond.L.Unlock()
}
func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
for {
popFrame := frameQueue.Pop()
if popFrame == nil {
return
}
var frameErr error
switch frame := popFrame.(type) {
case *spdy.SynStreamFrame:
frameErr = s.handleStreamFrame(frame, newHandler)
case *spdy.SynReplyFrame:
frameErr = s.handleReplyFrame(frame)
case *spdy.DataFrame:
frameErr = s.dataFrameHandler(frame)
case *spdy.RstStreamFrame:
frameErr = s.handleResetFrame(frame)
case *spdy.HeadersFrame:
frameErr = s.handleHeaderFrame(frame)
case *spdy.PingFrame:
frameErr = s.handlePingFrame(frame)
case *spdy.GoAwayFrame:
frameErr = s.handleGoAwayFrame(frame)
default:
frameErr = fmt.Errorf("unhandled frame type: %T", frame)
}
if frameErr != nil {
fmt.Errorf("frame handling error: %s", frameErr)
}
}
}
func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
stream, streamOk := s.getStream(streamId)
if !streamOk {
return 7
}
return stream.priority
}
func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
var parent *Stream
if frame.AssociatedToStreamId != spdy.StreamId(0) {
parent, _ = s.getStream(frame.AssociatedToStreamId)
}
stream := &Stream{
streamId: frame.StreamId,
parent: parent,
conn: s,
startChan: make(chan error),
headers: frame.Headers,
finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
replyCond: sync.NewCond(new(sync.Mutex)),
dataChan: make(chan []byte),
headerChan: make(chan http.Header),
closeChan: make(chan bool),
}
if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
stream.closeRemoteChannels()
}
s.addStream(stream)
}
// checkStreamFrame checks to see if a stream frame is allowed.
// If the stream is invalid, a reset frame with protocol error
// is sent and false is returned.
func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
s.receiveIdLock.Lock()
defer s.receiveIdLock.Unlock()
if s.goneAway {
return false
}
validationErr := s.validateStreamId(frame.StreamId)
if validationErr != nil {
go func() {
resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
if resetErr != nil {
fmt.Errorf("reset error: %s", resetErr)
}
}()
return false
}
return true
}
func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
stream, ok := s.getStream(frame.StreamId)
if !ok {
return fmt.Errorf("Missing stream: %d", frame.StreamId)
}
newHandler(stream)
return nil
}
func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
debugMessage("Reply frame gone away for %d", frame.StreamId)
// Stream has already gone away
return nil
}
if stream.replied {
// Stream has already received reply
return nil
}
stream.replied = true
// TODO Check for error
if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
s.remoteStreamFinish(stream)
}
close(stream.startChan)
return nil
}
func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
// Stream has already been removed
return nil
}
s.removeStream(stream)
stream.closeRemoteChannels()
if !stream.replied {
stream.replied = true
stream.startChan <- ErrReset
close(stream.startChan)
}
stream.finishLock.Lock()
stream.finished = true
stream.finishLock.Unlock()
return nil
}
func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
// Stream has already gone away
return nil
}
if !stream.replied {
// No reply received...Protocol error?
return nil
}
// TODO limit headers while not blocking (use buffered chan or goroutine?)
select {
case <-stream.closeChan:
return nil
case stream.headerChan <- frame.Headers:
}
if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
s.remoteStreamFinish(stream)
}
return nil
}
func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
// Stream has already gone away
return nil
}
if !stream.replied {
debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
// No reply received...Protocol error?
return nil
}
debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
if len(frame.Data) > 0 {
stream.dataLock.RLock()
select {
case <-stream.closeChan:
debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
case stream.dataChan <- frame.Data:
debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
}
stream.dataLock.RUnlock()
}
if (frame.Flags & spdy.DataFlagFin) != 0x00 {
s.remoteStreamFinish(stream)
}
return nil
}
func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
if s.pingId&0x01 != frame.Id&0x01 {
return s.framer.WriteFrame(frame)
}
pingChan, pingOk := s.pingChans[frame.Id]
if pingOk {
close(pingChan)
}
return nil
}
func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
debugMessage("(%p) Go away received", s)
s.receiveIdLock.Lock()
if s.goneAway {
s.receiveIdLock.Unlock()
return nil
}
s.goneAway = true
s.receiveIdLock.Unlock()
if s.lastStreamChan != nil {
stream, _ := s.getStream(frame.LastGoodStreamId)
go func() {
s.lastStreamChan <- stream
}()
}
// Do not block frame handler waiting for closure
go s.shutdown(s.goAwayTimeout)
return nil
}
func (s *Connection) remoteStreamFinish(stream *Stream) {
stream.closeRemoteChannels()
stream.finishLock.Lock()
if stream.finished {
// Stream is fully closed, cleanup
s.removeStream(stream)
}
stream.finishLock.Unlock()
}
// CreateStream creates a new spdy stream using the parameters for
// creating the stream frame. The stream frame will be sent upon
// calling this function, however this function does not wait for
// the reply frame. If waiting for the reply is desired, use
// the stream Wait or WaitTimeout function on the stream returned
// by this function.
func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
// MUST synchronize stream creation (all the way to writing the frame)
// as stream IDs **MUST** increase monotonically.
s.nextIdLock.Lock()
defer s.nextIdLock.Unlock()
streamId := s.getNextStreamId()
if streamId == 0 {
return nil, fmt.Errorf("Unable to get new stream id")
}
stream := &Stream{
streamId: streamId,
parent: parent,
conn: s,
startChan: make(chan error),
headers: headers,
dataChan: make(chan []byte),
headerChan: make(chan http.Header),
closeChan: make(chan bool),
}
debugMessage("(%p) (%p) Create stream", s, stream)
s.addStream(stream)
return stream, s.sendStream(stream, fin)
}
func (s *Connection) shutdown(closeTimeout time.Duration) {
// TODO Ensure this isn't called multiple times
s.shutdownLock.Lock()
if s.hasShutdown {
s.shutdownLock.Unlock()
return
}
s.hasShutdown = true
s.shutdownLock.Unlock()
var timeout <-chan time.Time
if closeTimeout > time.Duration(0) {
timeout = time.After(closeTimeout)
}
streamsClosed := make(chan bool)
go func() {
s.streamCond.L.Lock()
for len(s.streams) > 0 {
debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
s.streamCond.Wait()
}
s.streamCond.L.Unlock()
close(streamsClosed)
}()
var err error
select {
case <-streamsClosed:
// No active streams, close should be safe
err = s.conn.Close()
case <-timeout:
// Force ungraceful close
err = s.conn.Close()
// Wait for cleanup to clear active streams
<-streamsClosed
}
if err != nil {
duration := 10 * time.Minute
time.AfterFunc(duration, func() {
select {
case err, ok := <-s.shutdownChan:
if ok {
fmt.Errorf("Unhandled close error after %s: %s", duration, err)
}
default:
}
})
s.shutdownChan <- err
}
close(s.shutdownChan)
return
}
// Close closes the spdy connection by sending a GoAway frame and initiating shutdown
func (s *Connection) Close() error {
s.receiveIdLock.Lock()
if s.goneAway {
s.receiveIdLock.Unlock()
return nil
}
s.goneAway = true
s.receiveIdLock.Unlock()
var lastStreamId spdy.StreamId
if s.receivedStreamId > 2 {
lastStreamId = s.receivedStreamId - 2
}
goAwayFrame := &spdy.GoAwayFrame{
LastGoodStreamId: lastStreamId,
Status: spdy.GoAwayOK,
}
err := s.framer.WriteFrame(goAwayFrame)
if err != nil {
return err
}
go s.shutdown(s.closeTimeout)
return nil
}
// CloseWait closes the connection and waits for shutdown
// to finish. Note the underlying network Connection
// is not closed until the end of shutdown.
func (s *Connection) CloseWait() error {
closeErr := s.Close()
if closeErr != nil {
return closeErr
}
shutdownErr, ok := <-s.shutdownChan
if ok {
return shutdownErr
}
return nil
}
// Wait waits for the connection to finish shutdown or for
// the wait timeout duration to expire. This needs to be
// called either after Close has been called or a GOAWAY frame
// has been received. If the wait timeout is 0, this function
// will block until shutdown finishes. If Wait is never called
// and a shutdown error occurs, that error will be logged as an
// unhandled error.
func (s *Connection) Wait(waitTimeout time.Duration) error {
var timeout <-chan time.Time
if waitTimeout > time.Duration(0) {
timeout = time.After(waitTimeout)
}
select {
case err, ok := <-s.shutdownChan:
if ok {
return err
}
case <-timeout:
return ErrTimeout
}
return nil
}
// NotifyClose registers a channel to be notified when the remote
// peer indicates connection closure. The last stream to be
// received by the remote will be sent on the channel. The notify
// timeout determines the duration between the go away being received
// and the connection being closed.
func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
s.goAwayTimeout = timeout
s.lastStreamChan = c
}
// SetCloseTimeout sets the amount of time close will wait for
// streams to finish before terminating the underlying network
// connection. Setting the timeout to 0 will cause close to
// wait forever, which is the default.
func (s *Connection) SetCloseTimeout(timeout time.Duration) {
s.closeTimeout = timeout
}
// SetIdleTimeout sets the amount of time the connection may sit idle before
// it is forcefully terminated.
func (s *Connection) SetIdleTimeout(timeout time.Duration) {
s.framer.setIdleTimeout(timeout)
}
func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
var flags spdy.ControlFlags
if fin {
flags = spdy.ControlFlagFin
}
headerFrame := &spdy.HeadersFrame{
StreamId: stream.streamId,
Headers: headers,
CFHeader: spdy.ControlFrameHeader{Flags: flags},
}
return s.framer.WriteFrame(headerFrame)
}
func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
var flags spdy.ControlFlags
if fin {
flags = spdy.ControlFlagFin
}
replyFrame := &spdy.SynReplyFrame{
StreamId: stream.streamId,
Headers: headers,
CFHeader: spdy.ControlFrameHeader{Flags: flags},
}
return s.framer.WriteFrame(replyFrame)
}
func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
resetFrame := &spdy.RstStreamFrame{
StreamId: streamId,
Status: status,
}
return s.framer.WriteFrame(resetFrame)
}
func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
return s.sendResetFrame(status, stream.streamId)
}
func (s *Connection) sendStream(stream *Stream, fin bool) error {
var flags spdy.ControlFlags
if fin {
flags = spdy.ControlFlagFin
stream.finished = true
}
var parentId spdy.StreamId
if stream.parent != nil {
parentId = stream.parent.streamId
}
streamFrame := &spdy.SynStreamFrame{
StreamId: spdy.StreamId(stream.streamId),
AssociatedToStreamId: spdy.StreamId(parentId),
Headers: stream.headers,
CFHeader: spdy.ControlFrameHeader{Flags: flags},
}
return s.framer.WriteFrame(streamFrame)
}
// getNextStreamId returns the next sequential id;
// every call should produce a unique value or zero once the id space is exhausted
func (s *Connection) getNextStreamId() spdy.StreamId {
sid := s.nextStreamId
if sid > 0x7fffffff {
return 0
}
s.nextStreamId = s.nextStreamId + 2
return sid
}
// PeekNextStreamId returns the next sequential id and keeps the next id untouched
func (s *Connection) PeekNextStreamId() spdy.StreamId {
sid := s.nextStreamId
return sid
}
func (s *Connection) validateStreamId(rid spdy.StreamId) error {
if rid > 0x7fffffff || rid < s.receivedStreamId {
return ErrInvalidStreamId
}
s.receivedStreamId = rid + 2
return nil
}
func (s *Connection) addStream(stream *Stream) {
s.streamCond.L.Lock()
s.streams[stream.streamId] = stream
debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
s.streamCond.Broadcast()
s.streamCond.L.Unlock()
}
func (s *Connection) removeStream(stream *Stream) {
s.streamCond.L.Lock()
delete(s.streams, stream.streamId)
debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
s.streamCond.Broadcast()
s.streamCond.L.Unlock()
}
func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
s.streamLock.RLock()
stream, ok = s.streams[streamId]
s.streamLock.RUnlock()
return
}
// FindStream looks up the given stream id and either waits for the
// stream to be found or returns nil if the stream id is no longer
// valid.
func (s *Connection) FindStream(streamId uint32) *Stream {
var stream *Stream
var ok bool
s.streamCond.L.Lock()
stream, ok = s.streams[spdy.StreamId(streamId)]
debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
for !ok && streamId >= uint32(s.receivedStreamId) {
s.streamCond.Wait()
stream, ok = s.streams[spdy.StreamId(streamId)]
}
s.streamCond.L.Unlock()
return stream
}
func (s *Connection) CloseChan() <-chan bool {
return s.closeChan
}
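
`NotifyClose`, `Wait`, and `CloseChan` above are the hooks for reacting to a remote GOAWAY. The fragment below is a speculative sketch of wiring them around an accepted connection; the `handleConn` helper and the package name are hypothetical, not part of the vendored source.

```go
package spdyutil // hypothetical package for this sketch

import (
	"fmt"
	"net"
	"time"

	"github.com/docker/spdystream"
)

// handleConn serves one accepted connection and reports how it ended.
func handleConn(conn net.Conn) error {
	spdyConn, err := spdystream.NewConnection(conn, true)
	if err != nil {
		return err
	}
	go spdyConn.Serve(spdystream.MirrorStreamHandler)

	// lastStream receives the last stream accepted by the peer once a
	// GOAWAY frame arrives; the timeout bounds the goaway-to-close window.
	lastStream := make(chan *spdystream.Stream, 1)
	spdyConn.NotifyClose(lastStream, 10*time.Second)

	select {
	case s := <-lastStream:
		fmt.Println("peer sent GOAWAY; last accepted stream:", s)
	case <-spdyConn.CloseChan():
		fmt.Println("connection closed without a GOAWAY notification")
	}

	// Wait for shutdown to finish; ErrTimeout is returned after 30 seconds.
	return spdyConn.Wait(30 * time.Second)
}
```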

38
vendor/github.com/docker/spdystream/handlers.go generated vendored Normal file
@ -0,0 +1,38 @@
package spdystream
import (
"io"
"net/http"
)
// MirrorStreamHandler mirrors all streams.
func MirrorStreamHandler(stream *Stream) {
replyErr := stream.SendReply(http.Header{}, false)
if replyErr != nil {
return
}
go func() {
io.Copy(stream, stream)
stream.Close()
}()
go func() {
for {
header, receiveErr := stream.ReceiveHeader()
if receiveErr != nil {
return
}
sendErr := stream.SendHeader(header, false)
if sendErr != nil {
return
}
}
}()
}
// NoOpStreamHandler does nothing when a stream connects; it is most
// likely used with RejectAuthHandler, which will not allow any
// streams to make it to the stream handler.
func NoOpStreamHandler(stream *Stream) {
stream.SendReply(http.Header{}, false)
}
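
A custom `StreamHandler` follows the same shape as `MirrorStreamHandler`: reply to the stream, then consume its frames. The sketch below is not upstream code (the handler name and listen address are made up); it echoes every data frame back upper-cased and is passed to `Connection.Serve` exactly like the handlers above.

```go
package main

import (
	"bytes"
	"net"
	"net/http"

	"github.com/docker/spdystream"
)

// upperEchoStreamHandler replies to each new stream and echoes its data
// frames back upper-cased until the peer finishes the stream.
func upperEchoStreamHandler(stream *spdystream.Stream) {
	if err := stream.SendReply(http.Header{}, false); err != nil {
		return
	}
	go func() {
		defer stream.Close()
		for {
			data, err := stream.ReadData()
			if err != nil { // io.EOF once the remote side is done
				return
			}
			if err := stream.WriteData(bytes.ToUpper(data), false); err != nil {
				return
			}
		}
	}()
}

func main() {
	listener, err := net.Listen("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		spdyConn, err := spdystream.NewConnection(conn, true)
		if err != nil {
			panic(err)
		}
		go spdyConn.Serve(upperEchoStreamHandler)
	}
}
```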

98
vendor/github.com/docker/spdystream/priority.go generated vendored Normal file
@ -0,0 +1,98 @@
package spdystream
import (
"container/heap"
"sync"
"github.com/docker/spdystream/spdy"
)
type prioritizedFrame struct {
frame spdy.Frame
priority uint8
insertId uint64
}
type frameQueue []*prioritizedFrame
func (fq frameQueue) Len() int {
return len(fq)
}
func (fq frameQueue) Less(i, j int) bool {
if fq[i].priority == fq[j].priority {
return fq[i].insertId < fq[j].insertId
}
return fq[i].priority < fq[j].priority
}
func (fq frameQueue) Swap(i, j int) {
fq[i], fq[j] = fq[j], fq[i]
}
func (fq *frameQueue) Push(x interface{}) {
*fq = append(*fq, x.(*prioritizedFrame))
}
func (fq *frameQueue) Pop() interface{} {
old := *fq
n := len(old)
*fq = old[0 : n-1]
return old[n-1]
}
type PriorityFrameQueue struct {
queue *frameQueue
c *sync.Cond
size int
nextInsertId uint64
drain bool
}
func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
queue := make(frameQueue, 0, size)
heap.Init(&queue)
return &PriorityFrameQueue{
queue: &queue,
size: size,
c: sync.NewCond(&sync.Mutex{}),
}
}
func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
q.c.L.Lock()
defer q.c.L.Unlock()
for q.queue.Len() >= q.size {
q.c.Wait()
}
pFrame := &prioritizedFrame{
frame: frame,
priority: priority,
insertId: q.nextInsertId,
}
q.nextInsertId = q.nextInsertId + 1
heap.Push(q.queue, pFrame)
q.c.Signal()
}
func (q *PriorityFrameQueue) Pop() spdy.Frame {
q.c.L.Lock()
defer q.c.L.Unlock()
for q.queue.Len() == 0 {
if q.drain {
return nil
}
q.c.Wait()
}
frame := heap.Pop(q.queue).(*prioritizedFrame).frame
q.c.Signal()
return frame
}
func (q *PriorityFrameQueue) Drain() {
q.c.L.Lock()
defer q.c.L.Unlock()
q.drain = true
q.c.Broadcast()
}
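
`PriorityFrameQueue` can be exercised on its own to see the ordering rules: lower priority values pop first, ties keep FIFO order, and `Drain` turns a blocking `Pop` on an empty queue into a nil return. A standalone sketch, not upstream code:

```go
package main

import (
	"fmt"

	"github.com/docker/spdystream"
	"github.com/docker/spdystream/spdy"
)

func main() {
	q := spdystream.NewPriorityFrameQueue(8)

	// Lower priority values are served first; equal priorities keep FIFO order.
	q.Push(&spdy.PingFrame{Id: 2}, 7)
	q.Push(&spdy.PingFrame{Id: 4}, 0)
	q.Push(&spdy.PingFrame{Id: 6}, 7)

	for i := 0; i < 3; i++ {
		ping := q.Pop().(*spdy.PingFrame)
		fmt.Println("popped ping", ping.Id) // 4, then 2, then 6
	}

	// After Drain, Pop on an empty queue returns nil instead of blocking.
	q.Drain()
	fmt.Println(q.Pop() == nil) // true
}
```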

187
vendor/github.com/docker/spdystream/spdy/dictionary.go generated vendored Normal file
@ -0,0 +1,187 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package spdy
// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
var headerDictionary = []byte{
0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
}

348
vendor/github.com/docker/spdystream/spdy/read.go generated vendored Normal file
@ -0,0 +1,348 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package spdy
import (
"compress/zlib"
"encoding/binary"
"io"
"net/http"
"strings"
)
func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
return f.readSynStreamFrame(h, frame)
}
func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
return f.readSynReplyFrame(h, frame)
}
func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
return err
}
if frame.Status == 0 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
var numSettings uint32
if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
return err
}
frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
for i := uint32(0); i < numSettings; i++ {
if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
return err
}
frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
frame.FlagIdValues[i].Id &= 0xffffff
if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
return err
}
}
return nil
}
func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
return err
}
if frame.Id == 0 {
return &Error{ZeroStreamId, 0}
}
if frame.CFHeader.Flags != 0 {
return &Error{InvalidControlFrame, StreamId(frame.Id)}
}
return nil
}
func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
return err
}
if frame.CFHeader.Flags != 0 {
return &Error{InvalidControlFrame, frame.LastGoodStreamId}
}
if frame.CFHeader.length != 8 {
return &Error{InvalidControlFrame, frame.LastGoodStreamId}
}
if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
return err
}
return nil
}
func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
return f.readHeadersFrame(h, frame)
}
func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
if frame.CFHeader.Flags != 0 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if frame.CFHeader.length != 8 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
return err
}
return nil
}
func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
ctor, ok := cframeCtor[frameType]
if !ok {
return nil, &Error{Err: InvalidControlFrame}
}
return ctor(), nil
}
var cframeCtor = map[ControlFrameType]func() controlFrame{
TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
TypeSettings: func() controlFrame { return new(SettingsFrame) },
TypePing: func() controlFrame { return new(PingFrame) },
TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
TypeHeaders: func() controlFrame { return new(HeadersFrame) },
TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
}
func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
if f.headerDecompressor != nil {
f.headerReader.N = payloadSize
return nil
}
f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
if err != nil {
return err
}
f.headerDecompressor = decompressor
return nil
}
// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
func (f *Framer) ReadFrame() (Frame, error) {
var firstWord uint32
if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
return nil, err
}
if firstWord&0x80000000 != 0 {
frameType := ControlFrameType(firstWord & 0xffff)
version := uint16(firstWord >> 16 & 0x7fff)
return f.parseControlFrame(version, frameType)
}
return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
}
func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
var length uint32
if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
return nil, err
}
flags := ControlFlags((length & 0xff000000) >> 24)
length &= 0xffffff
header := ControlFrameHeader{version, frameType, flags, length}
cframe, err := newControlFrame(frameType)
if err != nil {
return nil, err
}
if err = cframe.read(header, f); err != nil {
return nil, err
}
return cframe, nil
}
func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
var numHeaders uint32
if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
return nil, err
}
var e error
h := make(http.Header, int(numHeaders))
for i := 0; i < int(numHeaders); i++ {
var length uint32
if err := binary.Read(r, binary.BigEndian, &length); err != nil {
return nil, err
}
nameBytes := make([]byte, length)
if _, err := io.ReadFull(r, nameBytes); err != nil {
return nil, err
}
name := string(nameBytes)
if name != strings.ToLower(name) {
e = &Error{UnlowercasedHeaderName, streamId}
name = strings.ToLower(name)
}
if h[name] != nil {
e = &Error{DuplicateHeaders, streamId}
}
if err := binary.Read(r, binary.BigEndian, &length); err != nil {
return nil, err
}
value := make([]byte, length)
if _, err := io.ReadFull(r, value); err != nil {
return nil, err
}
valueList := strings.Split(string(value), headerValueSeparator)
for _, v := range valueList {
h.Add(name, v)
}
}
if e != nil {
return h, e
}
return h, nil
}
func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
frame.CFHeader = h
var err error
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
return err
}
if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
return err
}
frame.Priority >>= 5
if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
return err
}
reader := f.r
if !f.headerCompressionDisabled {
err := f.uncorkHeaderDecompressor(int64(h.length - 10))
if err != nil {
return err
}
reader = f.headerDecompressor
}
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
err = &Error{WrongCompressedPayloadSize, 0}
}
if err != nil {
return err
}
for h := range frame.Headers {
if invalidReqHeaders[h] {
return &Error{InvalidHeaderPresent, frame.StreamId}
}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
frame.CFHeader = h
var err error
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
reader := f.r
if !f.headerCompressionDisabled {
err := f.uncorkHeaderDecompressor(int64(h.length - 4))
if err != nil {
return err
}
reader = f.headerDecompressor
}
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
err = &Error{WrongCompressedPayloadSize, 0}
}
if err != nil {
return err
}
for h := range frame.Headers {
if invalidRespHeaders[h] {
return &Error{InvalidHeaderPresent, frame.StreamId}
}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
frame.CFHeader = h
var err error
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
reader := f.r
if !f.headerCompressionDisabled {
err := f.uncorkHeaderDecompressor(int64(h.length - 4))
if err != nil {
return err
}
reader = f.headerDecompressor
}
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
err = &Error{WrongCompressedPayloadSize, 0}
}
if err != nil {
return err
}
var invalidHeaders map[string]bool
if frame.StreamId%2 == 0 {
invalidHeaders = invalidReqHeaders
} else {
invalidHeaders = invalidRespHeaders
}
for h := range frame.Headers {
if invalidHeaders[h] {
return &Error{InvalidHeaderPresent, frame.StreamId}
}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
var length uint32
if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
return nil, err
}
var frame DataFrame
frame.StreamId = streamId
frame.Flags = DataFlags(length >> 24)
length &= 0xffffff
frame.Data = make([]byte, length)
if _, err := io.ReadFull(f.r, frame.Data); err != nil {
return nil, err
}
if frame.StreamId == 0 {
return nil, &Error{ZeroStreamId, 0}
}
return &frame, nil
}

275
vendor/github.com/docker/spdystream/spdy/types.go generated vendored Normal file
@ -0,0 +1,275 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package spdy implements the SPDY protocol (currently SPDY/3), described in
// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
package spdy
import (
"bytes"
"compress/zlib"
"io"
"net/http"
)
// Version is the protocol version number that this package implements.
const Version = 3
// ControlFrameType stores the type field in a control frame header.
type ControlFrameType uint16
const (
TypeSynStream ControlFrameType = 0x0001
TypeSynReply = 0x0002
TypeRstStream = 0x0003
TypeSettings = 0x0004
TypePing = 0x0006
TypeGoAway = 0x0007
TypeHeaders = 0x0008
TypeWindowUpdate = 0x0009
)
// ControlFlags are the flags that can be set on a control frame.
type ControlFlags uint8
const (
ControlFlagFin ControlFlags = 0x01
ControlFlagUnidirectional = 0x02
ControlFlagSettingsClearSettings = 0x01
)
// DataFlags are the flags that can be set on a data frame.
type DataFlags uint8
const (
DataFlagFin DataFlags = 0x01
)
// MaxDataLength is the maximum number of bytes that can be stored in one frame.
const MaxDataLength = 1<<24 - 1
// headerValueSeparator separates multiple header values.
const headerValueSeparator = "\x00"
// Frame is a single SPDY frame in its unpacked in-memory representation. Use
// Framer to read and write it.
type Frame interface {
write(f *Framer) error
}
// ControlFrameHeader contains all the fields in a control frame header,
// in its unpacked in-memory representation.
type ControlFrameHeader struct {
// Note, high bit is the "Control" bit.
version uint16 // spdy version number
frameType ControlFrameType
Flags ControlFlags
length uint32 // length of data field
}
type controlFrame interface {
Frame
read(h ControlFrameHeader, f *Framer) error
}
// StreamId represents a 31-bit value identifying the stream.
type StreamId uint32
// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
// frame.
type SynStreamFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
Priority uint8 // priority of this frame (3-bit)
Slot uint8 // index in the server's credential vector of the client certificate
Headers http.Header
}
// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
type SynReplyFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
Headers http.Header
}
// RstStreamStatus represents the status that led to a RST_STREAM.
type RstStreamStatus uint32
const (
ProtocolError RstStreamStatus = iota + 1
InvalidStream
RefusedStream
UnsupportedVersion
Cancel
InternalError
FlowControlError
StreamInUse
StreamAlreadyClosed
InvalidCredentials
FrameTooLarge
)
// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
// frame.
type RstStreamFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
Status RstStreamStatus
}
// SettingsFlag represents a flag in a SETTINGS frame.
type SettingsFlag uint8
const (
FlagSettingsPersistValue SettingsFlag = 0x1
FlagSettingsPersisted = 0x2
)
// SettingsId represents the id of an id/value pair in a SETTINGS frame.
type SettingsId uint32
const (
SettingsUploadBandwidth SettingsId = iota + 1
SettingsDownloadBandwidth
SettingsRoundTripTime
SettingsMaxConcurrentStreams
SettingsCurrentCwnd
SettingsDownloadRetransRate
SettingsInitialWindowSize
SettingsClientCretificateVectorSize
)
// SettingsFlagIdValue is the unpacked, in-memory representation of the
// combined flag/id/value for a setting in a SETTINGS frame.
type SettingsFlagIdValue struct {
Flag SettingsFlag
Id SettingsId
Value uint32
}
// SettingsFrame is the unpacked, in-memory representation of a SPDY
// SETTINGS frame.
type SettingsFrame struct {
CFHeader ControlFrameHeader
FlagIdValues []SettingsFlagIdValue
}
// PingFrame is the unpacked, in-memory representation of a PING frame.
type PingFrame struct {
CFHeader ControlFrameHeader
Id uint32 // unique id for this ping, from server is even, from client is odd.
}
// GoAwayStatus represents the status in a GoAwayFrame.
type GoAwayStatus uint32
const (
GoAwayOK GoAwayStatus = iota
GoAwayProtocolError
GoAwayInternalError
)
// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
type GoAwayFrame struct {
CFHeader ControlFrameHeader
LastGoodStreamId StreamId // last stream id which was accepted by sender
Status GoAwayStatus
}
// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
type HeadersFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
Headers http.Header
}
// WindowUpdateFrame is the unpacked, in-memory representation of a
// WINDOW_UPDATE frame.
type WindowUpdateFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
DeltaWindowSize uint32 // additional number of bytes to existing window size
}
// TODO: Implement credential frame and related methods.
// DataFrame is the unpacked, in-memory representation of a DATA frame.
type DataFrame struct {
// Note, high bit is the "Control" bit. Should be 0 for data frames.
StreamId StreamId
Flags DataFlags
Data []byte // payload data of this frame
}
// ErrorCode is a SPDY-specific error code.
type ErrorCode string
const (
UnlowercasedHeaderName ErrorCode = "header was not lowercased"
DuplicateHeaders = "multiple headers with same name"
WrongCompressedPayloadSize = "compressed payload size was incorrect"
UnknownFrameType = "unknown frame type"
InvalidControlFrame = "invalid control frame"
InvalidDataFrame = "invalid data frame"
InvalidHeaderPresent = "frame contained invalid header"
ZeroStreamId = "stream id zero is disallowed"
)
// Error contains both the type of error and additional values. StreamId is 0
// if Error is not associated with a stream.
type Error struct {
Err ErrorCode
StreamId StreamId
}
func (e *Error) Error() string {
return string(e.Err)
}
var invalidReqHeaders = map[string]bool{
"Connection": true,
"Host": true,
"Keep-Alive": true,
"Proxy-Connection": true,
"Transfer-Encoding": true,
}
var invalidRespHeaders = map[string]bool{
"Connection": true,
"Keep-Alive": true,
"Proxy-Connection": true,
"Transfer-Encoding": true,
}
// Framer handles serializing/deserializing SPDY frames, including compressing/
// decompressing payloads.
type Framer struct {
headerCompressionDisabled bool
w io.Writer
headerBuf *bytes.Buffer
headerCompressor *zlib.Writer
r io.Reader
headerReader io.LimitedReader
headerDecompressor io.ReadCloser
}
// NewFramer allocates a new Framer for a given SPDY connection, represented by
// an io.Writer and an io.Reader. Note that Framer will read and write individual fields
// from/to the Reader and Writer, so the caller should pass in an appropriately
// buffered implementation to optimize performance.
func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
compressBuf := new(bytes.Buffer)
compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
if err != nil {
return nil, err
}
framer := &Framer{
w: w,
headerBuf: compressBuf,
headerCompressor: compressor,
r: r,
}
return framer, nil
}

318
vendor/github.com/docker/spdystream/spdy/write.go generated vendored Normal file
@ -0,0 +1,318 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package spdy
import (
"encoding/binary"
"io"
"net/http"
"strings"
)
func (frame *SynStreamFrame) write(f *Framer) error {
return f.writeSynStreamFrame(frame)
}
func (frame *SynReplyFrame) write(f *Framer) error {
return f.writeSynReplyFrame(frame)
}
func (frame *RstStreamFrame) write(f *Framer) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeRstStream
frame.CFHeader.Flags = 0
frame.CFHeader.length = 8
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if frame.Status == 0 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
return
}
return
}
func (frame *SettingsFrame) write(f *Framer) (err error) {
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeSettings
frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
return
}
for _, flagIdValue := range frame.FlagIdValues {
flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
return
}
}
return
}
func (frame *PingFrame) write(f *Framer) (err error) {
if frame.Id == 0 {
return &Error{ZeroStreamId, 0}
}
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypePing
frame.CFHeader.Flags = 0
frame.CFHeader.length = 4
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
return
}
return
}
func (frame *GoAwayFrame) write(f *Framer) (err error) {
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeGoAway
frame.CFHeader.Flags = 0
frame.CFHeader.length = 8
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
return
}
return nil
}
func (frame *HeadersFrame) write(f *Framer) error {
return f.writeHeadersFrame(frame)
}
func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeWindowUpdate
frame.CFHeader.Flags = 0
frame.CFHeader.length = 8
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
return
}
return nil
}
func (frame *DataFrame) write(f *Framer) error {
return f.writeDataFrame(frame)
}
// WriteFrame writes a frame.
func (f *Framer) WriteFrame(frame Frame) error {
return frame.write(f)
}
func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
return err
}
if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
return err
}
flagsAndLength := uint32(h.Flags)<<24 | h.length
if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
return err
}
return nil
}
func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
n = 0
if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
return
}
n += 2
for name, values := range h {
if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
return
}
n += 2
name = strings.ToLower(name)
if _, err = io.WriteString(w, name); err != nil {
return
}
n += len(name)
v := strings.Join(values, headerValueSeparator)
if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
return
}
n += 2
if _, err = io.WriteString(w, v); err != nil {
return
}
n += len(v)
}
return
}
func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
// Marshal the headers.
var writer io.Writer = f.headerBuf
if !f.headerCompressionDisabled {
writer = f.headerCompressor
}
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
return
}
if !f.headerCompressionDisabled {
f.headerCompressor.Flush()
}
// Set ControlFrameHeader.
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeSynStream
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
return err
}
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
return err
}
f.headerBuf.Reset()
return nil
}
func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
// Marshal the headers.
var writer io.Writer = f.headerBuf
if !f.headerCompressionDisabled {
writer = f.headerCompressor
}
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
return
}
if !f.headerCompressionDisabled {
f.headerCompressor.Flush()
}
// Set ControlFrameHeader.
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeSynReply
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
return
}
f.headerBuf.Reset()
return
}
func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
// Marshal the headers.
var writer io.Writer = f.headerBuf
if !f.headerCompressionDisabled {
writer = f.headerCompressor
}
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
return
}
if !f.headerCompressionDisabled {
f.headerCompressor.Flush()
}
// Set ControlFrameHeader.
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeHeaders
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
return
}
f.headerBuf.Reset()
return
}
func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
return &Error{InvalidDataFrame, frame.StreamId}
}
// Serialize frame to Writer.
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
return
}
if _, err = f.w.Write(frame.Data); err != nil {
return
}
return nil
}
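
The `Framer` read and write paths above can be exercised round-trip through an in-memory buffer, which is also how the connection layer drives them over a `net.Conn`. A minimal sketch, not upstream code, sticking to frame types that do not involve header compression:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/spdystream/spdy"
)

func main() {
	// A single buffer stands in for the network: frames written by the
	// Framer are read back out of the same buffer.
	var buf bytes.Buffer
	framer, err := spdy.NewFramer(&buf, &buf)
	if err != nil {
		panic(err)
	}

	if err := framer.WriteFrame(&spdy.PingFrame{Id: 1}); err != nil {
		panic(err)
	}
	data := &spdy.DataFrame{StreamId: 1, Flags: spdy.DataFlagFin, Data: []byte("hello")}
	if err := framer.WriteFrame(data); err != nil {
		panic(err)
	}

	// Read the frames back and dispatch on their concrete types, the same
	// way Connection.Serve does.
	for i := 0; i < 2; i++ {
		frame, err := framer.ReadFrame()
		if err != nil {
			panic(err)
		}
		switch f := frame.(type) {
		case *spdy.PingFrame:
			fmt.Println("ping frame, id:", f.Id)
		case *spdy.DataFrame:
			fmt.Printf("data frame, stream %d: %q (fin=%v)\n",
				f.StreamId, f.Data, f.Flags&spdy.DataFlagFin != 0)
		}
	}
}
```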

327
vendor/github.com/docker/spdystream/stream.go generated vendored Normal file
@ -0,0 +1,327 @@
package spdystream
import (
"errors"
"fmt"
"io"
"net"
"net/http"
"sync"
"time"
"github.com/docker/spdystream/spdy"
)
var (
ErrUnreadPartialData = errors.New("unread partial data")
)
type Stream struct {
streamId spdy.StreamId
parent *Stream
conn *Connection
startChan chan error
dataLock sync.RWMutex
dataChan chan []byte
unread []byte
priority uint8
headers http.Header
headerChan chan http.Header
finishLock sync.Mutex
finished bool
replyCond *sync.Cond
replied bool
closeLock sync.Mutex
closeChan chan bool
}
// WriteData writes data to stream, sending a dataframe per call
func (s *Stream) WriteData(data []byte, fin bool) error {
s.waitWriteReply()
var flags spdy.DataFlags
if fin {
flags = spdy.DataFlagFin
s.finishLock.Lock()
if s.finished {
s.finishLock.Unlock()
return ErrWriteClosedStream
}
s.finished = true
s.finishLock.Unlock()
}
dataFrame := &spdy.DataFrame{
StreamId: s.streamId,
Flags: flags,
Data: data,
}
debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
return s.conn.framer.WriteFrame(dataFrame)
}
// Write writes bytes to a stream, calling WriteData once for each call.
func (s *Stream) Write(data []byte) (n int, err error) {
err = s.WriteData(data, false)
if err == nil {
n = len(data)
}
return
}
// Read reads bytes from a stream; a single Read will never return more
// than what was sent in a single data frame, but multiple calls to
// Read may get data from the same data frame.
func (s *Stream) Read(p []byte) (n int, err error) {
if s.unread == nil {
select {
case <-s.closeChan:
return 0, io.EOF
case read, ok := <-s.dataChan:
if !ok {
return 0, io.EOF
}
s.unread = read
}
}
n = copy(p, s.unread)
if n < len(s.unread) {
s.unread = s.unread[n:]
} else {
s.unread = nil
}
return
}
// ReadData reads an entire data frame and returns the byte array
// from the data frame. If there is unread data from the result
// of a Read call, this function will return an ErrUnreadPartialData.
func (s *Stream) ReadData() ([]byte, error) {
debugMessage("(%p) Reading data from %d", s, s.streamId)
if s.unread != nil {
return nil, ErrUnreadPartialData
}
select {
case <-s.closeChan:
return nil, io.EOF
case read, ok := <-s.dataChan:
if !ok {
return nil, io.EOF
}
return read, nil
}
}
func (s *Stream) waitWriteReply() {
if s.replyCond != nil {
s.replyCond.L.Lock()
for !s.replied {
s.replyCond.Wait()
}
s.replyCond.L.Unlock()
}
}
// Wait waits for the stream to receive a reply.
func (s *Stream) Wait() error {
return s.WaitTimeout(time.Duration(0))
}
// WaitTimeout waits for the stream to receive a reply or for timeout.
// When the timeout is reached, ErrTimeout will be returned.
func (s *Stream) WaitTimeout(timeout time.Duration) error {
var timeoutChan <-chan time.Time
if timeout > time.Duration(0) {
timeoutChan = time.After(timeout)
}
select {
case err := <-s.startChan:
if err != nil {
return err
}
break
case <-timeoutChan:
return ErrTimeout
}
return nil
}
// Close closes the stream by sending an empty data frame with the
// finish flag set, indicating this side is finished with the stream.
func (s *Stream) Close() error {
select {
case <-s.closeChan:
// Stream is now fully closed
s.conn.removeStream(s)
default:
break
}
return s.WriteData([]byte{}, true)
}
// Reset sends a reset frame, putting the stream into the fully closed state.
func (s *Stream) Reset() error {
s.conn.removeStream(s)
return s.resetStream()
}
func (s *Stream) resetStream() error {
// Always call closeRemoteChannels, even if s.finished is already true.
// This makes it so that stream.Close() followed by stream.Reset() allows
// stream.Read() to unblock.
s.closeRemoteChannels()
s.finishLock.Lock()
if s.finished {
s.finishLock.Unlock()
return nil
}
s.finished = true
s.finishLock.Unlock()
resetFrame := &spdy.RstStreamFrame{
StreamId: s.streamId,
Status: spdy.Cancel,
}
return s.conn.framer.WriteFrame(resetFrame)
}
// CreateSubStream creates a stream using the current as the parent
func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
return s.conn.CreateStream(headers, s, fin)
}
// SetPriority sets the stream priority, does not affect the
// remote priority of this stream after Open has been called.
// Valid values are 0 through 7, 0 being the highest priority
// and 7 the lowest.
func (s *Stream) SetPriority(priority uint8) {
s.priority = priority
}
// SendHeader sends a header frame across the stream
func (s *Stream) SendHeader(headers http.Header, fin bool) error {
return s.conn.sendHeaders(headers, s, fin)
}
// SendReply sends a reply on a stream, only valid to be called once
// when handling a new stream
func (s *Stream) SendReply(headers http.Header, fin bool) error {
if s.replyCond == nil {
return errors.New("cannot reply on initiated stream")
}
s.replyCond.L.Lock()
defer s.replyCond.L.Unlock()
if s.replied {
return nil
}
err := s.conn.sendReply(headers, s, fin)
if err != nil {
return err
}
s.replied = true
s.replyCond.Broadcast()
return nil
}
// Refuse sends a reset frame with the status refuse, only
// valid to be called once when handling a new stream. This
// may be used to indicate that a stream is not allowed
// when http status codes are not being used.
func (s *Stream) Refuse() error {
if s.replied {
return nil
}
s.replied = true
return s.conn.sendReset(spdy.RefusedStream, s)
}
// Cancel sends a reset frame with the status canceled. This
// can be used at any time by the creator of the Stream to
// indicate the stream is no longer needed.
func (s *Stream) Cancel() error {
return s.conn.sendReset(spdy.Cancel, s)
}
// ReceiveHeader receives a header sent on the other side
// of the stream. This function will block until a header
// is received or stream is closed.
func (s *Stream) ReceiveHeader() (http.Header, error) {
select {
case <-s.closeChan:
break
case header, ok := <-s.headerChan:
if !ok {
return nil, fmt.Errorf("header chan closed")
}
return header, nil
}
return nil, fmt.Errorf("stream closed")
}
// Parent returns the parent stream
func (s *Stream) Parent() *Stream {
return s.parent
}
// Headers returns the headers used to create the stream
func (s *Stream) Headers() http.Header {
return s.headers
}
// String returns the string version of stream using the
// streamId to uniquely identify the stream
func (s *Stream) String() string {
return fmt.Sprintf("stream:%d", s.streamId)
}
// Identifier returns a 32 bit identifier for the stream
func (s *Stream) Identifier() uint32 {
return uint32(s.streamId)
}
// IsFinished returns whether the stream has finished
// sending data
func (s *Stream) IsFinished() bool {
return s.finished
}
// Implement net.Conn interface
func (s *Stream) LocalAddr() net.Addr {
return s.conn.conn.LocalAddr()
}
func (s *Stream) RemoteAddr() net.Addr {
return s.conn.conn.RemoteAddr()
}
// TODO set per stream values instead of connection-wide
func (s *Stream) SetDeadline(t time.Time) error {
return s.conn.conn.SetDeadline(t)
}
func (s *Stream) SetReadDeadline(t time.Time) error {
return s.conn.conn.SetReadDeadline(t)
}
func (s *Stream) SetWriteDeadline(t time.Time) error {
return s.conn.conn.SetWriteDeadline(t)
}
func (s *Stream) closeRemoteChannels() {
s.closeLock.Lock()
defer s.closeLock.Unlock()
select {
case <-s.closeChan:
default:
close(s.closeChan)
}
}
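As a rough usage sketch (not part of this change), a client-side Stream behaves like a net.Conn once the SYN_STREAM has been answered. rawConn, the inline stream handler, and the ping payload below are illustrative assumptions; spdystream.NewConnection and Connection.CreateStream are referenced later in this vendor drop but their file is not included here.

package main

import (
	"fmt"
	"net"
	"net/http"

	"github.com/docker/spdystream"
)

// pingOnce assumes rawConn is already connected to a peer speaking SPDY
// (for example after an HTTP connection upgrade).
func pingOnce(rawConn net.Conn) error {
	conn, err := spdystream.NewConnection(rawConn, false) // false = client side
	if err != nil {
		return err
	}
	// Serve runs the frame-processing loop; refuse any server-initiated streams in this sketch.
	go conn.Serve(func(s *spdystream.Stream) { s.Refuse() })

	stream, err := conn.CreateStream(http.Header{}, nil, false)
	if err != nil {
		return err
	}
	if err := stream.Wait(); err != nil { // block until the peer replies to the SYN_STREAM
		return err
	}
	if _, err := stream.Write([]byte("ping")); err != nil {
		return err
	}
	buf := make([]byte, 64)
	n, err := stream.Read(buf)
	if err != nil {
		return err
	}
	fmt.Printf("reply: %q\n", buf[:n])
	stream.Close() // half-close: sends an empty data frame with FIN
	return conn.Close()
}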

16
vendor/github.com/docker/spdystream/utils.go generated vendored Normal file

@ -0,0 +1,16 @@
package spdystream
import (
"log"
"os"
)
var (
DEBUG = os.Getenv("DEBUG")
)
func debugMessage(fmt string, args ...interface{}) {
if DEBUG != "" {
log.Printf(fmt, args...)
}
}

19
vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package httpstream adds multiplexed streaming support to HTTP requests and
// responses via connection upgrades.
package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"

145
vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go generated vendored Normal file

@ -0,0 +1,145 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package httpstream
import (
"fmt"
"io"
"net/http"
"strings"
"time"
)
const (
HeaderConnection = "Connection"
HeaderUpgrade = "Upgrade"
HeaderProtocolVersion = "X-Stream-Protocol-Version"
HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions"
)
// NewStreamHandler defines a function that is called when a new Stream is
// received. If no error is returned, the Stream is accepted; otherwise,
// the stream is rejected. After the reply frame has been sent, replySent is closed.
type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error
// NoOpNewStreamHandler is a stream handler that accepts a new stream and
// performs no other logic.
func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil }
// Dialer knows how to open a streaming connection to a server.
type Dialer interface {
// Dial opens a streaming connection to a server using one of the protocols
// specified (in order of most preferred to least preferred).
Dial(protocols ...string) (Connection, string, error)
}
// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade
// HTTP requests to support multiplexed bidirectional streams. After RoundTrip()
// is invoked, if the upgrade is successful, clients may retrieve the upgraded
// connection by calling UpgradeRoundTripper.Connection().
type UpgradeRoundTripper interface {
http.RoundTripper
// NewConnection validates the response and creates a new Connection.
NewConnection(resp *http.Response) (Connection, error)
}
// ResponseUpgrader knows how to upgrade HTTP requests and responses to
// add streaming support to them.
type ResponseUpgrader interface {
// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
// streams. newStreamHandler will be called asynchronously whenever the
// other end of the upgraded connection creates a new stream.
UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection
}
// Connection represents an upgraded HTTP connection.
type Connection interface {
// CreateStream creates a new Stream with the supplied headers.
CreateStream(headers http.Header) (Stream, error)
// Close resets all streams and closes the connection.
Close() error
// CloseChan returns a channel that is closed when the underlying connection is closed.
CloseChan() <-chan bool
// SetIdleTimeout sets the amount of time the connection may remain idle before
// it is automatically closed.
SetIdleTimeout(timeout time.Duration)
}
// Stream represents a bidirectional communications channel that is part of an
// upgraded connection.
type Stream interface {
io.ReadWriteCloser
// Reset closes both directions of the stream, indicating that neither client
// nor server can use it any more.
Reset() error
// Headers returns the headers used to create the stream.
Headers() http.Header
// Identifier returns the stream's ID.
Identifier() uint32
}
// IsUpgradeRequest returns true if the given request is a connection upgrade request
func IsUpgradeRequest(req *http.Request) bool {
for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) {
return true
}
}
return false
}
func negotiateProtocol(clientProtocols, serverProtocols []string) string {
for i := range clientProtocols {
for j := range serverProtocols {
if clientProtocols[i] == serverProtocols[j] {
return clientProtocols[i]
}
}
}
return ""
}
// Handshake performs a subprotocol negotiation. If the client did request a
// subprotocol, Handshake will select the first common value found in
// serverProtocols. If a match is found, Handshake adds a response header
// indicating the chosen subprotocol. If no match is found, HTTP forbidden is
// returned, along with a response header containing the list of protocols the
// server can accept.
func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
if len(clientProtocols) == 0 {
return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
}
if len(serverProtocols) == 0 {
panic(fmt.Errorf("unable to upgrade: serverProtocols is required"))
}
negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
if len(negotiatedProtocol) == 0 {
for i := range serverProtocols {
w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
}
err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
http.Error(w, err.Error(), http.StatusForbidden)
return "", err
}
w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
return negotiatedProtocol, nil
}
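A hedged sketch of the server-side negotiation helpers above follows; the handler name and the single advertised protocol string are assumptions, and the SPDY upgrade itself is handled by the spdy sub-package further down.

package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

// portForwardHandler only performs the version handshake; the actual upgrade
// is left to the caller. Names and the protocol list are illustrative.
func portForwardHandler(w http.ResponseWriter, req *http.Request) {
	if !httpstream.IsUpgradeRequest(req) {
		http.Error(w, "connection upgrade required", http.StatusBadRequest)
		return
	}
	if _, err := httpstream.Handshake(req, w, []string{"portforward.k8s.io"}); err != nil {
		// Handshake already wrote the 403 response listing the accepted versions.
		return
	}
	// The agreed subprotocol header is now set on the response; the SPDY
	// upgrade would happen next (see httpstream/spdy below).
}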

145
vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go generated vendored Normal file

@ -0,0 +1,145 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spdy
import (
"net"
"net/http"
"sync"
"time"
"github.com/docker/spdystream"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog"
)
// connection maintains state about a spdystream.Connection and its associated
// streams.
type connection struct {
conn *spdystream.Connection
streams []httpstream.Stream
streamLock sync.Mutex
newStreamHandler httpstream.NewStreamHandler
}
// NewClientConnection creates a new SPDY client connection.
func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
spdyConn, err := spdystream.NewConnection(conn, false)
if err != nil {
defer conn.Close()
return nil, err
}
return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil
}
// NewServerConnection creates a new SPDY server connection. newStreamHandler
// will be invoked when the server receives a newly created stream from the
// client.
func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
spdyConn, err := spdystream.NewConnection(conn, true)
if err != nil {
defer conn.Close()
return nil, err
}
return newConnection(spdyConn, newStreamHandler), nil
}
// newConnection returns a new connection wrapping conn. newStreamHandler
// will be invoked when the server receives a newly created stream from the
// client.
func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
c := &connection{conn: conn, newStreamHandler: newStreamHandler}
go conn.Serve(c.newSpdyStream)
return c
}
// createStreamResponseTimeout indicates how long to wait for the other side to
// acknowledge the new stream before timing out.
const createStreamResponseTimeout = 30 * time.Second
// Close first sends a reset for all of the connection's streams, and then
// closes the underlying spdystream.Connection.
func (c *connection) Close() error {
c.streamLock.Lock()
for _, s := range c.streams {
// calling Reset instead of Close ensures that all streams are fully torn down
s.Reset()
}
c.streams = make([]httpstream.Stream, 0)
c.streamLock.Unlock()
// now that all streams are fully torn down, it's safe to call close on the underlying connection,
// which should be able to terminate immediately at this point, instead of waiting for any
// remaining graceful stream termination.
return c.conn.Close()
}
// CreateStream creates a new stream with the specified headers and registers
// it with the connection.
func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
stream, err := c.conn.CreateStream(headers, nil, false)
if err != nil {
return nil, err
}
if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
return nil, err
}
c.registerStream(stream)
return stream, nil
}
// registerStream adds the stream s to the connection's list of streams that
// it owns.
func (c *connection) registerStream(s httpstream.Stream) {
c.streamLock.Lock()
c.streams = append(c.streams, s)
c.streamLock.Unlock()
}
// CloseChan returns a channel that, when closed, indicates that the underlying
// spdystream.Connection has been closed.
func (c *connection) CloseChan() <-chan bool {
return c.conn.CloseChan()
}
// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
// stream is accepted and registered with the connection.
func (c *connection) newSpdyStream(stream *spdystream.Stream) {
replySent := make(chan struct{})
err := c.newStreamHandler(stream, replySent)
rejectStream := (err != nil)
if rejectStream {
klog.Warningf("Stream rejected: %v", err)
stream.Reset()
return
}
c.registerStream(stream)
stream.SendReply(http.Header{}, rejectStream)
close(replySent)
}
// SetIdleTimeout sets the amount of time the connection may remain idle before
// it is automatically closed.
func (c *connection) SetIdleTimeout(timeout time.Duration) {
c.conn.SetIdleTimeout(timeout)
}

335
vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go generated vendored Normal file

@ -0,0 +1,335 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spdy
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/httpstream"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/third_party/forked/golang/netutil"
)
// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
// multiplexed streams. After RoundTrip() is invoked, Conn will be set
// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
type SpdyRoundTripper struct {
//tlsConfig holds the TLS configuration settings to use when connecting
//to the remote server.
tlsConfig *tls.Config
/* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
must be safe for use by multiple concurrent goroutines. If this is absolutely
necessary, we could keep a map from http.Request to net.Conn. In practice,
a client will create an http.Client, set the transport to a new instance of
SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
*/
// conn is the underlying network connection to the remote server.
conn net.Conn
// Dialer is the dialer used to connect. Used if non-nil.
Dialer *net.Dialer
// proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
// Used primarily for mocking the proxy discovery in tests.
proxier func(req *http.Request) (*url.URL, error)
// followRedirects indicates if the round tripper should examine responses for redirects and
// follow them.
followRedirects bool
// requireSameHostRedirects restricts redirect following to only follow redirects to the same host
// as the original request.
requireSameHostRedirects bool
}
var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
var _ utilnet.Dialer = &SpdyRoundTripper{}
// NewRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig.
func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper {
return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
}
// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig. This function is mostly meant for unit tests.
func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
return &SpdyRoundTripper{
tlsConfig: tlsConfig,
followRedirects: followRedirects,
requireSameHostRedirects: requireSameHostRedirects,
}
}
// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
// proxying with a spdy roundtripper.
func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
return s.tlsConfig
}
// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
conn, err := s.dial(req)
if err != nil {
return nil, err
}
if err := req.Write(conn); err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
// dial dials the host specified by req, using TLS if appropriate, optionally
// using a proxy server if one is configured via environment variables.
func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
proxier := s.proxier
if proxier == nil {
proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
proxyURL, err := proxier(req)
if err != nil {
return nil, err
}
if proxyURL == nil {
return s.dialWithoutProxy(req.Context(), req.URL)
}
// ensure we use a canonical host with proxyReq
targetHost := netutil.CanonicalAddr(req.URL)
// proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
proxyReq := http.Request{
Method: "CONNECT",
URL: &url.URL{},
Host: targetHost,
}
if pa := s.proxyAuth(proxyURL); pa != "" {
proxyReq.Header = http.Header{}
proxyReq.Header.Set("Proxy-Authorization", pa)
}
proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
if err != nil {
return nil, err
}
proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
_, err = proxyClientConn.Do(&proxyReq)
if err != nil && err != httputil.ErrPersistEOF {
return nil, err
}
rwc, _ := proxyClientConn.Hijack()
if req.URL.Scheme != "https" {
return rwc, nil
}
host, _, err := net.SplitHostPort(targetHost)
if err != nil {
return nil, err
}
tlsConfig := s.tlsConfig
switch {
case tlsConfig == nil:
tlsConfig = &tls.Config{ServerName: host}
case len(tlsConfig.ServerName) == 0:
tlsConfig = tlsConfig.Clone()
tlsConfig.ServerName = host
}
tlsConn := tls.Client(rwc, tlsConfig)
// need to manually call Handshake() so we can call VerifyHostname() below
if err := tlsConn.Handshake(); err != nil {
return nil, err
}
// Return if we were configured to skip validation
if tlsConfig.InsecureSkipVerify {
return tlsConn, nil
}
if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
return nil, err
}
return tlsConn, nil
}
// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
dialAddr := netutil.CanonicalAddr(url)
if url.Scheme == "http" {
if s.Dialer == nil {
var d net.Dialer
return d.DialContext(ctx, "tcp", dialAddr)
} else {
return s.Dialer.DialContext(ctx, "tcp", dialAddr)
}
}
// TODO validate the TLSClientConfig is set up?
var conn *tls.Conn
var err error
if s.Dialer == nil {
conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
} else {
conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
}
if err != nil {
return nil, err
}
// Return if we were configured to skip validation
if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
return conn, nil
}
host, _, err := net.SplitHostPort(dialAddr)
if err != nil {
return nil, err
}
if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
host = s.tlsConfig.ServerName
}
err = conn.VerifyHostname(host)
if err != nil {
return nil, err
}
return conn, nil
}
// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
if proxyURL == nil || proxyURL.User == nil {
return ""
}
credentials := proxyURL.User.String()
encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))
return fmt.Sprintf("Basic %s", encodedAuth)
}
// RoundTrip executes the Request and upgrades it. After a successful upgrade,
// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
// connection.
func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
header := utilnet.CloneHeader(req.Header)
header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
var (
conn net.Conn
rawResponse []byte
err error
)
if s.followRedirects {
conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
} else {
clone := utilnet.CloneRequest(req)
clone.Header = header
conn, err = s.Dial(clone)
}
if err != nil {
return nil, err
}
responseReader := bufio.NewReader(
io.MultiReader(
bytes.NewBuffer(rawResponse),
conn,
),
)
resp, err := http.ReadResponse(responseReader, nil)
if err != nil {
if conn != nil {
conn.Close()
}
return nil, err
}
s.conn = conn
return resp, nil
}
// NewConnection validates the upgrade response, creating and returning a new
// httpstream.Connection if there were no errors.
func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
defer resp.Body.Close()
responseError := ""
responseErrorBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
responseError = "unable to read error from server response"
} else {
// TODO: I don't belong here, I should be abstracted from this class
if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
if status, ok := obj.(*metav1.Status); ok {
return nil, &apierrors.StatusError{ErrStatus: *status}
}
}
responseError = string(responseErrorBytes)
responseError = strings.TrimSpace(responseError)
}
return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
}
return NewClientConnection(s.conn)
}
// statusScheme is a private scheme used for the decoding here until someone fixes the TODO in NewConnection
var statusScheme = runtime.NewScheme()
// statusCodecs decodes the metav1.Status payloads returned by the server on failed upgrades.
var statusCodecs = serializer.NewCodecFactory(statusScheme)
func init() {
statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
&metav1.Status{},
)
}

107
vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go generated vendored Normal file

@ -0,0 +1,107 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spdy
import (
"bufio"
"fmt"
"io"
"net"
"net/http"
"strings"
"sync/atomic"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/runtime"
)
const HeaderSpdy31 = "SPDY/3.1"
// responseUpgrader knows how to upgrade HTTP responses. It
// implements the httpstream.ResponseUpgrader interface.
type responseUpgrader struct {
}
// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
// calls will be handled directly by the underlying net.Conn with the exception
// of Read and Close calls, which will consider data in the bufio.Reader. This
// ensures that data already inside the used bufio.Reader instance is also
// read.
type connWrapper struct {
net.Conn
closed int32
bufReader *bufio.Reader
}
func (w *connWrapper) Read(b []byte) (n int, err error) {
if atomic.LoadInt32(&w.closed) == 1 {
return 0, io.EOF
}
return w.bufReader.Read(b)
}
func (w *connWrapper) Close() error {
err := w.Conn.Close()
atomic.StoreInt32(&w.closed, 1)
return err
}
// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
// capable of upgrading HTTP responses using SPDY/3.1 via the
// spdystream package.
func NewResponseUpgrader() httpstream.ResponseUpgrader {
return responseUpgrader{}
}
// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
// streams. newStreamHandler will be called asynchronously whenever the
// other end of the upgraded connection creates a new stream.
func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
http.Error(w, errorMsg, http.StatusBadRequest)
return nil
}
hijacker, ok := w.(http.Hijacker)
if !ok {
errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
http.Error(w, errorMsg, http.StatusInternalServerError)
return nil
}
w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
w.WriteHeader(http.StatusSwitchingProtocols)
conn, bufrw, err := hijacker.Hijack()
if err != nil {
runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
return nil
}
connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler)
if err != nil {
runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
return nil
}
return spdyConn
}
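A hedged server-side sketch of UpgradeResponse follows: it upgrades the response and echoes every stream the client opens. The handler and channel names are illustrative, not part of this change.

package main

import (
	"io"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func echoUpgradeHandler(w http.ResponseWriter, req *http.Request) {
	streams := make(chan httpstream.Stream, 4)
	upgrader := spdy.NewResponseUpgrader()
	conn := upgrader.UpgradeResponse(w, req, func(s httpstream.Stream, replySent <-chan struct{}) error {
		streams <- s // accept the stream and hand it to the loop below
		return nil
	})
	if conn == nil {
		return // UpgradeResponse has already written an error to w
	}
	defer conn.Close()

	for {
		select {
		case <-conn.CloseChan():
			return
		case s := <-streams:
			go io.Copy(s, s) // echo until the client closes the stream
		}
	}
}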

27
vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go generated vendored Normal file

@ -0,0 +1,27 @@
package netutil
import (
"net/url"
"strings"
)
// FROM: http://golang.org/src/net/http/client.go
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
// FROM: http://golang.org/src/net/http/transport.go
var portMap = map[string]string{
"http": "80",
"https": "443",
}
// FROM: http://golang.org/src/net/http/transport.go
// canonicalAddr returns url.Host but always with a ":port" suffix
func CanonicalAddr(url *url.URL) string {
addr := url.Host
if !hasPort(addr) {
return addr + ":" + portMap[url.Scheme]
}
return addr
}
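The defaulting behaviour of CanonicalAddr is easiest to see with a tiny example; the URLs below are illustrative.

package main

import (
	"fmt"
	"net/url"

	"k8s.io/apimachinery/third_party/forked/golang/netutil"
)

func main() {
	u1, _ := url.Parse("https://example.com/api")
	u2, _ := url.Parse("http://example.com:8080")
	fmt.Println(netutil.CanonicalAddr(u1)) // example.com:443 -- port defaulted from the scheme
	fmt.Println(netutil.CanonicalAddr(u2)) // example.com:8080 -- explicit port kept
}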

19
vendor/k8s.io/client-go/tools/portforward/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package portforward adds support for SSH-like port forwarding from the client's
// local host to remote containers.
package portforward // import "k8s.io/client-go/tools/portforward"

429
vendor/k8s.io/client-go/tools/portforward/portforward.go generated vendored Normal file

@ -0,0 +1,429 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"sort"
"strconv"
"strings"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/runtime"
)
// PortForwardProtocolV1Name is the subprotocol used for port forwarding.
// TODO move to API machinery and re-unify with kubelet/server/portforward
const PortForwardProtocolV1Name = "portforward.k8s.io"
// PortForwarder knows how to listen for local connections and forward them to
// a remote pod via an upgraded HTTP request.
type PortForwarder struct {
addresses []listenAddress
ports []ForwardedPort
stopChan <-chan struct{}
dialer httpstream.Dialer
streamConn httpstream.Connection
listeners []io.Closer
Ready chan struct{}
requestIDLock sync.Mutex
requestID int
out io.Writer
errOut io.Writer
}
// ForwardedPort contains a Local:Remote port pairing.
type ForwardedPort struct {
Local uint16
Remote uint16
}
/*
valid port specifications:
5000
- forwards from localhost:5000 to pod:5000
8888:5000
- forwards from localhost:8888 to pod:5000
0:5000
:5000
- selects a random available local port,
forwards from localhost:<random port> to pod:5000
*/
func parsePorts(ports []string) ([]ForwardedPort, error) {
var forwards []ForwardedPort
for _, portString := range ports {
parts := strings.Split(portString, ":")
var localString, remoteString string
if len(parts) == 1 {
localString = parts[0]
remoteString = parts[0]
} else if len(parts) == 2 {
localString = parts[0]
if localString == "" {
// support :5000
localString = "0"
}
remoteString = parts[1]
} else {
return nil, fmt.Errorf("invalid port format '%s'", portString)
}
localPort, err := strconv.ParseUint(localString, 10, 16)
if err != nil {
return nil, fmt.Errorf("error parsing local port '%s': %s", localString, err)
}
remotePort, err := strconv.ParseUint(remoteString, 10, 16)
if err != nil {
return nil, fmt.Errorf("error parsing remote port '%s': %s", remoteString, err)
}
if remotePort == 0 {
return nil, fmt.Errorf("remote port must be > 0")
}
forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})
}
return forwards, nil
}
type listenAddress struct {
address string
protocol string
failureMode string
}
func parseAddresses(addressesToParse []string) ([]listenAddress, error) {
var addresses []listenAddress
parsed := make(map[string]listenAddress)
for _, address := range addressesToParse {
if address == "localhost" {
if _, exists := parsed["127.0.0.1"]; !exists {
ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"}
parsed[ip.address] = ip
}
if _, exists := parsed["::1"]; !exists {
ip := listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"}
parsed[ip.address] = ip
}
} else if net.ParseIP(address).To4() != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"}
} else if net.ParseIP(address) != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"}
} else {
return nil, fmt.Errorf("%s is not a valid IP", address)
}
}
addresses = make([]listenAddress, len(parsed))
id := 0
for _, v := range parsed {
addresses[id] = v
id++
}
// Sort addresses before returning to get a stable order
sort.Slice(addresses, func(i, j int) bool { return addresses[i].address < addresses[j].address })
return addresses, nil
}
// New creates a new PortForwarder with localhost listen addresses.
func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut)
}
// NewOnAddresses creates a new PortForwarder with custom listen addresses.
func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
if len(addresses) == 0 {
return nil, errors.New("you must specify at least 1 address")
}
parsedAddresses, err := parseAddresses(addresses)
if err != nil {
return nil, err
}
if len(ports) == 0 {
return nil, errors.New("you must specify at least 1 port")
}
parsedPorts, err := parsePorts(ports)
if err != nil {
return nil, err
}
return &PortForwarder{
dialer: dialer,
addresses: parsedAddresses,
ports: parsedPorts,
stopChan: stopChan,
Ready: readyChan,
out: out,
errOut: errOut,
}, nil
}
// ForwardPorts formats and executes a port forwarding request. The connection will remain
// open until stopChan is closed.
func (pf *PortForwarder) ForwardPorts() error {
defer pf.Close()
var err error
pf.streamConn, _, err = pf.dialer.Dial(PortForwardProtocolV1Name)
if err != nil {
return fmt.Errorf("error upgrading connection: %s", err)
}
defer pf.streamConn.Close()
return pf.forward()
}
// forward dials the remote host specific in req, upgrades the request, starts
// listeners for each port specified in ports, and forwards local connections
// to the remote host via streams.
func (pf *PortForwarder) forward() error {
var err error
listenSuccess := false
for i := range pf.ports {
port := &pf.ports[i]
err = pf.listenOnPort(port)
switch {
case err == nil:
listenSuccess = true
default:
if pf.errOut != nil {
fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err)
}
}
}
if !listenSuccess {
return fmt.Errorf("unable to listen on any of the requested ports: %v", pf.ports)
}
if pf.Ready != nil {
close(pf.Ready)
}
// wait for interrupt or conn closure
select {
case <-pf.stopChan:
case <-pf.streamConn.CloseChan():
runtime.HandleError(errors.New("lost connection to pod"))
}
return nil
}
// listenOnPort delegates listener creation and waits for connections on requested bind addresses.
// An error is raised based on address groups (default and localhost) and their failure modes
func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
var errors []error
failCounters := make(map[string]int, 2)
successCounters := make(map[string]int, 2)
for _, addr := range pf.addresses {
err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address)
if err != nil {
errors = append(errors, err)
failCounters[addr.failureMode]++
} else {
successCounters[addr.failureMode]++
}
}
if successCounters["all"] == 0 && failCounters["all"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
if failCounters["any"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
return nil
}
// listenOnPortAndAddress delegates listener creation and waits for new connections
// in the background.
func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error {
listener, err := pf.getListener(protocol, address, port)
if err != nil {
return err
}
pf.listeners = append(pf.listeners, listener)
go pf.waitForConnection(listener, *port)
return nil
}
// getListener creates a listener on the interface targeted by the given hostname on the given port with
// the given protocol. protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6
func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local))))
if err != nil {
return nil, fmt.Errorf("unable to create listener: Error %s", err)
}
listenerAddress := listener.Addr().String()
host, localPort, _ := net.SplitHostPort(listenerAddress)
localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
if err != nil {
fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
return nil, fmt.Errorf("error parsing local port: %s from %s (%s)", err, listenerAddress, host)
}
port.Local = uint16(localPortUInt)
if pf.out != nil {
fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote)
}
return listener, nil
}
// waitForConnection waits for new connections to listener and handles them in
// the background.
func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
for {
conn, err := listener.Accept()
if err != nil {
// TODO consider using something like https://github.com/hydrogen18/stoppableListener?
if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error accepting connection on port %d: %v", port.Local, err))
}
return
}
go pf.handleConnection(conn, port)
}
}
func (pf *PortForwarder) nextRequestID() int {
pf.requestIDLock.Lock()
defer pf.requestIDLock.Unlock()
id := pf.requestID
pf.requestID++
return id
}
// handleConnection copies data between the local connection and the stream to
// the remote server.
func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
defer conn.Close()
if pf.out != nil {
fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local)
}
requestID := pf.nextRequestID()
// create error stream
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeError)
headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote))
headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID))
errorStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
// we're not writing to this stream
errorStream.Close()
errorChan := make(chan error)
go func() {
message, err := ioutil.ReadAll(errorStream)
switch {
case err != nil:
errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
case len(message) > 0:
errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message))
}
close(errorChan)
}()
// create data stream
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
localError := make(chan struct{})
remoteDone := make(chan struct{})
go func() {
// Copy from the remote side to the local port.
if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
}
// inform the select below that the remote copy is done
close(remoteDone)
}()
go func() {
// inform server we're not sending any more data after copy unblocks
defer dataStream.Close()
// Copy from the local port to the remote side.
if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
// break out of the select below without waiting for the other copy to finish
close(localError)
}
}()
// wait for either a local->remote error or for copying from remote->local to finish
select {
case <-remoteDone:
case <-localError:
}
// always expect something on errorChan (it may be nil)
err = <-errorChan
if err != nil {
runtime.HandleError(err)
}
}
// Close stops all listeners of PortForwarder.
func (pf *PortForwarder) Close() {
// stop all listeners
for _, l := range pf.listeners {
if err := l.Close(); err != nil {
runtime.HandleError(fmt.Errorf("error closing listener: %v", err))
}
}
}
// GetPorts will return the ports that were forwarded; this can be used to
// retrieve the locally-bound port in cases where the input was port 0. This
// function will signal an error if the Ready channel is nil or if the
// listeners are not ready yet; this function will succeed after the Ready
// channel has been closed.
func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) {
if pf.Ready == nil {
return nil, fmt.Errorf("no Ready channel provided")
}
select {
case <-pf.Ready:
return pf.ports, nil
default:
return nil, fmt.Errorf("listeners not ready")
}
}
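A hedged sketch of the PortForwarder lifecycle follows; forwardOnce and the ":8080" port spec are illustrative, and the dialer is assumed to come from k8s.io/client-go/transport/spdy (next file).

package main

import (
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/client-go/tools/portforward"
)

func forwardOnce(dialer httpstream.Dialer) error {
	stopCh := make(chan struct{})
	readyCh := make(chan struct{})

	// ":8080" asks for a random local port forwarded to pod port 8080.
	fw, err := portforward.New(dialer, []string{":8080"}, stopCh, readyCh, os.Stdout, os.Stderr)
	if err != nil {
		return err
	}

	errCh := make(chan error, 1)
	go func() { errCh <- fw.ForwardPorts() }() // blocks until stopCh closes or the connection drops

	select {
	case err := <-errCh:
		return err // listener or upgrade failure before becoming ready
	case <-readyCh:
	}

	ports, err := fw.GetPorts() // safe now: the Ready channel has been closed
	if err != nil {
		return err
	}
	fmt.Printf("forwarding localhost:%d -> pod:%d\n", ports[0].Local, ports[0].Remote)

	close(stopCh) // tear the forwarder down
	return <-errCh
}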

94
vendor/k8s.io/client-go/transport/spdy/spdy.go generated vendored Normal file

@ -0,0 +1,94 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spdy
import (
"fmt"
"net/http"
"net/url"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
restclient "k8s.io/client-go/rest"
)
// Upgrader validates a response from the server after a SPDY upgrade.
type Upgrader interface {
// NewConnection validates the response and creates a new Connection.
NewConnection(resp *http.Response) (httpstream.Connection, error)
}
// RoundTripperFor returns a round tripper and upgrader to use with SPDY.
func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, nil, err
}
upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false)
wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper)
if err != nil {
return nil, nil, err
}
return wrapper, upgradeRoundTripper, nil
}
// dialer implements the httpstream.Dialer interface.
type dialer struct {
client *http.Client
upgrader Upgrader
method string
url *url.URL
}
var _ httpstream.Dialer = &dialer{}
// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY.
func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer {
return &dialer{
client: client,
upgrader: upgrader,
method: method,
url: url,
}
}
func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
req, err := http.NewRequest(d.method, d.url.String(), nil)
if err != nil {
return nil, "", fmt.Errorf("error creating request: %v", err)
}
return Negotiate(d.upgrader, d.client, req, protocols...)
}
// Negotiate opens a connection to a remote server and attempts to negotiate
// a SPDY connection. Upon success, it returns the connection and the protocol selected by
// the server. The client transport must use the upgradeRoundTripper - see RoundTripperFor.
func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) {
for i := range protocols {
req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i])
}
resp, err := client.Do(req)
if err != nil {
return nil, "", fmt.Errorf("error sending request: %v", err)
}
defer resp.Body.Close()
conn, err := upgrader.NewConnection(resp)
if err != nil {
return nil, "", err
}
return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil
}
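The goldpinger integration tests presumably reach pods through this stack; a hedged sketch of the usual wiring, mirroring how kubectl port-forward combines these packages, is below. The function name and the namespace/pod parameters are illustrative, and the test helpers that would consume the dialer are not part of this excerpt.

package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/transport/spdy"
)

// dialerForPod builds an httpstream.Dialer that targets the portforward
// subresource of a pod; pass it to portforward.New (previous file).
func dialerForPod(config *rest.Config, namespace, pod string) (httpstream.Dialer, error) {
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	roundTripper, upgrader, err := spdy.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	req := clientset.CoreV1().RESTClient().Post().
		Resource("pods").
		Namespace(namespace).
		Name(pod).
		SubResource("portforward")
	return spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, req.URL()), nil
}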

8
vendor/modules.txt vendored

@ -47,6 +47,9 @@ github.com/davecgh/go-spew/spew
github.com/docker/libnetwork/driverapi
github.com/docker/libnetwork/drivers/remote/api
github.com/docker/libnetwork/types
# github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
github.com/docker/spdystream
github.com/docker/spdystream/spdy
# github.com/evanphx/json-patch v4.5.0+incompatible
github.com/evanphx/json-patch
# github.com/fsnotify/fsnotify v1.4.7
@ -335,6 +338,8 @@ k8s.io/apimachinery/pkg/util/clock
k8s.io/apimachinery/pkg/util/diff
k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/framer
k8s.io/apimachinery/pkg/util/httpstream
k8s.io/apimachinery/pkg/util/httpstream/spdy
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/mergepatch
@ -351,6 +356,7 @@ k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/client-go v0.18.2
k8s.io/client-go/discovery
@ -551,10 +557,12 @@ k8s.io/client-go/tools/leaderelection
k8s.io/client-go/tools/leaderelection/resourcelock
k8s.io/client-go/tools/metrics
k8s.io/client-go/tools/pager
k8s.io/client-go/tools/portforward
k8s.io/client-go/tools/record
k8s.io/client-go/tools/record/util
k8s.io/client-go/tools/reference
k8s.io/client-go/transport
k8s.io/client-go/transport/spdy
k8s.io/client-go/util/cert
k8s.io/client-go/util/connrotation
k8s.io/client-go/util/flowcontrol