Refactor branch list using src-d/go-git and add GetRefs function (#133)

* Refactor branch list using src-d/go-git and add GetRefs function

* Fix copyright

* Fix to reuse ObjectType

* Add function to filter refs by prefix

* Fix import order

* Optimize if structure
Lauris BH 2018-11-16 00:38:33 +02:00 committed by Jonas Franz
Parent d945eda535
Commit 578ad8f125
456 changed files with 106080 additions and 753 deletions

237
Gopkg.lock (generated)

@@ -3,36 +3,259 @@
 [[projects]]
 branch = "master"
+digest = "1:b67d9fb93495f20c1650448395a35d8c8a05cc1744bdd9986f5bc69d5d114f22"
 name = "github.com/Unknwon/com"
 packages = ["."]
-revision = "7677a1d7c1137cd3dd5ba7a076d0c898a1ef4520"
+pruneopts = "UT"
+revision = "41959bdd855fb7db467f78865d5f9044507df1cd"
 [[projects]]
+digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
 name = "github.com/davecgh/go-spew"
 packages = ["spew"]
-revision = "6d212800a42e8ab5c146b8ace3490ee17e5225f9"
+pruneopts = "UT"
+revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+version = "v1.1.1"
 [[projects]]
digest = "1:b498b36dbb2b306d1c5205ee5236c9e60352be8f9eea9bf08186723a9f75b4f3"
name = "github.com/emirpasic/gods"
packages = [
"containers",
"lists",
"lists/arraylist",
"trees",
"trees/binaryheap",
"utils",
]
pruneopts = "UT"
revision = "1615341f118ae12f353cc8a983f35b584342c9b3"
version = "v1.12.0"
[[projects]]
branch = "master"
digest = "1:62fe3a7ea2050ecbd753a71889026f83d73329337ada66325cbafd5dea5f713d"
name = "github.com/jbenet/go-context"
packages = ["io"]
pruneopts = "UT"
revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
[[projects]]
digest = "1:ae5f4d0779a45e2cb3075d8b3ece6c623e171407f4aac83521392ff06d188871"
name = "github.com/kevinburke/ssh_config"
packages = ["."]
pruneopts = "UT"
revision = "81db2a75821ed34e682567d48be488a1c3121088"
version = "0.5"
[[projects]]
branch = "master"
digest = "1:d1d928e677e2ba3ba94c07b4ed45196cfda312b0131c4bf6d3a8287113be52a2"
name = "github.com/mcuadros/go-version" name = "github.com/mcuadros/go-version"
packages = ["."] packages = ["."]
revision = "257f7b9a7d87427c8d7f89469a5958d57f8abd7c" pruneopts = "UT"
revision = "6d5863ca60fa6fe914b5fd43ed8533d7567c5b0b"
[[projects]] [[projects]]
digest = "1:78bbb1ba5b7c3f2ed0ea1eab57bdd3859aec7e177811563edc41198a760b06af"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "UT"
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
version = "v1.0.0"
[[projects]]
digest = "1:b2ee62e09bec113cf086d2ce0769efcc7bf79481aba8373fd8f7884e94df3462"
name = "github.com/pelletier/go-buffruneio"
packages = ["."]
pruneopts = "UT"
revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
version = "v0.2.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib" name = "github.com/pmezard/go-difflib"
packages = ["difflib"] packages = ["difflib"]
revision = "d8ed2627bdf02c080bf22230dbb337003b7aba2d" pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]] [[projects]]
digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04"
name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"]
pruneopts = "UT"
revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
version = "v1.0.0"
[[projects]]
digest = "1:e4ed0afd67bf7be353921665cdac50834c867ff1bba153efc0745b755a7f5905"
name = "github.com/src-d/gcfg"
packages = [
".",
"scanner",
"token",
"types",
]
pruneopts = "UT"
revision = "1ac3a1ac202429a54835fe8408a92880156b489d"
version = "v1.4.0"
[[projects]]
digest = "1:c40d65817cdd41fac9aa7af8bed56927bb2d6d47e4fea566a74880f5c2b1c41e"
name = "github.com/stretchr/testify" name = "github.com/stretchr/testify"
packages = [ packages = [
"assert", "assert",
"require" "require",
] ]
revision = "976c720a22c8eb4eb6a0b4348ad85ad12491a506" pruneopts = "UT"
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
digest = "1:afc0b8068986a01e2d8f449917829753a54f6bd4d1265c2b4ad9cba75560020f"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
pruneopts = "UT"
revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:9f79783fbed381eae1a46d4a1f759e57063ffa4bcbd254d3ccc544115d0db06b"
name = "golang.org/x/crypto"
packages = [
"cast5",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"openpgp",
"openpgp/armor",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"poly1305",
"ssh",
"ssh/agent",
"ssh/knownhosts",
]
pruneopts = "UT"
revision = "e4dc69e5b2fd71dcaf8bd5d054eb936deb78d1fa"
[[projects]]
branch = "master"
digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70"
name = "golang.org/x/net"
packages = ["context"]
pruneopts = "UT"
revision = "03003ca0c849e57b6ea29a4bab8d3cb6e4d568fe"
[[projects]]
branch = "master"
digest = "1:68a4638398cf8c864a6aa50c8853ebe7935f939aa3acf7e44cb35b67852e9fef"
name = "golang.org/x/sys"
packages = ["windows"]
pruneopts = "UT"
revision = "66b7b1311ac80bbafcd2daeef9a5e6e2cd1e2399"
[[projects]]
digest = "1:8029e9743749d4be5bc9f7d42ea1659471767860f0cdc34d37c3111bd308a295"
name = "golang.org/x/text"
packages = [
"internal/gen",
"internal/triegen",
"internal/ucd",
"transform",
"unicode/cldr",
"unicode/norm",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
digest = "1:866df945fc92cd221d2b384dca7de5f3131a6113b9f72a7a8019ae5046abe828"
name = "gopkg.in/src-d/go-billy.v4"
packages = [
".",
"helper/chroot",
"helper/polyfill",
"osfs",
"util",
]
pruneopts = "UT"
revision = "982626487c60a5252e7d0b695ca23fb0fa2fd670"
version = "v4.3.0"
[[projects]]
digest = "1:e625251525d1f5db4be16d133171b031876ff4e29d8546264d8eea8ab906d977"
name = "gopkg.in/src-d/go-git.v4"
packages = [
".",
"config",
"internal/revision",
"plumbing",
"plumbing/cache",
"plumbing/filemode",
"plumbing/format/config",
"plumbing/format/diff",
"plumbing/format/gitignore",
"plumbing/format/idxfile",
"plumbing/format/index",
"plumbing/format/objfile",
"plumbing/format/packfile",
"plumbing/format/pktline",
"plumbing/object",
"plumbing/protocol/packp",
"plumbing/protocol/packp/capability",
"plumbing/protocol/packp/sideband",
"plumbing/revlist",
"plumbing/storer",
"plumbing/transport",
"plumbing/transport/client",
"plumbing/transport/file",
"plumbing/transport/git",
"plumbing/transport/http",
"plumbing/transport/internal/common",
"plumbing/transport/server",
"plumbing/transport/ssh",
"storage",
"storage/filesystem",
"storage/filesystem/dotgit",
"storage/memory",
"utils/binary",
"utils/diff",
"utils/ioutil",
"utils/merkletrie",
"utils/merkletrie/filesystem",
"utils/merkletrie/index",
"utils/merkletrie/internal/frame",
"utils/merkletrie/noder",
]
pruneopts = "UT"
revision = "cd64b4d630b6c2d2b3d72e9615e14f9d58bb5787"
version = "v4.7.1"
[[projects]]
digest = "1:78d374b493e747afa9fbb2119687e3740a7fb8d0ebabddfef0a012593aaecbb3"
name = "gopkg.in/warnings.v0"
packages = ["."]
pruneopts = "UT"
revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
version = "v0.1.2"
 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "d37e90051cd58dd1f99f808626e82d64eac47f2b2334c6fcb9179bfcf2814622"
+input-imports = [
+"github.com/Unknwon/com",
+"github.com/mcuadros/go-version",
+"github.com/stretchr/testify/assert",
+"github.com/stretchr/testify/require",
+"gopkg.in/src-d/go-git.v4",
+]
 solver-name = "gps-cdcl"
 solver-version = 1

Gopkg.toml

@@ -32,3 +32,7 @@
 [prune]
 go-tests = true
 unused-packages = true
+
+[[constraint]]
+name = "gopkg.in/src-d/go-git.v4"
+version = "4.7.1"

18
ref.go Normal file

@@ -0,0 +1,18 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package git

// Reference represents a Git ref.
type Reference struct {
	Name   string
	repo   *Repository
	Object SHA1 // The id of this commit object
	Type   string
}

// Commit returns the commit of the reference
func (ref *Reference) Commit() (*Commit, error) {
	return ref.repo.getCommit(ref.Object)
}

repo_branch.go

@@ -1,4 +1,5 @@
 // Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
@@ -7,6 +8,9 @@ package git
 import (
 	"fmt"
 	"strings"
+
+	"gopkg.in/src-d/go-git.v4"
+	"gopkg.in/src-d/go-git.v4/plumbing"
 )
 
 // BranchPrefix base dir of the branch information file store on git
@@ -60,16 +64,23 @@ func (repo *Repository) SetDefaultBranch(name string) error {
 
 // GetBranches returns all branches of the repository.
 func (repo *Repository) GetBranches() ([]string, error) {
-	stdout, err := NewCommand("for-each-ref", "--format=%(refname)", BranchPrefix).RunInDir(repo.Path)
+	r, err := git.PlainOpen(repo.Path)
 	if err != nil {
 		return nil, err
 	}
-	refs := strings.Split(stdout, "\n")
-	branches := make([]string, len(refs)-1)
-	for i, ref := range refs[:len(refs)-1] {
-		branches[i] = strings.TrimPrefix(ref, BranchPrefix)
+
+	branchIter, err := r.Branches()
+	if err != nil {
+		return nil, err
 	}
+	branches := make([]string, 0)
+	if err = branchIter.ForEach(func(branch *plumbing.Reference) error {
+		branches = append(branches, branch.Name().Short())
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
 	return branches, nil
 }
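For orientation, here is a minimal sketch of how the reworked GetBranches is called from outside this package. The import path code.gitea.io/git and the repository location are illustrative assumptions, not part of this diff.

```go
package main

import (
	"fmt"
	"log"

	"code.gitea.io/git" // assumed import path for the module touched by this diff
)

func main() {
	// Placeholder path to a bare repository, analogous to repo1_bare in the tests.
	repo, err := git.OpenRepository("/tmp/repo1_bare.git")
	if err != nil {
		log.Fatal(err)
	}

	// GetBranches now walks go-git branch references instead of parsing
	// `git for-each-ref` output.
	branches, err := repo.GetBranches()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range branches {
		fmt.Println(name) // short names such as "master"
	}
}
```

Listing branches through go-git's in-process reference iterator avoids spawning a `git for-each-ref` subprocess on every call.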

39
repo_branch_test.go Normal file

@ -0,0 +1,39 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
func TestRepository_GetBranches(t *testing.T) {
bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
bareRepo1, err := OpenRepository(bareRepo1Path)
assert.NoError(t, err)
branches, err := bareRepo1.GetBranches()
assert.NoError(t, err)
assert.Len(t, branches, 3)
assert.ElementsMatch(t, []string{"branch1", "branch2", "master"}, branches)
}
func BenchmarkRepository_GetBranches(b *testing.B) {
bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
bareRepo1, err := OpenRepository(bareRepo1Path)
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
_, err := bareRepo1.GetBranches()
if err != nil {
b.Fatal(err)
}
}
}


@@ -11,7 +11,7 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-func TestRepository_GetBranches(t *testing.T) {
+func TestRepository_GetCommitBranches(t *testing.T) {
 	bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
 	bareRepo1, err := OpenRepository(bareRepo1Path)
 	assert.NoError(t, err)

51
repo_ref.go Normal file

@@ -0,0 +1,51 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package git

import (
	"strings"

	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

// GetRefs returns all references of the repository.
func (repo *Repository) GetRefs() ([]*Reference, error) {
	return repo.GetRefsFiltered("")
}

// GetRefsFiltered returns all references of the repository that match the given pattern exactly or start with it.
func (repo *Repository) GetRefsFiltered(pattern string) ([]*Reference, error) {
	r, err := git.PlainOpen(repo.Path)
	if err != nil {
		return nil, err
	}

	refsIter, err := r.References()
	if err != nil {
		return nil, err
	}
	refs := make([]*Reference, 0)
	if err = refsIter.ForEach(func(ref *plumbing.Reference) error {
		if ref.Name() != plumbing.HEAD && !ref.Name().IsRemote() &&
			(pattern == "" || strings.HasPrefix(ref.Name().String(), pattern)) {
			r := &Reference{
				Name:   ref.Name().String(),
				Object: SHA1(ref.Hash()),
				Type:   string(ObjectCommit),
				repo:   repo,
			}
			if ref.Name().IsTag() {
				r.Type = string(ObjectTag)
			}
			refs = append(refs, r)
		}
		return nil
	}); err != nil {
		return nil, err
	}

	return refs, nil
}
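A hedged usage sketch for the new GetRefs/GetRefsFiltered API, tying it back to the Reference type added in ref.go. The import path and the repository path are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"

	"code.gitea.io/git" // assumed import path for the module touched by this diff
)

func main() {
	repo, err := git.OpenRepository("/tmp/repo1_bare.git") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// All refs except HEAD and remote-tracking refs, as filtered above.
	refs, err := repo.GetRefs()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total refs:", len(refs))

	// Only tags, using the prefix filter added in this commit.
	tags, err := repo.GetRefsFiltered(git.TagPrefix)
	if err != nil {
		log.Fatal(err)
	}
	for _, ref := range tags {
		commit, err := ref.Commit() // resolve via Reference.Commit from ref.go
		if err != nil {
			log.Fatal(err)
		}
		_ = commit // the full commit object is available from here
		fmt.Println(ref.Name, ref.Type, ref.Object.String())
	}
}
```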

49
repo_ref_test.go Normal file

@ -0,0 +1,49 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
func TestRepository_GetRefs(t *testing.T) {
bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
bareRepo1, err := OpenRepository(bareRepo1Path)
assert.NoError(t, err)
refs, err := bareRepo1.GetRefs()
assert.NoError(t, err)
assert.Len(t, refs, 4)
expectedRefs := []string{
BranchPrefix + "branch1",
BranchPrefix + "branch2",
BranchPrefix + "master",
TagPrefix + "test",
}
for _, ref := range refs {
assert.Contains(t, expectedRefs, ref.Name)
}
}
func TestRepository_GetRefsFiltered(t *testing.T) {
bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
bareRepo1, err := OpenRepository(bareRepo1Path)
assert.NoError(t, err)
refs, err := bareRepo1.GetRefsFiltered(TagPrefix)
assert.NoError(t, err)
if assert.Len(t, refs, 1) {
assert.Equal(t, TagPrefix+"test", refs[0].Name)
assert.Equal(t, "tag", refs[0].Type)
assert.Equal(t, "3ad28a9149a2864384548f3d17ed7f38014c9e8a", refs[0].Object.String())
}
}

53
vendor/github.com/Unknwon/com/dir.go (generated, vendored)

@@ -32,7 +32,7 @@ func IsDir(dir string) bool {
 	return f.IsDir()
 }
 
-func statDir(dirPath, recPath string, includeDir, isDirOnly bool) ([]string, error) {
+func statDir(dirPath, recPath string, includeDir, isDirOnly, followSymlinks bool) ([]string, error) {
 	dir, err := os.Open(dirPath)
 	if err != nil {
 		return nil, err
@@ -56,13 +56,29 @@ func statDir(dirPath, recPath string, includeDir, isDirOnly bool) ([]string, err
 			if includeDir {
 				statList = append(statList, relPath+"/")
 			}
-			s, err := statDir(curPath, relPath, includeDir, isDirOnly)
+			s, err := statDir(curPath, relPath, includeDir, isDirOnly, followSymlinks)
 			if err != nil {
 				return nil, err
 			}
 			statList = append(statList, s...)
 		} else if !isDirOnly {
 			statList = append(statList, relPath)
+		} else if followSymlinks && fi.Mode()&os.ModeSymlink != 0 {
+			link, err := os.Readlink(curPath)
+			if err != nil {
+				return nil, err
+			}
+
+			if IsDir(link) {
+				if includeDir {
+					statList = append(statList, relPath+"/")
+				}
+				s, err := statDir(curPath, relPath, includeDir, isDirOnly, followSymlinks)
+				if err != nil {
+					return nil, err
+				}
+				statList = append(statList, s...)
+			}
 		}
 	}
 	return statList, nil
@@ -84,7 +100,26 @@ func StatDir(rootPath string, includeDir ...bool) ([]string, error) {
 	if len(includeDir) >= 1 {
 		isIncludeDir = includeDir[0]
 	}
-	return statDir(rootPath, "", isIncludeDir, false)
+	return statDir(rootPath, "", isIncludeDir, false, false)
+}
+
+// LstatDir gathers information of given directory by depth-first.
+// It returns slice of file list, follows symbolic links and includes subdirectories if enabled;
+// it returns error and nil slice when error occurs in underlying functions,
+// or given path is not a directory or does not exist.
+//
+// Slice does not include given path itself.
+// If subdirectories is enabled, they will have suffix '/'.
+func LstatDir(rootPath string, includeDir ...bool) ([]string, error) {
+	if !IsDir(rootPath) {
+		return nil, errors.New("not a directory or does not exist: " + rootPath)
+	}
+
+	isIncludeDir := false
+	if len(includeDir) >= 1 {
+		isIncludeDir = includeDir[0]
+	}
+	return statDir(rootPath, "", isIncludeDir, false, true)
 }
 
 // GetAllSubDirs returns all subdirectories of given root path.
@@ -93,7 +128,17 @@ func GetAllSubDirs(rootPath string) ([]string, error) {
 	if !IsDir(rootPath) {
 		return nil, errors.New("not a directory or does not exist: " + rootPath)
 	}
-	return statDir(rootPath, "", true, true)
+	return statDir(rootPath, "", true, true, false)
+}
+
+// LgetAllSubDirs returns all subdirectories of given root path, including
+// following symbolic links, if any.
+// Slice does not include given path itself.
+func LgetAllSubDirs(rootPath string) ([]string, error) {
+	if !IsDir(rootPath) {
+		return nil, errors.New("not a directory or does not exist: " + rootPath)
+	}
+	return statDir(rootPath, "", true, true, true)
 }
 
 // GetFileListBySuffix returns an ordered list of file paths.
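The vendored update above adds symlink-following variants to Unknwon/com. A small hedged sketch of how they might be called; the directory path is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Unknwon/com"
)

func main() {
	// LstatDir behaves like StatDir but also descends into directories
	// reached through symbolic links; the optional flag includes directory
	// entries (with a trailing "/") in the returned slice.
	entries, err := com.LstatDir("/tmp/example", true) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e)
	}

	// LgetAllSubDirs lists only subdirectories, following symlinks as well.
	dirs, err := com.LgetAllSubDirs("/tmp/example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(dirs), "subdirectories")
}
```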

4
vendor/github.com/Unknwon/com/math.go (generated, vendored)

@@ -14,12 +14,12 @@
 package com
 
 // PowInt is int type of math.Pow function.
 func PowInt(x int, y int) int {
 	if y <= 0 {
 		return 1
 	} else {
-		if y % 2 == 0 {
+		if y%2 == 0 {
 			sqrt := PowInt(x, y/2)
 			return sqrt * sqrt
 		} else {

4
vendor/github.com/davecgh/go-spew/LICENSE (generated, vendored)

@@ -1,8 +1,8 @@
 ISC License
 
-Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
 
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
 copyright notice and this permission notice appear in all copies.

189
vendor/github.com/davecgh/go-spew/spew/bypass.go (generated, vendored)

@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -16,7 +16,9 @@
 // when the code is not running on Google App Engine, compiled by GopherJS, and
 // "-tags safe" is not added to the go build command line. The "disableunsafe"
 // tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
 
 package spew
@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil)) ptrSize = unsafe.Sizeof((*byte)(nil))
) )
var ( type flag uintptr
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. The are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the var (
// reflect package uses internally to track kind information. // flagRO indicates whether the value field of a reflect.Value
// // is read-only.
// flagRO indicates whether or not the value field of a reflect.Value is flagRO flag
// read-only.
// // flagAddr indicates whether the address of the reflect.Value's
// flagIndir indicates whether the value field of a reflect.Value is // value may be taken.
// the actual data or a pointer to the data. flagAddr flag
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
) )
func init() { // flagKindMask holds the bits that make up the kind
// Older versions of reflect.Value stored small integers directly in the // part of the flags field. In all the supported versions,
// ptr field (which is named val in the older versions). Versions // it is in the lower 5 bits.
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named const flagKindMask = flag(0x1f)
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low // Different versions of Go have used different
// order bits are the kind. This code extracts the kind from the flags // bit layouts for the flags type. This table
// field and ensures it's the correct type. When it's not, the flag // records the known combinations.
// order has been changed to the newer format, so the flags are updated var okFlags = []struct {
// accordingly. ro, addr flag
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) }{{
upfv := *(*uintptr)(upf) // From Go 1.4 to 1.5
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) ro: 1 << 5,
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { addr: 1 << 7,
flagKindShift = 0 }, {
flagRO = 1 << 5 // Up to Go tip.
flagIndir = 1 << 6 ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
// Commit adf9b30e5594 modified the flags to separate the var flagValOffset = func() uintptr {
// flagRO flag into two bits which specifies whether or not the field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
// field is embedded. This causes flagIndir to move over a bit if !ok {
// and means that flagRO is the combination of either of the panic("reflect.Value has no flag field")
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
} }
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
} }
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses // unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error // This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and // interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields. // inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { func unsafeReflectValue(v reflect.Value) reflect.Value {
indirects := 1 if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
vt := v.Type() return v
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) }
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) flagFieldPtr := flagField(&v)
if rvf&flagIndir != 0 { *flagFieldPtr &^= flagRO
vt = reflect.PtrTo(v.Type()) *flagFieldPtr |= flagAddr
indirects++ return v
} else if offsetScalar != 0 { }
// The value is in the scalar field when it's not one of the
// reference types. // Sanity checks against future reflect package changes
switch vt.Kind() { // to the type or semantics of the Value.flag field.
case reflect.Uintptr: func init() {
case reflect.Chan: field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
case reflect.Func: if !ok {
case reflect.Map: panic("reflect.Value has no flag field")
case reflect.Ptr: }
case reflect.UnsafePointer: if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
default: panic("reflect.Value flag field has changed kind")
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + }
offsetScalar) type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
} }
} }
panic("reflect.Value read-only flag has changed semantics")
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
} }

4
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go (generated, vendored)

@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -16,7 +16,7 @@
 // when the code is running on Google App Engine, compiled by GopherJS, or
 // "-tags safe" is added to the go build command line. The "disableunsafe"
 // tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4
 
 package spew

4
vendor/github.com/davecgh/go-spew/spew/common.go (generated, vendored)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
 	w.Write(closeParenBytes)
 }
 
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
 // prefix to Writer w.
 func printHexPtr(w io.Writer, p uintptr) {
 	// Null pointer.

11
vendor/github.com/davecgh/go-spew/spew/config.go (generated, vendored)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -67,6 +67,15 @@ type ConfigState struct {
 	// Google App Engine or with the "safe" build tag specified.
 	DisablePointerMethods bool
 
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
 	// ContinueOnMethod specifies whether or not recursion should continue once
 	// a custom error or Stringer interface is invoked. The default, false,
 	// means it will print the results of invoking the custom error or Stringer

11
vendor/github.com/davecgh/go-spew/spew/doc.go (generated, vendored)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -91,6 +91,15 @@ The following configuration options are available:
 		which only accept pointer receivers from non-pointer variables.
 		Pointer method invocation is enabled by default.
 
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
 	* ContinueOnMethod
 		Enables recursion into types after invoking error and Stringer interface
 		methods. Recursion after method invocation is disabled by default.

18
vendor/github.com/davecgh/go-spew/spew/dump.go (generated, vendored)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -35,16 +35,16 @@ var (
 	// cCharRE is a regular expression that matches a cgo char.
 	// It is used to detect character arrays to hexdump them.
-	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
 
 	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
 	// char. It is used to detect unsigned character arrays to hexdump
 	// them.
-	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
 
 	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
 	// It is used to detect uint8_t arrays to hexdump them.
-	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
 )
 
 // dumpState contains information about the state of a dump operation.
@@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
 	d.w.Write(closeParenBytes)
 
 	// Display pointer information.
-	if len(pointerChain) > 0 {
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
 		d.w.Write(openParenBytes)
 		for i, addr := range pointerChain {
 			if i > 0 {
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
 	// Display dereferenced value.
 	d.w.Write(openParenBytes)
 	switch {
-	case nilFound == true:
+	case nilFound:
 		d.w.Write(nilAngleBytes)
 
-	case cycleFound == true:
+	case cycleFound:
 		d.w.Write(circularBytes)
 
 	default:
@@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
 	case reflect.Map, reflect.String:
 		valueLen = v.Len()
 	}
-	if valueLen != 0 || valueCap != 0 {
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
 		d.w.Write(openParenBytes)
 		if valueLen != 0 {
 			d.w.Write(lenEqualsBytes)
 			printInt(d.w, int64(valueLen), 10)
 		}
-		if valueCap != 0 {
+		if !d.cs.DisableCapacities && valueCap != 0 {
 			if valueLen != 0 {
 				d.w.Write(spaceBytes)
 			}

6
vendor/github.com/davecgh/go-spew/spew/format.go (generated, vendored)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
 	// Display dereferenced value.
 	switch {
-	case nilFound == true:
+	case nilFound:
 		f.fs.Write(nilAngleBytes)
 
-	case cycleFound == true:
+	case cycleFound:
 		f.fs.Write(circularShortBytes)
 
 	default:

2
vendor/github.com/davecgh/go-spew/spew/spew.go (generated, vendored)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

41
vendor/github.com/emirpasic/gods/LICENSE (generated, vendored) Normal file

@ -0,0 +1,41 @@
Copyright (c) 2015, Emir Pasic
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------
AVL Tree:
Copyright (c) 2017 Benjamin Scher Purcell <benjapurcell@gmail.com>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

35
vendor/github.com/emirpasic/gods/containers/containers.go (generated, vendored) Normal file

@ -0,0 +1,35 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package containers provides core interfaces and functions for data structures.
//
// Container is the base interface for all data structures to implement.
//
// Iterators provide stateful iterators.
//
// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
//
// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
package containers
import "github.com/emirpasic/gods/utils"
// Container is base interface that all data structures implement.
type Container interface {
Empty() bool
Size() int
Clear()
Values() []interface{}
}
// GetSortedValues returns sorted container's elements with respect to the passed comparator.
// Does not affect the ordering of elements within the container.
func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
values := container.Values()
if len(values) < 2 {
return values
}
utils.Sort(values, comparator)
return values
}

61
vendor/github.com/emirpasic/gods/containers/enumerable.go (generated, vendored) Normal file

@ -0,0 +1,61 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
type EnumerableWithIndex interface {
// Each calls the given function once for each element, passing that element's index and value.
Each(func(index int, value interface{}))
// Map invokes the given function once for each element and returns a
// container containing the values returned by the given function.
// TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(index int, value interface{}) interface{}) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(index int, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(index int, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(index int, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (index,value) for which the function is true or -1,nil otherwise
// if no element matches the criteria.
Find(func(index int, value interface{}) bool) (int, interface{})
}
// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs.
type EnumerableWithKey interface {
// Each calls the given function once for each element, passing that element's key and value.
Each(func(key interface{}, value interface{}))
// Map invokes the given function once for each element and returns a container
// containing the values returned by the given function as key/value pairs.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(key interface{}, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(key interface{}, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(key interface{}, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (key,value) for which the function is true or nil,nil otherwise if no element
// matches the criteria.
Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
}

109
vendor/github.com/emirpasic/gods/containers/iterator.go (generated, vendored) Normal file

@ -0,0 +1,109 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// IteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index.
type IteratorWithIndex interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Index returns the current element's index.
// Does not modify the state of the iterator.
Index() int
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
First() bool
}
// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
type IteratorWithKey interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Key returns the current element's key.
// Does not modify the state of the iterator.
Key() interface{}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
First() bool
}
// ReverseIteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index.
//
// Essentially it is the same as IteratorWithIndex, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
//
// End() function to move the iterator past the last element (one-past-the-end).
type ReverseIteratorWithIndex interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithIndex
}
// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
//
// Essentially it is the same as IteratorWithKey, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
type ReverseIteratorWithKey interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithKey
}

17
vendor/github.com/emirpasic/gods/containers/serialization.go (generated, vendored) Normal file

@ -0,0 +1,17 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// JSONSerializer provides JSON serialization
type JSONSerializer interface {
// ToJSON outputs the JSON representation of containers's elements.
ToJSON() ([]byte, error)
}
// JSONDeserializer provides JSON deserialization
type JSONDeserializer interface {
// FromJSON populates containers's elements from the input JSON representation.
FromJSON([]byte) error
}

228
vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go (generated, vendored) Normal file

@ -0,0 +1,228 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package arraylist implements the array list.
//
// Structure is not thread safe.
//
// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
package arraylist
import (
"fmt"
"strings"
"github.com/emirpasic/gods/lists"
"github.com/emirpasic/gods/utils"
)
func assertListImplementation() {
var _ lists.List = (*List)(nil)
}
// List holds the elements in a slice
type List struct {
elements []interface{}
size int
}
const (
growthFactor = float32(2.0) // growth by 100%
shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
)
// New instantiates a new list and adds the passed values, if any, to the list
func New(values ...interface{}) *List {
list := &List{}
if len(values) > 0 {
list.Add(values...)
}
return list
}
// Add appends a value at the end of the list
func (list *List) Add(values ...interface{}) {
list.growBy(len(values))
for _, value := range values {
list.elements[list.size] = value
list.size++
}
}
// Get returns the element at index.
// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
func (list *List) Get(index int) (interface{}, bool) {
if !list.withinRange(index) {
return nil, false
}
return list.elements[index], true
}
// Remove removes the element at the given index from the list.
func (list *List) Remove(index int) {
if !list.withinRange(index) {
return
}
list.elements[index] = nil // cleanup reference
copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
list.size--
list.shrink()
}
// Contains checks if elements (one or more) are present in the set.
// All elements have to be present in the set for the method to return true.
// Performance time complexity of n^2.
// Returns true if no arguments are passed at all, i.e. set is always super-set of empty set.
func (list *List) Contains(values ...interface{}) bool {
for _, searchValue := range values {
found := false
for _, element := range list.elements {
if element == searchValue {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// Values returns all elements in the list.
func (list *List) Values() []interface{} {
newElements := make([]interface{}, list.size, list.size)
copy(newElements, list.elements[:list.size])
return newElements
}
//IndexOf returns index of provided element
func (list *List) IndexOf(value interface{}) int {
if list.size == 0 {
return -1
}
for index, element := range list.elements {
if element == value {
return index
}
}
return -1
}
// Empty returns true if list does not contain any elements.
func (list *List) Empty() bool {
return list.size == 0
}
// Size returns number of elements within the list.
func (list *List) Size() int {
return list.size
}
// Clear removes all elements from the list.
func (list *List) Clear() {
list.size = 0
list.elements = []interface{}{}
}
// Sort sorts values (in-place) using.
func (list *List) Sort(comparator utils.Comparator) {
if len(list.elements) < 2 {
return
}
utils.Sort(list.elements[:list.size], comparator)
}
// Swap swaps the two values at the specified positions.
func (list *List) Swap(i, j int) {
if list.withinRange(i) && list.withinRange(j) {
list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
}
}
// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
// Does not do anything if position is negative or bigger than list's size
// Note: position equal to list's size is valid, i.e. append.
func (list *List) Insert(index int, values ...interface{}) {
if !list.withinRange(index) {
// Append
if index == list.size {
list.Add(values...)
}
return
}
l := len(values)
list.growBy(l)
list.size += l
copy(list.elements[index+l:], list.elements[index:list.size-l])
copy(list.elements[index:], values)
}
// Set the value at specified index
// Does not do anything if position is negative or bigger than list's size
// Note: position equal to list's size is valid, i.e. append.
func (list *List) Set(index int, value interface{}) {
if !list.withinRange(index) {
// Append
if index == list.size {
list.Add(value)
}
return
}
list.elements[index] = value
}
// String returns a string representation of container
func (list *List) String() string {
str := "ArrayList\n"
values := []string{}
for _, value := range list.elements[:list.size] {
values = append(values, fmt.Sprintf("%v", value))
}
str += strings.Join(values, ", ")
return str
}
// Check that the index is within bounds of the list
func (list *List) withinRange(index int) bool {
return index >= 0 && index < list.size
}
func (list *List) resize(cap int) {
newElements := make([]interface{}, cap, cap)
copy(newElements, list.elements)
list.elements = newElements
}
// Expand the array if necessary, i.e. capacity will be reached if we add n elements
func (list *List) growBy(n int) {
// When capacity is reached, grow by a factor of growthFactor and add number of elements
currentCapacity := cap(list.elements)
if list.size+n >= currentCapacity {
newCapacity := int(growthFactor * float32(currentCapacity+n))
list.resize(newCapacity)
}
}
// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
func (list *List) shrink() {
if shrinkFactor == 0.0 {
return
}
// Shrink when size is at shrinkFactor * capacity
currentCapacity := cap(list.elements)
if list.size <= int(float32(currentCapacity)*shrinkFactor) {
list.resize(list.size)
}
}
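Since the arraylist package above is newly vendored, here is a brief hedged sketch of its API as defined in this file; utils.StringComparator comes from the gods/utils package listed in Gopkg.lock but not shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

func main() {
	list := arraylist.New("b", "a", "c") // variadic constructor defined above
	list.Add("d")
	list.Sort(utils.StringComparator) // in-place sort using a gods comparator

	if v, ok := list.Get(0); ok {
		fmt.Println("first element:", v) // "a" after sorting
	}
	fmt.Println(list.Contains("c", "d")) // true: all values present
	fmt.Println(list.IndexOf("z"))       // -1: not found

	// Stateful iteration as defined in iterator.go.
	it := list.Iterator()
	for it.Next() {
		fmt.Println(it.Index(), it.Value())
	}
}
```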

79
vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go (generated, vendored) Normal file

@ -0,0 +1,79 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import "github.com/emirpasic/gods/containers"
func assertEnumerableImplementation() {
var _ containers.EnumerableWithIndex = (*List)(nil)
}
// Each calls the given function once for each element, passing that element's index and value.
func (list *List) Each(f func(index int, value interface{})) {
iterator := list.Iterator()
for iterator.Next() {
f(iterator.Index(), iterator.Value())
}
}
// Map invokes the given function once for each element and returns a
// container containing the values returned by the given function.
func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
newList := &List{}
iterator := list.Iterator()
for iterator.Next() {
newList.Add(f(iterator.Index(), iterator.Value()))
}
return newList
}
// Select returns a new container containing all elements for which the given function returns a true value.
func (list *List) Select(f func(index int, value interface{}) bool) *List {
newList := &List{}
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
newList.Add(iterator.Value())
}
}
return newList
}
// Any passes each element of the collection to the given function and
// returns true if the function ever returns true for any element.
func (list *List) Any(f func(index int, value interface{}) bool) bool {
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
return true
}
}
return false
}
// All passes each element of the collection to the given function and
// returns true if the function returns true for all elements.
func (list *List) All(f func(index int, value interface{}) bool) bool {
iterator := list.Iterator()
for iterator.Next() {
if !f(iterator.Index(), iterator.Value()) {
return false
}
}
return true
}
// Find passes each element of the container to the given function and returns
// the first (index, value) for which the function is true, or (-1, nil)
// if no element matches the criteria.
func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
return iterator.Index(), iterator.Value()
}
}
return -1, nil
}
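A brief sketch of how the enumerable helpers above compose (illustrative values, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New()
	list.Add(1, 2, 3, 4)

	// Each: iterate for side effects only.
	list.Each(func(index int, value interface{}) {
		fmt.Println(index, value)
	})

	// Select + Map: keep even values, then double them.
	doubledEvens := list.
		Select(func(_ int, value interface{}) bool { return value.(int)%2 == 0 }).
		Map(func(_ int, value interface{}) interface{} { return value.(int) * 2 })
	fmt.Println(doubledEvens.Values()) // [4 8]

	// Find: first value greater than 2.
	index, value := list.Find(func(_ int, value interface{}) bool { return value.(int) > 2 })
	fmt.Println(index, value) // 2 3
}
```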

83
vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go generated vendored Normal file

@@ -0,0 +1,83 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import "github.com/emirpasic/gods/containers"
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
}
// Iterator holding the iterator's state
type Iterator struct {
list *List
index int
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
func (list *List) Iterator() Iterator {
return Iterator{list: list, index: -1}
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
if iterator.index < iterator.list.size {
iterator.index++
}
return iterator.list.withinRange(iterator.index)
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
if iterator.index >= 0 {
iterator.index--
}
return iterator.list.withinRange(iterator.index)
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
return iterator.list.elements[iterator.index]
}
// Index returns the current element's index.
// Does not modify the state of the iterator.
func (iterator *Iterator) Index() int {
return iterator.index
}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.index = -1
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.index = iterator.list.size
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
iterator.Begin()
return iterator.Next()
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
iterator.End()
return iterator.Prev()
}
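For reference, a minimal forward and reverse iteration loop using this iterator (an illustrative sketch, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New()
	list.Add("x", "y", "z")

	// Forward iteration: the iterator starts one-before-first, so Next()
	// must be called before the first Index()/Value().
	it := list.Iterator()
	for it.Next() {
		fmt.Println(it.Index(), it.Value())
	}

	// Reverse iteration: move past the end, then walk backwards with Prev().
	it.End()
	for it.Prev() {
		fmt.Println(it.Index(), it.Value())
	}
}
```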

29
vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go generated vendored Normal file

@@ -0,0 +1,29 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import (
"encoding/json"
"github.com/emirpasic/gods/containers"
)
func assertSerializationImplementation() {
var _ containers.JSONSerializer = (*List)(nil)
var _ containers.JSONDeserializer = (*List)(nil)
}
// ToJSON outputs the JSON representation of list's elements.
func (list *List) ToJSON() ([]byte, error) {
return json.Marshal(list.elements[:list.size])
}
// FromJSON populates list's elements from the input JSON representation.
func (list *List) FromJSON(data []byte) error {
err := json.Unmarshal(data, &list.elements)
if err == nil {
list.size = len(list.elements)
}
return err
}
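A short JSON round trip using the two methods above (illustrative sketch, error handling trimmed to the essentials):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	src := arraylist.New()
	src.Add("a", "b", "c")

	data, err := src.ToJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // ["a","b","c"]

	dst := arraylist.New()
	if err := dst.FromJSON(data); err != nil {
		panic(err)
	}
	fmt.Println(dst.Values()) // [a b c]
}
```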

33
vendor/github.com/emirpasic/gods/lists/lists.go generated vendored Normal file

@@ -0,0 +1,33 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lists provides an abstract List interface.
//
// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item.
//
// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
package lists
import (
"github.com/emirpasic/gods/containers"
"github.com/emirpasic/gods/utils"
)
// List interface that all lists implement
type List interface {
Get(index int) (interface{}, bool)
Remove(index int)
Add(values ...interface{})
Contains(values ...interface{}) bool
Sort(comparator utils.Comparator)
Swap(index1, index2 int)
Insert(index int, values ...interface{})
Set(index int, value interface{})
containers.Container
// Empty() bool
// Size() int
// Clear()
// Values() []interface{}
}
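Because the array list above satisfies this interface, calling code can be written against lists.List and stay agnostic of the backing implementation. A small sketch (assuming only the arraylist implementation from this vendor tree):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists"
	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

// sortAndPrint works with any lists.List implementation.
func sortAndPrint(l lists.List) {
	l.Sort(utils.StringComparator)
	fmt.Println(l.Values())
}

func main() {
	var l lists.List = arraylist.New()
	l.Add("pear", "apple", "banana")
	sortAndPrint(l) // [apple banana pear]
}
```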

163
vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go generated vendored Normal file

@@ -0,0 +1,163 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package binaryheap implements a binary heap backed by array list.
//
// Comparator defines this heap as either min or max heap.
//
// Structure is not thread safe.
//
// References: http://en.wikipedia.org/wiki/Binary_heap
package binaryheap
import (
"fmt"
"github.com/emirpasic/gods/lists/arraylist"
"github.com/emirpasic/gods/trees"
"github.com/emirpasic/gods/utils"
"strings"
)
func assertTreeImplementation() {
var _ trees.Tree = (*Heap)(nil)
}
// Heap holds elements in an array-list
type Heap struct {
list *arraylist.List
Comparator utils.Comparator
}
// NewWith instantiates a new empty heap tree with the custom comparator.
func NewWith(comparator utils.Comparator) *Heap {
return &Heap{list: arraylist.New(), Comparator: comparator}
}
// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int.
func NewWithIntComparator() *Heap {
return &Heap{list: arraylist.New(), Comparator: utils.IntComparator}
}
// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string.
func NewWithStringComparator() *Heap {
return &Heap{list: arraylist.New(), Comparator: utils.StringComparator}
}
// Push adds a value onto the heap and bubbles it up accordingly.
func (heap *Heap) Push(values ...interface{}) {
if len(values) == 1 {
heap.list.Add(values[0])
heap.bubbleUp()
} else {
// Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap
for _, value := range values {
heap.list.Add(value)
}
size := heap.list.Size()/2 + 1
for i := size; i >= 0; i-- {
heap.bubbleDownIndex(i)
}
}
}
// Pop removes top element on heap and returns it, or nil if heap is empty.
// Second return parameter is true, unless the heap was empty and there was nothing to pop.
func (heap *Heap) Pop() (value interface{}, ok bool) {
value, ok = heap.list.Get(0)
if !ok {
return
}
lastIndex := heap.list.Size() - 1
heap.list.Swap(0, lastIndex)
heap.list.Remove(lastIndex)
heap.bubbleDown()
return
}
// Peek returns top element on the heap without removing it, or nil if heap is empty.
// Second return parameter is true, unless the heap was empty and there was nothing to peek.
func (heap *Heap) Peek() (value interface{}, ok bool) {
return heap.list.Get(0)
}
// Empty returns true if heap does not contain any elements.
func (heap *Heap) Empty() bool {
return heap.list.Empty()
}
// Size returns number of elements within the heap.
func (heap *Heap) Size() int {
return heap.list.Size()
}
// Clear removes all elements from the heap.
func (heap *Heap) Clear() {
heap.list.Clear()
}
// Values returns all elements in the heap.
func (heap *Heap) Values() []interface{} {
return heap.list.Values()
}
// String returns a string representation of the container
func (heap *Heap) String() string {
str := "BinaryHeap\n"
values := []string{}
for _, value := range heap.list.Values() {
values = append(values, fmt.Sprintf("%v", value))
}
str += strings.Join(values, ", ")
return str
}
// Performs the "bubble down" operation. This is to place the element that is at the root
// of the heap in its correct place so that the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleDown() {
heap.bubbleDownIndex(0)
}
// Performs the "bubble down" operation. This is to place the element that is at the index
// of the heap in its correct place so that the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleDownIndex(index int) {
size := heap.list.Size()
for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 {
rightIndex := index<<1 + 2
smallerIndex := leftIndex
leftValue, _ := heap.list.Get(leftIndex)
rightValue, _ := heap.list.Get(rightIndex)
if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 {
smallerIndex = rightIndex
}
indexValue, _ := heap.list.Get(index)
smallerValue, _ := heap.list.Get(smallerIndex)
if heap.Comparator(indexValue, smallerValue) > 0 {
heap.list.Swap(index, smallerIndex)
} else {
break
}
index = smallerIndex
}
}
// Performs the "bubble up" operation. This is to place a newly inserted
// element (i.e. last element in the list) in its correct place so that
// the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleUp() {
index := heap.list.Size() - 1
for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 {
indexValue, _ := heap.list.Get(index)
parentValue, _ := heap.list.Get(parentIndex)
if heap.Comparator(parentValue, indexValue) <= 0 {
break
}
heap.list.Swap(index, parentIndex)
index = parentIndex
}
}
// Check that the index is within bounds of the list
func (heap *Heap) withinRange(index int) bool {
return index >= 0 && index < heap.list.Size()
}
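To make the push/pop and bubble mechanics above concrete, here is a minimal min-heap sketch using the int comparator (illustrative only, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
)

func main() {
	heap := binaryheap.NewWithIntComparator() // min-heap for ints

	// Pushing several values at once takes the bulk heapify path above.
	heap.Push(5, 1, 4, 2, 3)

	if top, ok := heap.Peek(); ok {
		fmt.Println(top) // 1
	}

	// Popping repeatedly yields values in ascending order for a min-heap.
	for !heap.Empty() {
		v, _ := heap.Pop()
		fmt.Print(v, " ") // 1 2 3 4 5
	}
	fmt.Println()
}
```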

84
vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go generated vendored Normal file

@@ -0,0 +1,84 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binaryheap
import "github.com/emirpasic/gods/containers"
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
type Iterator struct {
heap *Heap
index int
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
func (heap *Heap) Iterator() Iterator {
return Iterator{heap: heap, index: -1}
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
if iterator.index < iterator.heap.Size() {
iterator.index++
}
return iterator.heap.withinRange(iterator.index)
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
if iterator.index >= 0 {
iterator.index--
}
return iterator.heap.withinRange(iterator.index)
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
value, _ := iterator.heap.list.Get(iterator.index)
return value
}
// Index returns the current element's index.
// Does not modify the state of the iterator.
func (iterator *Iterator) Index() int {
return iterator.index
}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.index = -1
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.index = iterator.heap.Size()
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
iterator.Begin()
return iterator.Next()
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
iterator.End()
return iterator.Prev()
}
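Note that this iterator walks the heap's backing array list index by index, so values arrive in storage order rather than sorted order; a brief sketch (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
)

func main() {
	heap := binaryheap.NewWithIntComparator()
	heap.Push(3, 1, 2)

	// Iteration order reflects the internal array layout (heap order),
	// not ascending order; use repeated Pop() for sorted extraction.
	it := heap.Iterator()
	for it.Next() {
		fmt.Println(it.Index(), it.Value())
	}
}
```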

22
vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go generated vendored Normal file

@@ -0,0 +1,22 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binaryheap
import "github.com/emirpasic/gods/containers"
func assertSerializationImplementation() {
var _ containers.JSONSerializer = (*Heap)(nil)
var _ containers.JSONDeserializer = (*Heap)(nil)
}
// ToJSON outputs the JSON representation of the heap.
func (heap *Heap) ToJSON() ([]byte, error) {
return heap.list.ToJSON()
}
// FromJSON populates the heap from the input JSON representation.
func (heap *Heap) FromJSON(data []byte) error {
return heap.list.FromJSON(data)
}

21
vendor/github.com/emirpasic/gods/trees/trees.go generated vendored Normal file

@@ -0,0 +1,21 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package trees provides an abstract Tree interface.
//
// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes.
//
// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29
package trees
import "github.com/emirpasic/gods/containers"
// Tree interface that all trees implement
type Tree interface {
containers.Container
// Empty() bool
// Size() int
// Clear()
// Values() []interface{}
}

251
vendor/github.com/emirpasic/gods/utils/comparator.go generated vendored Normal file

@@ -0,0 +1,251 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "time"
// Comparator will make type assertion (see IntComparator for example),
// which will panic if a or b are not of the asserted type.
//
// Should return a number:
// negative , if a < b
// zero , if a == b
// positive , if a > b
type Comparator func(a, b interface{}) int
// StringComparator provides a fast comparison on strings
func StringComparator(a, b interface{}) int {
s1 := a.(string)
s2 := b.(string)
min := len(s2)
if len(s1) < len(s2) {
min = len(s1)
}
diff := 0
for i := 0; i < min && diff == 0; i++ {
diff = int(s1[i]) - int(s2[i])
}
if diff == 0 {
diff = len(s1) - len(s2)
}
if diff < 0 {
return -1
}
if diff > 0 {
return 1
}
return 0
}
// IntComparator provides a basic comparison on int
func IntComparator(a, b interface{}) int {
aAsserted := a.(int)
bAsserted := b.(int)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int8Comparator provides a basic comparison on int8
func Int8Comparator(a, b interface{}) int {
aAsserted := a.(int8)
bAsserted := b.(int8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int16Comparator provides a basic comparison on int16
func Int16Comparator(a, b interface{}) int {
aAsserted := a.(int16)
bAsserted := b.(int16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int32Comparator provides a basic comparison on int32
func Int32Comparator(a, b interface{}) int {
aAsserted := a.(int32)
bAsserted := b.(int32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int64Comparator provides a basic comparison on int64
func Int64Comparator(a, b interface{}) int {
aAsserted := a.(int64)
bAsserted := b.(int64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UIntComparator provides a basic comparison on uint
func UIntComparator(a, b interface{}) int {
aAsserted := a.(uint)
bAsserted := b.(uint)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt8Comparator provides a basic comparison on uint8
func UInt8Comparator(a, b interface{}) int {
aAsserted := a.(uint8)
bAsserted := b.(uint8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt16Comparator provides a basic comparison on uint16
func UInt16Comparator(a, b interface{}) int {
aAsserted := a.(uint16)
bAsserted := b.(uint16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt32Comparator provides a basic comparison on uint32
func UInt32Comparator(a, b interface{}) int {
aAsserted := a.(uint32)
bAsserted := b.(uint32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt64Comparator provides a basic comparison on uint64
func UInt64Comparator(a, b interface{}) int {
aAsserted := a.(uint64)
bAsserted := b.(uint64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float32Comparator provides a basic comparison on float32
func Float32Comparator(a, b interface{}) int {
aAsserted := a.(float32)
bAsserted := b.(float32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float64Comparator provides a basic comparison on float64
func Float64Comparator(a, b interface{}) int {
aAsserted := a.(float64)
bAsserted := b.(float64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// ByteComparator provides a basic comparison on byte
func ByteComparator(a, b interface{}) int {
aAsserted := a.(byte)
bAsserted := b.(byte)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// RuneComparator provides a basic comparison on rune
func RuneComparator(a, b interface{}) int {
aAsserted := a.(rune)
bAsserted := b.(rune)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// TimeComparator provides a basic comparison on time.Time
func TimeComparator(a, b interface{}) int {
aAsserted := a.(time.Time)
bAsserted := b.(time.Time)
switch {
case aAsserted.After(bAsserted):
return 1
case aAsserted.Before(bAsserted):
return -1
default:
return 0
}
}
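Custom orderings plug in through the same Comparator contract; a hedged sketch ordering strings by length (the byLength helper is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
	"github.com/emirpasic/gods/utils"
)

// byLength orders strings by length. It follows the Comparator contract
// above (negative if a < b, zero if equal, positive if a > b) and will
// panic if the values are not strings, as documented.
func byLength(a, b interface{}) int {
	return utils.IntComparator(len(a.(string)), len(b.(string)))
}

func main() {
	heap := binaryheap.NewWith(byLength)
	heap.Push("kiwi", "fig", "banana")

	v, _ := heap.Pop()
	fmt.Println(v) // fig (shortest string first)
}
```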

29
vendor/github.com/emirpasic/gods/utils/sort.go generated vendored Normal file

@@ -0,0 +1,29 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "sort"
// Sort sorts values (in-place) with respect to the given comparator.
//
// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices).
func Sort(values []interface{}, comparator Comparator) {
sort.Sort(sortable{values, comparator})
}
type sortable struct {
values []interface{}
comparator Comparator
}
func (s sortable) Len() int {
return len(s.values)
}
func (s sortable) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
}
func (s sortable) Less(i, j int) bool {
return s.comparator(s.values[i], s.values[j]) < 0
}
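A minimal use of Sort with one of the built-in comparators above (illustrative sketch):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

func main() {
	values := []interface{}{"pear", "apple", "banana"}

	// Sort is in-place and delegates all ordering decisions to the comparator.
	utils.Sort(values, utils.StringComparator)
	fmt.Println(values) // [apple banana pear]
}
```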

47
vendor/github.com/emirpasic/gods/utils/utils.go generated vendored Normal file

@@ -0,0 +1,47 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utils provides common utility functions.
//
// Provided functionalities:
// - sorting
// - comparators
package utils
import (
"fmt"
"strconv"
)
// ToString converts a value to string.
func ToString(value interface{}) string {
switch value.(type) {
case string:
return value.(string)
case int8:
return strconv.FormatInt(int64(value.(int8)), 10)
case int16:
return strconv.FormatInt(int64(value.(int16)), 10)
case int32:
return strconv.FormatInt(int64(value.(int32)), 10)
case int64:
return strconv.FormatInt(int64(value.(int64)), 10)
case uint8:
return strconv.FormatUint(uint64(value.(uint8)), 10)
case uint16:
return strconv.FormatUint(uint64(value.(uint16)), 10)
case uint32:
return strconv.FormatUint(uint64(value.(uint32)), 10)
case uint64:
return strconv.FormatUint(uint64(value.(uint64)), 10)
case float32:
return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64)
case float64:
return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64)
case bool:
return strconv.FormatBool(value.(bool))
default:
return fmt.Sprintf("%+v", value)
}
}
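A few representative conversions (illustrative sketch; note that plain int values are not listed in the switch above and fall through to the %+v default case):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

func main() {
	fmt.Println(utils.ToString(int64(42)))   // "42" via FormatInt
	fmt.Println(utils.ToString(3.14))        // "3.14" via FormatFloat
	fmt.Println(utils.ToString(true))        // "true" via FormatBool
	fmt.Println(utils.ToString([]int{1, 2})) // "[1 2]" via the %+v fallback
}
```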

21
vendor/github.com/jbenet/go-context/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

120
vendor/github.com/jbenet/go-context/io/ctxio.go generated vendored Normal file

@@ -0,0 +1,120 @@
// Package ctxio provides io.Reader and io.Writer wrappers that
// respect context.Contexts. Use these at the interface between
// your context code and your io.
//
// WARNING: read the code. see how writes and reads will continue
// until you cancel the io. Maybe this package should provide
// versions of io.ReadCloser and io.WriteCloser that automatically
// call .Close when the context expires. But for now -- since in my
// use cases I have long-lived connections with ephemeral io wrappers
// -- this has yet to be a need.
package ctxio
import (
"io"
context "golang.org/x/net/context"
)
type ioret struct {
n int
err error
}
type Writer interface {
io.Writer
}
type ctxWriter struct {
w io.Writer
ctx context.Context
}
// NewWriter wraps a writer to make it respect given Context.
// If there is a blocking write, the returned Writer will return
// whenever the context is cancelled (the return values are n=0
// and err=ctx.Err().)
//
// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
// write-- there is no way to do that with the standard go io
// interface. So the read and write _will_ happen or hang. So, use
// this sparingly, make sure to cancel the read or write as necessary
// (e.g. closing a connection whose context is up, etc.)
//
// Furthermore, in order to protect your memory from being read
// _after_ you've cancelled the context, this io.Writer will
// first make a **copy** of the buffer.
func NewWriter(ctx context.Context, w io.Writer) *ctxWriter {
if ctx == nil {
ctx = context.Background()
}
return &ctxWriter{ctx: ctx, w: w}
}
func (w *ctxWriter) Write(buf []byte) (int, error) {
buf2 := make([]byte, len(buf))
copy(buf2, buf)
c := make(chan ioret, 1)
go func() {
n, err := w.w.Write(buf2)
c <- ioret{n, err}
close(c)
}()
select {
case r := <-c:
return r.n, r.err
case <-w.ctx.Done():
return 0, w.ctx.Err()
}
}
type Reader interface {
io.Reader
}
type ctxReader struct {
r io.Reader
ctx context.Context
}
// NewReader wraps a reader to make it respect given Context.
// If there is a blocking read, the returned Reader will return
// whenever the context is cancelled (the return values are n=0
// and err=ctx.Err().)
//
// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
// read-- there is no way to do that with the standard go io
// interface. So the read and write _will_ happen or hang. So, use
// this sparingly, make sure to cancel the read or write as necessary
// (e.g. closing a connection whose context is up, etc.)
//
// Furthermore, in order to protect your memory from being read
// _before_ you've cancelled the context, this io.Reader will
// allocate a buffer of the same size, and **copy** into the client's
// if the read succeeds in time.
func NewReader(ctx context.Context, r io.Reader) *ctxReader {
return &ctxReader{ctx: ctx, r: r}
}
func (r *ctxReader) Read(buf []byte) (int, error) {
buf2 := make([]byte, len(buf))
c := make(chan ioret, 1)
go func() {
n, err := r.r.Read(buf2)
c <- ioret{n, err}
close(c)
}()
select {
case ret := <-c:
copy(buf, buf2)
return ret.n, ret.err
case <-r.ctx.Done():
return 0, r.ctx.Err()
}
}
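An illustrative sketch of the reader wrapper with a context timeout. The slowReader type is a made-up stand-in for a blocking source, and the example imports golang.org/x/net/context to match the signatures in this file; as the warning above says, the underlying Read itself is not cancelled:

```go
package main

import (
	"fmt"
	"time"

	ctxio "github.com/jbenet/go-context/io"
	"golang.org/x/net/context"
)

// slowReader blocks longer than the context allows.
type slowReader struct{}

func (slowReader) Read(p []byte) (int, error) {
	time.Sleep(time.Second)
	return copy(p, "late"), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	r := ctxio.NewReader(ctx, slowReader{})

	buf := make([]byte, 4)
	_, err := r.Read(buf)
	fmt.Println(err) // ctx.Err(): context deadline exceeded

	// Per the warning above, the goroutine doing the underlying Read keeps
	// running to completion; only the caller is released early.
}
```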

1
vendor/github.com/kevinburke/ssh_config/.gitattributes generated vendored Normal file

@@ -0,0 +1 @@
testdata/dos-lines eol=crlf

1
vendor/github.com/kevinburke/ssh_config/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
/bazel-*

15
vendor/github.com/kevinburke/ssh_config/.travis.yml generated vendored Normal file

@@ -0,0 +1,15 @@
go_import_path: github.com/kevinburke/ssh_config
language: go
go:
- 1.9.x
- 1.10.x
- 1.11.x
- master
before_script:
- go get -u ./...
script:
- make race-test

4
vendor/github.com/kevinburke/ssh_config/AUTHORS.txt generated vendored Normal file

@@ -0,0 +1,4 @@
Eugene Terentev <eugene@terentev.net>
Kevin Burke <kev@inburke.com>
Sergey Lukjanov <me@slukjanov.name>
Wayne Ashley Berry <wayneashleyberry@gmail.com>

49
vendor/github.com/kevinburke/ssh_config/LICENSE generated vendored Normal file

@@ -0,0 +1,49 @@
Copyright (c) 2017 Kevin Burke.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
===================
The lexer and parser borrow heavily from github.com/pelletier/go-toml. The
license for that project is copied below.
The MIT License (MIT)
Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

32
vendor/github.com/kevinburke/ssh_config/Makefile generated vendored Normal file

@@ -0,0 +1,32 @@
BUMP_VERSION := $(GOPATH)/bin/bump_version
MEGACHECK := $(GOPATH)/bin/megacheck
WRITE_MAILMAP := $(GOPATH)/bin/write_mailmap
IGNORES := 'github.com/kevinburke/ssh_config/config.go:U1000 github.com/kevinburke/ssh_config/config.go:S1002 github.com/kevinburke/ssh_config/token.go:U1000'
$(MEGACHECK):
go get honnef.co/go/tools/cmd/megacheck
lint: $(MEGACHECK)
go vet ./...
$(MEGACHECK) --ignore=$(IGNORES) ./...
test: lint
@# the timeout helps guard against infinite recursion
go test -timeout=250ms ./...
race-test: lint
go test -timeout=500ms -race ./...
$(BUMP_VERSION):
go get -u github.com/kevinburke/bump_version
release: test | $(BUMP_VERSION)
$(BUMP_VERSION) minor config.go
force: ;
AUTHORS.txt: force | $(WRITE_MAILMAP)
$(WRITE_MAILMAP) > AUTHORS.txt
authors: AUTHORS.txt

81
vendor/github.com/kevinburke/ssh_config/README.md generated vendored Normal file

@@ -0,0 +1,81 @@
# ssh_config
This is a Go parser for `ssh_config` files. Importantly, this parser attempts
to preserve comments in a given file, so you can manipulate a `ssh_config` file
from a program, if your heart desires.
It's designed to be used with the excellent
[x/crypto/ssh](https://golang.org/x/crypto/ssh) package, which handles SSH
negotiation but isn't very easy to configure.
The `ssh_config` `Get()` and `GetStrict()` functions will attempt to read values
from `$HOME/.ssh/config` and fall back to `/etc/ssh/ssh_config`. The first
argument is the host name to match on, and the second argument is the key you
want to retrieve.
```go
port := ssh_config.Get("myhost", "Port")
```
You can also load a config file and read values from it.
```go
var config = `
Host *.test
Compression yes
`
cfg, err := ssh_config.Decode(strings.NewReader(config))
fmt.Println(cfg.Get("example.test", "Port"))
```
Some SSH arguments have default values - for example, the default value for
`KeyboardAuthentication` is `"yes"`. If you call Get(), and no value for the
given Host/keyword pair exists in the config, we'll return a default for the
keyword if one exists.
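For example (an illustrative sketch; the result of the first call depends on the machine's actual SSH configuration):

```go
package main

import (
	"fmt"

	"github.com/kevinburke/ssh_config"
)

func main() {
	// Get consults ~/.ssh/config and /etc/ssh/ssh_config; when neither sets
	// Port for this host, the built-in default ("22") is returned.
	fmt.Println(ssh_config.Get("some-host-with-no-config", "Port"))

	// The defaults themselves are also exposed directly.
	fmt.Println(ssh_config.Default("Port")) // "22"
}
```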
### Manipulating SSH config files
Here's how you can manipulate an SSH config file, and then write it back to
disk.
```go
f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
cfg, _ := ssh_config.Decode(f)
for _, host := range cfg.Hosts {
fmt.Println("patterns:", host.Patterns)
for _, node := range host.Nodes {
// Manipulate the nodes as you see fit, or use a type switch to
// distinguish between Empty, KV, and Include nodes.
fmt.Println(node.String())
}
}
// Print the config to stdout:
fmt.Println(cfg.String())
```
## Spec compliance
Wherever possible we try to implement the specification as documented in
the `ssh_config` manpage. Unimplemented features should be present in the
[issues][issues] list.
Notably, the `Match` directive is currently unsupported.
[issues]: https://github.com/kevinburke/ssh_config/issues
## Errata
This is the second [comment-preserving configuration parser][blog] I've written, after
[an /etc/hosts parser][hostsfile]. Eventually, I will write one for every Linux
file format.
[blog]: https://kev.inburke.com/kevin/more-comment-preserving-configuration-parsers/
[hostsfile]: https://github.com/kevinburke/hostsfile
## Donating
Donations free up time to make improvements to the library, and respond to
bug reports. You can send donations via Paypal's "Send Money" feature to
kev@inburke.com. Donations are not tax deductible in the USA.

639
vendor/github.com/kevinburke/ssh_config/config.go generated vendored Normal file

@@ -0,0 +1,639 @@
// Package ssh_config provides tools for manipulating SSH config files.
//
// Importantly, this parser attempts to preserve comments in a given file, so
// you can manipulate a `ssh_config` file from a program, if your heart desires.
//
// The Get() and GetStrict() functions will attempt to read values from
// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. The first argument is
// the host name to match on ("example.com"), and the second argument is the key
// you want to retrieve ("Port"). The keywords are case insensitive.
//
// port := ssh_config.Get("myhost", "Port")
//
// You can also manipulate an SSH config file and then print it or write it back
// to disk.
//
// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
// cfg, _ := ssh_config.Decode(f)
// for _, host := range cfg.Hosts {
// fmt.Println("patterns:", host.Patterns)
// for _, node := range host.Nodes {
// fmt.Println(node.String())
// }
// }
//
// // Write the cfg back to disk:
// fmt.Println(cfg.String())
//
// BUG: the Match directive is currently unsupported; parsing a config with
// a Match directive will trigger an error.
package ssh_config
import (
"bytes"
"errors"
"fmt"
"io"
"os"
osuser "os/user"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
)
const version = "0.5"
type configFinder func() string
// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config
// files are parsed and cached the first time Get() or GetStrict() is called.
type UserSettings struct {
IgnoreErrors bool
systemConfig *Config
systemConfigFinder configFinder
userConfig *Config
userConfigFinder configFinder
loadConfigs sync.Once
onceErr error
}
func homedir() string {
user, err := osuser.Current()
if err == nil {
return user.HomeDir
} else {
return os.Getenv("HOME")
}
}
func userConfigFinder() string {
return filepath.Join(homedir(), ".ssh", "config")
}
// DefaultUserSettings is the default UserSettings and is used by Get and
// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys,
// and it will return parse errors (if any) instead of swallowing them.
var DefaultUserSettings = &UserSettings{
IgnoreErrors: false,
systemConfigFinder: systemConfigFinder,
userConfigFinder: userConfigFinder,
}
func systemConfigFinder() string {
return filepath.Join("/", "etc", "ssh", "ssh_config")
}
func findVal(c *Config, alias, key string) (string, error) {
if c == nil {
return "", nil
}
val, err := c.Get(alias, key)
if err != nil || val == "" {
return "", err
}
if err := validate(key, val); err != nil {
return "", err
}
return val, nil
}
// Get finds the first value for key within a declaration that matches the
// alias. Get returns the empty string if no value was found, or if IgnoreErrors
// is false and we could not parse the configuration file. Use GetStrict to
// disambiguate the latter cases.
//
// The match for key is case insensitive.
//
// Get is a wrapper around DefaultUserSettings.Get.
func Get(alias, key string) string {
return DefaultUserSettings.Get(alias, key)
}
// GetStrict finds the first value for key within a declaration that matches the
// alias. If key has a default value and no matching configuration is found, the
// default will be returned. For more information on default values and the way
// patterns are matched, see the manpage for ssh_config.
//
// error will be non-nil if and only if a user's configuration file or the
// system configuration file could not be parsed, and u.IgnoreErrors is false.
//
// GetStrict is a wrapper around DefaultUserSettings.GetStrict.
func GetStrict(alias, key string) (string, error) {
return DefaultUserSettings.GetStrict(alias, key)
}
// Get finds the first value for key within a declaration that matches the
// alias. Get returns the empty string if no value was found, or if IgnoreErrors
// is false and we could not parse the configuration file. Use GetStrict to
// disambiguate the latter cases.
//
// The match for key is case insensitive.
func (u *UserSettings) Get(alias, key string) string {
val, err := u.GetStrict(alias, key)
if err != nil {
return ""
}
return val
}
// GetStrict finds the first value for key within a declaration that matches the
// alias. If key has a default value and no matching configuration is found, the
// default will be returned. For more information on default values and the way
// patterns are matched, see the manpage for ssh_config.
//
// error will be non-nil if and only if a user's configuration file or the
// system configuration file could not be parsed, and u.IgnoreErrors is false.
func (u *UserSettings) GetStrict(alias, key string) (string, error) {
u.loadConfigs.Do(func() {
// can't parse user file, that's ok.
var filename string
if u.userConfigFinder == nil {
filename = userConfigFinder()
} else {
filename = u.userConfigFinder()
}
var err error
u.userConfig, err = parseFile(filename)
if err != nil && os.IsNotExist(err) == false {
u.onceErr = err
return
}
if u.systemConfigFinder == nil {
filename = systemConfigFinder()
} else {
filename = u.systemConfigFinder()
}
u.systemConfig, err = parseFile(filename)
if err != nil && os.IsNotExist(err) == false {
u.onceErr = err
return
}
})
if u.onceErr != nil && u.IgnoreErrors == false {
return "", u.onceErr
}
val, err := findVal(u.userConfig, alias, key)
if err != nil || val != "" {
return val, err
}
val2, err2 := findVal(u.systemConfig, alias, key)
if err2 != nil || val2 != "" {
return val2, err2
}
return Default(key), nil
}
func parseFile(filename string) (*Config, error) {
return parseWithDepth(filename, 0)
}
func parseWithDepth(filename string, depth uint8) (*Config, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
return decode(f, isSystem(filename), depth)
}
func isSystem(filename string) bool {
// TODO i'm not sure this is the best way to detect a system repo
return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh")
}
// Decode reads r into a Config, or returns an error if r could not be parsed as
// an SSH config file.
func Decode(r io.Reader) (*Config, error) {
return decode(r, false, 0)
}
func decode(r io.Reader, system bool, depth uint8) (c *Config, err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
if e, ok := r.(error); ok && e == ErrDepthExceeded {
err = e
return
}
err = errors.New(r.(string))
}
}()
c = parseSSH(lexSSH(r), system, depth)
return c, err
}
// Config represents an SSH config file.
type Config struct {
// A list of hosts to match against. The file begins with an implicit
// "Host *" declaration matching all hosts.
Hosts []*Host
depth uint8
position Position
}
// Get finds the first value in the configuration that matches the alias and
// contains key. Get returns the empty string if no value was found, or if the
// Config contains an invalid conditional Include value.
//
// The match for key is case insensitive.
func (c *Config) Get(alias, key string) (string, error) {
lowerKey := strings.ToLower(key)
for _, host := range c.Hosts {
if !host.Matches(alias) {
continue
}
for _, node := range host.Nodes {
switch t := node.(type) {
case *Empty:
continue
case *KV:
// "keys are case insensitive" per the spec
lkey := strings.ToLower(t.Key)
if lkey == "match" {
panic("can't handle Match directives")
}
if lkey == lowerKey {
return t.Value, nil
}
case *Include:
val := t.Get(alias, key)
if val != "" {
return val, nil
}
default:
return "", fmt.Errorf("unknown Node type %v", t)
}
}
}
return "", nil
}
// String returns a string representation of the Config file.
func (c Config) String() string {
return marshal(c).String()
}
func (c Config) MarshalText() ([]byte, error) {
return marshal(c).Bytes(), nil
}
func marshal(c Config) *bytes.Buffer {
var buf bytes.Buffer
for i := range c.Hosts {
buf.WriteString(c.Hosts[i].String())
}
return &buf
}
// Pattern is a pattern in a Host declaration. Patterns are read-only values;
// create a new one with NewPattern().
type Pattern struct {
str string // Its appearance in the file, not the value that gets compiled.
regex *regexp.Regexp
not bool // True if this is a negated match
}
// String prints the string representation of the pattern.
func (p Pattern) String() string {
return p.str
}
// Copied from regexp.go with * and ? removed.
var specialBytes = []byte(`\.+()|[]{}^$`)
func special(b byte) bool {
return bytes.IndexByte(specialBytes, b) >= 0
}
// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates
// a Pattern that matches all hosts.
//
// From the manpage, a pattern consists of zero or more non-whitespace
// characters, `*' (a wildcard that matches zero or more characters), or `?' (a
// wildcard that matches exactly one character). For example, to specify a set
// of declarations for any host in the ".co.uk" set of domains, the following
// pattern could be used:
//
// Host *.co.uk
//
// The following pattern would match any host in the 192.168.0.[0-9] network range:
//
// Host 192.168.0.?
func NewPattern(s string) (*Pattern, error) {
if s == "" {
return nil, errors.New("ssh_config: empty pattern")
}
negated := false
if s[0] == '!' {
negated = true
s = s[1:]
}
var buf bytes.Buffer
buf.WriteByte('^')
for i := 0; i < len(s); i++ {
// A byte loop is correct because all metacharacters are ASCII.
switch b := s[i]; b {
case '*':
buf.WriteString(".*")
case '?':
buf.WriteString(".?")
default:
// borrowing from QuoteMeta here.
if special(b) {
buf.WriteByte('\\')
}
buf.WriteByte(b)
}
}
buf.WriteByte('$')
r, err := regexp.Compile(buf.String())
if err != nil {
return nil, err
}
return &Pattern{str: s, regex: r, not: negated}, nil
}
// Host describes a Host directive and the keywords that follow it.
type Host struct {
// A list of host patterns that should match this host.
Patterns []*Pattern
// A Node is either a key/value pair or a comment line.
Nodes []Node
// EOLComment is the comment (if any) terminating the Host line.
EOLComment string
hasEquals bool
leadingSpace uint16 // TODO: handle spaces vs tabs here.
// The file starts with an implicit "Host *" declaration.
implicit bool
}
// Matches returns true if the Host matches for the given alias. For
// a description of the rules that provide a match, see the manpage for
// ssh_config.
func (h *Host) Matches(alias string) bool {
found := false
for i := range h.Patterns {
if h.Patterns[i].regex.MatchString(alias) {
if h.Patterns[i].not == true {
// Negated match. "A pattern entry may be negated by prefixing
// it with an exclamation mark (`!'). If a negated entry is
// matched, then the Host entry is ignored, regardless of
// whether any other patterns on the line match. Negated matches
// are therefore useful to provide exceptions for wildcard
// matches."
return false
}
found = true
}
}
return found
}
// String prints h as it would appear in a config file. Minor tweaks may be
// present in the whitespace in the printed file.
func (h *Host) String() string {
var buf bytes.Buffer
if h.implicit == false {
buf.WriteString(strings.Repeat(" ", int(h.leadingSpace)))
buf.WriteString("Host")
if h.hasEquals {
buf.WriteString(" = ")
} else {
buf.WriteString(" ")
}
for i, pat := range h.Patterns {
buf.WriteString(pat.String())
if i < len(h.Patterns)-1 {
buf.WriteString(" ")
}
}
if h.EOLComment != "" {
buf.WriteString(" #")
buf.WriteString(h.EOLComment)
}
buf.WriteByte('\n')
}
for i := range h.Nodes {
buf.WriteString(h.Nodes[i].String())
buf.WriteByte('\n')
}
return buf.String()
}
// Node represents a line in a Config.
type Node interface {
Pos() Position
String() string
}
// KV is a line in the config file that contains a key, a value, and possibly
// a comment.
type KV struct {
Key string
Value string
Comment string
hasEquals bool
leadingSpace uint16 // Space before the key. TODO handle spaces vs tabs.
position Position
}
// Pos returns k's Position.
func (k *KV) Pos() Position {
return k.position
}
// String prints k as it was parsed in the config file. There may be slight
// changes to the whitespace between values.
func (k *KV) String() string {
if k == nil {
return ""
}
equals := " "
if k.hasEquals {
equals = " = "
}
line := fmt.Sprintf("%s%s%s%s", strings.Repeat(" ", int(k.leadingSpace)), k.Key, equals, k.Value)
if k.Comment != "" {
line += " #" + k.Comment
}
return line
}
// Empty is a line in the config file that contains only whitespace or comments.
type Empty struct {
Comment string
leadingSpace uint16 // TODO handle spaces vs tabs.
position Position
}
// Pos returns e's Position.
func (e *Empty) Pos() Position {
return e.position
}
// String prints e as it was parsed in the config file.
func (e *Empty) String() string {
if e == nil {
return ""
}
if e.Comment == "" {
return ""
}
return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment)
}
// Include holds the result of an Include directive, including the config files
// that have been parsed as part of that directive. At most 5 levels of Include
// statements will be parsed.
type Include struct {
// Comment is the contents of any comment at the end of the Include
// statement.
Comment string
parsed bool
// an include directive can include several different files, and wildcards
directives []string
mu sync.Mutex
// 1:1 mapping between matches and keys in files array; matches preserves
// ordering
matches []string
// actual filenames are listed here
files map[string]*Config
leadingSpace uint16
position Position
depth uint8
hasEquals bool
}
const maxRecurseDepth = 5
// ErrDepthExceeded is returned if too many Include directives are parsed.
// Usually this indicates a recursive loop (an Include directive pointing to the
// file it contains).
var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded")
func removeDups(arr []string) []string {
// Use map to record duplicates as we find them.
encountered := make(map[string]bool, len(arr))
result := make([]string, 0)
for v := range arr {
if encountered[arr[v]] == false {
encountered[arr[v]] = true
result = append(result, arr[v])
}
}
return result
}
// NewInclude creates a new Include with a list of file globs to include.
// Configuration files are parsed greedily (e.g. as soon as this function runs).
// Any error encountered while parsing nested configuration files will be
// returned.
func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) {
if depth > maxRecurseDepth {
return nil, ErrDepthExceeded
}
inc := &Include{
Comment: comment,
directives: directives,
files: make(map[string]*Config),
position: pos,
leadingSpace: uint16(pos.Col) - 1,
depth: depth,
hasEquals: hasEquals,
}
// no need for inc.mu.Lock() since nothing else can access this inc
matches := make([]string, 0)
for i := range directives {
var path string
if filepath.IsAbs(directives[i]) {
path = directives[i]
} else if system {
path = filepath.Join("/etc/ssh", directives[i])
} else {
path = filepath.Join(homedir(), ".ssh", directives[i])
}
theseMatches, err := filepath.Glob(path)
if err != nil {
return nil, err
}
matches = append(matches, theseMatches...)
}
matches = removeDups(matches)
inc.matches = matches
for i := range matches {
config, err := parseWithDepth(matches[i], depth)
if err != nil {
return nil, err
}
inc.files[matches[i]] = config
}
return inc, nil
}
// Pos returns the position of the Include directive in the larger file.
func (i *Include) Pos() Position {
return i.position
}
// Get finds the first value in the Include statement matching the alias and the
// given key.
func (inc *Include) Get(alias, key string) string {
inc.mu.Lock()
defer inc.mu.Unlock()
// TODO: we search files in any order which is not correct
for i := range inc.matches {
cfg := inc.files[inc.matches[i]]
if cfg == nil {
panic("nil cfg")
}
val, err := cfg.Get(alias, key)
if err == nil && val != "" {
return val
}
}
return ""
}
// String prints out a string representation of this Include directive. Note
// included Config files are not printed as part of this representation.
func (inc *Include) String() string {
equals := " "
if inc.hasEquals {
equals = " = "
}
line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " "))
if inc.Comment != "" {
line += " #" + inc.Comment
}
return line
}
var matchAll *Pattern
func init() {
var err error
matchAll, err = NewPattern("*")
if err != nil {
panic(err)
}
}
func newConfig() *Config {
return &Config{
Hosts: []*Host{
&Host{
implicit: true,
Patterns: []*Pattern{matchAll},
Nodes: make([]Node, 0),
},
},
depth: 0,
}
}
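A sketch of pattern matching through the public API, using made-up host names, with a wildcard plus a negated pattern as described in the Pattern and Host documentation above (illustrative only, not part of the vendored file):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/kevinburke/ssh_config"
)

func main() {
	raw := `
Host *.example.com !bastion.example.com
    User deploy
`
	cfg, err := ssh_config.Decode(strings.NewReader(raw))
	if err != nil {
		panic(err)
	}

	// The wildcard matches, so the User keyword is found...
	user, _ := cfg.Get("web1.example.com", "User")
	fmt.Println(user) // deploy

	// ...but the negated pattern excludes the bastion host entirely.
	user, _ = cfg.Get("bastion.example.com", "User")
	fmt.Println(user == "") // true
}
```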

241
vendor/github.com/kevinburke/ssh_config/lexer.go generated vendored Normal file

@@ -0,0 +1,241 @@
package ssh_config
import (
"io"
buffruneio "github.com/pelletier/go-buffruneio"
)
// Define state functions
type sshLexStateFn func() sshLexStateFn
type sshLexer struct {
input *buffruneio.Reader // Textual source
buffer []rune // Runes composing the current token
tokens chan token
line uint32
col uint16
endbufferLine uint32
endbufferCol uint16
}
func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn {
return func() sshLexStateFn {
growingString := ""
for next := s.peek(); next != '\n' && next != eof; next = s.peek() {
if next == '\r' && s.follow("\r\n") {
break
}
growingString += string(next)
s.next()
}
s.emitWithValue(tokenComment, growingString)
s.skip()
return previousState
}
}
// lex the space after an equals sign in a function
func (s *sshLexer) lexRspace() sshLexStateFn {
for {
next := s.peek()
if !isSpace(next) {
break
}
s.skip()
}
return s.lexRvalue
}
func (s *sshLexer) lexEquals() sshLexStateFn {
for {
next := s.peek()
if next == '=' {
s.emit(tokenEquals)
s.skip()
return s.lexRspace
}
// TODO error handling here; newline eof etc.
if !isSpace(next) {
break
}
s.skip()
}
return s.lexRvalue
}
func (s *sshLexer) lexKey() sshLexStateFn {
growingString := ""
for r := s.peek(); isKeyChar(r); r = s.peek() {
// simplified a lot here
if isSpace(r) || r == '=' {
s.emitWithValue(tokenKey, growingString)
s.skip()
return s.lexEquals
}
growingString += string(r)
s.next()
}
s.emitWithValue(tokenKey, growingString)
return s.lexEquals
}
func (s *sshLexer) lexRvalue() sshLexStateFn {
growingString := ""
for {
next := s.peek()
switch next {
case '\r':
if s.follow("\r\n") {
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexVoid
}
case '\n':
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexVoid
case '#':
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexComment(s.lexVoid)
case eof:
s.next()
}
if next == eof {
break
}
growingString += string(next)
s.next()
}
s.emit(tokenEOF)
return nil
}
func (s *sshLexer) read() rune {
r, _, err := s.input.ReadRune()
if err != nil {
panic(err)
}
if r == '\n' {
s.endbufferLine++
s.endbufferCol = 1
} else {
s.endbufferCol++
}
return r
}
func (s *sshLexer) next() rune {
r := s.read()
if r != eof {
s.buffer = append(s.buffer, r)
}
return r
}
func (s *sshLexer) lexVoid() sshLexStateFn {
for {
next := s.peek()
switch next {
case '#':
s.skip()
return s.lexComment(s.lexVoid)
case '\r':
fallthrough
case '\n':
s.emit(tokenEmptyLine)
s.skip()
continue
}
if isSpace(next) {
s.skip()
}
if isKeyStartChar(next) {
return s.lexKey
}
// removed IsKeyStartChar and lexKey. probably will need to readd
if next == eof {
s.next()
break
}
}
s.emit(tokenEOF)
return nil
}
func (s *sshLexer) ignore() {
s.buffer = make([]rune, 0)
s.line = s.endbufferLine
s.col = s.endbufferCol
}
func (s *sshLexer) skip() {
s.next()
s.ignore()
}
func (s *sshLexer) emit(t tokenType) {
s.emitWithValue(t, string(s.buffer))
}
func (s *sshLexer) emitWithValue(t tokenType, value string) {
tok := token{
Position: Position{s.line, s.col},
typ: t,
val: value,
}
s.tokens <- tok
s.ignore()
}
func (s *sshLexer) peek() rune {
r, _, err := s.input.ReadRune()
if err != nil {
panic(err)
}
s.input.UnreadRune()
return r
}
func (s *sshLexer) follow(next string) bool {
for _, expectedRune := range next {
r, _, err := s.input.ReadRune()
defer s.input.UnreadRune()
if err != nil {
panic(err)
}
if expectedRune != r {
return false
}
}
return true
}
func (s *sshLexer) run() {
for state := s.lexVoid; state != nil; {
state = state()
}
close(s.tokens)
}
func lexSSH(input io.Reader) chan token {
bufferedInput := buffruneio.NewReader(input)
l := &sshLexer{
input: bufferedInput,
tokens: make(chan token),
line: 1,
col: 1,
endbufferLine: 1,
endbufferCol: 1,
}
go l.run()
return l.tokens
}

185
vendor/github.com/kevinburke/ssh_config/parser.go generated vendored Normal file

@@ -0,0 +1,185 @@
package ssh_config
import (
"fmt"
"strings"
)
type sshParser struct {
flow chan token
config *Config
tokensBuffer []token
currentTable []string
seenTableKeys []string
// /etc/ssh parser or local parser - used to find the default for relative
// filepaths in the Include directive
system bool
depth uint8
}
type sshParserStateFn func() sshParserStateFn
// Formats and panics an error message based on a token
func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) {
// TODO this format is ugly
panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
}
func (p *sshParser) raiseError(tok *token, err error) {
if err == ErrDepthExceeded {
panic(err)
}
// TODO this format is ugly
panic(tok.Position.String() + ": " + err.Error())
}
func (p *sshParser) run() {
for state := p.parseStart; state != nil; {
state = state()
}
}
func (p *sshParser) peek() *token {
if len(p.tokensBuffer) != 0 {
return &(p.tokensBuffer[0])
}
tok, ok := <-p.flow
if !ok {
return nil
}
p.tokensBuffer = append(p.tokensBuffer, tok)
return &tok
}
func (p *sshParser) getToken() *token {
if len(p.tokensBuffer) != 0 {
tok := p.tokensBuffer[0]
p.tokensBuffer = p.tokensBuffer[1:]
return &tok
}
tok, ok := <-p.flow
if !ok {
return nil
}
return &tok
}
func (p *sshParser) parseStart() sshParserStateFn {
tok := p.peek()
// end of stream, parsing is finished
if tok == nil {
return nil
}
switch tok.typ {
case tokenComment, tokenEmptyLine:
return p.parseComment
case tokenKey:
return p.parseKV
case tokenEOF:
return nil
default:
p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok))
}
return nil
}
func (p *sshParser) parseKV() sshParserStateFn {
key := p.getToken()
hasEquals := false
val := p.getToken()
if val.typ == tokenEquals {
hasEquals = true
val = p.getToken()
}
comment := ""
tok := p.peek()
if tok == nil {
tok = &token{typ: tokenEOF}
}
if tok.typ == tokenComment && tok.Position.Line == val.Position.Line {
tok = p.getToken()
comment = tok.val
}
if strings.ToLower(key.val) == "match" {
// https://github.com/kevinburke/ssh_config/issues/6
p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported")
return nil
}
if strings.ToLower(key.val) == "host" {
strPatterns := strings.Split(val.val, " ")
patterns := make([]*Pattern, 0)
for i := range strPatterns {
if strPatterns[i] == "" {
continue
}
pat, err := NewPattern(strPatterns[i])
if err != nil {
p.raiseErrorf(val, "Invalid host pattern: %v", err)
return nil
}
patterns = append(patterns, pat)
}
p.config.Hosts = append(p.config.Hosts, &Host{
Patterns: patterns,
Nodes: make([]Node, 0),
EOLComment: comment,
hasEquals: hasEquals,
})
return p.parseStart
}
lastHost := p.config.Hosts[len(p.config.Hosts)-1]
if strings.ToLower(key.val) == "include" {
inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1)
if err == ErrDepthExceeded {
p.raiseError(val, err)
return nil
}
if err != nil {
p.raiseErrorf(val, "Error parsing Include directive: %v", err)
return nil
}
lastHost.Nodes = append(lastHost.Nodes, inc)
return p.parseStart
}
kv := &KV{
Key: key.val,
Value: val.val,
Comment: comment,
hasEquals: hasEquals,
leadingSpace: uint16(key.Position.Col) - 1,
position: key.Position,
}
lastHost.Nodes = append(lastHost.Nodes, kv)
return p.parseStart
}
func (p *sshParser) parseComment() sshParserStateFn {
comment := p.getToken()
lastHost := p.config.Hosts[len(p.config.Hosts)-1]
lastHost.Nodes = append(lastHost.Nodes, &Empty{
Comment: comment.val,
// account for the "#" as well
leadingSpace: comment.Position.Col - 2,
position: comment.Position,
})
return p.parseStart
}
func parseSSH(flow chan token, system bool, depth uint8) *Config {
result := newConfig()
result.position = Position{1, 1}
parser := &sshParser{
flow: flow,
config: result,
tokensBuffer: make([]token, 0),
currentTable: make([]string, 0),
seenTableKeys: make([]string, 0),
system: system,
depth: depth,
}
parser.run()
return result
}

25 vendor/github.com/kevinburke/ssh_config/position.go generated vendored Normal file
@@ -0,0 +1,25 @@
package ssh_config
import "fmt"
// Position of a document element within a SSH document.
//
// Line and Col are both 1-indexed positions for the element's line number and
// column number, respectively. Values of zero or less will cause Invalid()
// to return true.
type Position struct {
Line uint32 // line within the document
Col uint16 // column within the line
}
// String representation of the position.
// Displays 1-indexed line and column numbers.
func (p Position) String() string {
return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
}
// Invalid reports whether the position is invalid (i.e. has zero or
// negative values).
func (p Position) Invalid() bool {
return p.Line <= 0 || p.Col <= 0
}

49 vendor/github.com/kevinburke/ssh_config/token.go generated vendored Normal file
@@ -0,0 +1,49 @@
package ssh_config
import "fmt"
type token struct {
Position
typ tokenType
val string
}
func (t token) String() string {
switch t.typ {
case tokenEOF:
return "EOF"
}
return fmt.Sprintf("%q", t.val)
}
type tokenType int
const (
eof = -(iota + 1)
)
const (
tokenError tokenType = iota
tokenEOF
tokenEmptyLine
tokenComment
tokenKey
tokenEquals
tokenString
)
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
func isKeyStartChar(r rune) bool {
return !(isSpace(r) || r == '\r' || r == '\n' || r == eof)
}
// I'm not sure that this is correct
func isKeyChar(r rune) bool {
// Keys start with the first character that isn't whitespace or [ and end
// with the last non-whitespace character before the equals sign. Keys
// cannot contain a # character."
return !(r == '\r' || r == '\n' || r == eof || r == '=')
}

162 vendor/github.com/kevinburke/ssh_config/validators.go generated vendored Normal file
@@ -0,0 +1,162 @@
package ssh_config
import (
"fmt"
"strconv"
"strings"
)
// Default returns the default value for the given keyword, for example "22" if
// the keyword is "Port". Default returns the empty string if the keyword has no
// default, or if the keyword is unknown. Keyword matching is case-insensitive.
//
// Default values are provided by OpenSSH_7.4p1 on a Mac.
func Default(keyword string) string {
return defaults[strings.ToLower(keyword)]
}
// Arguments where the value must be "yes" or "no" and *only* yes or no.
var yesnos = map[string]bool{
strings.ToLower("BatchMode"): true,
strings.ToLower("CanonicalizeFallbackLocal"): true,
strings.ToLower("ChallengeResponseAuthentication"): true,
strings.ToLower("CheckHostIP"): true,
strings.ToLower("ClearAllForwardings"): true,
strings.ToLower("Compression"): true,
strings.ToLower("EnableSSHKeysign"): true,
strings.ToLower("ExitOnForwardFailure"): true,
strings.ToLower("ForwardAgent"): true,
strings.ToLower("ForwardX11"): true,
strings.ToLower("ForwardX11Trusted"): true,
strings.ToLower("GatewayPorts"): true,
strings.ToLower("GSSAPIAuthentication"): true,
strings.ToLower("GSSAPIDelegateCredentials"): true,
strings.ToLower("HostbasedAuthentication"): true,
strings.ToLower("IdentitiesOnly"): true,
strings.ToLower("KbdInteractiveAuthentication"): true,
strings.ToLower("NoHostAuthenticationForLocalhost"): true,
strings.ToLower("PasswordAuthentication"): true,
strings.ToLower("PermitLocalCommand"): true,
strings.ToLower("PubkeyAuthentication"): true,
strings.ToLower("RhostsRSAAuthentication"): true,
strings.ToLower("RSAAuthentication"): true,
strings.ToLower("StreamLocalBindUnlink"): true,
strings.ToLower("TCPKeepAlive"): true,
strings.ToLower("UseKeychain"): true,
strings.ToLower("UsePrivilegedPort"): true,
strings.ToLower("VisualHostKey"): true,
}
var uints = map[string]bool{
strings.ToLower("CanonicalizeMaxDots"): true,
strings.ToLower("CompressionLevel"): true, // 1 to 9
strings.ToLower("ConnectionAttempts"): true,
strings.ToLower("ConnectTimeout"): true,
strings.ToLower("NumberOfPasswordPrompts"): true,
strings.ToLower("Port"): true,
strings.ToLower("ServerAliveCountMax"): true,
strings.ToLower("ServerAliveInterval"): true,
}
func mustBeYesOrNo(lkey string) bool {
return yesnos[lkey]
}
func mustBeUint(lkey string) bool {
return uints[lkey]
}
func validate(key, val string) error {
lkey := strings.ToLower(key)
if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") {
return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val)
}
if mustBeUint(lkey) {
_, err := strconv.ParseUint(val, 10, 64)
if err != nil {
return fmt.Errorf("ssh_config: %v", err)
}
}
return nil
}
var defaults = map[string]string{
strings.ToLower("AddKeysToAgent"): "no",
strings.ToLower("AddressFamily"): "any",
strings.ToLower("BatchMode"): "no",
strings.ToLower("CanonicalizeFallbackLocal"): "yes",
strings.ToLower("CanonicalizeHostname"): "no",
strings.ToLower("CanonicalizeMaxDots"): "1",
strings.ToLower("ChallengeResponseAuthentication"): "yes",
strings.ToLower("CheckHostIP"): "yes",
// TODO is this still the correct cipher
strings.ToLower("Cipher"): "3des",
strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc",
strings.ToLower("ClearAllForwardings"): "no",
strings.ToLower("Compression"): "no",
strings.ToLower("CompressionLevel"): "6",
strings.ToLower("ConnectionAttempts"): "1",
strings.ToLower("ControlMaster"): "no",
strings.ToLower("EnableSSHKeysign"): "no",
strings.ToLower("EscapeChar"): "~",
strings.ToLower("ExitOnForwardFailure"): "no",
strings.ToLower("FingerprintHash"): "sha256",
strings.ToLower("ForwardAgent"): "no",
strings.ToLower("ForwardX11"): "no",
strings.ToLower("ForwardX11Timeout"): "20m",
strings.ToLower("ForwardX11Trusted"): "no",
strings.ToLower("GatewayPorts"): "no",
strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2",
strings.ToLower("GSSAPIAuthentication"): "no",
strings.ToLower("GSSAPIDelegateCredentials"): "no",
strings.ToLower("HashKnownHosts"): "no",
strings.ToLower("HostbasedAuthentication"): "no",
strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
// HostName has a dynamic default (the value passed at the command line).
strings.ToLower("IdentitiesOnly"): "no",
strings.ToLower("IdentityFile"): "~/.ssh/identity",
// IPQoS has a dynamic default based on interactive or non-interactive
// sessions.
strings.ToLower("KbdInteractiveAuthentication"): "yes",
strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1",
strings.ToLower("LogLevel"): "INFO",
strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1",
strings.ToLower("NoHostAuthenticationForLocalhost"): "no",
strings.ToLower("NumberOfPasswordPrompts"): "3",
strings.ToLower("PasswordAuthentication"): "yes",
strings.ToLower("PermitLocalCommand"): "no",
strings.ToLower("Port"): "22",
strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password",
strings.ToLower("Protocol"): "2",
strings.ToLower("ProxyUseFdpass"): "no",
strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
strings.ToLower("PubkeyAuthentication"): "yes",
strings.ToLower("RekeyLimit"): "default none",
strings.ToLower("RhostsRSAAuthentication"): "no",
strings.ToLower("RSAAuthentication"): "yes",
strings.ToLower("ServerAliveCountMax"): "3",
strings.ToLower("ServerAliveInterval"): "0",
strings.ToLower("StreamLocalBindMask"): "0177",
strings.ToLower("StreamLocalBindUnlink"): "no",
strings.ToLower("StrictHostKeyChecking"): "ask",
strings.ToLower("TCPKeepAlive"): "yes",
strings.ToLower("Tunnel"): "no",
strings.ToLower("TunnelDevice"): "any:any",
strings.ToLower("UpdateHostKeys"): "no",
strings.ToLower("UseKeychain"): "no",
strings.ToLower("UsePrivilegedPort"): "no",
strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2",
strings.ToLower("VerifyHostKeyDNS"): "no",
strings.ToLower("VisualHostKey"): "no",
strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth",
}
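As a quick usage sketch of the exported `Default` helper defined above (a hypothetical program; the keywords queried are only examples):

```go
package main

import (
	"fmt"

	"github.com/kevinburke/ssh_config"
)

func main() {
	// Keyword lookup is case-insensitive, so "port" and "Port" behave the same.
	fmt.Println(ssh_config.Default("Port"))         // "22"
	fmt.Println(ssh_config.Default("ForwardAgent")) // "no"

	// Unknown keywords fall through to the empty string.
	fmt.Println(ssh_config.Default("NoSuchKeyword") == "") // true
}
```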

14 vendor/github.com/mcuadros/go-version/compare.go generated vendored

@@ -23,6 +23,8 @@ var specialForms = map[string]int{
 	"pl": 1,
 }
 
+var unknownForm int = -7
+
 // Compares two version number strings, for a particular relationship
 //
 // Usage
@@ -155,5 +157,15 @@ func numVersion(value string) int {
 		return special
 	}
 
-	return -7
+	return unknownForm
+}
+
+func ValidSimpleVersionFormat(value string) bool {
+	normalized := Normalize(value)
+	for _, component := range prepVersion(normalized) {
+		if numVersion(component) == unknownForm {
+			return false
+		}
+	}
+	return true
 }
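A hypothetical sketch of how the new `ValidSimpleVersionFormat` helper can be called; the expected results follow from the `specialForms` table and the `unknownForm` sentinel above:

```go
package main

import (
	"fmt"

	version "github.com/mcuadros/go-version"
)

func main() {
	// Every component normalizes to a number or a known special form ("beta"),
	// so this should report true.
	fmt.Println(version.ValidSimpleVersionFormat("1.0.2-beta1"))

	// "foo" is neither numeric nor a special form, so this should report false.
	fmt.Println(version.ValidSimpleVersionFormat("1.0.foo"))
}
```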

33 vendor/github.com/mcuadros/go-version/group.go generated vendored

@@ -4,6 +4,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 )
 
 type ConstraintGroup struct {
@@ -241,8 +242,38 @@ func (self *ConstraintGroup) parseConstraint(constraint string) []*Constraint {
 	return []*Constraint{{constraint, stabilityModifier}}
 }
 
+// PCRegMap : PreCompiled Regex Map
+type PCRegMap struct {
+	sync.RWMutex
+	m map[string]*regexp.Regexp
+}
+
+// MustCompile : to replace regexp.MustCompile in RegFind.
+func (p *PCRegMap) MustCompile(pattern string) *regexp.Regexp {
+	p.RLock()
+	ret, exist := p.m[pattern]
+	p.RUnlock()
+	if exist {
+		return ret
+	}
+	ret = regexp.MustCompile(pattern)
+	p.Lock()
+	p.m[pattern] = ret
+	p.Unlock()
+	return ret
+}
+
+var (
+	regexpCache *PCRegMap
+)
+
+func init() {
+	regexpCache = new(PCRegMap)
+	regexpCache.m = make(map[string]*regexp.Regexp)
+}
+
 func RegFind(pattern, subject string) []string {
-	reg := regexp.MustCompile(pattern)
+	reg := regexpCache.MustCompile(pattern)
 	matched := reg.FindAllStringSubmatch(subject, -1)
 
 	if matched != nil {

21 vendor/github.com/mitchellh/go-homedir/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

14 vendor/github.com/mitchellh/go-homedir/README.md generated vendored Normal file
@@ -0,0 +1,14 @@
# go-homedir
This is a Go library for detecting the user's home directory without
the use of cgo, so the library can be used in cross-compilation environments.
Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
for a user, and `homedir.Expand()` to expand the `~` in a path to the home
directory.
**Why not just use `os/user`?** The built-in `os/user` package requires
cgo on Darwin systems. This means that any Go code that uses that package
cannot cross compile. But 99% of the time the use for `os/user` is just to
retrieve the home directory, which we can do for the current user without
cgo. This library does that, enabling cross-compilation.
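A minimal sketch of the two calls described above (hypothetical program; the `~/.gitconfig` path is only an example):

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir" // package name is homedir
)

func main() {
	// Dir resolves the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand rewrites a leading "~" to that directory; other paths pass through unchanged.
	cfg, err := homedir.Expand("~/.gitconfig")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("config:", cfg)
}
```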

1 vendor/github.com/mitchellh/go-homedir/go.mod generated vendored Normal file
@@ -0,0 +1 @@
module github.com/mitchellh/go-homedir

157 vendor/github.com/mitchellh/go-homedir/homedir.go generated vendored Normal file
@@ -0,0 +1,157 @@
package homedir
import (
"bytes"
"errors"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
)
// DisableCache will disable caching of the home directory. Caching is enabled
// by default.
var DisableCache bool
var homedirCache string
var cacheLock sync.RWMutex
// Dir returns the home directory for the executing user.
//
// This uses an OS-specific method for discovering the home directory.
// An error is returned if a home directory cannot be detected.
func Dir() (string, error) {
if !DisableCache {
cacheLock.RLock()
cached := homedirCache
cacheLock.RUnlock()
if cached != "" {
return cached, nil
}
}
cacheLock.Lock()
defer cacheLock.Unlock()
var result string
var err error
if runtime.GOOS == "windows" {
result, err = dirWindows()
} else {
// Unix-like system, so just assume Unix
result, err = dirUnix()
}
if err != nil {
return "", err
}
homedirCache = result
return result, nil
}
// Expand expands the path to include the home directory if the path
// is prefixed with `~`. If it isn't prefixed with `~`, the path is
// returned as-is.
func Expand(path string) (string, error) {
if len(path) == 0 {
return path, nil
}
if path[0] != '~' {
return path, nil
}
if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
return "", errors.New("cannot expand user-specific home dir")
}
dir, err := Dir()
if err != nil {
return "", err
}
return filepath.Join(dir, path[1:]), nil
}
func dirUnix() (string, error) {
homeEnv := "HOME"
if runtime.GOOS == "plan9" {
// On plan9, env vars are lowercase.
homeEnv = "home"
}
// First prefer the HOME environmental variable
if home := os.Getenv(homeEnv); home != "" {
return home, nil
}
var stdout bytes.Buffer
// If that fails, try OS specific commands
if runtime.GOOS == "darwin" {
cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
cmd.Stdout = &stdout
if err := cmd.Run(); err == nil {
result := strings.TrimSpace(stdout.String())
if result != "" {
return result, nil
}
}
} else {
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
// If the error is ErrNotFound, we ignore it. Otherwise, return it.
if err != exec.ErrNotFound {
return "", err
}
} else {
if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
// username:password:uid:gid:gecos:home:shell
passwdParts := strings.SplitN(passwd, ":", 7)
if len(passwdParts) > 5 {
return passwdParts[5], nil
}
}
}
}
// If all else fails, try the shell
stdout.Reset()
cmd := exec.Command("sh", "-c", "cd && pwd")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func dirWindows() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// Prefer standard environment variable USERPROFILE
if home := os.Getenv("USERPROFILE"); home != "" {
return home, nil
}
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
}
return home, nil
}

1 vendor/github.com/pelletier/go-buffruneio/.gitignore generated vendored Normal file
@@ -0,0 +1 @@
*.test

7 vendor/github.com/pelletier/go-buffruneio/.travis.yml generated vendored Normal file
@@ -0,0 +1,7 @@
language: go
sudo: false
go:
- 1.3.3
- 1.4.3
- 1.5.3
- tip

62 vendor/github.com/pelletier/go-buffruneio/README.md generated vendored Normal file
@@ -0,0 +1,62 @@
# buffruneio
[![Tests Status](https://travis-ci.org/pelletier/go-buffruneio.svg?branch=master)](https://travis-ci.org/pelletier/go-buffruneio)
[![GoDoc](https://godoc.org/github.com/pelletier/go-buffruneio?status.svg)](https://godoc.org/github.com/pelletier/go-buffruneio)
Buffruneio is a wrapper around bufio to provide buffered runes access with
unlimited unreads.
```go
import "github.com/pelletier/go-buffruneio"
```
## Examples
```go
import (
"fmt"
"github.com/pelletier/go-buffruneio"
"strings"
)
reader := buffruneio.NewReader(strings.NewReader("abcd"))
fmt.Println(reader.ReadRune()) // 'a'
fmt.Println(reader.ReadRune()) // 'b'
fmt.Println(reader.ReadRune()) // 'c'
reader.UnreadRune()
reader.UnreadRune()
fmt.Println(reader.ReadRune()) // 'b'
fmt.Println(reader.ReadRune()) // 'c'
```
## Documentation
The documentation and additional examples are available at
[godoc.org](http://godoc.org/github.com/pelletier/go-buffruneio).
## Contribute
Feel free to report bugs and patches using GitHub's pull requests system on
[pelletier/go-buffruneio](https://github.com/pelletier/go-buffruneio). Any feedback is
much appreciated!
## LICENSE
Copyright (c) 2016 Thomas Pelletier
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

117 vendor/github.com/pelletier/go-buffruneio/buffruneio.go generated vendored Normal file
@@ -0,0 +1,117 @@
// Package buffruneio is a wrapper around bufio to provide buffered runes access with unlimited unreads.
package buffruneio
import (
"bufio"
"container/list"
"errors"
"io"
)
// Rune to indicate end of file.
const (
EOF = -(iota + 1)
)
// ErrNoRuneToUnread is returned by UnreadRune() when the read index is already at the beginning of the buffer.
var ErrNoRuneToUnread = errors.New("no rune to unwind")
// Reader implements runes buffering for an io.Reader object.
type Reader struct {
buffer *list.List
current *list.Element
input *bufio.Reader
}
// NewReader returns a new Reader.
func NewReader(rd io.Reader) *Reader {
return &Reader{
buffer: list.New(),
input: bufio.NewReader(rd),
}
}
type runeWithSize struct {
r rune
size int
}
func (rd *Reader) feedBuffer() error {
r, size, err := rd.input.ReadRune()
if err != nil {
if err != io.EOF {
return err
}
r = EOF
}
newRuneWithSize := runeWithSize{r, size}
rd.buffer.PushBack(newRuneWithSize)
if rd.current == nil {
rd.current = rd.buffer.Back()
}
return nil
}
// ReadRune reads the next rune from buffer, or from the underlying reader if needed.
func (rd *Reader) ReadRune() (rune, int, error) {
if rd.current == rd.buffer.Back() || rd.current == nil {
err := rd.feedBuffer()
if err != nil {
return EOF, 0, err
}
}
runeWithSize := rd.current.Value.(runeWithSize)
rd.current = rd.current.Next()
return runeWithSize.r, runeWithSize.size, nil
}
// UnreadRune pushes back the previously read rune in the buffer, extending it if needed.
func (rd *Reader) UnreadRune() error {
if rd.current == rd.buffer.Front() {
return ErrNoRuneToUnread
}
if rd.current == nil {
rd.current = rd.buffer.Back()
} else {
rd.current = rd.current.Prev()
}
return nil
}
// Forget removes runes stored before the current stream position index.
func (rd *Reader) Forget() {
if rd.current == nil {
rd.current = rd.buffer.Back()
}
for ; rd.current != rd.buffer.Front(); rd.buffer.Remove(rd.current.Prev()) {
}
}
// PeekRunes returns at most the next n runes, reading from the underlying source if
// needed. Does not move the current index. It includes EOF if reached.
func (rd *Reader) PeekRunes(n int) []rune {
res := make([]rune, 0, n)
cursor := rd.current
for i := 0; i < n; i++ {
if cursor == nil {
err := rd.feedBuffer()
if err != nil {
return res
}
cursor = rd.buffer.Back()
}
if cursor != nil {
r := cursor.Value.(runeWithSize).r
res = append(res, r)
if r == EOF {
return res
}
cursor = cursor.Next()
}
}
return res
}

54 vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored

@@ -559,10 +559,14 @@ type UnifiedDiff struct {
 func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
 	buf := bufio.NewWriter(writer)
 	defer buf.Flush()
-	w := func(format string, args ...interface{}) error {
+	wf := func(format string, args ...interface{}) error {
 		_, err := buf.WriteString(fmt.Sprintf(format, args...))
 		return err
 	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
 
 	if len(diff.Eol) == 0 {
 		diff.Eol = "\n"
@@ -581,26 +585,28 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
 			if len(diff.ToDate) > 0 {
 				toDate = "\t" + diff.ToDate
 			}
-			err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
-			if err != nil {
-				return err
-			}
-			err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
-			if err != nil {
-				return err
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		first, last := g[0], g[len(g)-1]
 		range1 := formatRangeUnified(first.I1, last.I2)
 		range2 := formatRangeUnified(first.J1, last.J2)
-		if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
 			return err
 		}
 		for _, c := range g {
 			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
 			if c.Tag == 'e' {
 				for _, line := range diff.A[i1:i2] {
-					if err := w(" " + line); err != nil {
+					if err := ws(" " + line); err != nil {
 						return err
 					}
 				}
@@ -608,14 +614,14 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
 			}
 			if c.Tag == 'r' || c.Tag == 'd' {
 				for _, line := range diff.A[i1:i2] {
-					if err := w("-" + line); err != nil {
+					if err := ws("-" + line); err != nil {
 						return err
 					}
 				}
 			}
 			if c.Tag == 'r' || c.Tag == 'i' {
 				for _, line := range diff.B[j1:j2] {
-					if err := w("+" + line); err != nil {
+					if err := ws("+" + line); err != nil {
 						return err
 					}
 				}
@@ -669,12 +675,18 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
 	buf := bufio.NewWriter(writer)
 	defer buf.Flush()
 	var diffErr error
-	w := func(format string, args ...interface{}) {
+	wf := func(format string, args ...interface{}) {
 		_, err := buf.WriteString(fmt.Sprintf(format, args...))
 		if diffErr == nil && err != nil {
 			diffErr = err
 		}
 	}
+	ws := func(s string) {
+		_, err := buf.WriteString(s)
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
 
 	if len(diff.Eol) == 0 {
 		diff.Eol = "\n"
@@ -700,15 +712,17 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
 			if len(diff.ToDate) > 0 {
 				toDate = "\t" + diff.ToDate
 			}
-			w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
-			w("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+			if diff.FromFile != "" || diff.ToFile != "" {
+				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+			}
 		}
 
 		first, last := g[0], g[len(g)-1]
-		w("***************" + diff.Eol)
+		ws("***************" + diff.Eol)
 
 		range1 := formatRangeContext(first.I1, last.I2)
-		w("*** %s ****%s", range1, diff.Eol)
+		wf("*** %s ****%s", range1, diff.Eol)
 		for _, c := range g {
 			if c.Tag == 'r' || c.Tag == 'd' {
 				for _, cc := range g {
@@ -716,7 +730,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
 						continue
 					}
 					for _, line := range diff.A[cc.I1:cc.I2] {
-						w(prefix[cc.Tag] + line)
+						ws(prefix[cc.Tag] + line)
 					}
 				}
 				break
@@ -724,7 +738,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
 		}
 
 		range2 := formatRangeContext(first.J1, last.J2)
-		w("--- %s ----%s", range2, diff.Eol)
+		wf("--- %s ----%s", range2, diff.Eol)
 		for _, c := range g {
 			if c.Tag == 'r' || c.Tag == 'i' {
 				for _, cc := range g {
@@ -732,7 +746,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
 						continue
 					}
 					for _, line := range diff.B[cc.J1:cc.J2] {
-						w(prefix[cc.Tag] + line)
+						ws(prefix[cc.Tag] + line)
 					}
 				}
 				break

25 vendor/github.com/sergi/go-diff/AUTHORS generated vendored Normal file
@@ -0,0 +1,25 @@
# This is the official list of go-diff authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Danny Yoo <dannyyoo@google.com>
James Kolb <jkolb@google.com>
Jonathan Amsterdam <jba@google.com>
Markus Zimmermann <markus.zimmermann@nethead.at> <markus.zimmermann@symflower.com> <zimmski@gmail.com>
Matt Kovars <akaskik@gmail.com>
Örjan Persson <orjan@spotify.com>
Osman Masood <oamasood@gmail.com>
Robert Carlsen <rwcarlsen@gmail.com>
Rory Flynn <roryflynn@users.noreply.github.com>
Sergi Mansilla <sergi.mansilla@gmail.com>
Shatrugna Sadhu <ssadhu@apcera.com>
Shawn Smith <shawnpsmith@gmail.com>
Stas Maksimov <maksimov@gmail.com>
Tor Arvid Lund <torarvid@gmail.com>
Zac Bergquist <zbergquist99@gmail.com>

32 vendor/github.com/sergi/go-diff/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,32 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the go-diff
# repository.
#
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, ACME Inc. employees would be listed here
# but not in AUTHORS, because ACME Inc. would hold the copyright.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Danny Yoo <dannyyoo@google.com>
James Kolb <jkolb@google.com>
Jonathan Amsterdam <jba@google.com>
Markus Zimmermann <markus.zimmermann@nethead.at> <markus.zimmermann@symflower.com> <zimmski@gmail.com>
Matt Kovars <akaskik@gmail.com>
Örjan Persson <orjan@spotify.com>
Osman Masood <oamasood@gmail.com>
Robert Carlsen <rwcarlsen@gmail.com>
Rory Flynn <roryflynn@users.noreply.github.com>
Sergi Mansilla <sergi.mansilla@gmail.com>
Shatrugna Sadhu <ssadhu@apcera.com>
Shawn Smith <shawnpsmith@gmail.com>
Stas Maksimov <maksimov@gmail.com>
Tor Arvid Lund <torarvid@gmail.com>
Zac Bergquist <zbergquist99@gmail.com>

20 vendor/github.com/sergi/go-diff/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (c) 2012-2016 The go-diff Authors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

1344 vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go generated vendored Normal file
Diff not shown because the file is too large.

46 vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go generated vendored Normal file
@@ -0,0 +1,46 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text.
package diffmatchpatch
import (
"time"
)
// DiffMatchPatch holds the configuration for diff-match-patch operations.
type DiffMatchPatch struct {
// Number of seconds to map a diff before giving up (0 for infinity).
DiffTimeout time.Duration
// Cost of an empty edit operation in terms of edit characters.
DiffEditCost int
// How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match).
MatchDistance int
// When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match.
PatchDeleteThreshold float64
// Chunk size for context length.
PatchMargin int
// The number of bits in an int.
MatchMaxBits int
// At what point is no match declared (0.0 = perfection, 1.0 = very loose).
MatchThreshold float64
}
// New creates a new DiffMatchPatch object with default parameters.
func New() *DiffMatchPatch {
// Defaults.
return &DiffMatchPatch{
DiffTimeout: time.Second,
DiffEditCost: 4,
MatchThreshold: 0.5,
MatchDistance: 1000,
PatchDeleteThreshold: 0.5,
PatchMargin: 4,
MatchMaxBits: 32,
}
}

160 vendor/github.com/sergi/go-diff/diffmatchpatch/match.go generated vendored Normal file
@@ -0,0 +1,160 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
import (
"math"
)
// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'.
// Returns -1 if no match found.
func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int {
// Check for null inputs not needed since null can't be passed in C#.
loc = int(math.Max(0, math.Min(float64(loc), float64(len(text)))))
if text == pattern {
// Shortcut (potentially not guaranteed by the algorithm)
return 0
} else if len(text) == 0 {
// Nothing to match.
return -1
} else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern {
// Perfect match at the perfect spot! (Includes case of null pattern)
return loc
}
// Do a fuzzy compare.
return dmp.MatchBitap(text, pattern, loc)
}
// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm.
// Returns -1 if no match was found.
func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int {
// Initialise the alphabet.
s := dmp.MatchAlphabet(pattern)
// Highest score beyond which we give up.
scoreThreshold := dmp.MatchThreshold
// Is there a nearby exact match? (speedup)
bestLoc := indexOf(text, pattern, loc)
if bestLoc != -1 {
scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
pattern), scoreThreshold)
// What about in the other direction? (speedup)
bestLoc = lastIndexOf(text, pattern, loc+len(pattern))
if bestLoc != -1 {
scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
pattern), scoreThreshold)
}
}
// Initialise the bit arrays.
matchmask := 1 << uint((len(pattern) - 1))
bestLoc = -1
var binMin, binMid int
binMax := len(pattern) + len(text)
lastRd := []int{}
for d := 0; d < len(pattern); d++ {
// Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level.
binMin = 0
binMid = binMax
for binMin < binMid {
if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold {
binMin = binMid
} else {
binMax = binMid
}
binMid = (binMax-binMin)/2 + binMin
}
// Use the result from this iteration as the maximum for the next.
binMax = binMid
start := int(math.Max(1, float64(loc-binMid+1)))
finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern)))
rd := make([]int, finish+2)
rd[finish+1] = (1 << uint(d)) - 1
for j := finish; j >= start; j-- {
var charMatch int
if len(text) <= j-1 {
// Out of range.
charMatch = 0
} else if _, ok := s[text[j-1]]; !ok {
charMatch = 0
} else {
charMatch = s[text[j-1]]
}
if d == 0 {
// First pass: exact match.
rd[j] = ((rd[j+1] << 1) | 1) & charMatch
} else {
// Subsequent passes: fuzzy match.
rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
}
if (rd[j] & matchmask) != 0 {
score := dmp.matchBitapScore(d, j-1, loc, pattern)
// This match will almost certainly be better than any existing match. But check anyway.
if score <= scoreThreshold {
// Told you so.
scoreThreshold = score
bestLoc = j - 1
if bestLoc > loc {
// When passing loc, don't exceed our current distance from loc.
start = int(math.Max(1, float64(2*loc-bestLoc)))
} else {
// Already passed loc, downhill from here on in.
break
}
}
}
}
if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
// No hope for a (better) match at greater error levels.
break
}
lastRd = rd
}
return bestLoc
}
// matchBitapScore computes and returns the score for a match with e errors and x location.
func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
accuracy := float64(e) / float64(len(pattern))
proximity := math.Abs(float64(loc - x))
if dmp.MatchDistance == 0 {
// Dodge divide by zero error.
if proximity == 0 {
return accuracy
}
return 1.0
}
return accuracy + (proximity / float64(dmp.MatchDistance))
}
// MatchAlphabet initialises the alphabet for the Bitap algorithm.
func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
s := map[byte]int{}
charPattern := []byte(pattern)
for _, c := range charPattern {
_, ok := s[c]
if !ok {
s[c] = 0
}
}
i := 0
for _, c := range charPattern {
value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
s[c] = value
i++
}
return s
}

23 vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
func min(x, y int) int {
if x < y {
return x
}
return y
}
func max(x, y int) int {
if x > y {
return x
}
return y
}

556 vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go generated vendored Normal file
@@ -0,0 +1,556 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
import (
"bytes"
"errors"
"math"
"net/url"
"regexp"
"strconv"
"strings"
)
// Patch represents one patch operation.
type Patch struct {
diffs []Diff
Start1 int
Start2 int
Length1 int
Length2 int
}
// String emulates GNU diff's format.
// Header: @@ -382,8 +481,9 @@
// Indices are printed as 1-based, not 0-based.
func (p *Patch) String() string {
var coords1, coords2 string
if p.Length1 == 0 {
coords1 = strconv.Itoa(p.Start1) + ",0"
} else if p.Length1 == 1 {
coords1 = strconv.Itoa(p.Start1 + 1)
} else {
coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1)
}
if p.Length2 == 0 {
coords2 = strconv.Itoa(p.Start2) + ",0"
} else if p.Length2 == 1 {
coords2 = strconv.Itoa(p.Start2 + 1)
} else {
coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2)
}
var text bytes.Buffer
_, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
// Escape the body of the patch with %xx notation.
for _, aDiff := range p.diffs {
switch aDiff.Type {
case DiffInsert:
_, _ = text.WriteString("+")
case DiffDelete:
_, _ = text.WriteString("-")
case DiffEqual:
_, _ = text.WriteString(" ")
}
_, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
_, _ = text.WriteString("\n")
}
return unescaper.Replace(text.String())
}
// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits.
func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch {
if len(text) == 0 {
return patch
}
pattern := text[patch.Start2 : patch.Start2+patch.Length1]
padding := 0
// Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length.
for strings.Index(text, pattern) != strings.LastIndex(text, pattern) &&
len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin {
padding += dmp.PatchMargin
maxStart := max(0, patch.Start2-padding)
minEnd := min(len(text), patch.Start2+patch.Length1+padding)
pattern = text[maxStart:minEnd]
}
// Add one chunk for good luck.
padding += dmp.PatchMargin
// Add the prefix.
prefix := text[max(0, patch.Start2-padding):patch.Start2]
if len(prefix) != 0 {
patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...)
}
// Add the suffix.
suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)]
if len(suffix) != 0 {
patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix})
}
// Roll back the start points.
patch.Start1 -= len(prefix)
patch.Start2 -= len(prefix)
// Extend the lengths.
patch.Length1 += len(prefix) + len(suffix)
patch.Length2 += len(prefix) + len(suffix)
return patch
}
// PatchMake computes a list of patches.
func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch {
if len(opt) == 1 {
diffs, _ := opt[0].([]Diff)
text1 := dmp.DiffText1(diffs)
return dmp.PatchMake(text1, diffs)
} else if len(opt) == 2 {
text1 := opt[0].(string)
switch t := opt[1].(type) {
case string:
diffs := dmp.DiffMain(text1, t, true)
if len(diffs) > 2 {
diffs = dmp.DiffCleanupSemantic(diffs)
diffs = dmp.DiffCleanupEfficiency(diffs)
}
return dmp.PatchMake(text1, diffs)
case []Diff:
return dmp.patchMake2(text1, t)
}
} else if len(opt) == 3 {
return dmp.PatchMake(opt[0], opt[2])
}
return []Patch{}
}
// patchMake2 computes a list of patches to turn text1 into text2.
// text2 is not provided, diffs are the delta between text1 and text2.
func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
// Check for null inputs not needed since null can't be passed in C#.
patches := []Patch{}
if len(diffs) == 0 {
return patches // Get rid of the null case.
}
patch := Patch{}
charCount1 := 0 // Number of characters into the text1 string.
charCount2 := 0 // Number of characters into the text2 string.
// Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info.
prepatchText := text1
postpatchText := text1
for i, aDiff := range diffs {
if len(patch.diffs) == 0 && aDiff.Type != DiffEqual {
// A new patch starts here.
patch.Start1 = charCount1
patch.Start2 = charCount2
}
switch aDiff.Type {
case DiffInsert:
patch.diffs = append(patch.diffs, aDiff)
patch.Length2 += len(aDiff.Text)
postpatchText = postpatchText[:charCount2] +
aDiff.Text + postpatchText[charCount2:]
case DiffDelete:
patch.Length1 += len(aDiff.Text)
patch.diffs = append(patch.diffs, aDiff)
postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
case DiffEqual:
if len(aDiff.Text) <= 2*dmp.PatchMargin &&
len(patch.diffs) != 0 && i != len(diffs)-1 {
// Small equality inside a patch.
patch.diffs = append(patch.diffs, aDiff)
patch.Length1 += len(aDiff.Text)
patch.Length2 += len(aDiff.Text)
}
if len(aDiff.Text) >= 2*dmp.PatchMargin {
// Time for a new patch.
if len(patch.diffs) != 0 {
patch = dmp.PatchAddContext(patch, prepatchText)
patches = append(patches, patch)
patch = Patch{}
// Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch.
prepatchText = postpatchText
charCount1 = charCount2
}
}
}
// Update the current character count.
if aDiff.Type != DiffInsert {
charCount1 += len(aDiff.Text)
}
if aDiff.Type != DiffDelete {
charCount2 += len(aDiff.Text)
}
}
// Pick up the leftover patch if not empty.
if len(patch.diffs) != 0 {
patch = dmp.PatchAddContext(patch, prepatchText)
patches = append(patches, patch)
}
return patches
}
// PatchDeepCopy returns an array that is identical to a given an array of patches.
func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch {
patchesCopy := []Patch{}
for _, aPatch := range patches {
patchCopy := Patch{}
for _, aDiff := range aPatch.diffs {
patchCopy.diffs = append(patchCopy.diffs, Diff{
aDiff.Type,
aDiff.Text,
})
}
patchCopy.Start1 = aPatch.Start1
patchCopy.Start2 = aPatch.Start2
patchCopy.Length1 = aPatch.Length1
patchCopy.Length2 = aPatch.Length2
patchesCopy = append(patchesCopy, patchCopy)
}
return patchesCopy
}
// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied.
func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) {
if len(patches) == 0 {
return text, []bool{}
}
// Deep copy the patches so that no changes are made to originals.
patches = dmp.PatchDeepCopy(patches)
nullPadding := dmp.PatchAddPadding(patches)
text = nullPadding + text + nullPadding
patches = dmp.PatchSplitMax(patches)
x := 0
// delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22.
delta := 0
results := make([]bool, len(patches))
for _, aPatch := range patches {
expectedLoc := aPatch.Start2 + delta
text1 := dmp.DiffText1(aPatch.diffs)
var startLoc int
endLoc := -1
if len(text1) > dmp.MatchMaxBits {
// PatchSplitMax will only provide an oversized pattern in the case of a monster delete.
startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc)
if startLoc != -1 {
endLoc = dmp.MatchMain(text,
text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits)
if endLoc == -1 || startLoc >= endLoc {
// Can't find valid trailing context. Drop this patch.
startLoc = -1
}
}
} else {
startLoc = dmp.MatchMain(text, text1, expectedLoc)
}
if startLoc == -1 {
// No match found. :(
results[x] = false
// Subtract the delta for this failed patch from subsequent patches.
delta -= aPatch.Length2 - aPatch.Length1
} else {
// Found a match. :)
results[x] = true
delta = startLoc - expectedLoc
var text2 string
if endLoc == -1 {
text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))]
} else {
text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))]
}
if text1 == text2 {
// Perfect match, just shove the Replacement text in.
text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):]
} else {
// Imperfect match. Run a diff to get a framework of equivalent indices.
diffs := dmp.DiffMain(text1, text2, false)
if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold {
// The end points match, but the content is unacceptably bad.
results[x] = false
} else {
diffs = dmp.DiffCleanupSemanticLossless(diffs)
index1 := 0
for _, aDiff := range aPatch.diffs {
if aDiff.Type != DiffEqual {
index2 := dmp.DiffXIndex(diffs, index1)
if aDiff.Type == DiffInsert {
// Insertion
text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:]
} else if aDiff.Type == DiffDelete {
// Deletion
startIndex := startLoc + index2
text = text[:startIndex] +
text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:]
}
}
if aDiff.Type != DiffDelete {
index1 += len(aDiff.Text)
}
}
}
}
}
x++
}
// Strip the padding off.
text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))]
return text, results
}
// PatchAddPadding adds some padding on text start and end so that edges can match something.
// Intended to be called only from within patchApply.
func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
paddingLength := dmp.PatchMargin
nullPadding := ""
for x := 1; x <= paddingLength; x++ {
nullPadding += string(x)
}
// Bump all the patches forward.
for i := range patches {
patches[i].Start1 += paddingLength
patches[i].Start2 += paddingLength
}
// Add some padding on start of first diff.
if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual {
// Add nullPadding equality.
patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...)
patches[0].Start1 -= paddingLength // Should be 0.
patches[0].Start2 -= paddingLength // Should be 0.
patches[0].Length1 += paddingLength
patches[0].Length2 += paddingLength
} else if paddingLength > len(patches[0].diffs[0].Text) {
// Grow first equality.
extraLength := paddingLength - len(patches[0].diffs[0].Text)
patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text
patches[0].Start1 -= extraLength
patches[0].Start2 -= extraLength
patches[0].Length1 += extraLength
patches[0].Length2 += extraLength
}
// Add some padding on end of last diff.
last := len(patches) - 1
if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual {
// Add nullPadding equality.
patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding})
patches[last].Length1 += paddingLength
patches[last].Length2 += paddingLength
} else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) {
// Grow last equality.
lastDiff := patches[last].diffs[len(patches[last].diffs)-1]
extraLength := paddingLength - len(lastDiff.Text)
patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength]
patches[last].Length1 += extraLength
patches[last].Length2 += extraLength
}
return nullPadding
}
// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm.
// Intended to be called only from within patchApply.
func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch {
patchSize := dmp.MatchMaxBits
for x := 0; x < len(patches); x++ {
if patches[x].Length1 <= patchSize {
continue
}
bigpatch := patches[x]
// Remove the big old patch.
patches = append(patches[:x], patches[x+1:]...)
x--
Start1 := bigpatch.Start1
Start2 := bigpatch.Start2
precontext := ""
for len(bigpatch.diffs) != 0 {
// Create one of several smaller patches.
patch := Patch{}
empty := true
patch.Start1 = Start1 - len(precontext)
patch.Start2 = Start2 - len(precontext)
if len(precontext) != 0 {
patch.Length1 = len(precontext)
patch.Length2 = len(precontext)
patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext})
}
for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin {
diffType := bigpatch.diffs[0].Type
diffText := bigpatch.diffs[0].Text
if diffType == DiffInsert {
// Insertions are harmless.
patch.Length2 += len(diffText)
Start2 += len(diffText)
patch.diffs = append(patch.diffs, bigpatch.diffs[0])
bigpatch.diffs = bigpatch.diffs[1:]
empty = false
} else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize {
// This is a large deletion. Let it pass in one chunk.
patch.Length1 += len(diffText)
Start1 += len(diffText)
empty = false
patch.diffs = append(patch.diffs, Diff{diffType, diffText})
bigpatch.diffs = bigpatch.diffs[1:]
} else {
// Deletion or equality. Only take as much as we can stomach.
diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)]
patch.Length1 += len(diffText)
Start1 += len(diffText)
if diffType == DiffEqual {
patch.Length2 += len(diffText)
Start2 += len(diffText)
} else {
empty = false
}
patch.diffs = append(patch.diffs, Diff{diffType, diffText})
if diffText == bigpatch.diffs[0].Text {
bigpatch.diffs = bigpatch.diffs[1:]
} else {
bigpatch.diffs[0].Text =
bigpatch.diffs[0].Text[len(diffText):]
}
}
}
// Compute the head context for the next patch.
precontext = dmp.DiffText2(patch.diffs)
precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):]
postcontext := ""
// Append the end context for this patch.
if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin {
postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin]
} else {
postcontext = dmp.DiffText1(bigpatch.diffs)
}
if len(postcontext) != 0 {
patch.Length1 += len(postcontext)
patch.Length2 += len(postcontext)
if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual {
patch.diffs[len(patch.diffs)-1].Text += postcontext
} else {
patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext})
}
}
if !empty {
x++
patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...)
}
}
}
return patches
}
// PatchToText takes a list of patches and returns a textual representation.
func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string {
var text bytes.Buffer
for _, aPatch := range patches {
_, _ = text.WriteString(aPatch.String())
}
return text.String()
}
// PatchFromText parses a textual representation of patches and returns a List of Patch objects.
func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) {
patches := []Patch{}
if len(textline) == 0 {
return patches, nil
}
text := strings.Split(textline, "\n")
textPointer := 0
patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$")
var patch Patch
var sign uint8
var line string
for textPointer < len(text) {
if !patchHeader.MatchString(text[textPointer]) {
return patches, errors.New("Invalid patch string: " + text[textPointer])
}
patch = Patch{}
m := patchHeader.FindStringSubmatch(text[textPointer])
patch.Start1, _ = strconv.Atoi(m[1])
if len(m[2]) == 0 {
patch.Start1--
patch.Length1 = 1
} else if m[2] == "0" {
patch.Length1 = 0
} else {
patch.Start1--
patch.Length1, _ = strconv.Atoi(m[2])
}
patch.Start2, _ = strconv.Atoi(m[3])
if len(m[4]) == 0 {
patch.Start2--
patch.Length2 = 1
} else if m[4] == "0" {
patch.Length2 = 0
} else {
patch.Start2--
patch.Length2, _ = strconv.Atoi(m[4])
}
textPointer++
for textPointer < len(text) {
if len(text[textPointer]) > 0 {
sign = text[textPointer][0]
} else {
textPointer++
continue
}
line = text[textPointer][1:]
line = strings.Replace(line, "+", "%2b", -1)
line, _ = url.QueryUnescape(line)
if sign == '-' {
// Deletion.
patch.diffs = append(patch.diffs, Diff{DiffDelete, line})
} else if sign == '+' {
// Insertion.
patch.diffs = append(patch.diffs, Diff{DiffInsert, line})
} else if sign == ' ' {
// Minor equality.
patch.diffs = append(patch.diffs, Diff{DiffEqual, line})
} else if sign == '@' {
// Start of next patch.
break
} else {
// WTF?
return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line))
}
textPointer++
}
patches = append(patches, patch)
}
return patches, nil
}
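As a rough end-to-end sketch of the patch API defined in this file (`PatchMake`, `PatchToText`, `PatchApply`), using the `New` constructor from diffmatchpatch.go; the sample strings are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	text1 := "The quick brown fox jumped over the lazy dog."
	text2 := "The quick red fox jumps over the lazy dog."

	// With two strings, PatchMake diffs them internally (the len(opt) == 2 branch above).
	patches := dmp.PatchMake(text1, text2)

	// Textual form, in the GNU-diff-like format produced by Patch.String.
	fmt.Print(dmp.PatchToText(patches))

	// Apply the patches back onto text1; the bool slice reports per-patch success.
	patched, applied := dmp.PatchApply(patches, text1)
	fmt.Println(patched, applied)
}
```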

88 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go generated vendored Normal file
@@ -0,0 +1,88 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
import (
"strings"
"unicode/utf8"
)
// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI.
// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc.
var unescaper = strings.NewReplacer(
"%21", "!", "%7E", "~", "%27", "'",
"%28", "(", "%29", ")", "%3B", ";",
"%2F", "/", "%3F", "?", "%3A", ":",
"%40", "@", "%26", "&", "%3D", "=",
"%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*")
// indexOf returns the first index of pattern in str, starting at str[i].
func indexOf(str string, pattern string, i int) int {
if i > len(str)-1 {
return -1
}
if i <= 0 {
return strings.Index(str, pattern)
}
ind := strings.Index(str[i:], pattern)
if ind == -1 {
return -1
}
return ind + i
}
// lastIndexOf returns the last index of pattern in str, starting at str[i].
func lastIndexOf(str string, pattern string, i int) int {
if i < 0 {
return -1
}
if i >= len(str) {
return strings.LastIndex(str, pattern)
}
_, size := utf8.DecodeRuneInString(str[i:])
return strings.LastIndex(str[:i+size], pattern)
}
// runesIndexOf returns the index of pattern in target, starting at target[i].
func runesIndexOf(target, pattern []rune, i int) int {
if i > len(target)-1 {
return -1
}
if i <= 0 {
return runesIndex(target, pattern)
}
ind := runesIndex(target[i:], pattern)
if ind == -1 {
return -1
}
return ind + i
}
func runesEqual(r1, r2 []rune) bool {
if len(r1) != len(r2) {
return false
}
for i, c := range r1 {
if c != r2[i] {
return false
}
}
return true
}
// runesIndex is the equivalent of strings.Index for rune slices.
func runesIndex(r1, r2 []rune) int {
last := len(r1) - len(r2)
for i := 0; i <= last; i++ {
if runesEqual(r1[i:i+len(r2)], r2) {
return i
}
}
return -1
}

28
vendor/github.com/src-d/gcfg/LICENSE generated vendored Normal file

@ -0,0 +1,28 @@
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

4
vendor/github.com/src-d/gcfg/README generated vendored Normal file

@ -0,0 +1,4 @@
Gcfg reads INI-style configuration files into Go structs;
supports user-defined types and subsections.
Package docs: https://godoc.org/gopkg.in/gcfg.v1

145
vendor/github.com/src-d/gcfg/doc.go generated vendored Normal file

@ -0,0 +1,145 @@
// Package gcfg reads "INI-style" text-based configuration files with
// "name=value" pairs grouped into sections (gcfg files).
//
// This package is still a work in progress; see the sections below for planned
// changes.
//
// Syntax
//
// The syntax is based on that used by git config:
// http://git-scm.com/docs/git-config#_syntax .
// There are some (planned) differences compared to the git config format:
// - improve data portability:
// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
// - include and "path" type is not supported
// (path type may be implementable as a user-defined type)
// - internationalization
// - section and variable names can contain unicode letters, unicode digits
// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
// (U+002D), starting with a unicode letter
// - disallow potentially ambiguous or misleading definitions:
// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
// - `[sec ""]` is not allowed
// - use `[sec]` for section name "sec" and empty subsection name
// - (planned) within a single file, definitions must be contiguous for each:
// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
//
// Data structure
//
// The functions in this package read values into a user-defined struct.
// Each section corresponds to a struct field in the config struct, and each
// variable in a section corresponds to a data field in the section struct.
// The mapping of each section or variable name to fields is done either based
// on the "gcfg" struct tag or by matching the name of the section or variable,
// ignoring case. In the latter case, hyphens '-' in section and variable names
// correspond to underscores '_' in field names.
// Fields must be exported; to use a section or variable name starting with a
// letter that is neither upper- or lower-case, prefix the field name with 'X'.
// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
//
// For sections with subsections, the corresponding field in config must be a
// map, rather than a struct, with string keys and pointer-to-struct values.
// Values for subsection variables are stored in the map with the subsection
// name used as the map key.
// (Note that unlike section and variable names, subsection names are case
// sensitive.)
// When using a map, and there is a section with the same section name but
// without a subsection name, its values are stored with the empty string used
// as the key.
// It is possible to provide default values for subsections in the section
// "default-<sectionname>" (or by setting values in the corresponding struct
// field "Default_<sectionname>").
//
// The functions in this package panic if config is not a pointer to a struct,
// or when a field is not of a suitable type (either a struct or a map with
// string keys and pointer-to-struct values).
//
// Parsing of values
//
// The section structs in the config struct may contain single-valued or
// multi-valued variables. Variables of unnamed slice type (that is, a type
// starting with `[]`) are treated as multi-value; all others (including named
// slice types) are treated as single-valued variables.
//
// Single-valued variables are handled based on the type as follows.
// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
// and if necessary, a new instance is allocated.
//
// For types implementing the encoding.TextUnmarshaler interface, the
// UnmarshalText method is used to set the value. Implementing this method is
// the recommended way for parsing user-defined types.
//
// For fields of string kind, the value string is assigned to the field, after
// unquoting and unescaping as needed.
// For fields of bool kind, the field is set to true if the value is "true",
// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
// "0", ignoring case. In addition, single-valued bool fields can be specified
// with a "blank" value (variable name without equals sign and value); in such
// case the value is set to true.
//
// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
// unintuitively handling zero-padded numbers as octal.) Other types having
// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
// decimal, hexadecimal, or octal values.
// Parsing mode for integer types can be overridden using the struct tag option
// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
// (each standing for decimal, hexadecimal, and octal, respectively.)
//
// All other types are parsed using fmt.Sscanf with the "%v" verb.
//
// For multi-valued variables, each individual value is parsed as above and
// appended to the slice. If the first value is specified as a "blank" value
// (variable name without equals sign and value), a new slice is allocated;
// that is, any values previously set in the slice will be ignored.
//
// The types subpackage provides helpers for parsing "enum-like" and integer
// types.
//
// Error handling
//
// There are 3 types of errors:
//
// - programmer errors / panics:
// - invalid configuration structure
// - data errors:
// - fatal errors:
// - invalid configuration syntax
// - warnings:
// - data that doesn't belong to any part of the config structure
//
// Programmer errors trigger panics. These should be fixed by the programmer
// before releasing code that uses gcfg.
//
// Data errors cause gcfg to return a non-nil error value. This includes the
// case when there are extra unknown key-value definitions in the configuration
// data (extra data).
// However, in some occasions it is desirable to be able to proceed in
// situations when the only data error is that of extra data.
// These errors are handled at a different (warning) priority and can be
// filtered out programmatically. To ignore extra data warnings, wrap the
// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
//
// TODO
//
// The following is a list of changes under consideration:
// - documentation
// - self-contained syntax documentation
// - more practical examples
// - move TODOs to issue tracker (eventually)
// - syntax
// - reconsider valid escape sequences
// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
// - reading / parsing gcfg files
// - define internal representation structure
// - support multiple inputs (readers, strings, files)
// - support declaring encoding (?)
// - support varying fields sets for subsections (?)
// - writing gcfg files
// - error handling
// - make error context accessible programmatically?
// - limit input size?
//
package gcfg // import "github.com/src-d/gcfg"
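To make the struct-mapping rules above concrete, here is a small sketch using ReadStringInto (added later in this change); the Config type and the sample input are illustrative and not part of the library.

package main

import (
    "fmt"

    "github.com/src-d/gcfg"
)

// Config mirrors an input with a [core] section and [remote "<name>"] subsections.
type Config struct {
    Core struct {
        Bare     bool
        Worktree string
    }
    Remote map[string]*struct {
        URL string
    }
}

func main() {
    src := `
[core]
bare = false
worktree = /tmp/checkout

[remote "origin"]
url = https://example.com/repo.git
`
    var cfg Config
    if err := gcfg.ReadStringInto(&cfg, src); err != nil {
        panic(err)
    }
    // Section and variable names are matched case-insensitively to field names.
    fmt.Println(cfg.Core.Worktree, cfg.Remote["origin"].URL)
}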

41
vendor/github.com/src-d/gcfg/errors.go generated vendored Normal file

@ -0,0 +1,41 @@
package gcfg
import (
"gopkg.in/warnings.v0"
)
// FatalOnly filters the results of a Read*Into invocation and returns only
// fatal errors. That is, errors (warnings) indicating data for unknown
// sections / variables is ignored. Example invocation:
//
// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
// if err != nil {
// ...
//
func FatalOnly(err error) error {
return warnings.FatalOnly(err)
}
func isFatal(err error) bool {
_, ok := err.(extraData)
return !ok
}
type extraData struct {
section string
subsection *string
variable *string
}
func (e extraData) Error() string {
s := "can't store data at section \"" + e.section + "\""
if e.subsection != nil {
s += ", subsection \"" + *e.subsection + "\""
}
if e.variable != nil {
s += ", variable \"" + *e.variable + "\""
}
return s
}
var _ error = extraData{}
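Expanding the invocation sketched in the FatalOnly comment above, extra-data warnings can be filtered out so that only fatal errors remain; the struct and input below are illustrative.

package main

import (
    "fmt"

    "github.com/src-d/gcfg"
)

type Config struct {
    Section struct {
        Name string
    }
}

func main() {
    // "extra" maps to no field, so ReadStringInto reports it as a warning.
    src := "[section]\nname = value\nextra = ignored\n"

    var cfg Config
    err := gcfg.FatalOnly(gcfg.ReadStringInto(&cfg, src))
    if err != nil {
        panic(err) // only fatal errors (e.g. syntax errors) reach this point
    }
    fmt.Println(cfg.Section.Name)
}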

7
vendor/github.com/src-d/gcfg/go1_0.go generated vendored Normal file

@ -0,0 +1,7 @@
// +build !go1.2
package gcfg
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}

9
vendor/github.com/src-d/gcfg/go1_2.go generated vendored Normal file

@ -0,0 +1,9 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler

273
vendor/github.com/src-d/gcfg/read.go generated vendored Normal file

@ -0,0 +1,273 @@
package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/src-d/gcfg/scanner"
"github.com/src-d/gcfg/token"
"gopkg.in/warnings.v0"
)
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'}
// no error: invalid literals should be caught by scanner
func unquote(s string) string {
u, q, esc := make([]rune, 0, len(s)), false, false
for _, c := range s {
if esc {
uc, ok := unescape[c]
switch {
case ok:
u = append(u, uc)
fallthrough
case !q && c == '\n':
esc = false
continue
}
panic("invalid escape sequence")
}
switch c {
case '"':
q = !q
case '\\':
esc = true
default:
u = append(u, c)
}
}
if q {
panic("missing end quote")
}
if esc {
panic("invalid escape sequence")
}
return string(u)
}
func read(c *warnings.Collector, callback func(string, string, string, string, bool) error,
fset *token.FileSet, file *token.File, src []byte) error {
//
var s scanner.Scanner
var errs scanner.ErrorList
s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
sect, sectsub := "", ""
pos, tok, lit := s.Scan()
errfn := func(msg string) error {
return fmt.Errorf("%s: %s", fset.Position(pos), msg)
}
for {
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
switch tok {
case token.EOF:
return nil
case token.EOL, token.COMMENT:
pos, tok, lit = s.Scan()
case token.LBRACK:
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.IDENT {
if err := c.Collect(errfn("expected section name")); err != nil {
return err
}
}
sect, sectsub = lit, ""
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok == token.STRING {
sectsub = unquote(lit)
if sectsub == "" {
if err := c.Collect(errfn("empty subsection name")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
}
if tok != token.RBRACK {
if sectsub == "" {
if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
return err
}
}
if err := c.Collect(errfn("expected right bracket")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
return err
}
}
// If a section/subsection header was found, ensure a
// container object is created, even if there are no
// variables further down.
err := c.Collect(callback(sect, sectsub, "", "", true))
if err != nil {
return err
}
case token.IDENT:
if sect == "" {
if err := c.Collect(errfn("expected section header")); err != nil {
return err
}
}
n := lit
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
if !blank {
if tok != token.ASSIGN {
if err := c.Collect(errfn("expected '='")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.STRING {
if err := c.Collect(errfn("expected value")); err != nil {
return err
}
}
v = unquote(lit)
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
return err
}
}
}
err := c.Collect(callback(sect, sectsub, n, v, blank))
if err != nil {
return err
}
default:
if sect == "" {
if err := c.Collect(errfn("expected section header")); err != nil {
return err
}
}
if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
return err
}
}
}
panic("never reached")
}
func readInto(config interface{}, fset *token.FileSet, file *token.File,
src []byte) error {
//
c := warnings.NewCollector(isFatal)
firstPassCallback := func(s string, ss string, k string, v string, bv bool) error {
return set(c, config, s, ss, k, v, bv, false)
}
err := read(c, firstPassCallback, fset, file, src)
if err != nil {
return err
}
secondPassCallback := func(s string, ss string, k string, v string, bv bool) error {
return set(c, config, s, ss, k, v, bv, true)
}
err = read(c, secondPassCallback, fset, file, src)
if err != nil {
return err
}
return c.Done()
}
// ReadWithCallback reads gcfg formatted data from reader and calls
// callback with each section and option found.
//
// Callback is called with section, subsection, option key, option value
// and blank value flag as arguments.
//
// When a section is found, callback is called with nil subsection, option key
// and option value.
//
// When a subsection is found, callback is called with nil option key and
// option value.
//
// If blank value flag is true, it means that the value was not set for an option
// (as opposed to set to empty string).
//
// If callback returns an error, ReadWithCallback terminates with an error too.
func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
src, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
c := warnings.NewCollector(isFatal)
return read(c, callback, fset, file, src)
}
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
src, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
return readInto(config, fset, file, src)
}
// ReadStringInto reads gcfg formatted data from str and sets the values into
// the corresponding fields in config.
func ReadStringInto(config interface{}, str string) error {
r := strings.NewReader(str)
return ReadInto(config, r)
}
// ReadFileInto reads gcfg formatted data from the file filename and sets the
// values into the corresponding fields in config.
func ReadFileInto(config interface{}, filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile(filename, fset.Base(), len(src))
return readInto(config, fset, file, src)
}
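When no target struct is available, ReadWithCallback (defined above) walks the document generically; a short sketch with an illustrative input:

package main

import (
    "fmt"
    "strings"

    "github.com/src-d/gcfg"
)

func main() {
    src := "[user]\nname = alice\n"
    err := gcfg.ReadWithCallback(strings.NewReader(src), func(section, subsection, key, value string, blank bool) error {
        // Called once for the section header (empty key) and once per variable.
        fmt.Printf("section=%q subsection=%q key=%q value=%q blank=%v\n",
            section, subsection, key, value, blank)
        return nil
    })
    if err != nil {
        panic(err)
    }
}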

121
vendor/github.com/src-d/gcfg/scanner/errors.go generated vendored Normal file

@ -0,0 +1,121 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package scanner
import (
"fmt"
"io"
"sort"
)
import (
"github.com/src-d/gcfg/token"
)
// In an ErrorList, an error is represented by an *Error.
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
Pos token.Position
Msg string
}
// Error implements the error interface.
func (e Error) Error() string {
if e.Pos.Filename != "" || e.Pos.IsValid() {
// don't print "<unknown position>"
// TODO(gri) reconsider the semantics of Position.IsValid
return e.Pos.String() + ": " + e.Msg
}
return e.Msg
}
// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error
// Add adds an Error with given position and error message to an ErrorList.
func (p *ErrorList) Add(pos token.Position, msg string) {
*p = append(*p, &Error{pos, msg})
}
// Reset resets an ErrorList to no errors.
func (p *ErrorList) Reset() { *p = (*p)[0:0] }
// ErrorList implements the sort Interface.
func (p ErrorList) Len() int { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ErrorList) Less(i, j int) bool {
e := &p[i].Pos
f := &p[j].Pos
if e.Filename < f.Filename {
return true
}
if e.Filename == f.Filename {
return e.Offset < f.Offset
}
return false
}
// Sort sorts an ErrorList. *Error entries are sorted by position,
// other errors are sorted by error message, and before any *Error
// entry.
//
func (p ErrorList) Sort() {
sort.Sort(p)
}
// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
func (p *ErrorList) RemoveMultiples() {
sort.Sort(p)
var last token.Position // initial last.Line is != any legal error line
i := 0
for _, e := range *p {
if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
last = e.Pos
(*p)[i] = e
i++
}
}
(*p) = (*p)[0:i]
}
// An ErrorList implements the error interface.
func (p ErrorList) Error() string {
switch len(p) {
case 0:
return "no errors"
case 1:
return p[0].Error()
}
return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
}
// Err returns an error equivalent to this error list.
// If the list is empty, Err returns nil.
func (p ErrorList) Err() error {
if len(p) == 0 {
return nil
}
return p
}
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
func PrintError(w io.Writer, err error) {
if list, ok := err.(ErrorList); ok {
for _, e := range list {
fmt.Fprintf(w, "%s\n", e)
}
} else if err != nil {
fmt.Fprintf(w, "%s\n", err)
}
}

342
vendor/github.com/src-d/gcfg/scanner/scanner.go generated vendored Normal file

@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package scanner implements a scanner for gcfg configuration text.
// It takes a []byte as source which can then be tokenized
// through repeated calls to the Scan method.
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
"fmt"
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"github.com/src-d/gcfg/token"
)
// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
// immutable state
file *token.File // source file handle
dir string // directory portion of file.Name()
src []byte // source
err ErrorHandler // error reporting; or nil
mode Mode // scanning mode
// scanning state
ch rune // current character
offset int // character offset
rdOffset int // reading offset (position after current character)
lineOffset int // current line offset
nextVal bool // next token is expected to be a value
// public state - ok to modify
ErrorCount int // number of errors encountered
}
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
if s.rdOffset < len(s.src) {
s.offset = s.rdOffset
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
r, w := rune(s.src[s.rdOffset]), 1
switch {
case r == 0:
s.error(s.offset, "illegal character NUL")
case r >= 0x80:
// not ASCII
r, w = utf8.DecodeRune(s.src[s.rdOffset:])
if r == utf8.RuneError && w == 1 {
s.error(s.offset, "illegal UTF-8 encoding")
}
}
s.rdOffset += w
s.ch = r
} else {
s.offset = len(s.src)
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
s.ch = -1 // eof
}
}
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
ScanComments Mode = 1 << iota // return comments as COMMENT tokens
)
// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same file as
// line information which is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
}
s.file = file
s.dir, _ = filepath.Split(file.Name())
s.src = src
s.err = err
s.mode = mode
s.ch = ' '
s.offset = 0
s.rdOffset = 0
s.lineOffset = 0
s.ErrorCount = 0
s.nextVal = false
s.next()
}
func (s *Scanner) error(offs int, msg string) {
if s.err != nil {
s.err(s.file.Position(s.file.Pos(offs)), msg)
}
s.ErrorCount++
}
func (s *Scanner) scanComment() string {
// initial [;#] already consumed
offs := s.offset - 1 // position of initial [;#]
for s.ch != '\n' && s.ch >= 0 {
s.next()
}
return string(s.src[offs:s.offset])
}
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
}
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
func (s *Scanner) scanIdentifier() string {
offs := s.offset
for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
s.next()
}
return string(s.src[offs:s.offset])
}
func (s *Scanner) scanEscape(val bool) {
offs := s.offset
ch := s.ch
s.next() // always make progress
switch ch {
case '\\', '"':
// ok
case 'n', 't', 'b':
if val {
break // ok
}
fallthrough
default:
s.error(offs, "unknown escape sequence")
}
}
func (s *Scanner) scanString() string {
// '"' opening already consumed
offs := s.offset - 1
for s.ch != '"' {
ch := s.ch
s.next()
if ch == '\n' || ch < 0 {
s.error(offs, "string not terminated")
break
}
if ch == '\\' {
s.scanEscape(false)
}
}
s.next()
return string(s.src[offs:s.offset])
}
func stripCR(b []byte) []byte {
c := make([]byte, len(b))
i := 0
for _, ch := range b {
if ch != '\r' {
c[i] = ch
i++
}
}
return c[:i]
}
func (s *Scanner) scanValString() string {
offs := s.offset
hasCR := false
end := offs
inQuote := false
loop:
for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
ch := s.ch
s.next()
switch {
case inQuote && ch == '\\':
s.scanEscape(true)
case !inQuote && ch == '\\':
if s.ch == '\r' {
hasCR = true
s.next()
}
if s.ch != '\n' {
s.scanEscape(true)
} else {
s.next()
}
case ch == '"':
inQuote = !inQuote
case ch == '\r':
hasCR = true
case ch < 0 || inQuote && ch == '\n':
s.error(offs, "string not terminated")
break loop
}
if inQuote || !isWhiteSpace(ch) {
end = s.offset
}
}
lit := s.src[offs:end]
if hasCR {
lit = stripCR(lit)
}
return string(lit)
}
func isWhiteSpace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\r'
}
func (s *Scanner) skipWhitespace() {
for isWhiteSpace(s.ch) {
s.next()
}
}
// Scan scans the next token and returns the token position, the token,
// and its literal string if applicable. The source end is indicated by
// token.EOF.
//
// If the returned token is a literal (token.IDENT, token.STRING) or
// token.COMMENT, the literal string has the corresponding value.
//
// If the returned token is token.ILLEGAL, the literal string is the
// offending character.
//
// In all other cases, Scan returns an empty literal string.
//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
// a client may not assume that no error occurred. Instead it
// must check the scanner's ErrorCount or the number of calls
// of the error handler, if there was one installed.
//
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
s.skipWhitespace()
// current token start
pos = s.file.Pos(s.offset)
// determine token value
switch ch := s.ch; {
case s.nextVal:
lit = s.scanValString()
tok = token.STRING
s.nextVal = false
case isLetter(ch):
lit = s.scanIdentifier()
tok = token.IDENT
default:
s.next() // always make progress
switch ch {
case -1:
tok = token.EOF
case '\n':
tok = token.EOL
case '"':
tok = token.STRING
lit = s.scanString()
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case ';', '#':
// comment
lit = s.scanComment()
if s.mode&ScanComments == 0 {
// skip comment
goto scanAgain
}
tok = token.COMMENT
case '=':
tok = token.ASSIGN
s.nextVal = true
default:
s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
tok = token.ILLEGAL
lit = string(ch)
}
}
return
}
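The read loop in read.go drives this scanner; used stand-alone, tokenization looks roughly like the following sketch (the input text is illustrative).

package main

import (
    "fmt"

    "github.com/src-d/gcfg/scanner"
    "github.com/src-d/gcfg/token"
)

func main() {
    src := []byte("[core]\nbare = true ; a comment\n")
    fset := token.NewFileSet()
    file := fset.AddFile("example.gcfg", fset.Base(), len(src))

    var s scanner.Scanner
    // nil error handler: rely on s.ErrorCount; ScanComments keeps COMMENT tokens.
    s.Init(file, src, nil, scanner.ScanComments)
    for {
        pos, tok, lit := s.Scan()
        if tok == token.EOF {
            break
        }
        fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
    }
}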

332
vendor/github.com/src-d/gcfg/set.go generated vendored Normal file

@ -0,0 +1,332 @@
package gcfg
import (
"bytes"
"encoding/gob"
"fmt"
"math/big"
"reflect"
"strings"
"unicode"
"unicode/utf8"
"github.com/src-d/gcfg/types"
"gopkg.in/warnings.v0"
)
type tag struct {
ident string
intMode string
}
func newTag(ts string) tag {
t := tag{}
s := strings.Split(ts, ",")
t.ident = s[0]
for _, tse := range s[1:] {
if strings.HasPrefix(tse, "int=") {
t.intMode = tse[len("int="):]
}
}
return t
}
func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
var n string
r0, _ := utf8.DecodeRuneInString(name)
if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
n = "X"
}
n += strings.Replace(name, "-", "_", -1)
f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
if !v.FieldByName(fieldName).CanSet() {
return false
}
f, _ := v.Type().FieldByName(fieldName)
t := newTag(f.Tag.Get("gcfg"))
if t.ident != "" {
return strings.EqualFold(t.ident, name)
}
return strings.EqualFold(n, fieldName)
})
if !ok {
return reflect.Value{}, tag{}
}
return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
}
type setter func(destp interface{}, blank bool, val string, t tag) error
var errUnsupportedType = fmt.Errorf("unsupported type")
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
var setters = []setter{
typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
}
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
dtu, ok := d.(textUnmarshaler)
if !ok {
return errUnsupportedType
}
if blank {
return errBlankUnsupported
}
return dtu.UnmarshalText([]byte(val))
}
func boolSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
return nil
}
b, err := types.ParseBool(val)
if err == nil {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
}
return err
}
func intMode(mode string) types.IntMode {
var m types.IntMode
if strings.ContainsAny(mode, "dD") {
m |= types.Dec
}
if strings.ContainsAny(mode, "hH") {
m |= types.Hex
}
if strings.ContainsAny(mode, "oO") {
m |= types.Oct
}
return m
}
var typeModes = map[reflect.Type]types.IntMode{
reflect.TypeOf(int(0)): types.Dec | types.Hex,
reflect.TypeOf(int8(0)): types.Dec | types.Hex,
reflect.TypeOf(int16(0)): types.Dec | types.Hex,
reflect.TypeOf(int32(0)): types.Dec | types.Hex,
reflect.TypeOf(int64(0)): types.Dec | types.Hex,
reflect.TypeOf(uint(0)): types.Dec | types.Hex,
reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
// use default mode (allow dec/hex/oct) for uintptr type
reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
}
func intModeDefault(t reflect.Type) types.IntMode {
m, ok := typeModes[t]
if !ok {
m = types.Dec | types.Hex | types.Oct
}
return m
}
func intSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
mode := intMode(t.intMode)
if mode == 0 {
mode = intModeDefault(reflect.TypeOf(d).Elem())
}
return types.ParseInt(d, val, mode)
}
func stringSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
dsp, ok := d.(*string)
if !ok {
return errUnsupportedType
}
*dsp = val
return nil
}
var kindSetters = map[reflect.Kind]setter{
reflect.String: stringSetter,
reflect.Bool: boolSetter,
reflect.Int: intSetter,
reflect.Int8: intSetter,
reflect.Int16: intSetter,
reflect.Int32: intSetter,
reflect.Int64: intSetter,
reflect.Uint: intSetter,
reflect.Uint8: intSetter,
reflect.Uint16: intSetter,
reflect.Uint32: intSetter,
reflect.Uint64: intSetter,
reflect.Uintptr: intSetter,
}
var typeSetters = map[reflect.Type]setter{
reflect.TypeOf(big.Int{}): intSetter,
}
func typeSetter(d interface{}, blank bool, val string, tt tag) error {
t := reflect.ValueOf(d).Type().Elem()
setter, ok := typeSetters[t]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func kindSetter(d interface{}, blank bool, val string, tt tag) error {
k := reflect.ValueOf(d).Type().Elem().Kind()
setter, ok := kindSetters[k]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func scanSetter(d interface{}, blank bool, val string, tt tag) error {
if blank {
return errBlankUnsupported
}
return types.ScanFully(d, val, 'v')
}
func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
vType reflect.Type) (reflect.Value, error) {
//
pv := reflect.New(vType)
dfltName := "default-" + sect
dfltField, _ := fieldFold(vCfg, dfltName)
var err error
if dfltField.IsValid() {
b := bytes.NewBuffer(nil)
ge := gob.NewEncoder(b)
if err = c.Collect(ge.EncodeValue(dfltField)); err != nil {
return pv, err
}
gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil {
return pv, err
}
}
return pv, nil
}
func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
value string, blankValue bool, subsectPass bool) error {
//
vPCfg := reflect.ValueOf(cfg)
if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("config must be a pointer to a struct"))
}
vCfg := vPCfg.Elem()
vSect, _ := fieldFold(vCfg, sect)
if !vSect.IsValid() {
err := extraData{section: sect}
return c.Collect(err)
}
isSubsect := vSect.Kind() == reflect.Map
if subsectPass != isSubsect {
return nil
}
if isSubsect {
vst := vSect.Type()
if vst.Key().Kind() != reflect.String ||
vst.Elem().Kind() != reflect.Ptr ||
vst.Elem().Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("map field for section must have string keys and "+
" pointer-to-struct values: section %q", sect))
}
if vSect.IsNil() {
vSect.Set(reflect.MakeMap(vst))
}
k := reflect.ValueOf(sub)
pv := vSect.MapIndex(k)
if !pv.IsValid() {
vType := vSect.Type().Elem().Elem()
var err error
if pv, err = newValue(c, sect, vCfg, vType); err != nil {
return err
}
vSect.SetMapIndex(k, pv)
}
vSect = pv.Elem()
} else if vSect.Kind() != reflect.Struct {
panic(fmt.Errorf("field for section must be a map or a struct: "+
"section %q", sect))
} else if sub != "" {
err := extraData{section: sect, subsection: &sub}
return c.Collect(err)
}
// Empty name is a special value, meaning that only the
// section/subsection object is to be created, with no values set.
if name == "" {
return nil
}
vVar, t := fieldFold(vSect, name)
if !vVar.IsValid() {
var err error
if isSubsect {
err = extraData{section: sect, subsection: &sub, variable: &name}
} else {
err = extraData{section: sect, variable: &name}
}
return c.Collect(err)
}
// vVal is either single-valued var, or newly allocated value within multi-valued var
var vVal reflect.Value
// multi-value if unnamed slice type
isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
if isMulti && vVar.Kind() == reflect.Ptr {
if vVar.IsNil() {
vVar.Set(reflect.New(vVar.Type().Elem()))
}
vVar = vVar.Elem()
}
if isMulti && blankValue {
vVar.Set(reflect.Zero(vVar.Type()))
return nil
}
if isMulti {
vVal = reflect.New(vVar.Type().Elem()).Elem()
} else {
vVal = vVar
}
isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
isNew := isDeref && vVal.IsNil()
// vAddr is address of value to set (dereferenced & allocated as needed)
var vAddr reflect.Value
switch {
case isNew:
vAddr = reflect.New(vVal.Type().Elem())
case isDeref && !isNew:
vAddr = vVal
default:
vAddr = vVal.Addr()
}
vAddrI := vAddr.Interface()
err, ok := error(nil), false
for _, s := range setters {
err = s(vAddrI, blankValue, value, t)
if err == nil {
ok = true
break
}
if err != errUnsupportedType {
return err
}
}
if !ok {
// in case all setters returned errUnsupportedType
return err
}
if isNew { // set reference if it was dereferenced and newly allocated
vVal.Set(vAddr)
}
if isMulti { // append if multi-valued
vVar.Set(reflect.Append(vVar, vVal))
}
return nil
}

435
vendor/github.com/src-d/gcfg/token/position.go generated vendored Normal file

@ -0,0 +1,435 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(gri) consider making this a separate package outside the go directory.
package token
import (
"fmt"
"sort"
"sync"
)
// -----------------------------------------------------------------------------
// Positions
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
//
type Position struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (character count)
}
// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
//
func (pos Position) String() string {
s := pos.Filename
if pos.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
}
if s == "" {
s = "-"
}
return s
}
// Pos is a compact encoding of a source position within a file set.
// It can be converted into a Position for a more convenient, but much
// larger, representation.
//
// The Pos value for a given file is a number in the range [base, base+size],
// where base and size are specified when adding the file to the file set via
// AddFile.
//
// To create the Pos value for a specific source offset, first add
// the respective file to the current file set (via FileSet.AddFile)
// and then call File.Pos(offset) for that file. Given a Pos value p
// for a specific file set fset, the corresponding Position value is
// obtained by calling fset.Position(p).
//
// Pos values can be compared directly with the usual comparison operators:
// If two Pos values p and q are in the same file, comparing p and q is
// equivalent to comparing the respective source file offsets. If p and q
// are in different files, p < q is true if the file implied by p was added
// to the respective file set before the file implied by q.
//
type Pos int
// The zero value for Pos is NoPos; there is no file and line information
// associated with it, and NoPos().IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
// for NoPos is the zero value for Position.
//
const NoPos Pos = 0
// IsValid returns true if the position is valid.
func (p Pos) IsValid() bool {
return p != NoPos
}
// -----------------------------------------------------------------------------
// File
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
type File struct {
set *FileSet
name string // file name as provided to AddFile
base int // Pos value range for this file is [base...base+size]
size int // file size as provided to AddFile
// lines and infos are protected by set.mutex
lines []int
infos []lineInfo
}
// Name returns the file name of file f as registered with AddFile.
func (f *File) Name() string {
return f.name
}
// Base returns the base offset of file f as registered with AddFile.
func (f *File) Base() int {
return f.base
}
// Size returns the size of file f as registered with AddFile.
func (f *File) Size() int {
return f.size
}
// LineCount returns the number of lines in file f.
func (f *File) LineCount() int {
f.set.mutex.RLock()
n := len(f.lines)
f.set.mutex.RUnlock()
return n
}
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
//
func (f *File) AddLine(offset int) {
f.set.mutex.Lock()
if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
f.lines = append(f.lines, offset)
}
f.set.mutex.Unlock()
}
// SetLines sets the line offsets for a file and returns true if successful.
// The line offsets are the offsets of the first character of each line;
// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
// An empty file has an empty line offset table.
// Each line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise SetLines fails and returns
// false.
//
func (f *File) SetLines(lines []int) bool {
// verify validity of lines table
size := f.size
for i, offset := range lines {
if i > 0 && offset <= lines[i-1] || size <= offset {
return false
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
return true
}
// SetLinesForContent sets the line offsets for the given file content.
func (f *File) SetLinesForContent(content []byte) {
var lines []int
line := 0
for offset, b := range content {
if line >= 0 {
lines = append(lines, line)
}
line = -1
if b == '\n' {
line = offset + 1
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
}
// A lineInfo object describes alternative file and line number
// information (such as provided via a //line comment in a .go
// file) for a given file offset.
type lineInfo struct {
// fields are exported to make them accessible to gob
Offset int
Filename string
Line int
}
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
f.infos = append(f.infos, lineInfo{offset, filename, line})
}
f.set.mutex.Unlock()
}
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
//
func (f *File) Pos(offset int) Pos {
if offset > f.size {
panic("illegal file offset")
}
return Pos(f.base + offset)
}
// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
//
func (f *File) Offset(p Pos) int {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
return int(p) - f.base
}
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Line(p Pos) int {
// TODO(gri) this can be implemented much more efficiently
return f.Position(p).Line
}
func searchLineInfos(a []lineInfo, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
// info returns the file name, line, and column number for a file offset.
func (f *File) info(offset int) (filename string, line, column int) {
filename = f.name
if i := searchInts(f.lines, offset); i >= 0 {
line, column = i+1, offset-f.lines[i]+1
}
if len(f.infos) > 0 {
// almost no files have extra line infos
if i := searchLineInfos(f.infos, offset); i >= 0 {
alt := &f.infos[i]
filename = alt.Filename
if i := searchInts(f.lines, alt.Offset); i >= 0 {
line += alt.Line - i - 1
}
}
}
return
}
func (f *File) position(p Pos) (pos Position) {
offset := int(p) - f.base
pos.Offset = offset
pos.Filename, pos.Line, pos.Column = f.info(offset)
return
}
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
if p != NoPos {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
pos = f.position(p)
}
return
}
// -----------------------------------------------------------------------------
// FileSet
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
//
type FileSet struct {
mutex sync.RWMutex // protects the file set
base int // base offset for the next file
files []*File // list of files in the order added to the set
last *File // cache of last file looked up
}
// NewFileSet creates a new file set.
func NewFileSet() *FileSet {
s := new(FileSet)
s.base = 1 // 0 == NoPos
return s
}
// Base returns the minimum base offset that must be provided to
// AddFile when adding the next file.
//
func (s *FileSet) Base() int {
s.mutex.RLock()
b := s.base
s.mutex.RUnlock()
return b
}
// AddFile adds a new file with a given filename, base offset, and file size
// to the file set s and returns the file. Multiple files may have the same
// name. The base offset must not be smaller than the FileSet's Base(), and
// size must not be negative.
//
// Adding the file will set the file set's Base() value to base + size + 1
// as the minimum base value for the next file. The following relationship
// exists between a Pos value p for a given file offset offs:
//
// int(p) = base + offs
//
// with offs in the range [0, size] and thus p in the range [base, base+size].
// For convenience, File.Pos may be used to create file-specific position
// values from a file offset.
//
func (s *FileSet) AddFile(filename string, base, size int) *File {
s.mutex.Lock()
defer s.mutex.Unlock()
if base < s.base || size < 0 {
panic("illegal base or size")
}
// base >= s.base && size >= 0
f := &File{s, filename, base, size, []int{0}, nil}
base += size + 1 // +1 because EOF also has a position
if base < 0 {
panic("token.Pos offset overflow (> 2G of source code in file set)")
}
// add the file to the file set
s.base = base
s.files = append(s.files, f)
s.last = f
return f
}
// Iterate calls f for the files in the file set in the order they were added
// until f returns false.
//
func (s *FileSet) Iterate(f func(*File) bool) {
for i := 0; ; i++ {
var file *File
s.mutex.RLock()
if i < len(s.files) {
file = s.files[i]
}
s.mutex.RUnlock()
if file == nil || !f(file) {
break
}
}
}
func searchFiles(a []*File, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
}
func (s *FileSet) file(p Pos) *File {
// common case: p is in last file
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
return f
}
// p is not in last file - search all files
if i := searchFiles(s.files, int(p)); i >= 0 {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
s.last = f
return f
}
}
return nil
}
// File returns the file that contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
s.mutex.RLock()
f = s.file(p)
s.mutex.RUnlock()
}
return
}
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
s.mutex.RLock()
if f := s.file(p); f != nil {
pos = f.position(p)
}
s.mutex.RUnlock()
}
return
}
// -----------------------------------------------------------------------------
// Helper functions
func searchInts(a []int, x int) int {
// This function body is a manually inlined version of:
//
// return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
//
// With better compiler optimizations, this may not be needed in the
// future, but at the moment this change improves the go/printer
// benchmark performance by ~30%. This has a direct impact on the
// speed of gofmt and thus seems worthwhile (2011-04-29).
// TODO(gri): Remove this when compilers have caught up.
i, j := 0, len(a)
for i < j {
h := i + (j-i)/2 // avoid overflow when computing h
// i ≤ h < j
if a[h] <= x {
i = h + 1
} else {
j = h
}
}
return i - 1
}

56
vendor/github.com/src-d/gcfg/token/serialize.go generated vendored Normal file

@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
type serializedFile struct {
// fields correspond 1:1 to fields with same (lower-case) name in File
Name string
Base int
Size int
Lines []int
Infos []lineInfo
}
type serializedFileSet struct {
Base int
Files []serializedFile
}
// Read calls decode to deserialize a file set into s; s must not be nil.
func (s *FileSet) Read(decode func(interface{}) error) error {
var ss serializedFileSet
if err := decode(&ss); err != nil {
return err
}
s.mutex.Lock()
s.base = ss.Base
files := make([]*File, len(ss.Files))
for i := 0; i < len(ss.Files); i++ {
f := &ss.Files[i]
files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
}
s.files = files
s.last = nil
s.mutex.Unlock()
return nil
}
// Write calls encode to serialize the file set s.
func (s *FileSet) Write(encode func(interface{}) error) error {
var ss serializedFileSet
s.mutex.Lock()
ss.Base = s.base
files := make([]serializedFile, len(s.files))
for i, f := range s.files {
files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
}
ss.Files = files
s.mutex.Unlock()
return encode(ss)
}

83
vendor/github.com/src-d/gcfg/token/token.go generated vendored Normal file

@ -0,0 +1,83 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package token defines constants representing the lexical tokens of the gcfg
// configuration syntax and basic operations on tokens (printing, predicates).
//
// Note that the API for the token package may change to accommodate new
// features or implementation changes in gcfg.
//
package token
import "strconv"
// Token is the set of lexical tokens of the gcfg configuration syntax.
type Token int
// The list of tokens.
const (
// Special tokens
ILLEGAL Token = iota
EOF
COMMENT
literal_beg
// Identifiers and basic type literals
// (these tokens stand for classes of literals)
IDENT // section-name, variable-name
STRING // "subsection-name", variable value
literal_end
operator_beg
// Operators and delimiters
ASSIGN // =
LBRACK // [
RBRACK // ]
EOL // \n
operator_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
COMMENT: "COMMENT",
IDENT: "IDENT",
STRING: "STRING",
ASSIGN: "=",
LBRACK: "[",
RBRACK: "]",
EOL: "\n",
}
// String returns the string corresponding to the token tok.
// For operators and delimiters, the string is the actual token character
// sequence (e.g., for the token ASSIGN, the string is "="). For all other
// tokens the string corresponds to the token constant name (e.g. for the
// token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
s := ""
if 0 <= tok && tok < Token(len(tokens)) {
s = tokens[tok]
}
if s == "" {
s = "token(" + strconv.Itoa(int(tok)) + ")"
}
return s
}
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
// and basic type literals; it returns false otherwise.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }

23
vendor/github.com/src-d/gcfg/types/bool.go generated vendored Normal file

@ -0,0 +1,23 @@
package types
// BoolValues defines the name and value mappings for ParseBool.
var BoolValues = map[string]interface{}{
"true": true, "yes": true, "on": true, "1": true,
"false": false, "no": false, "off": false, "0": false,
}
var boolParser = func() *EnumParser {
ep := &EnumParser{}
ep.AddVals(BoolValues)
return ep
}()
// ParseBool parses bool values according to the definitions in BoolValues.
// Parsing is case-insensitive.
func ParseBool(s string) (bool, error) {
v, err := boolParser.Parse(s)
if err != nil {
return false, err
}
return v.(bool), nil
}
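A quick illustration of the accepted spellings (matching is case-insensitive, per BoolValues above):

package main

import (
    "fmt"

    "github.com/src-d/gcfg/types"
)

func main() {
    for _, s := range []string{"true", "YES", "off", "0"} {
        b, err := types.ParseBool(s)
        fmt.Printf("%-4s -> %v (err: %v)\n", s, b, err)
    }
}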

4
vendor/github.com/src-d/gcfg/types/doc.go generated vendored Normal file

@ -0,0 +1,4 @@
// Package types defines helpers for type conversions.
//
// The API for this package is not finalized yet.
package types

44
vendor/github.com/src-d/gcfg/types/enum.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,44 @@
package types
import (
"fmt"
"reflect"
"strings"
)
// EnumParser parses "enum" values; i.e. a predefined set of strings to
// predefined values.
type EnumParser struct {
Type string // type name; if not set, use type of first value added
CaseMatch bool // if true, matching of strings is case-sensitive
// PrefixMatch bool
vals map[string]interface{}
}
// AddVals adds strings and values to an EnumParser.
func (ep *EnumParser) AddVals(vals map[string]interface{}) {
if ep.vals == nil {
ep.vals = make(map[string]interface{})
}
for k, v := range vals {
if ep.Type == "" {
ep.Type = reflect.TypeOf(v).Name()
}
if !ep.CaseMatch {
k = strings.ToLower(k)
}
ep.vals[k] = v
}
}
// Parse parses the string and returns the value or an error.
func (ep EnumParser) Parse(s string) (interface{}, error) {
if !ep.CaseMatch {
s = strings.ToLower(s)
}
v, ok := ep.vals[s]
if !ok {
return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
}
return v, nil
}
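A minimal sketch of defining and using an EnumParser; the log-level mapping is illustrative.

package main

import (
    "fmt"

    "github.com/src-d/gcfg/types"
)

func main() {
    // Matching is case-insensitive because CaseMatch is left false.
    ep := &types.EnumParser{Type: "log level"}
    ep.AddVals(map[string]interface{}{
        "debug": 0,
        "info":  1,
        "error": 2,
    })

    v, err := ep.Parse("Info")
    if err != nil {
        panic(err)
    }
    fmt.Println(v.(int)) // 1
}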

86
vendor/github.com/src-d/gcfg/types/int.go generated vendored Normal file

@ -0,0 +1,86 @@
package types
import (
"fmt"
"strings"
)
// An IntMode is a mode for parsing integer values, representing a set of
// accepted bases.
type IntMode uint8
// IntMode values for ParseInt; can be combined using binary or.
const (
Dec IntMode = 1 << iota
Hex
Oct
)
// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
func (m IntMode) String() string {
var modes []string
if m&Dec != 0 {
modes = append(modes, "Dec")
}
if m&Hex != 0 {
modes = append(modes, "Hex")
}
if m&Oct != 0 {
modes = append(modes, "Oct")
}
return "IntMode(" + strings.Join(modes, "|") + ")"
}
var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
func prefix0(val string) bool {
return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
}
func prefix0x(val string) bool {
return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
}
// ParseInt parses val using mode into intptr, which must be a pointer to an
// integer kind type. Non-decimal values require a `0` or `0x` prefix in cases
// where the mode permits ambiguity of base; otherwise the prefix can be omitted.
func ParseInt(intptr interface{}, val string, mode IntMode) error {
val = strings.TrimSpace(val)
verb := byte(0)
switch mode {
case Dec:
verb = 'd'
case Dec + Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Oct:
if prefix0(val) && !prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Hex + Oct:
verb = 'v'
case Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'x'
}
case Oct:
verb = 'o'
case Hex + Oct:
if prefix0(val) {
verb = 'v'
} else {
return errIntAmbig
}
}
if verb == 0 {
panic("unsupported mode")
}
return ScanFully(intptr, val, verb)
}
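A short sketch of how the mode affects prefix handling, following the behavior documented in doc.go (zero-padded numbers are not treated as octal under Dec|Hex):

package main

import (
    "fmt"

    "github.com/src-d/gcfg/types"
)

func main() {
    var a, b int

    // Under Dec|Hex a plain number is decimal and an explicit 0x prefix is hex.
    if err := types.ParseInt(&a, "010", types.Dec|types.Hex); err != nil {
        panic(err)
    }
    if err := types.ParseInt(&b, "0x10", types.Dec|types.Hex); err != nil {
        panic(err)
    }
    fmt.Println(a, b) // 10 16
}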

23
vendor/github.com/src-d/gcfg/types/scan.go generated vendored Normal file

@ -0,0 +1,23 @@
package types
import (
"fmt"
"io"
"reflect"
)
// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
func ScanFully(ptr interface{}, val string, verb byte) error {
t := reflect.ValueOf(ptr).Elem().Type()
// attempt to read extra bytes to make sure the value is consumed
var b []byte
n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
switch {
case n < 1 || n == 1 && err != io.EOF:
return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
case n > 1:
return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
}
// n == 1 && err == io.EOF
return nil
}
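
ScanFully appends a trailing %s to the format so that leftover input is detected, and treats a clean io.EOF on that extra verb as success. A small sketch (not part of this commit):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	// 'x' scans hexadecimal; the whole string must be consumed.
	var n int
	if err := types.ScanFully(&n, "2a", 'x'); err != nil {
		panic(err)
	}
	fmt.Println(n) // 42

	// Characters left over after the value are reported as an error.
	err := types.ScanFully(&n, "42abc", 'd')
	fmt.Println(err) // failed to parse "42abc" as int: extra characters "abc"
}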

22
vendor/github.com/stretchr/testify/LICENCE.txt (generated, vendored)

@@ -1,22 +0,0 @@
Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
Please consider promoting this project if you find it useful.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

484
vendor/github.com/stretchr/testify/assert/assertion_format.go (generated, vendored, new file)

@@ -0,0 +1,484 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package assert
import (
http "net/http"
url "net/url"
time "time"
)
// Conditionf uses a Comparison to assert a complex condition.
func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Condition(t, comp, append([]interface{}{msg}, args...)...)
}
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
}
// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return DirExists(t, path, append([]interface{}{msg}, args...)...)
}
// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should match.
//
// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// assert.Emptyf(t, obj, "error message %s", "formatted")
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Empty(t, object, append([]interface{}{msg}, args...)...)
}
// Equalf asserts that two objects are equal.
//
// assert.Equalf(t, 123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
// cannot be determined and will always fail.
func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
}
// EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if assert.Errorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Error(t, err, append([]interface{}{msg}, args...)...)
}
// Exactlyf asserts that two objects are equal in value and type.
//
// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Failf reports a failure through
func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
}
// FailNowf fails test
func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
}
// Falsef asserts that the specified value is false.
//
// assert.Falsef(t, myBool, "error message %s", "formatted")
func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return False(t, value, append([]interface{}{msg}, args...)...)
}
// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return FileExists(t, path, append([]interface{}{msg}, args...)...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPErrorf asserts that a specified handler returns an error status code.
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// Implementsf asserts that an object is implemented by the specified interface.
//
// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
}
// InDeltaf asserts that the two numerals are within delta of each other.
//
// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InDeltaSlicef is the same as InDelta, except it compares two slices.
func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InEpsilonf asserts that expected and actual have a relative error less than epsilon
func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// IsTypef asserts that the specified objects are of the same type.
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
}
// JSONEqf asserts that two JSON strings are equivalent.
//
// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() not accept.
//
// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Len(t, object, length, append([]interface{}{msg}, args...)...)
}
// Nilf asserts that the specified object is nil.
//
// assert.Nilf(t, err, "error message %s", "formatted")
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Nil(t, object, append([]interface{}{msg}, args...)...)
}
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if assert.NoErrorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedObj, actualObj)
// }
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NoError(t, err, append([]interface{}{msg}, args...)...)
}
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
// }
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
}
// NotEqualf asserts that the specified values are NOT equal.
//
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotNil(t, object, append([]interface{}{msg}, args...)...)
}
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotPanics(t, f, append([]interface{}{msg}, args...)...)
}
// NotRegexpf asserts that a specified regexp does not match a string.
//
// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
}
// NotZerof asserts that i is not the zero value for its type.
func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotZero(t, i, append([]interface{}{msg}, args...)...)
}
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Panics(t, f, append([]interface{}{msg}, args...)...)
}
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
}
// Regexpf asserts that a specified regexp matches a string.
//
// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
}
// Truef asserts that the specified value is true.
//
// assert.Truef(t, myBool, "error message %s", "formatted")
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return True(t, value, append([]interface{}{msg}, args...)...)
}
// WithinDurationf asserts that the two times are within duration delta of each other.
//
// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// Zerof asserts that i is the zero value for its type.
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Zero(t, i, append([]interface{}{msg}, args...)...)
}
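
These generated ...f wrappers prepend the format string to the variadic arguments and forward to the corresponding non-formatted assertion, calling t.Helper() when the runner supports it. A usage sketch in a test (not part of this commit; the package name and values are illustrative):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFormattedAssertions(t *testing.T) {
	refs := []string{"refs/heads/master", "refs/tags/v1.0"}

	// The message and args after the checked values become a printf-style failure message.
	assert.Lenf(t, refs, 2, "unexpected ref count for repo %q", "gitea")
	assert.Containsf(t, refs, "refs/heads/master", "branch missing in %v", refs)
	assert.Truef(t, len(refs) > 0, "expected at least one ref, got %d", len(refs))
}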

5
vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl (generated, vendored, new file)

@@ -0,0 +1,5 @@
{{.CommentFormat}}
func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
if h, ok := t.(tHelper); ok { h.Helper() }
return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
}
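
For reference, each instantiation of the template above expands to a wrapper of the shape already shown in assertion_format.go, for example (illustrative expansion):

// Truef asserts that the specified value is true.
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	return True(t, value, append([]interface{}{msg}, args...)...)
}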

784
vendor/github.com/stretchr/testify/assert/assertion_forward.go (generated, vendored)

Diff not shown because it is too large.

1
vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl (generated, vendored)

@@ -1,4 +1,5 @@
 {{.CommentWithoutT "a"}}
 func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+	if h, ok := a.t.(tHelper); ok { h.Helper() }
 	return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
 }

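The forward template above generates the methods in assertion_forward.go, whose diff is omitted for size. An illustrative expansion for one assertion:

// True asserts that the specified value is true.
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
	if h, ok := a.t.(tHelper); ok { h.Helper() }
	return True(a.t, value, msgAndArgs...)
}
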
748
vendor/github.com/stretchr/testify/assert/assertions.go (generated, vendored)

Diff not shown because it is too large.

2
vendor/github.com/stretchr/testify/assert/forward_assertions.go (generated, vendored)

@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
 	}
 }
 
-//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs

81
vendor/github.com/stretchr/testify/assert/http_assertions.go (generated, vendored)

@@ -8,16 +8,17 @@ import (
 	"strings"
 )
 
-// httpCode is a helper that returns HTTP code of the response. It returns -1
-// if building a new request fails.
-func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {
+// httpCode is a helper that returns HTTP code of the response. It returns -1 and
+// an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
 	w := httptest.NewRecorder()
-	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	req, err := http.NewRequest(method, url, nil)
 	if err != nil {
-		return -1
+		return -1, err
 	}
+	req.URL.RawQuery = values.Encode()
 	handler(w, req)
-	return w.Code
+	return w.Code, nil
 }
 
 // HTTPSuccess asserts that a specified handler returns a success status code.
@@ -25,12 +26,22 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) i
 // assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
 //
 // Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
-	code := httpCode(handler, method, url, values)
-	if code == -1 {
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
 		return false
 	}
-	return code >= http.StatusOK && code <= http.StatusPartialContent
+
+	isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+	if !isSuccessCode {
+		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isSuccessCode
 }
 
 // HTTPRedirect asserts that a specified handler returns a redirect status code.
@@ -38,12 +49,22 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
 // assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
 //
 // Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
-	code := httpCode(handler, method, url, values)
-	if code == -1 {
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
 		return false
 	}
-	return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+
+	isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+	if !isRedirectCode {
+		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isRedirectCode
 }
 
 // HTTPError asserts that a specified handler returns an error status code.
@@ -51,12 +72,22 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
 // assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
 //
 // Returns whether the assertion was successful (true) or not (false).
-func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
-	code := httpCode(handler, method, url, values)
-	if code == -1 {
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
 		return false
 	}
-	return code >= http.StatusBadRequest
+
+	isErrorCode := code >= http.StatusBadRequest
+	if !isErrorCode {
+		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isErrorCode
 }
 
 // HTTPBody is a helper that returns HTTP body of the response. It returns
@@ -74,10 +105,13 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s
 // HTTPBodyContains asserts that a specified handler returns a
 // body that contains a string.
 //
-// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	body := HTTPBody(handler, method, url, values)
 
 	contains := strings.Contains(body, fmt.Sprint(str))
@@ -91,10 +125,13 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
 // HTTPBodyNotContains asserts that a specified handler returns a
 // body that does not contain a string.
 //
-// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	body := HTTPBody(handler, method, url, values)
 
 	contains := strings.Contains(body, fmt.Sprint(str))

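With this change the HTTP status-code assertions report request-building failures and unexpected status codes through Fail instead of silently returning false, and all of the HTTP assertions register as test helpers and accept an optional message. A test sketch using them (not part of this commit; the handler and paths are illustrative):

package example

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// helloHandler echoes the "name" query parameter.
func helloHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "hello %s", r.URL.Query().Get("name"))
}

func TestHelloHandler(t *testing.T) {
	values := url.Values{"name": []string{"gitea"}}

	assert.HTTPSuccess(t, helloHandler, "GET", "/hello", values)
	assert.HTTPBodyContains(t, helloHandler, "GET", "/hello", values, "gitea")
	assert.HTTPBodyNotContains(t, helloHandler, "GET", "/hello", values, "gogs")
}
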
2
vendor/github.com/stretchr/testify/require/forward_requirements.go (generated, vendored)

@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
 	}
 }
 
-//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl
+//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs

1128
vendor/github.com/stretchr/testify/require/require.go (generated, vendored)

Diff not shown because it is too large.

6
vendor/github.com/stretchr/testify/require/require.go.tmpl (generated, vendored)

@@ -1,6 +1,6 @@
 {{.Comment}}
 func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
-	if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) {
-		t.FailNow()
-	}
+	if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
+	if h, ok := t.(tHelper); ok { h.Helper() }
+	t.FailNow()
 }

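The require template above now returns early when the underlying assert call succeeds, and otherwise marks the frame as a test helper before aborting with t.FailNow(). An illustrative expansion for one generated function:

// True asserts that the specified value is true.
func True(t TestingT, value bool, msgAndArgs ...interface{}) {
	if assert.True(t, value, msgAndArgs...) { return }
	if h, ok := t.(tHelper); ok { h.Helper() }
	t.FailNow()
}
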
Some files were not shown because too many files have changed in this diff.