Mirror of https://github.com/golang/dep.git

dep: Update gps to v0.13.0; semver to latest 2.x

Includes the small changes required to compile; a follow-up commit will flesh out the Required implementation on the manifest.

Parent: 0687487f02
Commit: 06af1bbffb
init.go (4 changes)

@@ -149,9 +149,9 @@ func runInit(args []string) error {
 		ondisk[pr] = v
 		pp := gps.ProjectProperties{}
 		switch v.Type() {
-		case "branch", "version", "rev":
+		case gps.IsBranch, gps.IsVersion, gps.IsRevision:
 			pp.Constraint = v
-		case "semver":
+		case gps.IsSemver:
 			c, _ := gps.NewSemverConstraint("^" + v.String())
 			pp.Constraint = c
 		}
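The hunk above swaps stringly-typed version kinds for the `gps.VersionType` constants (`IsBranch`, `IsVersion`, `IsRevision`, `IsSemver`). The following is a minimal, self-contained sketch of the same mapping logic using local stand-in types rather than the real gps API, just to show which version kinds are pinned as-is and which get a caret range:

```go
package main

import "fmt"

// VersionType and its constants stand in for gps.VersionType as used in the diff.
type VersionType int

const (
	IsRevision VersionType = iota
	IsBranch
	IsVersion
	IsSemver
)

// version is a minimal stand-in for a solved gps version.
type version struct {
	typ VersionType
	str string
}

// constraintFor mirrors the switch in runInit: branches, plain versions and
// revisions are recorded verbatim, while semver versions get a caret range.
func constraintFor(v version) string {
	switch v.typ {
	case IsBranch, IsVersion, IsRevision:
		return v.str
	case IsSemver:
		return "^" + v.str
	}
	return ""
}

func main() {
	fmt.Println(constraintFor(version{IsSemver, "1.8.1"}))  // ^1.8.1
	fmt.Println(constraintFor(version{IsBranch, "master"})) // master
}
```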
lock.go (4 changes)

@@ -114,9 +114,9 @@ func (l *lock) MarshalJSON() ([]byte, error) {
 		}

 		switch v.Type() {
-		case "branch":
+		case gps.IsBranch:
 			ld.Branch = v.String()
-		case "semver", "version":
+		case gps.IsSemver, gps.IsVersion:
 			ld.Version = v.String()
 		}

@@ -1,10 +1,10 @@
 {
-	"memo": "5dae54d01074eeca29bbf86ad922fa1885bf8847a6893eb06072226267d54fc9",
+	"memo": "47c0ec3d677d1d5c01778bc81836801e009c641797708b8fcd773dce954c7714",
 	"projects": [
 		{
 			"name": "github.com/Masterminds/semver",
 			"branch": "2.x",
-			"revision": "b3ef6b1808e9889dfb8767ce7068db923a3d07de",
+			"revision": "94ad6eaf8457cf85a68c9b53fa42e9b1b8683783",
 			"packages": [
 				"."
 			]

@@ -35,8 +35,8 @@
 		},
 		{
 			"name": "github.com/sdboyer/gps",
-			"version": "v0.12.0",
-			"revision": "9ca61cb4e9851c80bb537e7d8e1be56e18e03cc9",
+			"version": "v0.13.0",
+			"revision": "41fd676e835d91ab7307597cb753e08d47065da1",
 			"packages": [
 				"."
 			]
manifest.go (25 changes)

@@ -17,12 +17,14 @@ type manifest struct {
 	Dependencies gps.ProjectConstraints
 	Ovr          gps.ProjectConstraints
 	Ignores      []string
+	//Required []string
 }

 type rawManifest struct {
 	Dependencies map[string]possibleProps `json:"dependencies,omitempty"`
 	Overrides    map[string]possibleProps `json:"overrides,omitempty"`
 	Ignores      []string                 `json:"ignores,omitempty"`
+	//Required []string `json:"required,omitempty"`
 }

 type possibleProps struct {

@@ -108,6 +110,7 @@ func (m *manifest) MarshalJSON() ([]byte, error) {
 		Dependencies: make(map[string]possibleProps, len(m.Dependencies)),
 		Overrides:    make(map[string]possibleProps, len(m.Ovr)),
 		Ignores:      m.Ignores,
+		//Required: m.Required,
 	}

 	for n, pp := range m.Dependencies {

@@ -132,11 +135,11 @@ func toPossible(pp gps.ProjectProperties) (p possibleProps) {

 	if v, ok := pp.Constraint.(gps.Version); ok {
 		switch v.Type() {
-		case "rev": // will be changed to revision upstream soon
+		case gps.IsRevision:
 			p.Revision = v.String()
-		case "branch":
+		case gps.IsBranch:
 			p.Branch = v.String()
-		case "semver", "version":
+		case gps.IsSemver, gps.IsVersion:
 			p.Version = v.String()
 		}
 	} else {

@@ -167,7 +170,7 @@ func (m *manifest) Overrides() gps.ProjectConstraints {
 	return m.Ovr
 }

-func (m *manifest) IgnorePackages() map[string]bool {
+func (m *manifest) IgnoredPackages() map[string]bool {
 	if len(m.Ignores) == 0 {
 		return nil
 	}

@@ -179,3 +182,17 @@ func (m *manifest) IgnorePackages() map[string]bool {

 	return mp
 }
+
+func (m *manifest) RequiredPackages() map[string]bool {
+	//if len(m.Required) == 0 {
+	//return nil
+	//}
+
+	//mp := make(map[string]bool, len(m.Required))
+	//for _, i := range m.Required {
+	//mp[i] = true
+	//}
+
+	//return mp
+	return nil
+}
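The commit message says a follow-up will flesh out the Required implementation, and the commented-out body above shows its intended shape. A sketch of that logic as a standalone helper, assuming a `Required []string` field is eventually added to the manifest type (the function and field names here are illustrative, not the final API):

```go
package main

import "fmt"

// requiredSet converts a manifest's required package list into the
// map[string]bool set form that gps's RootManifest.RequiredPackages returns,
// mirroring the commented-out RequiredPackages body above.
func requiredSet(required []string) map[string]bool {
	if len(required) == 0 {
		return nil
	}
	mp := make(map[string]bool, len(required))
	for _, r := range required {
		mp[r] = true
	}
	return mp
}

func main() {
	fmt.Println(requiredSet([]string{"golang.org/x/tools/cmd/stringer"}))
}
```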
@@ -10,7 +10,7 @@
 		"version": ">=0.8.0, <1.0.0"
 	},
 	"github.com/sdboyer/gps": {
-		"version": ">=0.12.0, <1.0.0"
+		"version": ">=0.13.0, <1.0.0"
 	}
 }
}

@@ -187,7 +187,7 @@ func runStatusAll(p *project, sm *gps.SourceMgr) error {

 			// Only if we have a non-rev and non-plain version do/can we display
 			// anything wrt the version's updateability.
-			if bs.Version != nil && bs.Version.Type() != "version" {
+			if bs.Version != nil && bs.Version.Type() != gps.IsVersion {
 				c, has := p.m.Dependencies[proj.Ident().ProjectRoot]
 				if !has {
 					c.Constraint = gps.Any()

@@ -209,7 +209,7 @@ func runStatusAll(p *project, sm *gps.SourceMgr) error {
 				// For branch constraints this should be the
 				// most recent revision on the selected
 				// branch.
-				if tv, ok := v.(gps.PairedVersion); ok && v.Type() == "branch" {
+				if tv, ok := v.(gps.PairedVersion); ok && v.Type() == gps.IsBranch {
 					bs.Latest = tv.Underlying()
 				} else {
 					bs.Latest = v

@@ -289,9 +289,9 @@ func formatVersion(v gps.Version) string {
 		return ""
 	}
 	switch v.Type() {
-	case "branch":
+	case gps.IsBranch:
 		return "branch " + v.String()
-	case "rev":
+	case gps.IsRevision:
 		r := v.String()
 		if len(r) > 7 {
 			r = r[:7]

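For reference, the display logic touched by the formatVersion hunk above can be mirrored as a small self-contained sketch (local stand-in types, not the real gps API): branches get a prefix, revisions are truncated to seven characters, and other versions pass through.

```go
package main

import "fmt"

type versionType int

const (
	isRevision versionType = iota
	isBranch
	isVersion
	isSemver
)

// formatVersion mirrors the status display rules from the hunk above.
func formatVersion(t versionType, s string) string {
	switch t {
	case isBranch:
		return "branch " + s
	case isRevision:
		if len(s) > 7 {
			s = s[:7]
		}
		return s
	}
	return s
}

func main() {
	fmt.Println(formatVersion(isRevision, "41fd676e835d91ab7307597cb753e08d47065da1")) // 41fd676
	fmt.Println(formatVersion(isBranch, "2.x"))                                        // branch 2.x
}
```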
@ -1,9 +1,9 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- 1.7
|
||||
- tip
|
||||
|
||||
# Setting sudo access to false will let Travis CI use containers rather than
|
||||
|
@ -12,5 +12,14 @@ go:
|
|||
# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
|
||||
sudo: false
|
||||
|
||||
script:
|
||||
- GO15VENDOREXPERIMENT=1 make setup
|
||||
- GO15VENDOREXPERIMENT=1 make test
|
||||
|
||||
notifications:
|
||||
irc: "irc.freenode.net#masterminds"
|
||||
webhooks:
|
||||
urls:
|
||||
- https://webhooks.gitter.im/e/06e3328629952dabe3e0
|
||||
on_success: change # options: [always|never|change] default: always
|
||||
on_failure: always # options: [always|never|change] default: always
|
||||
on_start: never # options: [always|never|change] default: always
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
.PHONY: setup
|
||||
setup:
|
||||
go get -u gopkg.in/alecthomas/gometalinter.v1
|
||||
gometalinter.v1 --install
|
||||
|
||||
.PHONY: test
|
||||
test: validate lint
|
||||
@echo "==> Running tests"
|
||||
go test -v
|
||||
|
||||
.PHONY: validate
|
||||
validate:
|
||||
@echo "==> Running static validations"
|
||||
@gometalinter.v1 \
|
||||
--disable-all \
|
||||
--enable deadcode \
|
||||
--severity deadcode:error \
|
||||
--enable gofmt \
|
||||
--enable gosimple \
|
||||
--enable ineffassign \
|
||||
--enable misspell \
|
||||
--enable vet \
|
||||
--tests \
|
||||
--vendor \
|
||||
--deadline 60s \
|
||||
./... || exit_code=1
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
@echo "==> Running linters"
|
||||
@gometalinter.v1 \
|
||||
--disable-all \
|
||||
--enable golint \
|
||||
--vendor \
|
||||
--deadline 60s \
|
||||
./... || :
|
|
@ -12,11 +12,33 @@ platform:
|
|||
install:
|
||||
- go version
|
||||
- go env
|
||||
- go get -u gopkg.in/alecthomas/gometalinter.v1
|
||||
- set PATH=%PATH%;%GOPATH%\bin
|
||||
- gometalinter.v1.exe --install
|
||||
|
||||
build_script:
|
||||
- go install -v ./...
|
||||
|
||||
test_script:
|
||||
- "gometalinter.v1 \
|
||||
--disable-all \
|
||||
--enable deadcode \
|
||||
--severity deadcode:error \
|
||||
--enable gofmt \
|
||||
--enable gosimple \
|
||||
--enable ineffassign \
|
||||
--enable misspell \
|
||||
--enable vet \
|
||||
--tests \
|
||||
--vendor \
|
||||
--deadline 60s \
|
||||
./... || cmd /C EXIT 0"
|
||||
- "gometalinter.v1 \
|
||||
--disable-all \
|
||||
--enable golint \
|
||||
--vendor \
|
||||
--deadline 60s \
|
||||
./... || cmd /C EXIT 0"
|
||||
- go test -v
|
||||
|
||||
deploy: off
|
||||
|
|
|
@ -92,19 +92,19 @@ func constraintEq(c1, c2 Constraint) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
if tc1.min != nil {
|
||||
if !tc1.minIsZero() {
|
||||
if !(tc1.includeMin == tc2.includeMin && tc1.min.Equal(tc2.min)) {
|
||||
return false
|
||||
}
|
||||
} else if tc2.min != nil {
|
||||
} else if !tc2.minIsZero() {
|
||||
return false
|
||||
}
|
||||
|
||||
if tc1.max != nil {
|
||||
if !tc1.maxIsInf() {
|
||||
if !(tc1.includeMax == tc2.includeMax && tc1.max.Equal(tc2.max)) {
|
||||
return false
|
||||
}
|
||||
} else if tc2.max != nil {
|
||||
} else if !tc2.maxIsInf() {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -136,7 +136,7 @@ func constraintEq(c1, c2 Constraint) bool {
|
|||
}
|
||||
|
||||
// newV is a helper to create a new Version object.
|
||||
func newV(major, minor, patch int64) *Version {
|
||||
func newV(major, minor, patch uint64) *Version {
|
||||
return &Version{
|
||||
major: major,
|
||||
minor: minor,
|
||||
|
@ -405,12 +405,18 @@ func TestBidirectionalSerialization(t *testing.T) {
|
|||
{"4.1.0", true},
|
||||
{"!=4.1.0", true},
|
||||
{">=1.1.0", true},
|
||||
{">=1.1.0, <2.0.0", true},
|
||||
{">1.0.0, <=1.1.0", true},
|
||||
{"<=1.1.0", true},
|
||||
{">=1.1.0, <2.0.0, !=1.2.3", true},
|
||||
{">=1.1.0, <2.0.0, !=1.2.3 || >3.0.0", true},
|
||||
{">=1.1.0, <2.0.0, !=1.2.3 || >=3.0.0", true},
|
||||
{">=1.1.7, <1.3.0", true}, // tilde width
|
||||
{">=1.1.0, <=2.0.0", true}, // no unary op on lte max
|
||||
{">1.1.3, <2.0.0", true}, // no unary op on gt min
|
||||
{">1.1.0, <=2.0.0", true}, // no unary op on gt min and lte max
|
||||
{">=1.1.0, <=1.2.0", true}, // no unary op on lte max
|
||||
{">1.1.1, <1.2.0", true}, // no unary op on gt min
|
||||
{">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max
|
||||
{">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max
|
||||
{">=0.1.7, <1.0.0", true}, // carat shifting below 1.0.0
|
||||
{">=0.1.7, <0.3.0", true}, // carat shifting width below 1.0.0
|
||||
}
|
||||
|
||||
for _, fix := range tests {
|
||||
|
@ -430,6 +436,27 @@ func TestBidirectionalSerialization(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPreferUnaryOpForm(t *testing.T) {
|
||||
tests := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{">=0.1.7, <0.2.0", "^0.1.7"}, // carat shifting below 1.0.0
|
||||
{">=1.1.0, <2.0.0", "^1.1.0"},
|
||||
{">=1.1.0, <2.0.0, !=1.2.3", "^1.1.0, !=1.2.3"},
|
||||
}
|
||||
|
||||
for _, fix := range tests {
|
||||
c, err := NewConstraint(fix.in)
|
||||
if err != nil {
|
||||
t.Errorf("Valid constraint string produced unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if fix.out != c.String() {
|
||||
t.Errorf("Constraint %q was not transformed into expected output string %q", fix.in, fix.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRewriteRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
c string
|
||||
|
@ -484,7 +511,7 @@ func TestUnionErr(t *testing.T) {
|
|||
},
|
||||
)
|
||||
fail := u1.Matches(newV(2, 5, 0))
|
||||
failstr := `2.5.0 is greater than or equal to the maximum of >=1.0.0, <2.0.0
|
||||
failstr := `2.5.0 is greater than or equal to the maximum of ^1.0.0
|
||||
2.5.0 is less than the minimum of >=3.0.0, <=4.0.0`
|
||||
if fail.Error() != failstr {
|
||||
t.Errorf("Did not get expected failure message from union, got %q", fail)
|
||||
|
|
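The new TestPreferUnaryOpForm cases above show that the vendored semver 2.x branch now re-serializes caret-shaped ranges back into their unary form. A minimal usage sketch, assuming the 2.x branch of github.com/Masterminds/semver vendored by this commit (the expected output string comes straight from the test table):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver" // assumes the vendored 2.x branch
)

func main() {
	// A range that exactly matches a caret shape prints as its unary form.
	c, err := semver.NewConstraint(">=1.1.0, <2.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.String()) // ^1.1.0 on the 2.x branch
}
```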
|
@ -20,7 +20,7 @@ func (rc rangeConstraint) Matches(v *Version) error {
|
|||
rc: rc,
|
||||
}
|
||||
|
||||
if rc.min != nil {
|
||||
if !rc.minIsZero() {
|
||||
// TODO ensure sane handling of prerelease versions (which are strictly
|
||||
// less than the normal version, but should be admitted in a geq range)
|
||||
cmp := rc.min.Compare(v)
|
||||
|
@ -37,7 +37,7 @@ func (rc rangeConstraint) Matches(v *Version) error {
|
|||
}
|
||||
}
|
||||
|
||||
if rc.max != nil {
|
||||
if !rc.maxIsInf() {
|
||||
// TODO ensure sane handling of prerelease versions (which are strictly
|
||||
// less than the normal version, but should be admitted in a geq range)
|
||||
cmp := rc.max.Compare(v)
|
||||
|
@ -83,6 +83,14 @@ func (rc rangeConstraint) dup() rangeConstraint {
|
|||
}
|
||||
}
|
||||
|
||||
func (rc rangeConstraint) minIsZero() bool {
|
||||
return rc.min == nil
|
||||
}
|
||||
|
||||
func (rc rangeConstraint) maxIsInf() bool {
|
||||
return rc.max == nil
|
||||
}
|
||||
|
||||
func (rc rangeConstraint) Intersect(c Constraint) Constraint {
|
||||
switch oc := c.(type) {
|
||||
case any:
|
||||
|
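The minIsZero/maxIsInf helpers introduced above encode the convention that a nil range endpoint means "unbounded", so the nil checks scattered through Intersect, Union and isSupersetOf can read as intent. A tiny stand-in sketch of that convention (not the real rangeConstraint type):

```go
package main

import "fmt"

// rangeBounds stands in for rangeConstraint's endpoints: a nil pointer means
// no bound (zero on the low side, infinity on the high side).
type rangeBounds struct {
	min, max *int // stand-in for *Version endpoints
}

func (r rangeBounds) minIsZero() bool { return r.min == nil }
func (r rangeBounds) maxIsInf() bool  { return r.max == nil }

func main() {
	v := 2
	open := rangeBounds{}
	capped := rangeBounds{max: &v}
	fmt.Println(open.minIsZero(), open.maxIsInf())     // true true  -> behaves like "*"
	fmt.Println(capped.minIsZero(), capped.maxIsInf()) // true false -> behaves like "<2"
}
```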
@ -105,8 +113,8 @@ func (rc rangeConstraint) Intersect(c Constraint) Constraint {
|
|||
includeMax: rc.includeMax,
|
||||
}
|
||||
|
||||
if oc.min != nil {
|
||||
if nr.min == nil || nr.min.LessThan(oc.min) {
|
||||
if !oc.minIsZero() {
|
||||
if nr.minIsZero() || nr.min.LessThan(oc.min) {
|
||||
nr.min = oc.min
|
||||
nr.includeMin = oc.includeMin
|
||||
} else if oc.min.Equal(nr.min) && !oc.includeMin {
|
||||
|
@ -115,8 +123,8 @@ func (rc rangeConstraint) Intersect(c Constraint) Constraint {
|
|||
}
|
||||
}
|
||||
|
||||
if oc.max != nil {
|
||||
if nr.max == nil || nr.max.GreaterThan(oc.max) {
|
||||
if !oc.maxIsInf() {
|
||||
if nr.maxIsInf() || nr.max.GreaterThan(oc.max) {
|
||||
nr.max = oc.max
|
||||
nr.includeMax = oc.includeMax
|
||||
} else if oc.max.Equal(nr.max) && !oc.includeMax {
|
||||
|
@ -132,7 +140,7 @@ func (rc rangeConstraint) Intersect(c Constraint) Constraint {
|
|||
}
|
||||
}
|
||||
|
||||
if nr.min == nil || nr.max == nil {
|
||||
if nr.minIsZero() || nr.maxIsInf() {
|
||||
return nr
|
||||
}
|
||||
|
||||
|
@ -209,7 +217,7 @@ func (rc rangeConstraint) Union(c Constraint) Constraint {
|
|||
// Only possibility left is gt
|
||||
return unionConstraint{rc.dup(), oc}
|
||||
case rangeConstraint:
|
||||
if (rc.min == nil && oc.max == nil) || (rc.max == nil && oc.min == nil) {
|
||||
if (rc.minIsZero() && oc.maxIsInf()) || (rc.maxIsInf() && oc.minIsZero()) {
|
||||
rcl, ocl := len(rc.excl), len(oc.excl)
|
||||
// Quick check for open case
|
||||
if rcl == 0 && ocl == 0 {
|
||||
|
@ -273,8 +281,8 @@ func (rc rangeConstraint) Union(c Constraint) Constraint {
|
|||
)
|
||||
|
||||
// Pick the min
|
||||
if rc.min != nil {
|
||||
if oc.min == nil || rc.min.GreaterThan(oc.min) || (rc.min.Equal(oc.min) && !rc.includeMin && oc.includeMin) {
|
||||
if !rc.minIsZero() {
|
||||
if oc.minIsZero() || rc.min.GreaterThan(oc.min) || (rc.min.Equal(oc.min) && !rc.includeMin && oc.includeMin) {
|
||||
info |= rminlt
|
||||
nc.min = oc.min
|
||||
nc.includeMin = oc.includeMin
|
||||
|
@ -283,15 +291,15 @@ func (rc rangeConstraint) Union(c Constraint) Constraint {
|
|||
nc.min = rc.min
|
||||
nc.includeMin = rc.includeMin
|
||||
}
|
||||
} else if oc.min != nil {
|
||||
} else if !oc.minIsZero() {
|
||||
info |= lminlt
|
||||
nc.min = rc.min
|
||||
nc.includeMin = rc.includeMin
|
||||
}
|
||||
|
||||
// Pick the max
|
||||
if rc.max != nil {
|
||||
if oc.max == nil || rc.max.LessThan(oc.max) || (rc.max.Equal(oc.max) && !rc.includeMax && oc.includeMax) {
|
||||
if !rc.maxIsInf() {
|
||||
if oc.maxIsInf() || rc.max.LessThan(oc.max) || (rc.max.Equal(oc.max) && !rc.includeMax && oc.includeMax) {
|
||||
info |= rmaxgt
|
||||
nc.max = oc.max
|
||||
nc.includeMax = oc.includeMax
|
||||
|
@ -300,7 +308,7 @@ func (rc rangeConstraint) Union(c Constraint) Constraint {
|
|||
nc.max = rc.max
|
||||
nc.includeMax = rc.includeMax
|
||||
}
|
||||
} else if oc.max != nil {
|
||||
} else if oc.maxIsInf() {
|
||||
info |= lmaxgt
|
||||
nc.max = rc.max
|
||||
nc.includeMax = rc.includeMax
|
||||
|
@ -346,14 +354,14 @@ func (rc rangeConstraint) Union(c Constraint) Constraint {
|
|||
// Note also that this does *not* compare excluded versions - it only compares
|
||||
// range endpoints.
|
||||
func (rc rangeConstraint) isSupersetOf(rc2 rangeConstraint) bool {
|
||||
if rc.min != nil {
|
||||
if rc2.min == nil || rc.min.GreaterThan(rc2.min) || (rc.min.Equal(rc2.min) && !rc.includeMin && rc2.includeMin) {
|
||||
if !rc.minIsZero() {
|
||||
if rc2.minIsZero() || rc.min.GreaterThan(rc2.min) || (rc.min.Equal(rc2.min) && !rc.includeMin && rc2.includeMin) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if rc.max != nil {
|
||||
if rc2.max == nil || rc.max.LessThan(rc2.max) || (rc.max.Equal(rc2.max) && !rc.includeMax && rc2.includeMax) {
|
||||
if !rc.maxIsInf() {
|
||||
if rc2.maxIsInf() || rc.max.LessThan(rc2.max) || (rc.max.Equal(rc2.max) && !rc.includeMax && rc2.includeMax) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -362,22 +370,66 @@ func (rc rangeConstraint) isSupersetOf(rc2 rangeConstraint) bool {
|
|||
}
|
||||
|
||||
func (rc rangeConstraint) String() string {
|
||||
// TODO express using caret or tilde, where applicable
|
||||
var pieces []string
|
||||
if rc.min != nil {
|
||||
if rc.includeMin {
|
||||
pieces = append(pieces, fmt.Sprintf(">=%s", rc.min))
|
||||
} else {
|
||||
pieces = append(pieces, fmt.Sprintf(">%s", rc.min))
|
||||
|
||||
// We need to trigger the standard verbose handling from various points, so
|
||||
// wrap it in a function.
|
||||
noshort := func() {
|
||||
if !rc.minIsZero() {
|
||||
if rc.includeMin {
|
||||
pieces = append(pieces, fmt.Sprintf(">=%s", rc.min))
|
||||
} else {
|
||||
pieces = append(pieces, fmt.Sprintf(">%s", rc.min))
|
||||
}
|
||||
}
|
||||
|
||||
if !rc.maxIsInf() {
|
||||
if rc.includeMax {
|
||||
pieces = append(pieces, fmt.Sprintf("<=%s", rc.max))
|
||||
} else {
|
||||
pieces = append(pieces, fmt.Sprintf("<%s", rc.max))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if rc.max != nil {
|
||||
if rc.includeMax {
|
||||
pieces = append(pieces, fmt.Sprintf("<=%s", rc.max))
|
||||
} else {
|
||||
pieces = append(pieces, fmt.Sprintf("<%s", rc.max))
|
||||
// Handle the possibility that we might be able to express the range
|
||||
// with a carat or tilde, as we prefer those forms.
|
||||
switch {
|
||||
case rc.minIsZero() && rc.maxIsInf():
|
||||
// This if is internal because it's useful to know for the other cases
|
||||
// that we don't have special values at both bounds
|
||||
if len(rc.excl) == 0 {
|
||||
// Shouldn't be possible to reach from anything that can be done
|
||||
// outside the package, but best to cover it and be safe
|
||||
return "*"
|
||||
}
|
||||
case rc.minIsZero(), rc.includeMax, !rc.includeMin:
|
||||
// tilde and carat could never apply here
|
||||
noshort()
|
||||
case !rc.maxIsInf() && rc.max.Minor() == 0 && rc.max.Patch() == 0: // basic carat
|
||||
if rc.min.Major() == rc.max.Major()-1 && rc.min.Major() != 0 {
|
||||
pieces = append(pieces, fmt.Sprintf("^%s", rc.min))
|
||||
} else {
|
||||
// range is too wide for carat, need standard operators
|
||||
noshort()
|
||||
}
|
||||
case !rc.maxIsInf() && rc.max.Major() != 0 && rc.max.Patch() == 0: // basic tilde
|
||||
if rc.min.Minor() == rc.max.Minor()-1 && rc.min.Major() == rc.max.Major() {
|
||||
pieces = append(pieces, fmt.Sprintf("~%s", rc.min))
|
||||
} else {
|
||||
// range is too wide for tilde, need standard operators
|
||||
noshort()
|
||||
}
|
||||
case !rc.maxIsInf() && rc.max.Major() == 0 && rc.max.Patch() == 0 && rc.max.Minor() != 0:
|
||||
// below 1.0.0, tilde is meaningless but carat is shifted to the
|
||||
// right (so it basically behaves the same as tilde does above 1.0.0)
|
||||
if rc.min.Minor() == rc.max.Minor()-1 {
|
||||
pieces = append(pieces, fmt.Sprintf("^%s", rc.min))
|
||||
} else {
|
||||
noshort()
|
||||
}
|
||||
default:
|
||||
noshort()
|
||||
}
|
||||
|
||||
for _, e := range rc.excl {
|
||||
|
|
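The rewritten rangeConstraint.String above prefers caret and tilde shorthand when the bounds line up. A simplified, self-contained sketch of those decision rules (stand-in version struct; inclusive/exclusive bounds and exclusion lists omitted for brevity), with outputs matching the TestPreferUnaryOpForm expectations:

```go
package main

import "fmt"

type ver struct{ major, minor, patch uint64 }

// shortForm sketches the new preference rules: caret when the upper bound is
// the next major version (and the lower bound is at or above 1.0.0), tilde
// when the upper bound is the next minor version, and caret shifted one place
// right below 1.0.0; anything wider falls back to the verbose operators.
func shortForm(min, max ver) string {
	switch {
	case max.minor == 0 && max.patch == 0 && min.major == max.major-1 && min.major != 0:
		return fmt.Sprintf("^%d.%d.%d", min.major, min.minor, min.patch) // basic caret
	case max.major != 0 && max.patch == 0 && min.major == max.major && min.minor == max.minor-1:
		return fmt.Sprintf("~%d.%d.%d", min.major, min.minor, min.patch) // basic tilde
	case max.major == 0 && max.patch == 0 && max.minor != 0 && min.minor == max.minor-1:
		return fmt.Sprintf("^%d.%d.%d", min.major, min.minor, min.patch) // caret shifted below 1.0.0
	default:
		return fmt.Sprintf(">=%d.%d.%d, <%d.%d.%d",
			min.major, min.minor, min.patch, max.major, max.minor, max.patch)
	}
}

func main() {
	fmt.Println(shortForm(ver{1, 1, 0}, ver{2, 0, 0})) // ^1.1.0
	fmt.Println(shortForm(ver{1, 1, 7}, ver{1, 2, 0})) // ~1.1.7
	fmt.Println(shortForm(ver{0, 1, 7}, ver{0, 2, 0})) // ^0.1.7
	fmt.Println(shortForm(ver{1, 1, 7}, ver{1, 3, 0})) // >=1.1.7, <1.3.0 (too wide for tilde)
}
```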
|
@ -46,7 +46,7 @@ const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
|
|||
|
||||
// Version represents a single semantic version.
|
||||
type Version struct {
|
||||
major, minor, patch int64
|
||||
major, minor, patch uint64
|
||||
pre string
|
||||
metadata string
|
||||
original string
|
||||
|
@ -84,8 +84,8 @@ func NewVersion(v string) (*Version, error) {
|
|||
original: v,
|
||||
}
|
||||
|
||||
var temp int64
|
||||
temp, err := strconv.ParseInt(m[1], 10, 32)
|
||||
var temp uint64
|
||||
temp, err := strconv.ParseUint(m[1], 10, 32)
|
||||
if err != nil {
|
||||
bvs := badVersionSegment{e: err}
|
||||
if CacheVersions {
|
||||
|
@ -99,7 +99,7 @@ func NewVersion(v string) (*Version, error) {
|
|||
sv.major = temp
|
||||
|
||||
if m[2] != "" {
|
||||
temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 32)
|
||||
temp, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 32)
|
||||
if err != nil {
|
||||
bvs := badVersionSegment{e: err}
|
||||
if CacheVersions {
|
||||
|
@ -116,7 +116,7 @@ func NewVersion(v string) (*Version, error) {
|
|||
}
|
||||
|
||||
if m[3] != "" {
|
||||
temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 32)
|
||||
temp, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 32)
|
||||
if err != nil {
|
||||
bvs := badVersionSegment{e: err}
|
||||
if CacheVersions {
|
||||
|
@ -166,17 +166,17 @@ func (v *Version) Original() string {
|
|||
}
|
||||
|
||||
// Major returns the major version.
|
||||
func (v *Version) Major() int64 {
|
||||
func (v *Version) Major() uint64 {
|
||||
return v.major
|
||||
}
|
||||
|
||||
// Minor returns the minor version.
|
||||
func (v *Version) Minor() int64 {
|
||||
func (v *Version) Minor() uint64 {
|
||||
return v.minor
|
||||
}
|
||||
|
||||
// Patch returns the patch version.
|
||||
func (v *Version) Patch() int64 {
|
||||
func (v *Version) Patch() uint64 {
|
||||
return v.patch
|
||||
}
|
||||
|
||||
|
@ -297,7 +297,7 @@ func (v *Version) Union(c Constraint) Constraint {
|
|||
func (Version) _private() {}
|
||||
func (Version) _real() {}
|
||||
|
||||
func compareSegment(v, o int64) int {
|
||||
func compareSegment(v, o uint64) int {
|
||||
if v < o {
|
||||
return -1
|
||||
}
|
||||
|
|
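The version.go hunks above switch the major/minor/patch segments from int64 to uint64 and parse them with strconv.ParseUint, so a negative segment is rejected during parsing rather than stored. A tiny illustration using only the standard library:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// With ParseUint (bitSize 32, as in the diff), a leading minus sign is a
	// parse error instead of silently producing a negative version segment.
	if _, err := strconv.ParseUint("-1", 10, 32); err != nil {
		fmt.Println("rejected:", err)
	}
	major, _ := strconv.ParseUint("2", 10, 32)
	fmt.Println(major) // 2, held as a uint64
}
```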
|
@@ -28,7 +28,9 @@ way. It is a distillation of the ideas behind language package managers like
 [cargo](https://crates.io/) (and others) into a library, artisanally
 handcrafted with ❤️ for Go's specific requirements.

-`gps` is [on track](https://github.com/Masterminds/glide/pull/384) to become the engine behind [glide](https://glide.sh).
+`gps` is [on track](https://github.com/Masterminds/glide/issues/565) to become
+the engine behind [glide](https://glide.sh). It also powers the new, (hopefully)
+official Go tooling, which we plan to make public at the beginning of 2017.

 The wiki has a [general introduction to the `gps`
 approach](https://github.com/sdboyer/gps/wiki/Introduction-to-gps), as well

@ -16,34 +16,29 @@ import (
|
|||
|
||||
var osList []string
|
||||
var archList []string
|
||||
var stdlib = make(map[string]bool)
|
||||
|
||||
const stdlibPkgs string = "archive archive/tar archive/zip bufio builtin bytes compress compress/bzip2 compress/flate compress/gzip compress/lzw compress/zlib container container/heap container/list container/ring context crypto crypto/aes crypto/cipher crypto/des crypto/dsa crypto/ecdsa crypto/elliptic crypto/hmac crypto/md5 crypto/rand crypto/rc4 crypto/rsa crypto/sha1 crypto/sha256 crypto/sha512 crypto/subtle crypto/tls crypto/x509 crypto/x509/pkix database database/sql database/sql/driver debug debug/dwarf debug/elf debug/gosym debug/macho debug/pe debug/plan9obj encoding encoding/ascii85 encoding/asn1 encoding/base32 encoding/base64 encoding/binary encoding/csv encoding/gob encoding/hex encoding/json encoding/pem encoding/xml errors expvar flag fmt go go/ast go/build go/constant go/doc go/format go/importer go/parser go/printer go/scanner go/token go/types hash hash/adler32 hash/crc32 hash/crc64 hash/fnv html html/template image image/color image/color/palette image/draw image/gif image/jpeg image/png index index/suffixarray io io/ioutil log log/syslog math math/big math/cmplx math/rand mime mime/multipart mime/quotedprintable net net/http net/http/cgi net/http/cookiejar net/http/fcgi net/http/httptest net/http/httputil net/http/pprof net/mail net/rpc net/rpc/jsonrpc net/smtp net/textproto net/url os os/exec os/signal os/user path path/filepath reflect regexp regexp/syntax runtime runtime/cgo runtime/debug runtime/msan runtime/pprof runtime/race runtime/trace sort strconv strings sync sync/atomic syscall testing testing/iotest testing/quick text text/scanner text/tabwriter text/template text/template/parse time unicode unicode/utf16 unicode/utf8 unsafe"
|
||||
|
||||
// Before appengine moved to google.golang.org/appengine, it had a magic
|
||||
// stdlib-like import path. We have to ignore all of these.
|
||||
const appenginePkgs string = "appengine/aetest appengine/blobstore appengine/capability appengine/channel appengine/cloudsql appengine/cmd appengine/cmd/aebundler appengine/cmd/aedeploy appengine/cmd/aefix appengine/datastore appengine/delay appengine/demos appengine/demos/guestbook appengine/demos/guestbook/templates appengine/demos/helloworld appengine/file appengine/image appengine/internal appengine/internal/aetesting appengine/internal/app_identity appengine/internal/base appengine/internal/blobstore appengine/internal/capability appengine/internal/channel appengine/internal/datastore appengine/internal/image appengine/internal/log appengine/internal/mail appengine/internal/memcache appengine/internal/modules appengine/internal/remote_api appengine/internal/search appengine/internal/socket appengine/internal/system appengine/internal/taskqueue appengine/internal/urlfetch appengine/internal/user appengine/internal/xmpp appengine/log appengine/mail appengine/memcache appengine/module appengine/remote_api appengine/runtime appengine/search appengine/socket appengine/taskqueue appengine/urlfetch appengine/user appengine/xmpp"
|
||||
|
||||
func init() {
|
||||
// The supported systems are listed in
|
||||
// https://github.com/golang/go/blob/master/src/go/build/syslist.go
|
||||
// The lists are not exported so we need to duplicate them here.
|
||||
// The lists are not exported, so we need to duplicate them here.
|
||||
osListString := "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows"
|
||||
osList = strings.Split(osListString, " ")
|
||||
|
||||
archListString := "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64"
|
||||
archList = strings.Split(archListString, " ")
|
||||
}
|
||||
|
||||
for _, pkg := range strings.Split(stdlibPkgs, " ") {
|
||||
stdlib[pkg] = true
|
||||
}
|
||||
for _, pkg := range strings.Split(appenginePkgs, " ") {
|
||||
stdlib[pkg] = true
|
||||
// Stored as a var so that tests can swap it out. Ugh globals, ugh.
|
||||
var isStdLib = doIsStdLib
|
||||
|
||||
// This was loving taken from src/cmd/go/pkg.go in Go's code (isStandardImportPath).
|
||||
func doIsStdLib(path string) bool {
|
||||
i := strings.Index(path, "/")
|
||||
if i < 0 {
|
||||
i = len(path)
|
||||
}
|
||||
|
||||
// Also ignore C
|
||||
// TODO(sdboyer) actually figure out how to deal with cgo
|
||||
stdlib["C"] = true
|
||||
return !strings.Contains(path[:i], ".")
|
||||
}
|
||||
|
||||
// ListPackages reports Go package information about all directories in the tree
|
||||
|
@ -256,11 +251,11 @@ func ListPackages(fileRoot, importRoot string) (PackageTree, error) {
|
|||
lim = append(lim, imp)
|
||||
// ignore stdlib done this way, b/c that's what the go tooling does
|
||||
case strings.HasPrefix(imp, "./"):
|
||||
if stdlib[imp[2:]] {
|
||||
if isStdLib(imp[2:]) {
|
||||
lim = append(lim, imp)
|
||||
}
|
||||
case strings.HasPrefix(imp, "../"):
|
||||
if stdlib[imp[3:]] {
|
||||
if isStdLib(imp[3:]) {
|
||||
lim = append(lim, imp)
|
||||
}
|
||||
}
|
||||
|
@ -599,7 +594,7 @@ func wmToReach(workmap map[string]wm, basedir string) map[string][]string {
|
|||
|
||||
var dfe func(string, []string) bool
|
||||
|
||||
// dfe is the depth-first-explorer that computes safe, error-free external
|
||||
// dfe is the depth-first-explorer that computes a safe, error-free external
|
||||
// reach map.
|
||||
//
|
||||
// pkg is the import path of the pkg currently being visited; path is the
|
||||
|
|
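The analysis.go change above drops the hard-coded stdlib and appengine package lists in favor of a small heuristic lifted from cmd/go's isStandardImportPath: an import path counts as stdlib-shaped when its first path element contains no dot. The same function, reproduced verbatim as a runnable snippet:

```go
package main

import (
	"fmt"
	"strings"
)

// isStdLib reproduces doIsStdLib from the hunk above: no dot in the first
// path element means stdlib-shaped, which also covers "appengine/..." and "C".
func isStdLib(path string) bool {
	i := strings.Index(path, "/")
	if i < 0 {
		i = len(path)
	}
	return !strings.Contains(path[:i], ".")
}

func main() {
	fmt.Println(isStdLib("net/http"))               // true
	fmt.Println(isStdLib("appengine/datastore"))    // true
	fmt.Println(isStdLib("github.com/sdboyer/gps")) // false
}
```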
|
@ -58,7 +58,7 @@ type bridge struct {
|
|||
|
||||
// Global factory func to create a bridge. This exists solely to allow tests to
|
||||
// override it with a custom bridge and sm.
|
||||
var mkBridge func(*solver, SourceManager) sourceBridge = func(s *solver, sm SourceManager) sourceBridge {
|
||||
var mkBridge = func(s *solver, sm SourceManager) sourceBridge {
|
||||
return &bridge{
|
||||
sm: sm,
|
||||
s: s,
|
||||
|
@ -377,7 +377,7 @@ func (vtu versionTypeUnion) String() string {
|
|||
// This should generally not be called, but is required for the interface. If it
|
||||
// is called, we have a bigger problem (the type has escaped the solver); thus,
|
||||
// panic.
|
||||
func (vtu versionTypeUnion) Type() string {
|
||||
func (vtu versionTypeUnion) Type() VersionType {
|
||||
panic("versionTypeUnion should never need to answer a Type() call; it is solver internal-only")
|
||||
}
|
||||
|
||||
|
|
|
@ -179,14 +179,20 @@ type ProjectConstraint struct {
|
|||
Constraint Constraint
|
||||
}
|
||||
|
||||
// ProjectConstraints is a map of projects, as identified by their import path
|
||||
// roots (ProjectRoots) to the corresponding ProjectProperties.
|
||||
//
|
||||
// They are the standard form in which Manifests declare their required
|
||||
// dependency properties - constraints and network locations - as well as the
|
||||
// form in which RootManifests declare their overrides.
|
||||
type ProjectConstraints map[ProjectRoot]ProjectProperties
|
||||
|
||||
type workingConstraint struct {
|
||||
Ident ProjectIdentifier
|
||||
Constraint Constraint
|
||||
overrNet, overrConstraint bool
|
||||
}
|
||||
|
||||
type ProjectConstraints map[ProjectRoot]ProjectProperties
|
||||
|
||||
func pcSliceToMap(l []ProjectConstraint, r ...[]ProjectConstraint) ProjectConstraints {
|
||||
final := make(ProjectConstraints)
|
||||
|
||||
|
|
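The expanded doc comment above describes ProjectConstraints as the standard form in which manifests declare constraints and network locations per import-path root. A minimal construction sketch, assuming the gps v0.13.0 identifiers that appear elsewhere in this diff (ProjectRoot, ProjectProperties, NewSemverConstraint, Any):

```go
package main

import (
	"fmt"

	"github.com/sdboyer/gps" // assumes gps v0.13.0 as vendored by this commit
)

func main() {
	c, _ := gps.NewSemverConstraint(">=0.13.0, <1.0.0")

	// One entry per project root; each entry may carry a constraint and/or an
	// alternate network location for the source.
	pc := gps.ProjectConstraints{
		gps.ProjectRoot("github.com/sdboyer/gps"): gps.ProjectProperties{
			Constraint: c,
		},
		gps.ProjectRoot("github.com/Masterminds/semver"): gps.ProjectProperties{
			NetworkName: "https://github.com/Masterminds/semver",
			Constraint:  gps.Any(),
		},
	}
	fmt.Println(len(pc))
}
```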
|
@ -620,3 +620,27 @@ func ufmt(u *url.URL) string {
|
|||
return fmt.Sprintf("host=%q, path=%q, opaque=%q, scheme=%q, user=%#v, pass=%#v, rawpath=%q, rawq=%q, frag=%q",
|
||||
u.Host, u.Path, u.Opaque, u.Scheme, user, pass, u.RawPath, u.RawQuery, u.Fragment)
|
||||
}
|
||||
|
||||
func TestIsStdLib(t *testing.T) {
|
||||
fix := []struct {
|
||||
ip string
|
||||
is bool
|
||||
}{
|
||||
{"appengine", true},
|
||||
{"net/http", true},
|
||||
{"github.com/anything", false},
|
||||
{"foo", true},
|
||||
}
|
||||
|
||||
for _, f := range fix {
|
||||
r := doIsStdLib(f.ip)
|
||||
if r != f.is {
|
||||
if r {
|
||||
t.Errorf("%s was marked stdlib but should not have been", f.ip)
|
||||
} else {
|
||||
t.Errorf("%s was not marked stdlib but should have been", f.ip)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,15 +34,15 @@ func (s *solver) HashInputs() []byte {
|
|||
buf.WriteString(pd.Constraint.String())
|
||||
}
|
||||
|
||||
// The stdlib and old appengine packages play the same functional role in
|
||||
// solving as ignores. Because they change, albeit quite infrequently, we
|
||||
// have to include them in the hash.
|
||||
buf.WriteString(stdlibPkgs)
|
||||
buf.WriteString(appenginePkgs)
|
||||
|
||||
// Write each of the packages, or the errors that were found for a
|
||||
// particular subpath, into the hash.
|
||||
// particular subpath, into the hash. We need to do this in a
|
||||
// deterministic order, so expand and sort the map.
|
||||
var pkgs []PackageOrErr
|
||||
for _, perr := range s.rpt.Packages {
|
||||
pkgs = append(pkgs, perr)
|
||||
}
|
||||
sort.Sort(sortPackageOrErr(pkgs))
|
||||
for _, perr := range pkgs {
|
||||
if perr.Err != nil {
|
||||
buf.WriteString(perr.Err.Error())
|
||||
} else {
|
||||
|
@ -58,14 +58,26 @@ func (s *solver) HashInputs() []byte {
|
|||
}
|
||||
}
|
||||
|
||||
// Add the package ignores, if any.
|
||||
// Write any require packages given in the root manifest.
|
||||
if len(s.req) > 0 {
|
||||
// Dump and sort the reqnores
|
||||
req := make([]string, 0, len(s.req))
|
||||
for pkg := range s.req {
|
||||
req = append(req, pkg)
|
||||
}
|
||||
sort.Strings(req)
|
||||
|
||||
for _, reqp := range req {
|
||||
buf.WriteString(reqp)
|
||||
}
|
||||
}
|
||||
|
||||
// Add the ignored packages, if any.
|
||||
if len(s.ig) > 0 {
|
||||
// Dump and sort the ignores
|
||||
ig := make([]string, len(s.ig))
|
||||
k := 0
|
||||
ig := make([]string, 0, len(s.ig))
|
||||
for pkg := range s.ig {
|
||||
ig[k] = pkg
|
||||
k++
|
||||
ig = append(ig, pkg)
|
||||
}
|
||||
sort.Strings(ig)
|
||||
|
||||
|
@ -91,3 +103,25 @@ func (s *solver) HashInputs() []byte {
|
|||
hd := sha256.Sum256(buf.Bytes())
|
||||
return hd[:]
|
||||
}
|
||||
|
||||
type sortPackageOrErr []PackageOrErr
|
||||
|
||||
func (s sortPackageOrErr) Len() int { return len(s) }
|
||||
func (s sortPackageOrErr) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
func (s sortPackageOrErr) Less(i, j int) bool {
|
||||
a, b := s[i], s[j]
|
||||
if a.Err != nil || b.Err != nil {
|
||||
// Sort errors last.
|
||||
if b.Err == nil {
|
||||
return false
|
||||
}
|
||||
if a.Err == nil {
|
||||
return true
|
||||
}
|
||||
// And then by string.
|
||||
return a.Err.Error() < b.Err.Error()
|
||||
}
|
||||
// And finally, sort by import path.
|
||||
return a.P.ImportPath < b.P.ImportPath
|
||||
}
|
||||
|
|
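HashInputs now sorts both the package list (via sortPackageOrErr) and the required/ignored sets before writing them to the hash, so Go's randomized map iteration cannot change the memo between runs. A self-contained sketch of that pattern for a string set:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// hashSet mirrors the ordering discipline above: dump the map keys, sort
// them, and only then feed them to the hash, keeping the digest deterministic.
func hashSet(set map[string]bool) [32]byte {
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	h := sha256.New()
	for _, k := range keys {
		h.Write([]byte(k))
	}
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	// Same digest no matter how the map iterates.
	fmt.Printf("%x\n", hashSet(map[string]bool{"baz": true, "qux": true}))
}
```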
|
@ -29,8 +29,6 @@ func TestHashInputs(t *testing.T) {
|
|||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"root",
|
||||
"a",
|
||||
|
@ -48,7 +46,7 @@ func TestHashInputs(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHashInputsIgnores(t *testing.T) {
|
||||
func TestHashInputsReqsIgs(t *testing.T) {
|
||||
fix := basicFixtures["shared dependency with overlapping constraints"]
|
||||
|
||||
rm := fix.rootmanifest().(simpleRootManifest).dup()
|
||||
|
@ -77,8 +75,6 @@ func TestHashInputsIgnores(t *testing.T) {
|
|||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
|
@ -97,6 +93,86 @@ func TestHashInputsIgnores(t *testing.T) {
|
|||
if !bytes.Equal(dig, correct) {
|
||||
t.Errorf("Hashes are not equal")
|
||||
}
|
||||
|
||||
// Add requires
|
||||
rm.req = map[string]bool{
|
||||
"baz": true,
|
||||
"qux": true,
|
||||
}
|
||||
|
||||
params.Manifest = rm
|
||||
|
||||
s, err = Prepare(params, newdepspecSM(fix.ds, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error while prepping solver: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
dig = s.HashInputs()
|
||||
h = sha256.New()
|
||||
|
||||
elems = []string{
|
||||
"a",
|
||||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
"a",
|
||||
"b",
|
||||
"baz",
|
||||
"qux",
|
||||
"bar",
|
||||
"foo",
|
||||
"depspec-sm-builtin",
|
||||
"1.0.0",
|
||||
}
|
||||
for _, v := range elems {
|
||||
h.Write([]byte(v))
|
||||
}
|
||||
correct = h.Sum(nil)
|
||||
|
||||
if !bytes.Equal(dig, correct) {
|
||||
t.Errorf("Hashes are not equal")
|
||||
}
|
||||
|
||||
// remove ignores, just test requires alone
|
||||
rm.ig = nil
|
||||
params.Manifest = rm
|
||||
|
||||
s, err = Prepare(params, newdepspecSM(fix.ds, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error while prepping solver: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
dig = s.HashInputs()
|
||||
h = sha256.New()
|
||||
|
||||
elems = []string{
|
||||
"a",
|
||||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
"a",
|
||||
"b",
|
||||
"baz",
|
||||
"qux",
|
||||
"depspec-sm-builtin",
|
||||
"1.0.0",
|
||||
}
|
||||
for _, v := range elems {
|
||||
h.Write([]byte(v))
|
||||
}
|
||||
correct = h.Sum(nil)
|
||||
|
||||
if !bytes.Equal(dig, correct) {
|
||||
t.Errorf("Hashes are not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashInputsOverrides(t *testing.T) {
|
||||
|
@ -129,8 +205,6 @@ func TestHashInputsOverrides(t *testing.T) {
|
|||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
|
@ -162,8 +236,6 @@ func TestHashInputsOverrides(t *testing.T) {
|
|||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
|
@ -198,8 +270,6 @@ func TestHashInputsOverrides(t *testing.T) {
|
|||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
|
@ -236,8 +306,6 @@ func TestHashInputsOverrides(t *testing.T) {
|
|||
"fluglehorn",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
|
@ -277,8 +345,6 @@ func TestHashInputsOverrides(t *testing.T) {
|
|||
"1.0.0",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
|
@ -319,8 +385,6 @@ func TestHashInputsOverrides(t *testing.T) {
|
|||
"fluglehorn",
|
||||
"b",
|
||||
"1.0.0",
|
||||
stdlibPkgs,
|
||||
appenginePkgs,
|
||||
"root",
|
||||
"",
|
||||
"root",
|
||||
|
|
|
@ -39,11 +39,26 @@ type RootManifest interface {
|
|||
// them can harm the ecosystem as a whole.
|
||||
Overrides() ProjectConstraints
|
||||
|
||||
// IngorePackages returns a set of import paths to ignore. These import
|
||||
// IngoredPackages returns a set of import paths to ignore. These import
|
||||
// paths can be within the root project, or part of other projects. Ignoring
|
||||
// a package means that both it and its (unique) imports will be disregarded
|
||||
// by all relevant solver operations.
|
||||
IgnorePackages() map[string]bool
|
||||
//
|
||||
// It is an error to include a package in both the ignored and required
|
||||
// sets.
|
||||
IgnoredPackages() map[string]bool
|
||||
|
||||
// RequiredPackages returns a set of import paths to require. These packages
|
||||
// are required to be present in any solution. The list can include main
|
||||
// packages.
|
||||
//
|
||||
// It is meaningless to specify packages that are within the
|
||||
// PackageTree of the ProjectRoot (though not an error, because the
|
||||
// RootManifest itself does not report a ProjectRoot).
|
||||
//
|
||||
// It is an error to include a package in both the ignored and required
|
||||
// sets.
|
||||
RequiredPackages() map[string]bool
|
||||
}
|
||||
|
||||
// SimpleManifest is a helper for tools to enumerate manifest data. It's
|
||||
|
@ -72,7 +87,7 @@ func (m SimpleManifest) TestDependencyConstraints() ProjectConstraints {
|
|||
// Also, for tests.
|
||||
type simpleRootManifest struct {
|
||||
c, tc, ovr ProjectConstraints
|
||||
ig map[string]bool
|
||||
ig, req map[string]bool
|
||||
}
|
||||
|
||||
func (m simpleRootManifest) DependencyConstraints() ProjectConstraints {
|
||||
|
@ -84,15 +99,19 @@ func (m simpleRootManifest) TestDependencyConstraints() ProjectConstraints {
|
|||
func (m simpleRootManifest) Overrides() ProjectConstraints {
|
||||
return m.ovr
|
||||
}
|
||||
func (m simpleRootManifest) IgnorePackages() map[string]bool {
|
||||
func (m simpleRootManifest) IgnoredPackages() map[string]bool {
|
||||
return m.ig
|
||||
}
|
||||
func (m simpleRootManifest) RequiredPackages() map[string]bool {
|
||||
return m.req
|
||||
}
|
||||
func (m simpleRootManifest) dup() simpleRootManifest {
|
||||
m2 := simpleRootManifest{
|
||||
c: make(ProjectConstraints, len(m.c)),
|
||||
tc: make(ProjectConstraints, len(m.tc)),
|
||||
ovr: make(ProjectConstraints, len(m.ovr)),
|
||||
ig: make(map[string]bool, len(m.ig)),
|
||||
req: make(map[string]bool, len(m.req)),
|
||||
}
|
||||
|
||||
for k, v := range m.c {
|
||||
|
@ -107,13 +126,17 @@ func (m simpleRootManifest) dup() simpleRootManifest {
|
|||
for k, v := range m.ig {
|
||||
m2.ig[k] = v
|
||||
}
|
||||
for k, v := range m.req {
|
||||
m2.req[k] = v
|
||||
}
|
||||
|
||||
return m2
|
||||
}
|
||||
|
||||
// prepManifest ensures a manifest is prepared and safe for use by the solver.
|
||||
// This is mostly about ensuring that no outside routine can modify the manifest
|
||||
// while the solver is in-flight.
|
||||
// while the solver is in-flight, but it also filters out any empty
|
||||
// ProjectProperties.
|
||||
//
|
||||
// This is achieved by copying the manifest's data into a new SimpleManifest.
|
||||
func prepManifest(m Manifest) Manifest {
|
||||
|
@ -130,9 +153,28 @@ func prepManifest(m Manifest) Manifest {
|
|||
}
|
||||
|
||||
for k, d := range deps {
|
||||
// A zero-value ProjectProperties is equivalent to one with an
|
||||
// anyConstraint{} in terms of how the solver will treat it. However, we
|
||||
// normalize between these two by omitting such instances entirely, as
|
||||
// it negates some possibility for false mismatches in input hashing.
|
||||
if d.Constraint == nil {
|
||||
if d.NetworkName == "" {
|
||||
continue
|
||||
}
|
||||
d.Constraint = anyConstraint{}
|
||||
}
|
||||
|
||||
rm.Deps[k] = d
|
||||
}
|
||||
|
||||
for k, d := range ddeps {
|
||||
if d.Constraint == nil {
|
||||
if d.NetworkName == "" {
|
||||
continue
|
||||
}
|
||||
d.Constraint = anyConstraint{}
|
||||
}
|
||||
|
||||
rm.TestDeps[k] = d
|
||||
}
|
||||
|
||||
|
|
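prepManifest now also normalizes dependency entries: a fully zero-value ProjectProperties is dropped, and an entry that only names a network source gets an explicit "any" constraint, which removes a source of spurious input-hash mismatches. A stand-in sketch of that normalization (local types, not the real gps ones; "*" stands in for anyConstraint{}):

```go
package main

import "fmt"

// props stands in for gps.ProjectProperties: an optional alternate source
// plus a constraint, where "" represents a nil Constraint.
type props struct {
	networkName string
	constraint  string
}

// normalize mirrors prepManifest's per-dependency handling above.
func normalize(deps map[string]props) map[string]props {
	out := make(map[string]props, len(deps))
	for root, p := range deps {
		if p.constraint == "" {
			if p.networkName == "" {
				continue // zero value: equivalent to "any", so omit entirely
			}
			p.constraint = "*" // stand-in for anyConstraint{}
		}
		out[root] = p
	}
	return out
}

func main() {
	deps := map[string]props{
		"foo": {},                                   // dropped
		"bar": {networkName: "example.com/bar.git"}, // kept, constraint becomes "any"
	}
	fmt.Println(normalize(deps))
}
```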
|
@ -0,0 +1,38 @@
|
|||
package gps
|
||||
|
||||
import "testing"
|
||||
|
||||
// Test that prep manifest sanitizes manifests appropriately
|
||||
func TestPrepManifest(t *testing.T) {
|
||||
m := SimpleManifest{
|
||||
Deps: ProjectConstraints{
|
||||
ProjectRoot("foo"): ProjectProperties{},
|
||||
ProjectRoot("bar"): ProjectProperties{
|
||||
NetworkName: "whatever",
|
||||
},
|
||||
},
|
||||
TestDeps: ProjectConstraints{
|
||||
ProjectRoot("baz"): ProjectProperties{},
|
||||
ProjectRoot("qux"): ProjectProperties{
|
||||
NetworkName: "whatever",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
prepped := prepManifest(m)
|
||||
d := prepped.DependencyConstraints()
|
||||
td := prepped.TestDependencyConstraints()
|
||||
if len(d) != 1 {
|
||||
t.Error("prepManifest did not eliminate empty ProjectProperties from deps map")
|
||||
}
|
||||
if len(td) != 1 {
|
||||
t.Error("prepManifest did not eliminate empty ProjectProperties from test deps map")
|
||||
}
|
||||
|
||||
if d[ProjectRoot("bar")].Constraint != any {
|
||||
t.Error("prepManifest did not normalize nil constraint to anyConstraint in deps map")
|
||||
}
|
||||
if td[ProjectRoot("qux")].Constraint != any {
|
||||
t.Error("prepManifest did not normalize nil constraint to anyConstraint in test deps map")
|
||||
}
|
||||
}
|
|
@ -28,6 +28,9 @@ type solution struct {
|
|||
// WriteDepTree takes a basedir and a Lock, and exports all the projects
|
||||
// listed in the lock to the appropriate target location within the basedir.
|
||||
//
|
||||
// If the goal is to populate a vendor directory, basedir should be the absolute
|
||||
// path to that vendor directory, not its parent (a project root, typically).
|
||||
//
|
||||
// It requires a SourceManager to do the work, and takes a flag indicating
|
||||
// whether or not to strip vendor directories contained in the exported
|
||||
// dependencies.
|
||||
|
|
|
@ -294,7 +294,7 @@ func mkrevlock(pairs ...string) fixLock {
|
|||
return l
|
||||
}
|
||||
|
||||
// mksolution makes creates a map of project identifiers to their LockedProject
|
||||
// mksolution creates a map of project identifiers to their LockedProject
|
||||
// result, which is sufficient to act as a solution fixture for the purposes of
|
||||
// most tests.
|
||||
//
|
||||
|
@ -1558,7 +1558,7 @@ func (b *depspecBridge) ListPackages(id ProjectIdentifier, v Version) (PackageTr
|
|||
return b.sm.(fixSM).ListPackages(id, v)
|
||||
}
|
||||
|
||||
func (sm *depspecBridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
|
||||
func (b *depspecBridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -149,6 +149,29 @@ var bimodalFixtures = map[string]bimodalFixture{
|
|||
"b 1.1.0",
|
||||
),
|
||||
},
|
||||
// Constraints apply only if the project that declares them has a
|
||||
// reachable import - non-root
|
||||
"constraints activated by import, transitive": {
|
||||
ds: []depspec{
|
||||
dsp(mkDepspec("root 0.0.0"),
|
||||
pkg("root", "root/foo", "b"),
|
||||
pkg("root/foo", "a"),
|
||||
),
|
||||
dsp(mkDepspec("a 1.0.0", "b 1.0.0"),
|
||||
pkg("a"),
|
||||
),
|
||||
dsp(mkDepspec("b 1.0.0"),
|
||||
pkg("b"),
|
||||
),
|
||||
dsp(mkDepspec("b 1.1.0"),
|
||||
pkg("b"),
|
||||
),
|
||||
},
|
||||
r: mksolution(
|
||||
"a 1.0.0",
|
||||
"b 1.1.0",
|
||||
),
|
||||
},
|
||||
// Import jump is in a dep, and points to a transitive dep - but only in not
|
||||
// the first version we try
|
||||
"transitive bm-add on older version": {
|
||||
|
@ -725,6 +748,166 @@ var bimodalFixtures = map[string]bimodalFixture{
|
|||
"bar from baz 1.0.0",
|
||||
),
|
||||
},
|
||||
"require package": {
|
||||
ds: []depspec{
|
||||
dsp(mkDepspec("root 0.0.0", "bar 1.0.0"),
|
||||
pkg("root", "foo")),
|
||||
dsp(mkDepspec("foo 1.0.0"),
|
||||
pkg("foo", "bar")),
|
||||
dsp(mkDepspec("bar 1.0.0"),
|
||||
pkg("bar")),
|
||||
dsp(mkDepspec("baz 1.0.0"),
|
||||
pkg("baz")),
|
||||
},
|
||||
require: []string{"baz"},
|
||||
r: mksolution(
|
||||
"foo 1.0.0",
|
||||
"bar 1.0.0",
|
||||
"baz 1.0.0",
|
||||
),
|
||||
},
|
||||
"require subpackage": {
|
||||
ds: []depspec{
|
||||
dsp(mkDepspec("root 0.0.0", "bar 1.0.0"),
|
||||
pkg("root", "foo")),
|
||||
dsp(mkDepspec("foo 1.0.0"),
|
||||
pkg("foo", "bar")),
|
||||
dsp(mkDepspec("bar 1.0.0"),
|
||||
pkg("bar")),
|
||||
dsp(mkDepspec("baz 1.0.0"),
|
||||
pkg("baz", "baz/qux"),
|
||||
pkg("baz/qux")),
|
||||
},
|
||||
require: []string{"baz/qux"},
|
||||
r: mksolution(
|
||||
"foo 1.0.0",
|
||||
"bar 1.0.0",
|
||||
mklp("baz 1.0.0", "qux"),
|
||||
),
|
||||
},
|
||||
"require impossible subpackage": {
|
||||
ds: []depspec{
|
||||
dsp(mkDepspec("root 0.0.0", "baz 1.0.0"),
|
||||
pkg("root", "foo")),
|
||||
dsp(mkDepspec("foo 1.0.0"),
|
||||
pkg("foo")),
|
||||
dsp(mkDepspec("baz 1.0.0"),
|
||||
pkg("baz")),
|
||||
dsp(mkDepspec("baz 2.0.0"),
|
||||
pkg("baz", "baz/qux"),
|
||||
pkg("baz/qux")),
|
||||
},
|
||||
require: []string{"baz/qux"},
|
||||
fail: &noVersionError{
|
||||
pn: mkPI("baz"),
|
||||
fails: []failedVersion{
|
||||
{
|
||||
v: NewVersion("2.0.0"),
|
||||
f: &versionNotAllowedFailure{
|
||||
goal: mkAtom("baz 2.0.0"),
|
||||
failparent: []dependency{mkDep("root", "baz 1.0.0", "baz/qux")},
|
||||
c: NewVersion("1.0.0"),
|
||||
},
|
||||
},
|
||||
{
|
||||
v: NewVersion("1.0.0"),
|
||||
f: &checkeeHasProblemPackagesFailure{
|
||||
goal: mkAtom("baz 1.0.0"),
|
||||
failpkg: map[string]errDeppers{
|
||||
"baz/qux": errDeppers{
|
||||
err: nil, // nil indicates package is missing
|
||||
deppers: []atom{
|
||||
mkAtom("root"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"require subpkg conflicts with other dep constraint": {
|
||||
ds: []depspec{
|
||||
dsp(mkDepspec("root 0.0.0"),
|
||||
pkg("root", "foo")),
|
||||
dsp(mkDepspec("foo 1.0.0", "baz 1.0.0"),
|
||||
pkg("foo", "baz")),
|
||||
dsp(mkDepspec("baz 1.0.0"),
|
||||
pkg("baz")),
|
||||
dsp(mkDepspec("baz 2.0.0"),
|
||||
pkg("baz", "baz/qux"),
|
||||
pkg("baz/qux")),
|
||||
},
|
||||
require: []string{"baz/qux"},
|
||||
fail: &noVersionError{
|
||||
pn: mkPI("baz"),
|
||||
fails: []failedVersion{
|
||||
{
|
||||
v: NewVersion("2.0.0"),
|
||||
f: &versionNotAllowedFailure{
|
||||
goal: mkAtom("baz 2.0.0"),
|
||||
failparent: []dependency{mkDep("foo 1.0.0", "baz 1.0.0", "baz")},
|
||||
c: NewVersion("1.0.0"),
|
||||
},
|
||||
},
|
||||
{
|
||||
v: NewVersion("1.0.0"),
|
||||
f: &checkeeHasProblemPackagesFailure{
|
||||
goal: mkAtom("baz 1.0.0"),
|
||||
failpkg: map[string]errDeppers{
|
||||
"baz/qux": errDeppers{
|
||||
err: nil, // nil indicates package is missing
|
||||
deppers: []atom{
|
||||
mkAtom("root"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"require independent subpkg conflicts with other dep constraint": {
|
||||
ds: []depspec{
|
||||
dsp(mkDepspec("root 0.0.0"),
|
||||
pkg("root", "foo")),
|
||||
dsp(mkDepspec("foo 1.0.0", "baz 1.0.0"),
|
||||
pkg("foo", "baz")),
|
||||
dsp(mkDepspec("baz 1.0.0"),
|
||||
pkg("baz")),
|
||||
dsp(mkDepspec("baz 2.0.0"),
|
||||
pkg("baz"),
|
||||
pkg("baz/qux")),
|
||||
},
|
||||
require: []string{"baz/qux"},
|
||||
fail: &noVersionError{
|
||||
pn: mkPI("baz"),
|
||||
fails: []failedVersion{
|
||||
{
|
||||
v: NewVersion("2.0.0"),
|
||||
f: &versionNotAllowedFailure{
|
||||
goal: mkAtom("baz 2.0.0"),
|
||||
failparent: []dependency{mkDep("foo 1.0.0", "baz 1.0.0", "baz")},
|
||||
c: NewVersion("1.0.0"),
|
||||
},
|
||||
},
|
||||
{
|
||||
v: NewVersion("1.0.0"),
|
||||
f: &checkeeHasProblemPackagesFailure{
|
||||
goal: mkAtom("baz 1.0.0"),
|
||||
failpkg: map[string]errDeppers{
|
||||
"baz/qux": errDeppers{
|
||||
err: nil, // nil indicates package is missing
|
||||
deppers: []atom{
|
||||
mkAtom("root"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// tpkg is a representation of a single package. It has its own import path, as
|
||||
|
@ -760,6 +943,8 @@ type bimodalFixture struct {
|
|||
changeall bool
|
||||
// pkgs to ignore
|
||||
ignore []string
|
||||
// pkgs to require
|
||||
require []string
|
||||
}
|
||||
|
||||
func (f bimodalFixture) name() string {
|
||||
|
@ -784,10 +969,14 @@ func (f bimodalFixture) rootmanifest() RootManifest {
|
|||
tc: pcSliceToMap(f.ds[0].devdeps),
|
||||
ovr: f.ovr,
|
||||
ig: make(map[string]bool),
|
||||
req: make(map[string]bool),
|
||||
}
|
||||
for _, ig := range f.ignore {
|
||||
m.ig[ig] = true
|
||||
}
|
||||
for _, req := range f.require {
|
||||
m.req[req] = true
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ func init() {
|
|||
flag.StringVar(&fixtorun, "gps.fix", "", "A single fixture to run in TestBasicSolves or TestBimodalSolves")
|
||||
mkBridge(nil, nil)
|
||||
overrideMkBridge()
|
||||
overrideIsStdLib()
|
||||
}
|
||||
|
||||
// sets the mkBridge global func to one that allows virtualized RootDirs
|
||||
|
@ -39,6 +40,14 @@ func overrideMkBridge() {
|
|||
}
|
||||
}
|
||||
|
||||
// sets the isStdLib func to always return false, otherwise it would identify
|
||||
// pretty much all of our fixtures as being stdlib and skip everything
|
||||
func overrideIsStdLib() {
|
||||
isStdLib = func(path string) bool {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
var stderrlog = log.New(os.Stderr, "", 0)
|
||||
|
||||
func fixSolve(params SolveParameters, sm SourceManager) (Solution, error) {
|
||||
|
@ -283,6 +292,8 @@ func TestRootLockNoVersionPairMatching(t *testing.T) {
|
|||
fixtureSolveSimpleChecks(fix, res, err, t)
|
||||
}
|
||||
|
||||
// TestBadSolveOpts exercises the different possible inputs to a solver that can
|
||||
// be determined as invalid in Prepare(), without any further work
|
||||
func TestBadSolveOpts(t *testing.T) {
|
||||
pn := strconv.FormatInt(rand.Int63(), 36)
|
||||
fix := basicFixtures["no dependencies"]
|
||||
|
@ -354,6 +365,28 @@ func TestBadSolveOpts(t *testing.T) {
|
|||
} else if !strings.Contains(err.Error(), "foo, but without any non-zero properties") {
|
||||
t.Error("Prepare should have given error override with empty ProjectProperties, but gave:", err)
|
||||
}
|
||||
|
||||
params.Manifest = simpleRootManifest{
|
||||
ig: map[string]bool{"foo": true},
|
||||
req: map[string]bool{"foo": true},
|
||||
}
|
||||
_, err = Prepare(params, sm)
|
||||
if err == nil {
|
||||
t.Errorf("Should have errored on pkg both ignored and required")
|
||||
} else if !strings.Contains(err.Error(), "was given as both a required and ignored package") {
|
||||
t.Error("Prepare should have given error with single ignore/require conflict error, but gave:", err)
|
||||
}
|
||||
|
||||
params.Manifest = simpleRootManifest{
|
||||
ig: map[string]bool{"foo": true, "bar": true},
|
||||
req: map[string]bool{"foo": true, "bar": true},
|
||||
}
|
||||
_, err = Prepare(params, sm)
|
||||
if err == nil {
|
||||
t.Errorf("Should have errored on pkg both ignored and required")
|
||||
} else if !strings.Contains(err.Error(), "multiple packages given as both required and ignored:") {
|
||||
t.Error("Prepare should have given error with multiple ignore/require conflict error, but gave:", err)
|
||||
}
|
||||
params.Manifest = nil
|
||||
|
||||
params.ToChange = []ProjectRoot{"foo"}
|
||||
|
|
|
@ -128,10 +128,12 @@ type solver struct {
|
|||
// removal.
|
||||
unsel *unselected
|
||||
|
||||
// Map of packages to ignore. Derived by converting SolveParameters.Ignore
|
||||
// into a map during solver prep - which also, nicely, deduplicates it.
|
||||
// Map of packages to ignore.
|
||||
ig map[string]bool
|
||||
|
||||
// Map of packages to require.
|
||||
req map[string]bool
|
||||
|
||||
// A stack of all the currently active versionQueues in the solver. The set
|
||||
// of projects represented here corresponds closely to what's in s.sel,
|
||||
// although s.sel will always contain the root project, and s.vqs never
|
||||
|
@ -215,12 +217,30 @@ func Prepare(params SolveParameters, sm SourceManager) (Solver, error) {
|
|||
|
||||
s := &solver{
|
||||
params: params,
|
||||
ig: params.Manifest.IgnorePackages(),
|
||||
ig: params.Manifest.IgnoredPackages(),
|
||||
req: params.Manifest.RequiredPackages(),
|
||||
ovr: params.Manifest.Overrides(),
|
||||
tl: params.TraceLogger,
|
||||
rpt: params.RootPackageTree.dup(),
|
||||
}
|
||||
|
||||
if len(s.ig) != 0 {
|
||||
var both []string
|
||||
for pkg := range params.Manifest.RequiredPackages() {
|
||||
if s.ig[pkg] {
|
||||
both = append(both, pkg)
|
||||
}
|
||||
}
|
||||
switch len(both) {
|
||||
case 0:
|
||||
break
|
||||
case 1:
|
||||
return nil, badOptsFailure(fmt.Sprintf("%q was given as both a required and ignored package", both[0]))
|
||||
default:
|
||||
return nil, badOptsFailure(fmt.Sprintf("multiple packages given as both required and ignored: %s", strings.Join(both, ", ")))
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the ignore and overrides maps are at least initialized
|
||||
if s.ig == nil {
|
||||
s.ig = make(map[string]bool)
|
||||
|
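Prepare now rejects inputs where a package appears in both the required and ignored sets, with distinct messages for the single- and multiple-conflict cases. The check, extracted into a standalone function that mirrors the hunk above:

```go
package main

import (
	"fmt"
	"strings"
)

// checkReqIg mirrors the validation added to Prepare: a package may not be
// both required and ignored.
func checkReqIg(req, ig map[string]bool) error {
	var both []string
	for pkg := range req {
		if ig[pkg] {
			both = append(both, pkg)
		}
	}
	switch len(both) {
	case 0:
		return nil
	case 1:
		return fmt.Errorf("%q was given as both a required and ignored package", both[0])
	default:
		return fmt.Errorf("multiple packages given as both required and ignored: %s", strings.Join(both, ", "))
	}
}

func main() {
	fmt.Println(checkReqIg(map[string]bool{"foo": true}, map[string]bool{"foo": true}))
}
```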
@ -481,11 +501,32 @@ func (s *solver) selectRoot() error {
|
|||
// If we're looking for root's deps, get it from opts and local root
|
||||
// analysis, rather than having the sm do it
|
||||
mdeps := s.ovr.overrideAll(s.rm.DependencyConstraints().merge(s.rm.TestDependencyConstraints()))
|
||||
|
||||
// Err is not possible at this point, as it could only come from
|
||||
// listPackages(), which if we're here already succeeded for root
|
||||
reach := s.rpt.ExternalReach(true, true, s.ig).ListExternalImports()
|
||||
|
||||
// If there are any requires, slide them into the reach list, as well.
|
||||
if len(s.req) > 0 {
|
||||
reqs := make([]string, 0, len(s.req))
|
||||
|
||||
// Make a map of both imported and required pkgs to skip, to avoid
|
||||
// duplication. Technically, a slice would probably be faster (given
|
||||
// small size and bounds check elimination), but this is a one-time op,
|
||||
// so it doesn't matter.
|
||||
skip := make(map[string]bool, len(s.req))
|
||||
for _, r := range reach {
|
||||
if s.req[r] {
|
||||
skip[r] = true
|
||||
}
|
||||
}
|
||||
|
||||
for r := range s.req {
|
||||
if !skip[r] {
|
||||
reqs = append(reqs, r)
|
||||
}
|
||||
}
|
||||
|
||||
reach = append(reach, reqs...)
|
||||
}
|
||||
|
||||
deps, err := s.intersectConstraintsWithImports(mdeps, reach)
|
||||
if err != nil {
|
||||
// TODO(sdboyer) this could well happen; handle it with a more graceful error
|
||||
|
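selectRoot now folds the required packages into the root's external-reach list, skipping anything the root already imports so nothing is counted twice. The merge step, mirrored as a standalone function:

```go
package main

import "fmt"

// mergeRequired mirrors the selectRoot change above: required packages are
// appended to the external-reach list unless the root already imports them.
func mergeRequired(reach []string, req map[string]bool) []string {
	skip := make(map[string]bool, len(req))
	for _, r := range reach {
		if req[r] {
			skip[r] = true
		}
	}
	for r := range req {
		if !skip[r] {
			reach = append(reach, r)
		}
	}
	return reach
}

func main() {
	reach := []string{"github.com/foo/bar", "github.com/baz/qux"}
	req := map[string]bool{"github.com/baz/qux": true, "golang.org/x/tools/cmd/stringer": true}
	fmt.Println(mergeRequired(reach, req))
}
```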
@ -572,7 +613,6 @@ func (s *solver) getImportsAndConstraintsOf(a atomWithPackages) ([]completeDep,
|
|||
// are available, or Any() where they are not.
|
||||
func (s *solver) intersectConstraintsWithImports(deps []workingConstraint, reach []string) ([]completeDep, error) {
|
||||
// Create a radix tree with all the projects we know from the manifest
|
||||
// TODO(sdboyer) make this smarter once we allow non-root inputs as 'projects'
|
||||
xt := radix.New()
|
||||
for _, dep := range deps {
|
||||
xt.Insert(string(dep.Ident.ProjectRoot), dep)
|
||||
|
@ -582,10 +622,8 @@ func (s *solver) intersectConstraintsWithImports(deps []workingConstraint, reach
|
|||
// the trie, assume (mostly) it's a correct correspondence.
|
||||
dmap := make(map[ProjectRoot]completeDep)
|
||||
for _, rp := range reach {
|
||||
// If it's a stdlib package, skip it.
|
||||
// TODO(sdboyer) this just hardcodes us to the packages in tip - should we
|
||||
// have go version magic here, too?
|
||||
if stdlib[rp] {
|
||||
// If it's a stdlib-shaped package, skip it.
|
||||
if isStdLib(rp) {
|
||||
continue
|
||||
}
|
||||
|
||||
|
@@ -661,7 +699,7 @@ func (s *solver) createVersionQueue(bmi bimodalIdentifier) (*versionQueue, error
// Project exists only in vendor (and in some manifest somewhere)
// TODO(sdboyer) mark this for special handling, somehow?
} else {
return nil, fmt.Errorf("Project '%s' could not be located.", id)
return nil, fmt.Errorf("project '%s' could not be located", id)
}
}

@@ -152,6 +152,9 @@ func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
}, nil
}

// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
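A hedged sketch (not in the diff) of what a caller might do with the new error type. Only the Path and Err fields shown above are used; whether NewSourceManager returns the error by value or by pointer is not visible in this hunk, so the construction below is purely illustrative and assumes an import of github.com/sdboyer/gps.

// Illustrative only: inspecting the fields of a CouldNotCreateLockError.
lockErr := gps.CouldNotCreateLockError{
	Path: "/tmp/gps-cache/sm.lock", // hypothetical lock file path
	Err:  fmt.Errorf("permission denied"),
}
fmt.Printf("could not create lock file at %s: %v\n", lockErr.Path, lockErr.Err)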
@@ -268,7 +271,7 @@ func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) e
return src.exportVersionTo(v, to)
}

// DeduceRootProject takes an import path and deduces the corresponding
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly

@@ -154,7 +154,7 @@ func (s *solver) traceInfo(args ...interface{}) {
case string:
msg = tracePrefix(fmt.Sprintf(data, args[1:]...), "| ", "| ")
case traceError:
preflen += 1
preflen++
// We got a special traceError, use its custom method
msg = tracePrefix(data.traceString(), "| ", failCharSp)
case error:

@@ -32,18 +32,17 @@ import (
// portions that correspond to a repository root:
// github.com/sdboyer/gps
//
// While not a panacea, defining ProjectRoot at least allows us to clearly
// identify when one of these path-ish strings is *supposed* to have certain
// semantics.
// While not a panacea, having ProjectRoot allows gps to clearly indicate via
// the type system when a path-ish string must have particular semantics.
type ProjectRoot string

// A ProjectIdentifier is, more or less, the name of a dependency. It is related
// to, but differs in two keys ways from, an import path.
// A ProjectIdentifier provides the name and source location of a dependency. It
// is related to, but differs in two key ways from, a plain import path.
//
// First, ProjectIdentifiers do not identify a single package. Rather, they
// encompasses the whole tree of packages rooted at and including their
// ProjectRoot. In gps' current design, this ProjectRoot must correspond to the
// root of a repository, though this may change in the future.
// encompass the whole tree of packages, including the tree's root - the
// ProjectRoot. In gps' current design, this ProjectRoot almost always
// corresponds to the root of a repository.
//
// Second, ProjectIdentifiers can optionally carry a NetworkName, which
// identifies where the underlying source code can be located on the network.
@@ -63,14 +62,15 @@ type ProjectRoot string
// Note that gps makes no guarantees about the actual import paths contained in
// a repository aligning with ImportRoot. If tools, or their users, specify an
// alternate NetworkName that contains a repository with incompatible internal
// import paths, gps will fail. (gps does no import rewriting.)
// import paths, gps' solving operations will error. (gps does no import
// rewriting.)
//
// Also note that if different projects' manifests report a different
// NetworkName for a given ImportRoot, it is a solve failure. Everyone has to
// agree on where a given import path should be sourced from.
//
// If NetworkName is not explicitly set, gps will derive the network address from
// the ImportRoot using a similar algorithm to that of the official go tooling.
// the ImportRoot using a similar algorithm to that utilized by `go get`.
type ProjectIdentifier struct {
ProjectRoot ProjectRoot
NetworkName string
@@ -112,9 +112,9 @@ func (i ProjectIdentifier) eq(j ProjectIdentifier) bool {
// 2. The LEFT (the receiver) NetworkName is non-empty, and the right
// NetworkName is empty.
//
// *This is, very much intentionally, an asymmetric binary relation.* It's
// specifically intended to facilitate the case where we allow for a
// ProjectIdentifier with an explicit NetworkName to match one without.
// *The asymmetry in this binary relation is intentional.* It facilitates
// the case where we allow for a ProjectIdentifier with an explicit NetworkName
// to match one without.
func (i ProjectIdentifier) equiv(j ProjectIdentifier) bool {
if i.ProjectRoot != j.ProjectRoot {
return false
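An in-package sketch (hypothetical, for illustration; equiv is unexported) of the asymmetry described above:

a := ProjectIdentifier{ProjectRoot: "github.com/example/project", NetworkName: "github.com/mirror/project"}
b := ProjectIdentifier{ProjectRoot: "github.com/example/project"}

a.equiv(b) // true: the receiver's explicit NetworkName may match an empty one
b.equiv(a) // false: an empty NetworkName on the receiver does not match an explicit one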
@@ -166,10 +166,11 @@ type ProjectProperties struct {
// Package represents a Go package. It contains a subset of the information
// go/build.Package does.
type Package struct {
ImportPath, CommentPath string
Name string
Imports []string
TestImports []string
Name string // Package name, as declared in the package statement
ImportPath string // Full import path, including the prefix provided to ListPackages()
CommentPath string // Import path given in the comment on the package statement
Imports []string // Imports from all go and cgo files
TestImports []string // Imports from all go test files (in go/build parlance: both TestImports and XTestImports)
}

// bimodalIdentifiers are used to track work to be done in the unselected queue.

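For orientation, a hypothetical Package value showing what the newly documented fields hold; the names, paths, and import lists are invented for illustration, and the fragment assumes an import of github.com/sdboyer/gps.

p := gps.Package{
	Name:        "thing",                                   // from the package statement: package thing
	ImportPath:  "github.com/example/project/thing",        // includes the prefix given to ListPackages()
	CommentPath: "example.net/thing",                       // from an import comment on the package statement
	Imports:     []string{"fmt", "github.com/example/dep"}, // go and cgo files
	TestImports: []string{"testing"},                       // _test.go files
}
_ = p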
@@ -6,6 +6,18 @@ import (
"github.com/Masterminds/semver"
)

// VersionType indicates a type for a Version that conveys some additional
// semantics beyond that which is literally embedded on the Go type.
type VersionType uint8

// VersionTypes for the four major classes of version we deal with
const (
IsRevision VersionType = iota
IsVersion
IsSemver
IsBranch
)

// Version represents one of the different types of versions used by gps.
//
// Version composes Constraint, because all versions can be used as a constraint
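A small sketch (not in the diff) of consuming the new VersionType constants instead of the old string values; the labeling helper is hypothetical and assumes an import of github.com/sdboyer/gps.

// versionTypeLabel maps a gps.Version's type to a human-readable label.
func versionTypeLabel(v gps.Version) string {
	switch v.Type() {
	case gps.IsRevision:
		return "revision"
	case gps.IsBranch:
		return "branch"
	case gps.IsSemver:
		return "semver"
	case gps.IsVersion:
		return "plain version"
	default:
		return "unknown"
	}
}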
@@ -22,7 +34,7 @@ type Version interface {
Constraint

// Indicates the type of version - Revision, Branch, Version, or Semver
Type() string
Type() VersionType
}

// PairedVersion represents a normal Version, but paired with its corresponding,
@@ -107,8 +119,9 @@ func (r Revision) String() string {
return string(r)
}

func (r Revision) Type() string {
return "rev"
// Type indicates the type of version - for revisions, "revision".
func (r Revision) Type() VersionType {
return IsRevision
}

// Matches is the Revision acting as a constraint; it checks to see if the provided
@@ -145,6 +158,9 @@ func (r Revision) MatchesAny(c Constraint) bool {
return false
}

// Intersect computes the intersection of the Constraint with the provided
// Constraint. For Revisions, this can only be another, exactly equal
// Revision, or a PairedVersion whose underlying Revision is exactly equal.
func (r Revision) Intersect(c Constraint) Constraint {
switch tc := c.(type) {
case anyConstraint:
@@ -175,8 +191,8 @@ func (v branchVersion) String() string {
return string(v.name)
}

func (v branchVersion) Type() string {
return "branch"
func (v branchVersion) Type() VersionType {
return IsBranch
}

func (v branchVersion) Matches(v2 Version) bool {
@@ -248,8 +264,8 @@ func (v plainVersion) String() string {
return string(v)
}

func (r plainVersion) Type() string {
return "version"
func (v plainVersion) Type() VersionType {
return IsVersion
}

func (v plainVersion) Matches(v2 Version) bool {
@@ -327,8 +343,8 @@ func (v semVersion) String() string {
return str
}

func (r semVersion) Type() string {
return "semver"
func (v semVersion) Type() VersionType {
return IsSemver
}

func (v semVersion) Matches(v2 Version) bool {
@@ -407,7 +423,7 @@ func (v versionPair) String() string {
return v.v.String()
}

func (v versionPair) Type() string {
func (v versionPair) Type() VersionType {
return v.v.Type()
}

@@ -545,12 +561,15 @@ func compareVersionType(l, r Version) int {
// 2.0 spec (as implemented by github.com/Masterminds/semver lib), with one
// exception:
// - Semver versions with a prerelease are after *all* non-prerelease semver.
// Against each other, they are sorted first by their numerical component, then
// Within this subset they are sorted first by their numerical component, then
// lexicographically by their prerelease version.
// - All branches are next, and sort lexicographically against each other.
// - All non-semver versions (tags) are next, and sort lexicographically
// against each other.
// - Revisions are last, and sort lexicographically against each other.
// - The default branch(es) is next; the exact semantics of that are specific
// to the underlying source.
// - All other branches come next, sorted lexicographically.
// - All non-semver versions (tags) are next, sorted lexicographically.
// - Revisions, if any, are last, sorted lexicographically. Revisions do not
// typically appear in version lists, so the only invariant we maintain is
// determinism - deeper semantics, like chronology or topology, do not matter.
//
// So, given a slice of the following versions:
//
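An illustrative, self-contained sketch applying the ordering documented above; the concrete version values are hypothetical, and the expected order in the comment follows the listed rules.

package main

import (
	"fmt"

	"github.com/sdboyer/gps"
)

func main() {
	vl := []gps.Version{
		gps.Revision("abc123"),
		gps.NewBranch("master"),
		gps.NewVersion("v1.0.0"),
		gps.NewVersion("v1.1.0-alpha.1"),
		gps.NewVersion("some-tag"),
	}
	gps.SortForUpgrade(vl)
	fmt.Println(vl) // expected: v1.0.0 v1.1.0-alpha.1 master some-tag abc123
}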
@@ -571,14 +590,13 @@ func SortForUpgrade(vl []Version) {
//
// This is *not* the same as reversing SortForUpgrade (or you could simply
// sort.Reverse()). The type precedence is the same, including the semver vs.
// semver-with-prerelease relation. Lexicographic comparisons within non-semver
// tags, branches, and revisions remains the same as well; because we treat
// these domains as having no ordering relations (chronology), there can be no
// real concept of "upgrade" vs "downgrade", so there is no reason to reverse
// them.
// semver-with-prerelease relation. Lexicographical comparisons within
// non-semver tags, branches, and revisions remain the same as well; because we
// treat these domains as having no ordering relation, there can be no real
// concept of "upgrade" vs "downgrade", so there is no reason to reverse them.
//
// Thus, the only binary relation that is reversed for downgrade is within-type
// comparisons for semver (with and without prerelease).
// comparisons for semver.
//
// So, given a slice of the following versions:
//

@@ -32,10 +32,10 @@ type fakeFailBridge struct {
*bridge
}

var vqerr = fmt.Errorf("vqerr")
var errVQ = fmt.Errorf("vqerr")

func (fb *fakeFailBridge) ListVersions(id ProjectIdentifier) ([]Version, error) {
return nil, vqerr
return nil, errVQ
}

func TestVersionQueueSetup(t *testing.T) {

@@ -12,6 +12,8 @@ func TestVersionSorts(t *testing.T) {
v6 := NewVersion("2.0.5.2")
v7 := newDefaultBranch("unwrapped")
v8 := NewVersion("20.0.5.2")
v9 := NewVersion("v1.5.5-beta.4")
v10 := NewVersion("v3.0.1-alpha.1")

start := []Version{
v1,
@@ -22,6 +24,8 @@ func TestVersionSorts(t *testing.T) {
v6,
v7,
v8,
v9,
v10,
rev,
}

@@ -32,6 +36,7 @@ func TestVersionSorts(t *testing.T) {

edown := []Version{
v3, v4, v5, // semvers
v9, v10, // prerelease semver
v7, v1, v2, // floating/branches
v6, v8, // plain versions
rev, // revs
@@ -39,6 +44,7 @@ func TestVersionSorts(t *testing.T) {

eup := []Version{
v5, v4, v3, // semvers
v10, v9, // prerelease semver
v7, v1, v2, // floating/branches
v6, v8, // plain versions
rev, // revs