Improvements to project existence handling

This commit is contained in:
Sam Boyer 2016-04-05 15:19:14 -04:00
Родитель bb4ecffd68
Коммит bfb5a46318
6 изменённых файлов: 154 добавлений и 66 удалений

Просмотреть файл

@ -635,16 +635,22 @@ func (sm *depspecSourceManager) ListVersions(name ProjectName) (pi []Version, er
return
}
func (sm *depspecSourceManager) ProjectExists(name ProjectName) bool {
func (sm *depspecSourceManager) RepoExists(name ProjectName) (bool, error) {
for _, ds := range sm.specs {
if name == ds.name.Name {
return true
return true, nil
}
}
return false
return false, nil
}
// VendorCodeExists always reports false: the test fixtures never simulate
// code present under a vendor directory.
func (sm *depspecSourceManager) VendorCodeExists(name ProjectName) (bool, error) {
	return false, nil
}
func (sm *depspecSourceManager) Release() {}
// Compile-time checks that the fixture types satisfy the interfaces they
// stand in for during tests.
var _ Manifest = depspec{}
var _ Lock = dummyLock{}

Просмотреть файл

@ -33,20 +33,19 @@ var VTCTCompat = [...]ConstraintType{
type ProjectExistence uint8
const (
// DoesNotExist indicates that a particular project URI cannot be located,
// at any level. It is represented as 1, rather than 0, to differentiate it
// from the zero-value (which is ExistenceUnknown).
DoesNotExist ProjectExistence = 1 << iota
// ExistsInLock indicates that a project exists (i.e., is mentioned in) a
// lock file.
// TODO not sure if it makes sense to have this IF it's just the source
// manager's responsibility for putting this together - the implication is
// that this is the root lock file, right?
ExistsInLock
ExistsInLock = 1 << iota
// ExistsInVendor indicates that a project exists in a vendor directory at
// the predictable location based on import path. It does NOT imply, much
// ExistsInManifest indicates that a project exists (i.e., is mentioned in)
// a manifest.
ExistsInManifest
// ExistsInVendorRoot indicates that a project exists in a vendor directory
// at the predictable location based on import path. It does NOT imply, much
// less guarantee, any of the following:
// - That the code at the expected location under vendor is at the version
// given in a lock file
@ -56,11 +55,11 @@ const (
// unexpected/nested location under vendor
// - That the full repository history is available. In fact, the
// assumption should be that if only this flag is on, the full repository
// history is likely not available locally
// history is likely not available (locally)
//
// In short, the information encoded in this flag should in no way be
// construed as exhaustive.
ExistsInVendor
// In short, the information encoded in this flag should not be construed as
// exhaustive.
ExistsInVendorRoot
// ExistsInCache indicates that a project exists on-disk in the local cache.
// It does not guarantee that an upstream exists, thus it cannot imply
@ -75,22 +74,11 @@ const (
// ExistsUpstream indicates that a project repository was locatable at the
// path provided by a project's URI (a base import path).
ExistsUpstream
// Indicates that the upstream project, in addition to existing, is also
// accessible.
//
// Different hosting providers treat unauthorized access differently:
// GitHub, for example, returns 404 (or the equivalent) when attempting unauthorized
// access, whereas BitBucket returns 403 (or 302 login redirect). Thus,
// while the ExistsUpstream and UpstreamAccessible bits should always only
// be on or off together when interacting with Github, it is possible that a
// BitBucket provider might report ExistsUpstream, but not UpstreamAccessible.
//
// For most purposes, non-existence and inaccessibility are treated the
// same, but clearly delineating the two allows slightly improved UX.
UpstreamAccessible
// The zero value; indicates that no work has yet been done to determine the
// existence level of a project.
ExistenceUnknown ProjectExistence = 0
)
const (
	// pmexLvls is the bitmask of existence levels that are searched for and
	// managed by the ProjectManager (on-disk and upstream locations).
	pmexLvls ProjectExistence = ExistsInVendorRoot | ExistsInCache | ExistsUpstream
	// smexLvls is the bitmask of existence levels that are searched for and
	// managed by the SourceManager (mentions in lock and manifest files).
	smexLvls ProjectExistence = ExistsInLock | ExistsInManifest
)

Просмотреть файл

@ -2,6 +2,8 @@ package vsolver
import (
"fmt"
"os"
"path"
"sort"
"sync"
@ -12,6 +14,7 @@ import (
// ProjectManager is the per-project interface through which version
// information, metadata, and existence checks for a single project are
// obtained.
type ProjectManager interface {
	GetInfoAt(Version) (ProjectInfo, error)
	ListVersions() ([]Version, error)
	CheckExistence(ProjectExistence) bool
}
type ProjectAnalyzer interface {
@ -25,7 +28,9 @@ type projectManager struct {
cacheroot, vendordir string
// Object for the cache repository
crepo *repo
ex ProjectExistence
// Indicates the extent to which we have searched for, and verified, the
// existence of the project/repo.
ex existence
// Analyzer, created from the injected factory
an ProjectAnalyzer
// Whether the cache has the latest info on versions
@ -41,6 +46,13 @@ type projectManager struct {
dc *projectDataCache
}
// existence tracks, as two parallel bitfields, which existence levels have
// been searched for and which of those searches succeeded.
type existence struct {
	// s holds the existence levels for which a search/check has already been
	// performed, regardless of outcome.
	s ProjectExistence
	// f holds the existence levels verified to be present through searching;
	// bits here are only set together with the corresponding bit in s.
	f ProjectExistence
}
// TODO figure out shape of versions, then implement marshaling/unmarshaling
type projectDataCache struct {
Version string `json:"version"` // TODO use this
@ -61,12 +73,20 @@ type repo struct {
}
func (pm *projectManager) GetInfoAt(v Version) (ProjectInfo, error) {
// Technically, we could attempt to return straight from the metadata cache
// even if the repo cache doesn't exist on disk. But that would allow weird
// state inconsistencies (cache exists, but no repo...how does that even
// happen?) that it'd be better to just not allow so that we don't have to
// think about it elsewhere
if !pm.CheckExistence(ExistsInCache) {
return ProjectInfo{}, fmt.Errorf("Project repository cache for %s does not exist", pm.n)
}
if pi, exists := pm.dc.Infos[v.Underlying]; exists {
return pi, nil
}
pm.crepo.mut.Lock()
err := pm.crepo.r.UpdateVersion(v.Info)
pm.crepo.mut.Unlock()
if err != nil {
@ -84,6 +104,7 @@ func (pm *projectManager) GetInfoAt(v Version) (ProjectInfo, error) {
func (pm *projectManager) ListVersions() (vlist []Version, err error) {
if !pm.cvsync {
pm.ex.s |= ExistsInCache | ExistsUpstream
pm.vlist, err = pm.crepo.getCurrentVersionPairs()
if err != nil {
// TODO More-er proper-er error
@ -91,6 +112,7 @@ func (pm *projectManager) ListVersions() (vlist []Version, err error) {
return nil, err
}
pm.ex.f |= ExistsInCache | ExistsUpstream
pm.cvsync = true
// Process the version data into the cache
@ -112,6 +134,34 @@ func (pm *projectManager) ListVersions() (vlist []Version, err error) {
return pm.vlist, nil
}
// CheckExistence provides a direct method for querying existence levels of
// the project. It only performs actual searches for levels that have not
// already been checked; results are memoized in pm.ex, so repeated calls
// for the same level are cheap.
func (pm *projectManager) CheckExistence(ex ProjectExistence) bool {
	if pm.ex.s&ex != ex {
		if ex&ExistsInVendorRoot != 0 && pm.ex.s&ExistsInVendorRoot == 0 {
			pm.ex.s |= ExistsInVendorRoot
			fi, err := os.Stat(path.Join(pm.vendordir, string(pm.n)))
			// os.Stat returns a nil FileInfo on error, so fi may only be
			// dereferenced when err == nil. (Checking err != nil here would
			// both panic on missing paths and never record a hit.)
			if err == nil && fi.IsDir() {
				pm.ex.f |= ExistsInVendorRoot
			}
		}

		if ex&ExistsInCache != 0 && pm.ex.s&ExistsInCache == 0 {
			pm.ex.s |= ExistsInCache
			if pm.crepo.r.CheckLocal() {
				pm.ex.f |= ExistsInCache
			}
		}

		if ex&ExistsUpstream != 0 && pm.ex.s&ExistsUpstream == 0 {
			//pm.ex.s |= ExistsUpstream
			// TODO maybe need a method to do this as cheaply as possible,
			// per-repo type
		}
	}

	return ex&pm.ex.f == ex
}
func (r *repo) getCurrentVersionPairs() (vlist []Version, err error) {
r.mut.Lock()

Просмотреть файл

@ -131,7 +131,6 @@ func solveAndBasicChecks(fix fixture, t *testing.T) Result {
}
return result
}
func getFailureCausingProjects(err error) (projs []string) {

Просмотреть файл

@ -118,16 +118,33 @@ func (s *solver) createVersionQueue(ref ProjectName) (*versionQueue, error) {
return newVersionQueue(ref, nil, s.sm)
}
if !s.sm.ProjectExists(ref) {
// TODO this check needs to incorporate/admit the possibility that the
// upstream no longer exists, but there's something valid in vendor/
if s.l.Level >= logrus.WarnLevel {
s.l.WithFields(logrus.Fields{
"name": ref,
}).Warn("Upstream project does not exist")
}
return nil, newSolveError(fmt.Sprintf("Project '%s' could not be located.", ref), cannotResolve)
exists, err := s.sm.RepoExists(ref)
if err != nil {
return nil, err
}
if !exists {
exists, err = s.sm.VendorCodeExists(ref)
if err != nil {
return nil, err
}
if exists {
// Project exists only in vendor (and in some manifest somewhere)
// TODO mark this for special handling, somehow?
if s.l.Level >= logrus.WarnLevel {
s.l.WithFields(logrus.Fields{
"name": ref,
}).Warn("Code found in vendor for project, but no history was found upstream or in cache")
}
} else {
if s.l.Level >= logrus.WarnLevel {
s.l.WithFields(logrus.Fields{
"name": ref,
}).Warn("Upstream project does not exist")
}
return nil, newSolveError(fmt.Sprintf("Project '%s' could not be located.", ref), cannotResolve)
}
}
lockv := s.getLockVersionIfValid(ref)
q, err := newVersionQueue(ref, lockv, s.sm)
@ -306,13 +323,6 @@ func (s *solver) satisfiable(pi ProjectAtom) error {
}
}
if !s.sm.ProjectExists(pi.Name) {
// Can get here if the lock file specifies a now-nonexistent project
// TODO this check needs to incorporate/accept the possibility that the
// upstream no longer exists, but there's something valid in vendor/
return newSolveError(fmt.Sprintf("Project '%s' could not be located.", pi.Name), cannotResolve)
}
deps, err := s.getDependenciesOf(pi)
if err != nil {
// An err here would be from the package fetcher; pass it straight back

Просмотреть файл

@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"os"
"path"
"github.com/Masterminds/vcs"
)
@ -11,7 +12,10 @@ import (
// SourceManager is the interface through which the solver obtains project
// metadata, version lists, and existence information from code
// repositories.
type SourceManager interface {
	GetProjectInfo(ProjectAtom) (ProjectInfo, error)
	ListVersions(ProjectName) ([]Version, error)
	RepoExists(ProjectName) (bool, error)
	VendorCodeExists(ProjectName) (bool, error)
	Release()
	// Flush()
}
// ExistenceError is a specialized error type that, in addition to the standard
@ -34,7 +38,8 @@ type sourceManager struct {
cachedir, basedir string
pms map[ProjectName]*pmState
anafac func(ProjectName) ProjectAnalyzer
sortup bool
// Whether to sort versions for upgrade or downgrade
sortup bool
//pme map[ProjectName]error
}
@ -48,18 +53,34 @@ type pmState struct {
vlist []Version // TODO temporary until we have a coherent, overall cache structure
}
func NewSourceManager(cachedir, basedir string, upgrade bool) (SourceManager, error) {
// TODO try to create dir if doesn't exist
func NewSourceManager(cachedir, basedir string, upgrade, force bool) (SourceManager, error) {
err := os.MkdirAll(cachedir, 0777)
if err != nil {
return nil, err
}
glpath := path.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err != nil && !force {
return nil, fmt.Errorf("Another process has locked the cachedir, or crashed without cleaning itself properly. Pass force=true to override.")
}
_, err = os.OpenFile(glpath, os.O_CREATE|os.O_RDONLY, 0700) // is 0700 sane for this purpose?
if err != nil {
return nil, fmt.Errorf("Failed to create global cache lock file at %s with err %s", glpath, err)
}
return &sourceManager{
cachedir: cachedir,
pms: make(map[ProjectName]*pmState),
sortup: upgrade,
}, nil
// TODO drop file lock on cachedir somewhere, here. Caller needs a panic
// recovery in a defer to be really proper, though
}
// Release removes the global cachedir lock file created by
// NewSourceManager, allowing other processes to use the cache.
func (sm *sourceManager) Release() {
	os.Remove(path.Join(sm.cachedir, "sm.lock"))
}
func (sm *sourceManager) GetProjectInfo(pa ProjectAtom) (ProjectInfo, error) {
pmc, err := sm.getProjectManager(pa.Name)
if err != nil {
@ -87,8 +108,22 @@ func (sm *sourceManager) ListVersions(n ProjectName) ([]Version, error) {
return pmc.vlist, err
}
func (sm *sourceManager) ProjectExists(n ProjectName) bool {
panic("not implemented")
func (sm *sourceManager) VendorCodeExists(n ProjectName) (bool, error) {
pms, err := sm.getProjectManager(n)
if err != nil {
return false, err
}
return pms.pm.CheckExistence(ExistsInCache) || pms.pm.CheckExistence(ExistsUpstream), nil
}
// RepoExists reports whether the named project's repository could be found,
// either already present in the local cache or reachable upstream.
func (sm *sourceManager) RepoExists(n ProjectName) (bool, error) {
	pms, err := sm.getProjectManager(n)
	if err != nil {
		return false, err
	}

	// A repo "exists" if we have it cached locally or can see it upstream;
	// the vendor-root check belongs to VendorCodeExists, not here.
	return pms.pm.CheckExistence(ExistsInCache) || pms.pm.CheckExistence(ExistsUpstream), nil
}
// getProjectManager gets the project manager for the given ProjectName.
@ -102,7 +137,7 @@ func (sm *sourceManager) getProjectManager(n ProjectName) (*pmState, error) {
//return nil, pme
}
repodir := fmt.Sprintf("%s/src/%s", sm.cachedir, n)
repodir := path.Join(sm.cachedir, "src", string(n))
r, err := vcs.NewRepo(string(n), repodir)
if err != nil {
// TODO be better
@ -110,8 +145,7 @@ func (sm *sourceManager) getProjectManager(n ProjectName) (*pmState, error) {
}
// Ensure cache dir exists
// TODO be better
metadir := fmt.Sprintf("%s/metadata/%s", sm.cachedir, n)
metadir := path.Join(sm.cachedir, "metadata", string(n))
err = os.MkdirAll(metadir, 0777)
if err != nil {
// TODO be better
@ -119,10 +153,11 @@ func (sm *sourceManager) getProjectManager(n ProjectName) (*pmState, error) {
}
pms := &pmState{}
fi, err := os.Stat(metadir + "/cache.json")
cpath := path.Join(metadir, "cache.json")
fi, err := os.Stat(cpath)
var dc *projectDataCache
if fi != nil {
pms.cf, err = os.OpenFile(metadir+"/cache.json", os.O_RDWR, 0777)
pms.cf, err = os.OpenFile(cpath, os.O_RDWR, 0777)
if err != nil {
// TODO be better
return nil, err
@ -134,7 +169,7 @@ func (sm *sourceManager) getProjectManager(n ProjectName) (*pmState, error) {
return nil, err
}
} else {
pms.cf, err = os.Create(metadir + "/cache.json")
pms.cf, err = os.Create(cpath)
if err != nil {
// TODO be better
return nil, err