 - flush files before the dir results
 - make sure we still filter cached readDir results
This commit is contained in:
Colin Marc 2014-10-23 15:58:00 +02:00
Родитель 3ef088f406
Коммит 9599336b95
3 изменённых файлов: 17 добавлений и 23 удалений

Просмотреть файл

@ -50,7 +50,7 @@ func stat(client *hdfs.Client, fullPath string) (os.FileInfo, error) {
return res, nil return res, nil
} }
func readDir(client *hdfs.Client, dir string, glob string) ([]os.FileInfo, error) { func readDir(client *hdfs.Client, dir string) ([]os.FileInfo, error) {
if cachedRes, exists := readDirCache[dir]; exists { if cachedRes, exists := readDirCache[dir]; exists {
return cachedRes, nil return cachedRes, nil
} }
@ -66,17 +66,5 @@ func readDir(client *hdfs.Client, dir string, glob string) ([]os.FileInfo, error
statCache[childPath] = fi statCache[childPath] = fi
} }
if glob != "" {
matched := make([]os.FileInfo, 0, len(res))
for _, fi := range res {
match, _ := path.Match(glob, fi.Name())
if match {
matched = append(matched, fi)
}
}
return matched, nil
} else {
return res, nil return res, nil
} }
}

Просмотреть файл

@ -47,11 +47,12 @@ func ls(paths []string, long, all bool) {
var tw *tabwriter.Writer var tw *tabwriter.Writer
if long { if long {
tw = defaultTabWriter() tw = defaultTabWriter()
defer tw.Flush() printFiles(tw, files, true, all)
tw.Flush()
} else {
printFiles(nil, files, false, all)
} }
printFiles(tw, files, long, all)
for _, dir := range dirs { for _, dir := range dirs {
fmt.Printf("\n%s/:\n", dir) fmt.Printf("\n%s/:\n", dir)
printDir(client, dir, long, all) printDir(client, dir, long, all)
@ -60,7 +61,7 @@ func ls(paths []string, long, all bool) {
} }
func printDir(client *hdfs.Client, dir string, long, all bool) { func printDir(client *hdfs.Client, dir string, long, all bool) {
files, err := readDir(client, dir, "") files, err := readDir(client, dir)
if err != nil { if err != nil {
fatal(err) fatal(err)
} }

Просмотреть файл

@ -65,12 +65,12 @@ func hasGlob(fragment string) bool {
// expandGlobs recursively expands globs in a filepath. It assumes the paths // expandGlobs recursively expands globs in a filepath. It assumes the paths
// are already cleaned and normalize (ie, absolute). // are already cleaned and normalize (ie, absolute).
func expandGlobs(client *hdfs.Client, p string) ([]string, error) { func expandGlobs(client *hdfs.Client, globbedPath string) ([]string, error) {
if !hasGlob(p) { if !hasGlob(globbedPath) {
return []string{p}, nil return []string{globbedPath}, nil
} }
parts := strings.Split(p, "/")[1:] parts := strings.Split(globbedPath, "/")[1:]
res := make([]string, 0) res := make([]string, 0)
splitAt := 0 splitAt := 0
for splitAt, _ = range parts { for splitAt, _ = range parts {
@ -82,12 +82,17 @@ func expandGlobs(client *hdfs.Client, p string) ([]string, error) {
base := "/" + path.Join(parts[:splitAt]...) base := "/" + path.Join(parts[:splitAt]...)
glob := parts[splitAt] glob := parts[splitAt]
remainder := path.Join(parts[splitAt+1:]...) remainder := path.Join(parts[splitAt+1:]...)
list, err := readDir(client, base, glob) list, err := readDir(client, base)
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, fi := range list { for _, fi := range list {
match, _ := path.Match(glob, fi.Name())
if !match {
continue
}
newPath := path.Join(base, fi.Name(), remainder) newPath := path.Join(base, fi.Name(), remainder)
children, err := expandGlobs(client, newPath) children, err := expandGlobs(client, newPath)
if err != nil { if err != nil {