From 7316d32e0f41bd4d9d9573c216abb32a44f1fa15 Mon Sep 17 00:00:00 2001
From: Jordan Krage
Date: Tue, 29 Aug 2017 14:01:01 -0500
Subject: [PATCH] gps: source cache: add persistent BoltDB cache

---
 internal/gps/source_cache_bolt.go             | 437 +++++++++++++++++
 internal/gps/source_cache_bolt_encode.go      | 439 ++++++++++++++++++
 internal/gps/source_cache_bolt_encode_test.go | 103 ++++
 internal/gps/source_cache_bolt_test.go        | 294 ++++++++++++
 internal/gps/source_cache_test.go             | 103 +++-
 5 files changed, 1363 insertions(+), 13 deletions(-)
 create mode 100644 internal/gps/source_cache_bolt.go
 create mode 100644 internal/gps/source_cache_bolt_encode.go
 create mode 100644 internal/gps/source_cache_bolt_encode_test.go
 create mode 100644 internal/gps/source_cache_bolt_test.go

diff --git a/internal/gps/source_cache_bolt.go b/internal/gps/source_cache_bolt.go
new file mode 100644
index 0000000000..ec60d1411a
--- /dev/null
+++ b/internal/gps/source_cache_bolt.go
@@ -0,0 +1,437 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gps
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/golang/dep/internal/gps/pkgtree"
+	"github.com/pkg/errors"
+)
+
+// singleSourceCacheBolt implements a singleSourceCache backed by a persistent BoltDB file.
+// Stored values are timestamped, and the `epoch` field limits the age of returned values.
+// Database access methods are safe for concurrent use with each other (excluding close).
+//
+// Implementation:
+//
+// At the top level there are buckets for (1) versions and (2) revisions.
+//
+// 1) Versions buckets hold version keys with revision values:
+//
+//	Bucket: "versions:<timestamp>"
+//	Keys: "branch:<branch>", "defaultBranch:<branch>", "ver:<version>"
+//	Values: "<revision>"
+//
+// 2) Revision buckets hold (a) manifest and lock data for various ProjectAnalyzers,
+// (b) package trees, and (c) version lists.
+//
+//	Bucket: "rev:<revision>"
+//
+// a) Manifest and Lock info are stored in a bucket derived from ProjectAnalyzer.Info:
+//
+//	Sub-Bucket: "info:<name>.<version>:<timestamp>"
+//	Sub-Bucket: "manifest", "lock"
+//	Keys/Values: Manifest or Lock fields
+//
+// b) Package tree buckets contain package import path keys and package-or-error buckets:
+//
+//	Sub-Bucket: "ptree:<timestamp>"
+//	Sub-Bucket: "<import-path>"
+//	Key/Values: PackageOrErr fields
+//
+// c) Revision-versions buckets contain lists of version values:
+//
+//	Sub-Bucket: "versions:<timestamp>"
+//	Keys: "<sequence number>"
+//	Values: "branch:<branch>", "defaultBranch:<branch>", "ver:<version>"
+type singleSourceCacheBolt struct {
+	ProjectRoot
+	db     *bolt.DB
+	epoch  int64       // getters will not return values older than this unix timestamp
+	logger *log.Logger // info logging
+}
+
+// newBoltCache returns a new singleSourceCacheBolt backed by a project's BoltDB file under the cache directory.
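+//
+// Illustrative usage only (cacheDir, pi, and logger are assumed to be supplied by the caller):
+//
+//	c, err := newBoltCache(cacheDir, pi, time.Now().Unix(), logger)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer c.close()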
+func newBoltCache(cd string, pi ProjectIdentifier, epoch int64, logger *log.Logger) (*singleSourceCacheBolt, error) { + path := sourceCachePath(cd, pi.normalizedSource()) + ".db" + dir := filepath.Dir(path) + if fi, err := os.Stat(dir); os.IsNotExist(err) { + if err := os.MkdirAll(dir, os.ModeDir|os.ModePerm); err != nil { + return nil, errors.Wrapf(err, "failed to create source cache directory: %s", dir) + } + } else if err != nil { + return nil, errors.Wrapf(err, "failed to check source cache directory: ", dir) + } else if !fi.IsDir() { + return nil, errors.Wrapf(err, "source cache path is not directory: %s", dir) + } + db, err := bolt.Open(path, 0600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + return &singleSourceCacheBolt{ + ProjectRoot: pi.ProjectRoot, + db: db, + epoch: epoch, + logger: logger, + }, nil +} + +// close releases all database resources. +// Must not be called concurrently with any other methods. +func (s *singleSourceCacheBolt) close() error { + return errors.Wrapf(s.db.Close(), "error closing Bolt database %q", s.db.String()) +} + +func (s *singleSourceCacheBolt) setManifestAndLock(rev Revision, ai ProjectAnalyzerInfo, m Manifest, l Lock) { + err := s.updateBucket("rev:"+string(rev), func(b *bolt.Bucket) error { + pre := "info:" + ai.String() + ":" + if err := cachePrefixDelete(b, pre); err != nil { + return err + } + info, err := b.CreateBucket(cacheTimestampedKey(pre, time.Now())) + if err != nil { + return err + } + + // Manifest + mb, err := info.CreateBucket([]byte("manifest")) + if err != nil { + return err + } + if err := cachePutManifest(mb, m); err != nil { + return errors.Wrap(err, "failed to put manifest") + } + if l == nil { + return nil + } + + // Lock + lb, err := info.CreateBucket([]byte("lock")) + if err != nil { + return err + } + return errors.Wrap(cachePutLock(lb, l), "failed to put lock") + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, "failed to cache manifest/lock for revision %q, analyzer: %v", rev, ai)) + } +} + +func (s *singleSourceCacheBolt) getManifestAndLock(rev Revision, ai ProjectAnalyzerInfo) (m Manifest, l Lock, ok bool) { + err := s.viewBucket("rev:"+string(rev), func(b *bolt.Bucket) error { + info := cacheFindLatestValid(b, "info:"+ai.String()+":", s.epoch) + if info == nil { + return nil + } + + // Manifest + mb := info.Bucket([]byte("manifest")) + if mb == nil { + return nil + } + var err error + m, err = cacheGetManifest(mb) + if err != nil { + return errors.Wrap(err, "failed to get manifest") + } + + // Lock + lb := info.Bucket([]byte("lock")) + if lb == nil { + ok = true + return nil + } + l, err = cacheGetLock(lb) + if err != nil { + return errors.Wrap(err, "failed to get lock") + } + + ok = true + return nil + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, "failed to get cached manifest/lock for revision %q, analyzer: %v", rev, ai)) + } + return +} + +func (s *singleSourceCacheBolt) setPackageTree(rev Revision, ptree pkgtree.PackageTree) { + err := s.updateBucket("rev:"+string(rev), func(b *bolt.Bucket) error { + if err := cachePrefixDelete(b, "ptree:"); err != nil { + return err + } + ptrees, err := b.CreateBucket(cacheTimestampedKey("ptree:", time.Now())) + if err != nil { + return err + } + + for ip, poe := range ptree.Packages { + pb, err := ptrees.CreateBucket([]byte(ip)) + if err != nil { + return err + } + + if err := cachePutPackageOrErr(pb, poe); err != nil { + return err + } + } + return nil + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, 
"failed to cache package tree for revision %q", rev)) + } +} + +func (s *singleSourceCacheBolt) getPackageTree(rev Revision) (ptree pkgtree.PackageTree, ok bool) { + err := s.viewBucket("rev:"+string(rev), func(b *bolt.Bucket) error { + ptrees := cacheFindLatestValid(b, "ptree:", s.epoch) + if ptrees == nil { + return nil + } + + pkgs := make(map[string]pkgtree.PackageOrErr) + err := ptrees.ForEach(func(ip, _ []byte) error { + poe := cacheGetPackageOrErr(ptrees.Bucket(ip)) + if poe.Err == nil { + poe.P.ImportPath = string(ip) + } + pkgs[string(ip)] = poe + return nil + }) + if err != nil { + return err + } + ptree.ImportRoot = string(s.ProjectRoot) + ptree.Packages = pkgs + ok = true + return nil + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, "failed to get cached package tree for revision %q", rev)) + } + return +} + +func (s *singleSourceCacheBolt) markRevisionExists(rev Revision) { + err := s.updateBucket("rev:"+string(rev), func(versions *bolt.Bucket) error { + return nil + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, "failed to mark revision %q in cache", rev)) + } +} + +func (s *singleSourceCacheBolt) setVersionMap(pvs []PairedVersion) { + err := s.db.Update(func(tx *bolt.Tx) error { + if err := cachePrefixDelete(tx, "versions:"); err != nil { + return err + } + vk := cacheTimestampedKey("versions:", time.Now()) + versions, err := tx.CreateBucket(vk) + if err != nil { + return err + } + + c := tx.Cursor() + pre := []byte("rev:") + for k, _ := c.Seek(pre); bytes.HasPrefix(k, pre); k, _ = c.Next() { + rb := tx.Bucket(k) + if err := cachePrefixDelete(rb, "versions:"); err != nil { + return err + } + } + + for _, pv := range pvs { + uv, rev := pv.Unpair(), pv.Revision() + uvB, err := cacheEncodeUnpairedVersion(uv) + if err != nil { + return errors.Wrapf(err, "failed to encode unpaired version: %v", uv) + } + + if err := versions.Put(uvB, []byte(rev)); err != nil { + return errors.Wrap(err, "failed to put version->revision") + } + + b, err := tx.CreateBucketIfNotExists([]byte("rev:" + rev)) + if err != nil { + return errors.Wrapf(err, "failed to create bucket for revision: %s", rev) + } + if err := cachePrefixDelete(b, "versions:"); err != nil { + return err + } + versions, err := b.CreateBucket(vk) + if err != nil { + return errors.Wrapf(err, "failed to create bucket for revision versions: %s", rev) + } + i, err := versions.NextSequence() + if err != nil { + return errors.Wrapf(err, "failed to generate sequence number for revision: %s", rev) + } + k := [8]byte{} + binary.BigEndian.PutUint64(k[:], i) + if err := versions.Put(k[:], uvB); err != nil { + return errors.Wrap(err, "failed to put revision->version") + } + } + return nil + }) + if err != nil { + s.logger.Println(errors.Wrap(err, "failed to cache version map")) + } +} + +func (s *singleSourceCacheBolt) getVersionsFor(rev Revision) (uvs []UnpairedVersion, ok bool) { + err := s.viewBucket("rev:"+string(rev), func(b *bolt.Bucket) error { + versions := cacheFindLatestValid(b, "versions:", s.epoch) + if versions == nil { + return nil + } + + ok = true + + return versions.ForEach(func(_, v []byte) error { + uv, err := cacheDecodeUnpairedVersion(v) + if err != nil { + return err + } + uvs = append(uvs, uv) + return nil + }) + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, "failed to get cached versions for revision %q", rev)) + return nil, false + } + return +} + +func (s *singleSourceCacheBolt) getAllVersions() []PairedVersion { + var pvs []PairedVersion + err := s.db.View(func(tx *bolt.Tx) error { + 
versions := cacheFindLatestValid(tx, "versions:", s.epoch) + if versions == nil { + return nil + } + + return versions.ForEach(func(k, v []byte) error { + uv, err := cacheDecodeUnpairedVersion(k) + if err != nil { + return errors.Wrapf(err, "failed to decode unpaired version: %s", k) + } + pvs = append(pvs, uv.Pair(Revision(v))) + return nil + }) + }) + if err != nil { + s.logger.Println(errors.Wrap(err, "failed to get all cached versions")) + return nil + } + return pvs +} + +func (s *singleSourceCacheBolt) getRevisionFor(uv UnpairedVersion) (rev Revision, ok bool) { + err := s.db.View(func(tx *bolt.Tx) error { + versions := cacheFindLatestValid(tx, "versions:", s.epoch) + if versions == nil { + return nil + } + + k, err := cacheEncodeUnpairedVersion(uv) + if err != nil { + return err + } + v := versions.Get(k) + if len(v) > 0 { + rev = Revision(v) + ok = true + } + return nil + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, "failed to get cached revision for unpaired version: %v", uv)) + } + return +} + +func (s *singleSourceCacheBolt) toRevision(v Version) (rev Revision, ok bool) { + switch t := v.(type) { + case Revision: + return t, true + case PairedVersion: + return t.Revision(), true + case UnpairedVersion: + return s.getRevisionFor(t) + default: + s.logger.Println(fmt.Sprintf("failed to get cached revision for version %v: unknown type %T", v, v)) + return "", false + } +} + +func (s *singleSourceCacheBolt) toUnpaired(v Version) (uv UnpairedVersion, ok bool) { + const errMsg = "failed to get cached unpaired version for version: %v" + switch t := v.(type) { + case UnpairedVersion: + return t, true + case PairedVersion: + return t.Unpair(), true + case Revision: + err := s.viewBucket("rev:"+string(t), func(b *bolt.Bucket) error { + versions := cacheFindLatestValid(b, "versions:", s.epoch) + if versions == nil { + return nil + } + + _, v := versions.Cursor().First() + if len(v) == 0 { + return nil + } + var err error + uv, err = cacheDecodeUnpairedVersion(v) + if err != nil { + return err + } + + ok = true + return nil + }) + if err != nil { + s.logger.Println(errors.Wrapf(err, errMsg, v)) + } + return + default: + s.logger.Println(fmt.Sprintf(errMsg, v)) + return + } +} + +// viewBucket executes view with the named bucket, if it exists. +func (s *singleSourceCacheBolt) viewBucket(name string, view func(b *bolt.Bucket) error) error { + return s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(name)) + if b == nil { + return nil + } + return view(b) + }) +} + +// updateBucket executes update with the named bucket, creating it first if necessary. +func (s *singleSourceCacheBolt) updateBucket(name string, update func(b *bolt.Bucket) error) error { + return s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(name)) + if err != nil { + return errors.Wrapf(err, "failed to create bucket: %s", name) + } + return update(b) + }) +} diff --git a/internal/gps/source_cache_bolt_encode.go b/internal/gps/source_cache_bolt_encode.go new file mode 100644 index 0000000000..02567c24aa --- /dev/null +++ b/internal/gps/source_cache_bolt_encode.go @@ -0,0 +1,439 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
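+
+// This file contains the encoding helpers used by singleSourceCacheBolt: versions,
+// project properties, manifests, locks, and package trees are flattened into bucket
+// keys and values, and cache buckets are timestamped so that stale entries can be
+// skipped via cacheFindLatestValid.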
+ +package gps + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "time" + + "github.com/boltdb/bolt" + "github.com/golang/dep/internal/gps/pkgtree" + "github.com/pkg/errors" +) + +// cacheEncodeUnpairedVersion returns an encoded UnpairedVersion. +func cacheEncodeUnpairedVersion(uv UnpairedVersion) ([]byte, error) { + var pre string + switch uv.Type() { + case IsBranch: + if uv.(branchVersion).isDefault { + pre = "defaultBranch:" + } else { + pre = "branch:" + } + case IsSemver, IsVersion: + pre = "ver:" + default: + return nil, fmt.Errorf("unrecognized version type: %s", uv.Type()) + } + return []byte(pre + uv.String()), nil +} + +// cacheDecodeUnpairedVersion decodes and returns a new UnpairedVersion. +func cacheDecodeUnpairedVersion(b []byte) (UnpairedVersion, error) { + const br, dbr, ver = "branch:", "defaultBranch:", "ver:" + s := string(b) + switch { + case strings.HasPrefix(s, br): + return NewBranch(strings.TrimPrefix(s, br)), nil + case strings.HasPrefix(s, dbr): + return newDefaultBranch(strings.TrimPrefix(s, dbr)), nil + case strings.HasPrefix(s, ver): + return NewVersion(strings.TrimPrefix(s, ver)), nil + default: + return nil, fmt.Errorf("unrecognized prefix: %s", s) + } +} + +// cacheDecodeProjectProperties returns a new ProjectRoot and ProjectProperties with the +// data encoded in the key/value pair. +func cacheDecodeProjectProperties(k, v []byte) (ProjectRoot, ProjectProperties, error) { + var pp ProjectProperties + ks := strings.SplitN(string(k), ",", 2) + ip := ProjectRoot(ks[0]) + if len(ks) > 1 { + pp.Source = ks[1] + } + if len(v) == 0 { + pp.Constraint = Any() + } else { + const br, dbr, ver, rev = "branch:", "defaultBranch:", "ver:", "rev:" + vs := string(v) + switch { + case strings.HasPrefix(vs, br): + pp.Constraint = NewBranch(strings.TrimPrefix(vs, br)) + + case strings.HasPrefix(vs, dbr): + pp.Constraint = newDefaultBranch(strings.TrimPrefix(vs, dbr)) + + case strings.HasPrefix(vs, ver): + vs = strings.TrimPrefix(vs, ver) + if c, err := NewSemverConstraint(vs); err != nil { + pp.Constraint = NewVersion(vs) + } else { + pp.Constraint = c + } + + case strings.HasPrefix(vs, rev): + pp.Constraint = Revision(strings.TrimPrefix(vs, rev)) + + default: + return "", ProjectProperties{}, fmt.Errorf("unrecognized prefix: %s", vs) + } + } + + return ip, pp, nil +} + +// cacheEncodeProjectProperties returns a key/value pair containing the encoded +// ProjectRoot and ProjectProperties. +func cacheEncodeProjectProperties(ip ProjectRoot, pp ProjectProperties) ([]byte, []byte, error) { + k := string(ip) + if len(pp.Source) > 0 { + k += "," + pp.Source + } + if pp.Constraint == nil || IsAny(pp.Constraint) { + return []byte(k), []byte{}, nil + } + + if v, ok := pp.Constraint.(Version); ok { + var val string + switch v.Type() { + case IsRevision: + val = "rev:" + v.String() + case IsBranch: + if v.(branchVersion).isDefault { + val = "defaultBranch:" + v.String() + } else { + val = "branch:" + v.String() + } + case IsSemver, IsVersion: + val = "ver:" + v.String() + default: + return nil, nil, fmt.Errorf("unrecognized VersionType: %v", v.Type()) + } + return []byte(k), []byte(val), nil + } + + // Has to be a semver range. + v := pp.Constraint.String() + return []byte(k), []byte("ver:" + v), nil +} + +// cachePutManifest stores a Manifest in the bolt.Bucket. 
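+//
+// Layout written here and read back by cacheGetManifest:
+//
+//	"cs"  sub-bucket: encoded dependency constraints
+//	"ig"  key:        comma-joined ignored packages (RootManifest only)
+//	"ovr" sub-bucket: encoded overrides (RootManifest only)
+//	"req" key:        comma-joined required packages (RootManifest only)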
+func cachePutManifest(b *bolt.Bucket, m Manifest) error { + // Constraints + cs, err := b.CreateBucket([]byte("cs")) + if err != nil { + return err + } + for ip, pp := range m.DependencyConstraints() { + k, v, err := cacheEncodeProjectProperties(ip, pp) + if err != nil { + return err + } + if err := cs.Put(k, v); err != nil { + return err + } + } + + rm, ok := m.(RootManifest) + if !ok { + return nil + } + + // Ignored + var igPkgs []string + for ip, ok := range rm.IgnoredPackages() { + if ok { + igPkgs = append(igPkgs, ip) + } + } + if len(igPkgs) > 0 { + v := []byte(strings.Join(igPkgs, ",")) + if err := b.Put([]byte("ig"), v); err != nil { + return err + } + } + + // Overrides + ovr, err := b.CreateBucket([]byte("ovr")) + if err != nil { + return err + } + for ip, pp := range rm.Overrides() { + k, v, err := cacheEncodeProjectProperties(ip, pp) + if err != nil { + return err + } + if err := ovr.Put(k, v); err != nil { + return err + } + } + + // Required + var reqPkgs []string + for ip, ok := range rm.RequiredPackages() { + if ok { + reqPkgs = append(reqPkgs, ip) + } + } + if len(reqPkgs) > 0 { + v := []byte(strings.Join(reqPkgs, ",")) + if err := b.Put([]byte("req"), v); err != nil { + return err + } + } + + return nil +} + +// cacheGetManifest returns a new RootManifest with the data retrieved from the bolt.Bucket. +func cacheGetManifest(b *bolt.Bucket) (RootManifest, error) { + m := &cachedManifest{ + constraints: make(ProjectConstraints), + overrides: make(ProjectConstraints), + ignored: make(map[string]bool), + required: make(map[string]bool), + } + + // Constraints + if cs := b.Bucket([]byte("cs")); cs != nil { + err := cs.ForEach(func(k, v []byte) error { + ip, pp, err := cacheDecodeProjectProperties(k, v) + if err != nil { + return err + } + m.constraints[ip] = pp + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "failed to get constraints") + } + } + + // Ignored + if ig := b.Get([]byte("ig")); len(ig) > 0 { + for _, ip := range splitString(string(ig), ",") { + m.ignored[ip] = true + } + } + + // Overrides + if os := b.Bucket([]byte("ovr")); os != nil { + err := os.ForEach(func(k, v []byte) error { + ip, pp, err := cacheDecodeProjectProperties(k, v) + if err != nil { + return err + } + m.overrides[ip] = pp + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "failed to get overrides") + } + } + + // Required + if req := b.Get([]byte("req")); len(req) > 0 { + for _, ip := range splitString(string(req), ",") { + m.required[ip] = true + } + } + + return m, nil +} + +// cachePutLockedProject stores the LockedProject as fields in the bolt.Bucket. +func cachePutLockedProject(b *bolt.Bucket, lp LockedProject) error { + rev, branch, ver := VersionComponentStrings(lp.Version()) + for _, field := range []struct{ k, v string }{ + {"branch", branch}, + {"pkgs", strings.Join(lp.pkgs, ",")}, + {"rev", rev}, + {"src", string(lp.Ident().Source)}, + {"ver", ver}, + } { + if len(field.v) > 0 { + if err := b.Put([]byte(field.k), []byte(field.v)); err != nil { + return errors.Wrap(err, "failed to put locked project") + } + } + } + return nil +} + +// cacheGetLockedProject returns a new LockedProject with fields from the bolt.Bucket. 
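+//
+// The stored fields are recombined as follows: a "ver" value is paired with "rev",
+// otherwise a "branch" value is paired with "rev", otherwise the bare "rev" is used.
+// It is an error for both "branch" and "ver" to be present, or for all three to be absent.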
+func cacheGetLockedProject(b *bolt.Bucket) (lp LockedProject, err error) { + br := string(b.Get([]byte("branch"))) + pkgs := splitString(string(b.Get([]byte("pkgs"))), ",") + r := string(b.Get([]byte("rev"))) + pi := ProjectIdentifier{Source: string(b.Get([]byte("src")))} + v := string(b.Get([]byte("ver"))) + + var ver Version = Revision(r) + if v != "" { + if br != "" { + err = errors.New("both branch and version specified") + return + } + ver = NewVersion(v).Pair(Revision(r)) + } else if br != "" { + ver = NewBranch(br).Pair(Revision(r)) + } else if r == "" { + err = errors.New("no branch, version, or revision") + return + } + + lp = NewLockedProject(pi, ver, pkgs) + return +} + +// cachePutLock stores the Lock as fields in the bolt.Bucket. +func cachePutLock(b *bolt.Bucket, l Lock) error { + // InputHash + if v := l.InputHash(); len(v) > 0 { + if err := b.Put([]byte("hash"), v); err != nil { + return errors.Wrap(err, "failed to put hash") + } + } + + // Projects + if len(l.Projects()) > 0 { + for _, lp := range l.Projects() { + lb, err := b.CreateBucket([]byte("lock:" + lp.pi.ProjectRoot)) + if err != nil { + return errors.Wrapf(err, "failed to create bucket for project identifier: %v", lp.pi) + } + if err := cachePutLockedProject(lb, lp); err != nil { + return err + } + } + } + + return nil +} + +// cacheGetLock returns a new *cacheLock with the fields retrieved from the bolt.Bucket. +func cacheGetLock(b *bolt.Bucket) (*cachedLock, error) { + l := &cachedLock{ + inputHash: b.Get([]byte("hash")), + } + c := b.Cursor() + p := []byte("lock:") + for k, _ := c.Seek(p); bytes.HasPrefix(k, p); k, _ = c.Next() { + lp, err := cacheGetLockedProject(b.Bucket(k)) + if err != nil { + return nil, errors.Wrap(err, "failed to get lock") + } + lp.pi.ProjectRoot = ProjectRoot(bytes.TrimPrefix(k, p)) + l.projects = append(l.projects, lp) + } + return l, nil +} + +// cachePutPackageOrError stores the pkgtree.PackageOrErr as fields in the bolt.Bucket. +func cachePutPackageOrErr(b *bolt.Bucket, poe pkgtree.PackageOrErr) error { + if poe.Err != nil { + err := b.Put([]byte("err"), []byte(poe.Err.Error())) + if err != nil { + return errors.Wrapf(err, "failed to put error: %v", poe.Err) + } + } else { + for _, f := range []struct{ k, v string }{ + {"cp", poe.P.CommentPath}, + {"ip", strings.Join(poe.P.Imports, ",")}, + {"nm", poe.P.Name}, + {"tip", strings.Join(poe.P.TestImports, ",")}, + } { + if len(f.v) > 0 { + err := b.Put([]byte(f.k), []byte(f.v)) + if err != nil { + return errors.Wrapf(err, "failed to put package: %v", poe.P) + } + } + } + } + return nil +} + +// cacheGetPackageOrErr returns a new pkgtree.PackageOrErr with fields retrieved +// from the bolt.Bucket. +func cacheGetPackageOrErr(b *bolt.Bucket) pkgtree.PackageOrErr { + if v := b.Get([]byte("err")); len(v) > 0 { + return pkgtree.PackageOrErr{ + Err: errors.New(string(v)), + } + } + return pkgtree.PackageOrErr{ + P: pkgtree.Package{ + CommentPath: string(b.Get([]byte("cp"))), + Imports: splitString(string(b.Get([]byte("ip"))), ","), + Name: string(b.Get([]byte("nm"))), + TestImports: splitString(string(b.Get([]byte("tip"))), ","), + }, + } +} + +//cacheTimestampedKey returns a prefixed key with a trailing timestamp +func cacheTimestampedKey(pre string, t time.Time) []byte { + b := make([]byte, len(pre)+8) + copy(b, pre) + binary.BigEndian.PutUint64(b[len(pre):], uint64(t.Unix())) + return b +} + +// boltTxOrBucket is a minimal interface satisfied by bolt.Tx and bolt.Bucket. 
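+// It lets the prefix-scan helpers below (cachePrefixDelete, cacheFindLatestValid)
+// operate uniformly on top-level buckets through a bolt.Tx or on nested sub-buckets,
+// e.g. cacheFindLatestValid(tx, "versions:", epoch) or cacheFindLatestValid(b, "ptree:", epoch).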
+type boltTxOrBucket interface { + Cursor() *bolt.Cursor + DeleteBucket([]byte) error + Bucket([]byte) *bolt.Bucket +} + +// cachePrefixDelete prefix scans and deletes each bucket. +func cachePrefixDelete(tob boltTxOrBucket, pre string) error { + c := tob.Cursor() + p := []byte(pre) + for k, _ := c.Seek(p); bytes.HasPrefix(k, p); k, _ = c.Next() { + if err := tob.DeleteBucket(k); err != nil { + return errors.Wrapf(err, "failed to delete bucket: %s", k) + } + } + return nil +} + +// cacheFindLatestValid prefix scans for the latest bucket which is timestamped >= epoch, +// or returns nil if none exists. +func cacheFindLatestValid(tob boltTxOrBucket, pre string, epoch int64) *bolt.Bucket { + c := tob.Cursor() + p := []byte(pre) + var latest []byte + for k, _ := c.Seek(p); bytes.HasPrefix(k, p); k, _ = c.Next() { + latest = k + } + if latest == nil { + return nil + } + ts := bytes.TrimPrefix(latest, p) + if len(ts) != 8 { + return nil + } + if int64(binary.BigEndian.Uint64(ts)) < epoch { + return nil + } + return tob.Bucket(latest) +} + +// splitString delegates to strings.Split, but returns nil in place of a single empty element. +func splitString(s, sep string) []string { + r := strings.Split(s, sep) + if len(r) == 1 && r[0] == "" { + return nil + } + return r +} diff --git a/internal/gps/source_cache_bolt_encode_test.go b/internal/gps/source_cache_bolt_encode_test.go new file mode 100644 index 0000000000..e5b817cf46 --- /dev/null +++ b/internal/gps/source_cache_bolt_encode_test.go @@ -0,0 +1,103 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gps + +import ( + "bytes" + "testing" + "time" +) + +func TestCacheEncodingUnpairedVersion(t *testing.T) { + for _, test := range []struct { + enc string + uv UnpairedVersion + }{ + {"defaultBranch:test", newDefaultBranch("test")}, + {"branch:test", NewBranch("test")}, + {"ver:test", NewVersion("test")}, + } { + t.Run(test.enc, func(t *testing.T) { + b, err := cacheEncodeUnpairedVersion(test.uv) + if err != nil { + t.Error("failed to encode", err) + } else if got := string(b); got != test.enc { + t.Error("unexpected encoded result:", got) + } + + got, err := cacheDecodeUnpairedVersion([]byte(test.enc)) + if err != nil { + t.Error("failed to decode:", err) + } else if !got.identical(test.uv) { + t.Errorf("decoded non-identical UnpairedVersion:\n\t(GOT): %#v\n\t(WNT): %#v", got, test.uv) + } + }) + } +} + +func TestCacheEncodingProjectProperties(t *testing.T) { + for _, test := range []struct { + k, v string + ip ProjectRoot + pp ProjectProperties + }{ + {"root", "defaultBranch:test", + "root", ProjectProperties{"", newDefaultBranch("test")}}, + {"root,source", "branch:test", + "root", ProjectProperties{"source", NewBranch("test")}}, + {"root", "ver:^1.0.0", + "root", ProjectProperties{"", testSemverConstraint(t, "^1.0.0")}}, + {"root,source", "rev:test", + "root", ProjectProperties{"source", Revision("test")}}, + } { + t.Run(test.k+"/"+test.v, func(t *testing.T) { + kb, vb, err := cacheEncodeProjectProperties(test.ip, test.pp) + k, v := string(kb), string(vb) + if err != nil { + t.Error("failed to encode", err) + } else { + if k != test.k { + t.Error("unexpected encoded key:", k) + } + if v != test.v { + t.Error("unexpected encoded value:", v) + } + } + + ip, pp, err := cacheDecodeProjectProperties([]byte(test.k), []byte(test.v)) + if err != nil { + t.Error("failed to decode:", err) + } else { + if ip != test.ip { + 
t.Errorf("decoded unexpected ProjectRoot:\n\t(GOT): %#v\n\t(WNT): %#v", ip, test.ip) + } + if pp.Source != test.pp.Source { + t.Errorf("decoded unexpected ProjectRoot.Source:\n\t(GOT): %s\n\t (WNT): %s", pp.Source, test.pp.Source) + } + if !pp.Constraint.identical(test.pp.Constraint) { + t.Errorf("decoded non-identical ProjectRoot.Constraint:\n\t(GOT): %#v\n\t(WNT): %#v", pp.Constraint, test.pp.Constraint) + } + } + }) + } +} + +func TestCacheEncodingTimestampedKey(t *testing.T) { + for _, test := range []struct { + ts time.Time + suffix []byte + }{ + {time.Unix(0, 0), []byte{0, 0, 0, 0, 0, 0, 0, 0}}, + {time.Unix(100, 0), []byte{0, 0, 0, 0, 0, 0, 0, 100}}, + {time.Unix(255, 0), []byte{0, 0, 0, 0, 0, 0, 0, 255}}, + {time.Unix(1+1<<8+1<<16+1<<24, 0), []byte{0, 0, 0, 0, 1, 1, 1, 1}}, + {time.Unix(255<<48, 0), []byte{0, 255, 0, 0, 0, 0, 0, 0}}, + } { + b := cacheTimestampedKey("pre:", test.ts) + if !bytes.Equal(b, append([]byte("pre:"), test.suffix...)) { + t.Errorf("unexpected suffix:\n\t(GOT):%v\n\t(WNT):%v", b[4:], test.suffix) + } + } +} diff --git a/internal/gps/source_cache_bolt_test.go b/internal/gps/source_cache_bolt_test.go new file mode 100644 index 0000000000..7f2f43e9c3 --- /dev/null +++ b/internal/gps/source_cache_bolt_test.go @@ -0,0 +1,294 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gps + +import ( + "io/ioutil" + "log" + "testing" + "time" + + "github.com/golang/dep/internal/gps/pkgtree" + "github.com/golang/dep/internal/test" +) + +func TestBoltCacheTimeout(t *testing.T) { + const root = "example.com/test" + cpath, err := ioutil.TempDir("", "singlesourcecache") + if err != nil { + t.Fatalf("Failed to create temp cache dir: %s", err) + } + pi := ProjectIdentifier{ProjectRoot: root} + logger := log.New(test.Writer{t}, "", 0) + + start := time.Now() + c, err := newBoltCache(cpath, pi, start.Unix(), logger) + if err != nil { + t.Fatal(err) + } + defer c.close() + + rev := Revision("test") + ai := ProjectAnalyzerInfo{Name: "name", Version: 42} + + manifest := &cachedManifest{ + constraints: ProjectConstraints{ + ProjectRoot("foo"): ProjectProperties{ + Constraint: Any(), + }, + ProjectRoot("bar"): ProjectProperties{ + Source: "whatever", + Constraint: testSemverConstraint(t, "> 1.3"), + }, + }, + overrides: ProjectConstraints{ + ProjectRoot("b"): ProjectProperties{ + Constraint: testSemverConstraint(t, "2.0.0"), + }, + }, + } + + lock := &cachedLock{ + inputHash: []byte("test_hash"), + projects: []LockedProject{ + NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps"}), + NewLockedProject(mkPI("github.com/sdboyer/gps2"), NewVersion("v0.10.0"), nil), + NewLockedProject(mkPI("github.com/sdboyer/gps3"), NewVersion("v0.10.0"), []string{"gps", "flugle"}), + NewLockedProject(mkPI("foo"), NewVersion("nada"), []string{"foo"}), + NewLockedProject(mkPI("github.com/sdboyer/gps4"), NewVersion("v0.10.0"), []string{"flugle", "gps"}), + }, + } + + ptree := pkgtree.PackageTree{ + ImportRoot: root, + Packages: map[string]pkgtree.PackageOrErr{ + "simple": { + P: pkgtree.Package{ + ImportPath: "simple", + CommentPath: "comment", + Name: "simple", + Imports: []string{ + "github.com/golang/dep/internal/gps", + "sort", + }, + }, + }, + "m1p": { + P: pkgtree.Package{ + ImportPath: "m1p", + CommentPath: "", + Name: "m1p", + Imports: []string{ + "github.com/golang/dep/internal/gps", + "os", + "sort", + }, + }, + }, + }, + } + + pvs := 
[]PairedVersion{ + NewBranch("originalbranch").Pair("rev1"), + NewVersion("originalver").Pair("rev2"), + } + + // Write values timestamped > start. + { + c.setManifestAndLock(rev, ai, manifest, lock) + c.setPackageTree(rev, ptree) + c.setVersionMap(pvs) + } + // Read back values timestamped > start. + { + gotM, gotL, ok := c.getManifestAndLock(rev, ai) + if !ok { + t.Error("no manifest and lock found for revision") + } + compareManifests(t, manifest, gotM) + if dl := DiffLocks(lock, gotL); dl != nil { + t.Errorf("lock differences:\n\t %#v", dl) + } + + got, ok := c.getPackageTree(rev) + if !ok { + t.Errorf("no package tree found:\n\t(WNT): %#v", ptree) + } + comparePackageTree(t, ptree, got) + + gotV := c.getAllVersions() + if len(gotV) != len(pvs) { + t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) + } else { + SortPairedForDowngrade(gotV) + for i := range pvs { + if !pvs[i].identical(gotV[i]) { + t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) + break + } + } + } + } + + if err := c.close(); err != nil { + t.Fatal("failed to close cache:", err) + } + + // Read with a later epoch. Expect no values, since all timestamped < after. + { + after := time.Now() + if after.Unix() <= start.Unix() { + // Ensure a future timestamp. + after = start.Add(10 * time.Second) + } + c, err = newBoltCache(cpath, pi, after.Unix(), logger) + if err != nil { + t.Fatal(err) + } + + m, l, ok := c.getManifestAndLock(rev, ai) + if ok { + t.Errorf("expected no cached info, but got:\n\tManifest: %#v\n\tLock: %#v\n", m, l) + } + + ptree, ok := c.getPackageTree(rev) + if ok { + t.Errorf("expected no cached package tree, but got:\n\t%#v", ptree) + } + + pvs := c.getAllVersions() + if len(pvs) > 0 { + t.Errorf("expected no cached versions, but got:\n\t%#v", pvs) + } + } + + if err := c.close(); err != nil { + t.Fatal("failed to close cache:", err) + } + + // Re-connect with the original epoch. + c, err = newBoltCache(cpath, pi, start.Unix(), logger) + if err != nil { + t.Fatal(err) + } + // Read values timestamped > start. + { + gotM, gotL, ok := c.getManifestAndLock(rev, ai) + if !ok { + t.Error("no manifest and lock found for revision") + } + compareManifests(t, manifest, gotM) + if dl := DiffLocks(lock, gotL); dl != nil { + t.Errorf("lock differences:\n\t %#v", dl) + } + + got, ok := c.getPackageTree(rev) + if !ok { + t.Errorf("no package tree found:\n\t(WNT): %#v", ptree) + } + comparePackageTree(t, ptree, got) + + gotV := c.getAllVersions() + if len(gotV) != len(pvs) { + t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) + } else { + SortPairedForDowngrade(gotV) + for i := range pvs { + if !pvs[i].identical(gotV[i]) { + t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) + break + } + } + } + } + + // New values. 
+ newManifest := &cachedManifest{ + constraints: ProjectConstraints{ + ProjectRoot("foo"): ProjectProperties{ + Constraint: NewBranch("master"), + }, + ProjectRoot("bar"): ProjectProperties{ + Source: "whatever", + Constraint: testSemverConstraint(t, "> 1.5"), + }, + }, + } + + newLock := &cachedLock{ + inputHash: []byte("new_test_hash"), + projects: []LockedProject{ + NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v1"), []string{"gps"}), + }, + } + + newPtree := pkgtree.PackageTree{ + ImportRoot: root, + Packages: map[string]pkgtree.PackageOrErr{ + "simple": { + P: pkgtree.Package{ + ImportPath: "simple", + CommentPath: "newcomment", + Name: "simple", + Imports: []string{ + "github.com/golang/dep/internal/gps42", + "test", + }, + }, + }, + "m1p": { + P: pkgtree.Package{ + ImportPath: "m1p", + CommentPath: "", + Name: "m1p", + Imports: []string{ + "os", + }, + }, + }, + }, + } + + newPVS := []PairedVersion{ + NewBranch("newbranch").Pair("revA"), + NewVersion("newver").Pair("revB"), + } + // Overwrite with new values with newer timestamps. + { + c.setManifestAndLock(rev, ai, newManifest, newLock) + c.setPackageTree(rev, newPtree) + c.setVersionMap(newPVS) + } + // Read new values. + { + gotM, gotL, ok := c.getManifestAndLock(rev, ai) + if !ok { + t.Error("no manifest and lock found for revision") + } + compareManifests(t, newManifest, gotM) + if dl := DiffLocks(newLock, gotL); dl != nil { + t.Errorf("lock differences:\n\t %#v", dl) + } + + got, ok := c.getPackageTree(rev) + if !ok { + t.Errorf("no package tree found:\n\t(WNT): %#v", newPtree) + } + comparePackageTree(t, newPtree, got) + + gotV := c.getAllVersions() + if len(gotV) != len(newPVS) { + t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, newPVS) + } else { + SortPairedForDowngrade(gotV) + for i := range newPVS { + if !newPVS[i].identical(gotV[i]) { + t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, newPVS) + break + } + } + } + } +} diff --git a/internal/gps/source_cache_test.go b/internal/gps/source_cache_test.go index 7980876997..37a4506f81 100644 --- a/internal/gps/source_cache_test.go +++ b/internal/gps/source_cache_test.go @@ -5,29 +5,64 @@ package gps import ( + "io/ioutil" + "log" "sort" "testing" + "time" "github.com/golang/dep/internal/gps/pkgtree" + "github.com/golang/dep/internal/test" "github.com/pkg/errors" ) +func Test_singleSourceCache(t *testing.T) { + newMem := func(*testing.T, string, string) singleSourceCache { return newMemoryCache() } + t.Run("mem", singleSourceCacheTest{newCache: newMem}.run) + + epoch := time.Now().Unix() + newBolt := func(t *testing.T, cachedir, root string) singleSourceCache { + pi := mkPI(root).normalize() + c, err := newBoltCache(cachedir, pi, epoch, log.New(test.Writer{t}, "", 0)) + if err != nil { + t.Fatal(err) + } + return c + } + t.Run("bolt/open", singleSourceCacheTest{newCache: newBolt}.run) + t.Run("bolt/refresh", singleSourceCacheTest{newCache: newBolt, persistent: true}.run) +} + var testAnalyzerInfo = ProjectAnalyzerInfo{ Name: "test-analyzer", Version: 1, } -func TestSingleSourceCache(t *testing.T) { +type singleSourceCacheTest struct { + newCache func(*testing.T, string, string) singleSourceCache + persistent bool +} + +// run tests singleSourceCache methods of caches returned by test.newCache. +// For test.persistent caches, test.newCache is periodically called mid-test to ensure persistence. 
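+// The cachedir and root arguments passed to newCache identify the same underlying store
+// across those re-opens, so previously written values are expected to remain readable.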
+func (test singleSourceCacheTest) run(t *testing.T) { const root = "example.com/test" + cpath, err := ioutil.TempDir("", "singlesourcecache") + if err != nil { + t.Fatalf("Failed to create temp cache dir: %s", err) + } t.Run("info", func(t *testing.T) { const rev Revision = "revision" - c := newMemoryCache() + c := test.newCache(t, cpath, root) + defer c.close() var m Manifest = &cachedManifest{ constraints: ProjectConstraints{ - ProjectRoot("foo"): ProjectProperties{}, + ProjectRoot("foo"): ProjectProperties{ + Constraint: Any(), + }, ProjectRoot("bar"): ProjectProperties{ Source: "whatever", Constraint: testSemverConstraint(t, "> 1.3"), @@ -35,7 +70,7 @@ func TestSingleSourceCache(t *testing.T) { }, overrides: ProjectConstraints{ ProjectRoot("b"): ProjectProperties{ - Constraint: NewVersion("2.0.0"), + Constraint: testSemverConstraint(t, "2.0.0"), }, }, ignored: map[string]bool{ @@ -51,14 +86,19 @@ func TestSingleSourceCache(t *testing.T) { inputHash: []byte("test_hash"), projects: []LockedProject{ NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps"}), - NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil), - NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps", "flugle"}), + NewLockedProject(mkPI("github.com/sdboyer/gps2"), NewVersion("v0.10.0"), nil), + NewLockedProject(mkPI("github.com/sdboyer/gps3"), NewVersion("v0.10.0"), []string{"gps", "flugle"}), NewLockedProject(mkPI("foo"), NewVersion("nada"), []string{"foo"}), - NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"flugle", "gps"}), + NewLockedProject(mkPI("github.com/sdboyer/gps4"), NewVersion("v0.10.0"), []string{"flugle", "gps"}), }, } c.setManifestAndLock(rev, testAnalyzerInfo, m, l) + if test.persistent { + c.close() + c = test.newCache(t, cpath, root) + } + gotM, gotL, ok := c.getManifestAndLock(rev, testAnalyzerInfo) if !ok { t.Error("no manifest and lock found for revision") @@ -71,12 +111,13 @@ func TestSingleSourceCache(t *testing.T) { m = &cachedManifest{ constraints: ProjectConstraints{ ProjectRoot("foo"): ProjectProperties{ - Source: "whatever", + Source: "whatever", + Constraint: Any(), }, }, overrides: ProjectConstraints{ ProjectRoot("bar"): ProjectProperties{ - Constraint: NewVersion("2.0.0"), + Constraint: testSemverConstraint(t, "2.0.0"), }, }, ignored: map[string]bool{ @@ -92,12 +133,17 @@ func TestSingleSourceCache(t *testing.T) { inputHash: []byte("different_test_hash"), projects: []LockedProject{ NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}), - NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.11.0"), []string{"gps"}), - NewLockedProject(mkPI("github.com/sdboyer/gps"), Revision("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}), + NewLockedProject(mkPI("github.com/sdboyer/gps2"), NewVersion("v0.11.0"), []string{"gps"}), + NewLockedProject(mkPI("github.com/sdboyer/gps3"), Revision("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}), }, } c.setManifestAndLock(rev, testAnalyzerInfo, m, l) + if test.persistent { + c.close() + c = test.newCache(t, cpath, root) + } + gotM, gotL, ok = c.getManifestAndLock(rev, testAnalyzerInfo) if !ok { t.Error("no manifest and lock found for revision") @@ -109,7 +155,8 @@ func TestSingleSourceCache(t *testing.T) { }) t.Run("pkgTree", func(t *testing.T) { - c := newMemoryCache() + c := test.newCache(t, cpath, root) + defer c.close() 
const rev Revision = "rev_adsfjkl" @@ -117,6 +164,11 @@ func TestSingleSourceCache(t *testing.T) { t.Fatalf("unexpected result before setting package tree: %v", got) } + if test.persistent { + c.close() + c = test.newCache(t, cpath, root) + } + pt := pkgtree.PackageTree{ ImportRoot: root, Packages: map[string]pkgtree.PackageOrErr{ @@ -147,12 +199,22 @@ func TestSingleSourceCache(t *testing.T) { } c.setPackageTree(rev, pt) + if test.persistent { + c.close() + c = test.newCache(t, cpath, root) + } + got, ok := c.getPackageTree(rev) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", pt) } comparePackageTree(t, pt, got) + if test.persistent { + c.close() + c = test.newCache(t, cpath, root) + } + pt = pkgtree.PackageTree{ ImportRoot: root, Packages: map[string]pkgtree.PackageOrErr{ @@ -163,6 +225,11 @@ func TestSingleSourceCache(t *testing.T) { } c.setPackageTree(rev, pt) + if test.persistent { + c.close() + c = test.newCache(t, cpath, root) + } + got, ok = c.getPackageTree(rev) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", pt) @@ -171,7 +238,8 @@ func TestSingleSourceCache(t *testing.T) { }) t.Run("versions", func(t *testing.T) { - c := newMemoryCache() + c := test.newCache(t, cpath, root) + defer c.close() const rev1, rev2 = "rev1", "rev2" const br, ver = "branch_name", "2.10" @@ -182,6 +250,11 @@ func TestSingleSourceCache(t *testing.T) { SortPairedForDowngrade(versions) c.setVersionMap(versions) + if test.persistent { + c.close() + c = test.newCache(t, cpath, root) + } + t.Run("getAllVersions", func(t *testing.T) { got := c.getAllVersions() if len(got) != len(versions) { @@ -270,6 +343,10 @@ func TestSingleSourceCache(t *testing.T) { // compareManifests compares two manifests and reports differences as test errors. func compareManifests(t *testing.T, want, got Manifest) { + if (want == nil || got == nil) && (got != nil || want != nil) { + t.Errorf("one manifest is nil:\n\t(GOT): %#v\n\t(WNT): %#v", got, want) + return + } { want, got := want.DependencyConstraints(), got.DependencyConstraints() if !projectConstraintsEqual(want, got) {