Sloop was not able to reduce its size on disk after GC runs, which also plays a big role in memory consumption. (#114)
Upgraded the Badger version and added flattening at startup time. Fixed the event count spreading issue, which resulted in uneven data distribution across partitions. Moved to drop prefix as it yields better space reclamation, and added a feature flag for switching to delete prefix. Also changed the number of versions to keep to 0 so that delete prefix reclaims space (dgraph-io/badger#1228). Fixed the issue of unclaimed !badger!move prefixes, which were never cleaned up (details: dgraph-io/badger#1288). Added support in the debugging pages for viewing internal keys.
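Below is a minimal sketch of the Badger calls behind these changes, assuming the Badger v2 API; the data path, prefix, worker count, and option values are illustrative and not Sloop's actual configuration:

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	// Keep zero old versions so prefix deletion can actually reclaim space
	// (see dgraph-io/badger#1228). Path and option value are illustrative.
	opts := badger.DefaultOptions("/tmp/sloop-data").WithNumVersionsToKeep(0)
	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Flatten the LSM tree at startup so leftover levels from earlier runs get compacted.
	if err := db.Flatten(4); err != nil {
		log.Printf("flatten failed: %v", err)
	}

	// Drop an entire expired partition prefix in one call instead of deleting keys
	// one by one, which lets Badger discard whole tables during garbage collection.
	if err := db.DropPrefix([]byte("/eventcount/001546405200/")); err != nil {
		log.Printf("drop prefix failed: %v", err)
	}
}
```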
This commit is contained in:
Parent: 3506d40246
Commit: c595202d01
go.mod | 15
@@ -5,31 +5,34 @@ go 1.13
require (
	cloud.google.com/go v0.49.0 // indirect
	github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
	github.com/DataDog/zstd v1.4.5 // indirect
	github.com/Jeffail/gabs/v2 v2.2.0
	github.com/cespare/xxhash/v2 v2.1.1 // indirect
	github.com/coreos/bbolt v1.3.1-coreos.6 // indirect
	github.com/coreos/etcd v3.3.15+incompatible // indirect
	github.com/dgraph-io/badger v1.6.0
	github.com/dgraph-io/badger/v2 v2.0.0
	github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b // indirect
	github.com/dgraph-io/badger/v2 v2.0.3
	github.com/dgraph-io/ristretto v0.0.2 // indirect
	github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
	github.com/ghodss/yaml v1.0.0
	github.com/gogo/protobuf v1.3.1
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/golang/protobuf v1.3.2
	github.com/golang/protobuf v1.4.0
	github.com/googleapis/gnostic v0.3.1 // indirect
	github.com/hashicorp/golang-lru v0.5.3 // indirect
	github.com/imdario/mergo v0.3.8 // indirect
	github.com/nsf/jsondiff v0.0.0-20190712045011-8443391ee9b6
	github.com/pkg/errors v0.8.1
	github.com/pkg/errors v0.9.1
	github.com/pkg/profile v1.4.0
	github.com/prometheus/client_golang v1.2.1
	github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee // indirect
	github.com/prometheus/procfs v0.0.8 // indirect
	github.com/spf13/afero v1.2.2
	github.com/stretchr/testify v1.4.0
	golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
	golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
	golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 // indirect
	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect
	golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
	golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
	google.golang.org/appengine v1.6.5 // indirect
	gopkg.in/yaml.v2 v2.2.7 // indirect
go.sum | 117
|
@ -30,27 +30,23 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
|||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
|
||||
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Jeffail/gabs/v2 v2.1.0 h1:6dV9GGOjoQgzWTQEltZPXlJdFloxvIq7DwqgxMCbq30=
|
||||
github.com/Jeffail/gabs/v2 v2.1.0/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI=
|
||||
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Jeffail/gabs/v2 v2.2.0 h1:7touC+WzbQ7LO5+mwgxT44miyTqAVCOlIWLA6PiIB5w=
|
||||
github.com/Jeffail/gabs/v2 v2.2.0/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
|
||||
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/VictoriaMetrics/fastcache v1.5.1/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
|
@ -63,14 +59,12 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
|||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
|
||||
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI=
|
||||
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
|
@ -92,15 +86,25 @@ github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Ev
|
|||
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgraph-io/badger/v2 v2.0.0 h1:Cr05o2TUd2IcLbEY0aGd8mbjm1YyQpy+dswo3BcDXrE=
|
||||
github.com/dgraph-io/badger/v2 v2.0.0/go.mod h1:YoRSIp1LmAJ7zH7tZwRvjNMUYLxB4wl3ebYkaIruZ04=
|
||||
github.com/dgraph-io/ristretto v0.0.0-20190801024210-18ba08fdea80 h1:ZVYvevH/zd9ygtRNosrnlGdvI6CEuUPwZ3EV0lfdGuM=
|
||||
github.com/dgraph-io/ristretto v0.0.0-20190801024210-18ba08fdea80/go.mod h1:UvZmzj8odp3S1nli6yEb1vLME8iJFBrRcw8rAJEiu9Q=
|
||||
github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200417090009-0edfe98dbc31 h1:4jeB8+a7ZBbFqVTvHilyJ1w0WaThwOXmnbqAcqdbHBk=
|
||||
github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200417090009-0edfe98dbc31/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
|
||||
github.com/dgraph-io/badger/v2 v2.0.2 h1:uBAA5oM9Gz9TrP01v9LxBGztE5rhtGeBxpF1IvxGGtw=
|
||||
github.com/dgraph-io/badger/v2 v2.0.2/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
|
||||
github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI=
|
||||
github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
|
||||
github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e h1:aeUNgwup7PnDOBAD1BOKAqzb/W/NksOj6r3dwKKuqfg=
|
||||
github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs=
|
||||
github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU=
|
||||
github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
|
||||
github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
|
||||
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b h1:SeiGBzKrEtuDddnBABHkp4kq9sBGE9nuYmk6FPTg0zg=
|
||||
github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
|
@ -172,21 +176,16 @@ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+
|
|||
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
|
||||
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/goburrow/cache v0.1.0/go.mod h1:8oxkfud4hvjO4tNjEKZfEd+LrpDVDlBIauGYsWGEzio=
|
||||
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE=
|
||||
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
|
@ -196,6 +195,12 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
|||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
|
@ -205,6 +210,8 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
|||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
@ -213,6 +220,7 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
|
|||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
|
@ -223,12 +231,9 @@ github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1a
|
|||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
|
@ -240,8 +245,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
|||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
|
||||
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
|
@ -295,12 +298,10 @@ github.com/nsf/jsondiff v0.0.0-20190712045011-8443391ee9b6/go.mod h1:uFMI8w+ref4
|
|||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
|
@ -308,15 +309,16 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
|
|||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
|
||||
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
|
||||
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
|
||||
github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=
|
||||
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
|
@ -327,16 +329,10 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
|
|||
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee h1:iBZPTYkGLvdu6+A5TsMUJQkQX9Ad4aCEnSQtdxPuTCQ=
|
||||
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78=
|
||||
github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
|
@ -347,11 +343,9 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
|
|||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9 h1:5Cp3cVwpQP4aCQ6jx6dNLP3IarbYiuStmIzYu+BjQwY=
|
||||
github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
|
@ -383,7 +377,6 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1
|
|||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
|
@ -393,11 +386,8 @@ go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
|
|||
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
|
@ -410,8 +400,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
@ -452,14 +440,13 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
|
|||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc=
|
||||
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
|
@ -492,14 +479,13 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI=
|
||||
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
|
||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -537,6 +523,7 @@ golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
|
||||
|
@ -550,8 +537,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
|
||||
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
|
@ -566,8 +551,13 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
|
|||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
@ -578,8 +568,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
|
|||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
|
||||
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
|
@ -602,29 +590,16 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.0.0-20191003035645-10e821c09743 h1:pK/at0jfyf3JXDNMJjZjPF6nglECcKUuIWfZU9UPTFY=
|
||||
k8s.io/api v0.0.0-20191003035645-10e821c09743/go.mod h1:uO3sqSrudYAYLDvkW5ph6lZtwlcN7mUlfE80fNPY8EE=
|
||||
k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
|
||||
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20191003041335-07ddaef0d8ff h1:OOnK/TDElaHLmLw8NyqsljEkPBX1mdXC6BGIEvy2cxI=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20191003041335-07ddaef0d8ff/go.mod h1:LBlViQpiTQHMeJf+uN/VaXT7GCaXlghGVhmV+UEb234=
|
||||
k8s.io/apiextensions-apiserver v0.17.0 h1:+XgcGxqaMztkbbvsORgCmHIb4uImHKvTjNyu7b8gRnA=
|
||||
k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8=
|
||||
k8s.io/apimachinery v0.0.0-20191003035458-c930edf45883 h1:Tzrafa5uSnENx/KQjS4EFWiBzGeMcpCVBPD+8w1ZNiw=
|
||||
k8s.io/apimachinery v0.0.0-20191003035458-c930edf45883/go.mod h1:3rOMKKJmoWw7dJkRxGjW26hYSWvYV5nrieoTsmWq1jw=
|
||||
k8s.io/apimachinery v0.17.0 h1:xRBnuie9rXcPxUkDizUsGvPf1cnlZCFu210op7J7LJo=
|
||||
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apiserver v0.0.0-20191003040607-fc290d820806/go.mod h1:Swj9Gfw8OhUriX4pzWCRfj/PDeV1zd929BnmONNUMDw=
|
||||
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
|
||||
k8s.io/client-go v0.0.0-20191003035859-a746c2f219b7 h1:aYZcOBQ/tfxKSpyXgdLXtAlOrrteqtBEHUfaDMrrQK4=
|
||||
k8s.io/client-go v0.0.0-20191003035859-a746c2f219b7/go.mod h1:6LOleLJHIuJ9sIFNLPjRLTsWNYHk6MyS9VawVsBRe4g=
|
||||
k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
|
||||
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
|
||||
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
|
||||
k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||
k8s.io/code-generator v0.0.0-20191003035328-700b1226c0bd/go.mod h1:HC9p4y3SBN+txSs8x57qmNPXFZ/CxdCHiDTNnocCSEw=
|
||||
k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
|
||||
k8s.io/component-base v0.0.0-20191003040350-be468d5d6790/go.mod h1:9mA4uQCNQ0qIetCGWbprquCMokZ5N5FDKDtftfE8bXU=
|
||||
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
|
@ -632,11 +607,8 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
|
|||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ=
|
||||
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/utils v0.0.0-20190920012459-5008bf6f8cd6 h1:rfepARh/ECp66dk9TTmT//1PBkHffjnxhdOrgH4m+eA=
|
||||
k8s.io/utils v0.0.0-20190920012459-5008bf6f8cd6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM=
|
||||
k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
|
@ -647,7 +619,6 @@ modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs
|
|||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
|
|
|
@ -2,11 +2,12 @@ package common
|
|||
|
||||
import (
|
||||
"github.com/dgraph-io/badger/v2"
|
||||
"github.com/golang/glog"
|
||||
"github.com/salesforce/sloop/pkg/sloop/store/untyped/badgerwrap"
|
||||
)
|
||||
|
||||
func deleteKeys(db badgerwrap.DB, keysForDelete [][]byte) (error, int) {
|
||||
deletedKeysInThisBatch := 0
|
||||
func deleteKeys(db badgerwrap.DB, keysForDelete [][]byte) (error, uint64) {
|
||||
var deletedKeysInThisBatch uint64 = 0
|
||||
err := db.Update(func(txn badgerwrap.Txn) error {
|
||||
for _, key := range keysForDelete {
|
||||
err := txn.Delete(key)
|
||||
|
@ -25,25 +26,27 @@ func deleteKeys(db badgerwrap.DB, keysForDelete [][]byte) (error, int) {
|
|||
return nil, deletedKeysInThisBatch
|
||||
}
|
||||
|
||||
func DeleteKeysWithPrefix(keyPrefix []byte, db badgerwrap.DB, deletionBatchSize int) (error, int, int) {
|
||||
numOfKeysToDelete := 0
|
||||
numOfKeysDeleted := 0
|
||||
keysLeftToDelete := true
|
||||
// DeleteKeysWithPrefix deletes the keys with the given prefix.
|
||||
func DeleteKeysWithPrefix(keyPrefix string, db badgerwrap.DB, deletionBatchSize int, numOfKeysToDelete uint64) (error, uint64, uint64) {
|
||||
|
||||
for keysLeftToDelete {
|
||||
// Deletion does not lock the DB, so keys with the given prefix can be added while old ones are being deleted, which can lead to a race condition.
// To avoid this, the count of existing keys matching the prefix is taken up front, and deletion ends once that many keys have been deleted.
|
||||
|
||||
var numOfKeysDeleted uint64 = 0
|
||||
for numOfKeysDeleted < numOfKeysToDelete {
|
||||
|
||||
keysThisBatch := make([][]byte, 0, deletionBatchSize)
|
||||
|
||||
// getting the keys to delete that have the given prefix
|
||||
_ = db.View(func(txn badgerwrap.Txn) error {
|
||||
iterOpt := badger.DefaultIteratorOptions
|
||||
iterOpt.Prefix = keyPrefix
|
||||
iterOpt.AllVersions = false
|
||||
iterOpt.PrefetchValues = false
|
||||
iterOpt.InternalAccess = true
|
||||
it := txn.NewIterator(iterOpt)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(keyPrefix); it.ValidForPrefix(keyPrefix); it.Next() {
|
||||
// TODO: Investigate if Seek() can be used instead of rewind
|
||||
for it.Rewind(); it.ValidForPrefix([]byte(keyPrefix)) || it.ValidForPrefix([]byte("!badger!move"+keyPrefix)); it.Next() {
|
||||
keyToDel := it.Item().KeyCopy(nil)
|
||||
keysThisBatch = append(keysThisBatch, keyToDel)
|
||||
if len(keysThisBatch) == deletionBatchSize {
|
||||
|
@ -57,30 +60,29 @@ func DeleteKeysWithPrefix(keyPrefix []byte, db badgerwrap.DB, deletionBatchSize
|
|||
// deleting the keys in batch
|
||||
if len(keysThisBatch) > 0 {
|
||||
err, deletedKeysInThisBatch := deleteKeys(db, keysThisBatch)
|
||||
numOfKeysToDelete += len(keysThisBatch)
|
||||
numOfKeysDeleted += deletedKeysInThisBatch
|
||||
if err != nil {
|
||||
glog.Errorf("Error encountered while deleting keys with prefix: '%v', numberOfKeysDeleted: '%v' numOfKeysToDelete: '%v'", keyPrefix, numOfKeysDeleted, numOfKeysToDelete)
|
||||
return err, numOfKeysDeleted, numOfKeysToDelete
|
||||
}
|
||||
}
|
||||
|
||||
if len(keysThisBatch) < deletionBatchSize {
|
||||
keysLeftToDelete = false
|
||||
}
|
||||
}
|
||||
|
||||
return nil, numOfKeysDeleted, numOfKeysToDelete
|
||||
|
||||
}
|
||||
|
||||
func GetTotalKeyCount(db badgerwrap.DB) uint64 {
|
||||
// GetTotalKeyCount returns the number of keys in the DB with the given prefix. If no prefix is provided, it returns the count of all keys.
|
||||
func GetTotalKeyCount(db badgerwrap.DB, keyPrefix string) uint64 {
|
||||
var totalKeyCount uint64 = 0
|
||||
keyPrefixToMatch := []byte(keyPrefix)
|
||||
_ = db.View(func(txn badgerwrap.Txn) error {
|
||||
iterOpt := badger.DefaultIteratorOptions
|
||||
iterOpt.PrefetchValues = false
|
||||
if len(keyPrefixToMatch) != 0 {
|
||||
iterOpt.Prefix = keyPrefixToMatch
|
||||
}
|
||||
it := txn.NewIterator(iterOpt)
|
||||
defer it.Close()
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
for it.Rewind(); it.ValidForPrefix(keyPrefixToMatch); it.Next() {
|
||||
totalKeyCount++
|
||||
}
|
||||
return nil
|
||||
|
|
|
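These two helpers are meant to compose: count the keys under a prefix first, then delete exactly that many, so keys written concurrently under the same prefix are left alone. A minimal sketch of such a caller follows; the helper name, prefix, and batch size are hypothetical and not part of this change:

```go
package common

import (
	"github.com/golang/glog"

	"github.com/salesforce/sloop/pkg/sloop/store/untyped/badgerwrap"
)

// cleanUpPartition is a hypothetical caller showing how GetTotalKeyCount and
// DeleteKeysWithPrefix fit together: snapshot the key count, then delete that many.
func cleanUpPartition(db badgerwrap.DB, prefix string) error {
	numToDelete := GetTotalKeyCount(db, prefix)
	err, deleted, toDelete := DeleteKeysWithPrefix(prefix, db, 1000, numToDelete)
	if err != nil {
		glog.Errorf("deleted only %v of %v keys under prefix %q: %v", deleted, toDelete, prefix, err)
		return err
	}
	glog.V(2).Infof("deleted %v of %v keys under prefix %q", deleted, toDelete, prefix)
	return nil
}
```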
@ -12,46 +12,30 @@ var commonPrefix = "/commonprefix/001546405200/"
|
|||
func Test_Db_Utilities_DeleteKeysWithPrefix_DeleteAllKeys(t *testing.T) {
|
||||
db := helper_get_db(t)
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix(commonPrefix))
|
||||
err, numOfDeletedKeys, numOfKeysToDelete := DeleteKeysWithPrefix([]byte(commonPrefix), db, 10)
|
||||
err, numOfDeletedKeys, numOfKeysToDelete := DeleteKeysWithPrefix(commonPrefix, db, 10, 4)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 4, numOfDeletedKeys)
|
||||
assert.Equal(t, 4, numOfKeysToDelete)
|
||||
assert.Equal(t, uint64(4), numOfDeletedKeys)
|
||||
assert.Equal(t, uint64(4), numOfKeysToDelete)
|
||||
}
|
||||
|
||||
func Test_Db_Utilities_DeleteKeysWithPrefix_DeleteNoKeys(t *testing.T) {
|
||||
db := helper_get_db(t)
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix(commonPrefix))
|
||||
err, numOfDeletedKeys, numOfKeysToDelete := DeleteKeysWithPrefix([]byte(commonPrefix+"random"), db, 10)
|
||||
err, numOfDeletedKeys, numOfKeysToDelete := DeleteKeysWithPrefix(commonPrefix+"random", db, 10, 0)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, numOfDeletedKeys)
|
||||
assert.Equal(t, 0, numOfKeysToDelete)
|
||||
assert.Equal(t, uint64(0), numOfDeletedKeys)
|
||||
assert.Equal(t, uint64(0), numOfKeysToDelete)
|
||||
}
|
||||
|
||||
func Test_Db_Utilities_DeleteKeysWithPrefix_DeleteSomeKeys(t *testing.T) {
|
||||
db := helper_get_db(t)
|
||||
// DB has 8 keys
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix(commonPrefix))
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix("randomStuff"+commonPrefix))
|
||||
err, numOfDeletedKeys, numOfKeysToDelete := DeleteKeysWithPrefix([]byte(commonPrefix), db, 10)
|
||||
err, numOfDeletedKeys, numOfKeysToDelete := DeleteKeysWithPrefix(commonPrefix, db, 10, 4)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 4, numOfDeletedKeys)
|
||||
assert.Equal(t, 4, numOfKeysToDelete)
|
||||
}
|
||||
|
||||
func Test_Db_Utilities_GetTotalKeyCount_SomeKeys(t *testing.T) {
|
||||
db := helper_get_db(t)
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix(commonPrefix))
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix("randomStuff"+commonPrefix))
|
||||
numberOfKeys := GetTotalKeyCount(db)
|
||||
|
||||
// expected count is 8 as each call to helper_add_keys_to_db adds keys in 4 tables
|
||||
expectedNumberOfKeys := 8
|
||||
assert.Equal(t, uint64(expectedNumberOfKeys), numberOfKeys)
|
||||
}
|
||||
|
||||
func Test_Db_Utilities_GetTotalKeyCount_NoKeys(t *testing.T) {
|
||||
db := helper_get_db(t)
|
||||
numberOfKeys := GetTotalKeyCount(db)
|
||||
assert.Equal(t, uint64(0), numberOfKeys)
|
||||
assert.Equal(t, uint64(4), numOfDeletedKeys)
|
||||
assert.Equal(t, uint64(4), numOfKeysToDelete)
|
||||
}
|
||||
|
||||
func helper_get_db(t *testing.T) badgerwrap.DB {
|
||||
|
@ -84,3 +68,20 @@ func helper_testKeys_with_common_prefix(prefix string) []string {
|
|||
prefix + "Pod/user-w/sync-123/sam-partition-test",
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Db_Utilities_GetTotalKeyCount_SomeKeys(t *testing.T) {
|
||||
db := helper_get_db(t)
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix(commonPrefix))
|
||||
helper_add_keys_to_db(t, db, helper_testKeys_with_common_prefix("randomStuff"+commonPrefix))
|
||||
|
||||
numberOfKeys := GetTotalKeyCount(db, commonPrefix)
|
||||
// expected count is 4: each call to helper_add_keys_to_db adds keys across 4 tables, and only the keys with the common prefix are counted
|
||||
expectedNumberOfKeys := 4
|
||||
assert.Equal(t, uint64(expectedNumberOfKeys), numberOfKeys)
|
||||
}
|
||||
|
||||
func Test_Db_Utilities_GetTotalKeyCount_NoKeys(t *testing.T) {
|
||||
db := helper_get_db(t)
|
||||
numberOfKeys := GetTotalKeyCount(db, "")
|
||||
assert.Equal(t, uint64(0), numberOfKeys)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,88 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"github.com/dgraph-io/badger/v2"
|
||||
"github.com/golang/glog"
|
||||
"github.com/salesforce/sloop/pkg/sloop/store/untyped/badgerwrap"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type SloopKey struct {
|
||||
TableName string
|
||||
PartitionID string
|
||||
}
|
||||
|
||||
// GetSloopKey parses the item's key and returns its SloopKey (table name and partition ID), or an error.
|
||||
func GetSloopKey(item badgerwrap.Item) (SloopKey, error) {
|
||||
key := item.Key()
|
||||
err, parts := ParseKey(string(key))
|
||||
if err != nil {
|
||||
return SloopKey{}, err
|
||||
}
|
||||
|
||||
var tableName = parts[1]
|
||||
var partitionId = parts[2]
|
||||
return SloopKey{tableName, partitionId}, nil
|
||||
}
|
||||
|
||||
type PartitionInfo struct {
|
||||
TotalKeyCount uint64
|
||||
TableNameToKeyCountMap map[string]uint64
|
||||
}
|
||||
|
||||
// PrintKeyHistogram logs a histogram of keys per partition and table. It can help with debugging when needed.
|
||||
func PrintKeyHistogram(db badgerwrap.DB) {
|
||||
partitionTableNameToKeyCountMap, totalKeyCount := GetPartitionsInfo(db)
|
||||
glog.V(2).Infof("TotalkeyCount: %v", totalKeyCount)
|
||||
|
||||
for partitionID, partitionInfo := range partitionTableNameToKeyCountMap {
|
||||
for tableName, keyCount := range partitionInfo.TableNameToKeyCountMap {
|
||||
glog.V(2).Infof("TableName: %v, PartitionId: %v, keyCount: %v", tableName, partitionID, keyCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetSortedPartitionIDs returns the sorted list of partition IDs from the given partition info map
|
||||
func GetSortedPartitionIDs(partitionsInfoMap map[string]*PartitionInfo) []string {
|
||||
var sortedListOfPartitionIds []string
|
||||
|
||||
for partitionID, _ := range partitionsInfoMap {
|
||||
sortedListOfPartitionIds = append(sortedListOfPartitionIds, partitionID)
|
||||
}
|
||||
|
||||
// Sorting the numeric partition ID strings lexicographically is OK here since they are all the same length
|
||||
sort.Strings(sortedListOfPartitionIds)
|
||||
return sortedListOfPartitionIds
|
||||
}
|
||||
|
||||
// GetPartitionsInfo gathers per-partition key counts.
// It returns a map from partition ID to PartitionInfo (total and per-table key counts) along with the overall key count.
|
||||
func GetPartitionsInfo(db badgerwrap.DB) (map[string]*PartitionInfo, uint64) {
|
||||
var totalKeyCount uint64 = 0
|
||||
partitionIDToPartitionInfoMap := make(map[string]*PartitionInfo)
|
||||
|
||||
_ = db.View(func(txn badgerwrap.Txn) error {
|
||||
iterOpt := badger.DefaultIteratorOptions
|
||||
iterOpt.PrefetchValues = false
|
||||
it := txn.NewIterator(iterOpt)
|
||||
defer it.Close()
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
sloopKey, err := GetSloopKey(item)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to parse information about key: %x", item.Key())
|
||||
continue
|
||||
}
|
||||
if partitionIDToPartitionInfoMap[sloopKey.PartitionID] == nil {
|
||||
partitionIDToPartitionInfoMap[sloopKey.PartitionID] = &PartitionInfo{0, make(map[string]uint64)}
|
||||
}
|
||||
|
||||
partitionIDToPartitionInfoMap[sloopKey.PartitionID].TotalKeyCount++
|
||||
partitionIDToPartitionInfoMap[sloopKey.PartitionID].TableNameToKeyCountMap[sloopKey.TableName]++
|
||||
totalKeyCount++
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return partitionIDToPartitionInfoMap, totalKeyCount
|
||||
}
|
|
@ -50,26 +50,22 @@ func updateEventCountTable(
|
|||
|
||||
// Truncate long-lived events to available partitions
|
||||
// This avoids filling in data that will go beyond the current time range
|
||||
|
||||
// Default truncate as computedLastTs -1 * PartitionDuration.
|
||||
// Essentially only allowing events in 1 partition by default. This scenario will only be hit for first event on new sloop with no data.
|
||||
truncateTs := computedLastTs.Add(-1 * untyped.GetPartitionDuration())
|
||||
|
||||
// If there is only one partition, use minPartitionStartTime to ensure we receive events
|
||||
// in that partition.
|
||||
// Otherwise add events to all partitions from MinPartitionEndTime to computedTs.
|
||||
// This ensures no events are added to the very last partition which may get garbage collected soon.
|
||||
if ok, minPartition, maxPartition := tables.EventCountTable().GetMinMaxPartitions(txn); ok {
|
||||
if minPartitionStartTime, minPartitionEndTime, err := untyped.GetTimeRangeForPartition(minPartition); err == nil {
|
||||
if minPartition == maxPartition {
|
||||
truncateTs = minPartitionStartTime
|
||||
} else {
|
||||
truncateTs = minPartitionEndTime
|
||||
}
|
||||
}
|
||||
ok, minPartition, maxPartition := tables.GetMinAndMaxPartitionWithTxn(txn)
|
||||
if !ok {
|
||||
return errors.Wrap(err, "There was an error in GetMinAndMaxPartitionWithTxn")
|
||||
}
|
||||
|
||||
computedFirstTs, computedLastTs, computedCount = adjustForMaxLookback(computedFirstTs, computedLastTs, computedCount, truncateTs)
|
||||
_, minPartitionEndTime, err := untyped.GetTimeRangeForPartition(minPartition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
maxPartitionStartTime, maxPartitionEndTime, err := untyped.GetTimeRangeForPartition(maxPartition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
computedFirstTs, computedLastTs, computedCount = adjustForAvailablePartitions(computedFirstTs, computedLastTs, computedCount, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
|
||||
eventCountByMinute := spreadOutEvents(computedFirstTs, computedLastTs, computedCount)
|
||||
|
||||
|
@ -246,16 +242,41 @@ func computeEventsDiff(prevEventInfo *kubeextractor.EventInfo, newEventInfo *kub
|
|||
return prevEventInfo.LastTimestamp, newEventInfo.LastTimestamp, newCount
|
||||
}
|
||||
|
||||
// When you first bring up Sloop it can read in events that have been occurring for an extremely long time (many months)
|
||||
// We dont want to spread them out beyond the maxLookback because they can create huge transactions that fail and will
|
||||
// immediately kick in GC.
|
||||
// Returns a new firstTs, lastTs and Count
|
||||
func adjustForMaxLookback(firstTs time.Time, lastTs time.Time, count int, truncateTs time.Time) (time.Time, time.Time, int) {
|
||||
if firstTs.After(truncateTs) {
|
||||
return firstTs, lastTs, count
|
||||
func adjustForAvailablePartitions(firstTs time.Time, lastTs time.Time, count int, minPartitionEndTime time.Time, maxPartitionStartTime time.Time, maxPartitionEndTime time.Time) (time.Time, time.Time, int) {
|
||||
beginTruncateTs := minPartitionEndTime
|
||||
endTruncateTs := maxPartitionEndTime
|
||||
|
||||
// If begin and end are the same, there is only one partition.
|
||||
// Allow event count to add to only this partition.
|
||||
if beginTruncateTs == endTruncateTs {
|
||||
beginTruncateTs = maxPartitionStartTime
|
||||
}
|
||||
|
||||
// If there is no overlap between [firstTs, lastTs] and [beginTruncateTs, endTruncateTs], there is no event count to return
|
||||
if lastTs.Before(beginTruncateTs) || firstTs.After(endTruncateTs) {
|
||||
return beginTruncateTs, endTruncateTs, 0
|
||||
}
|
||||
|
||||
totalSeconds := lastTs.Sub(firstTs).Seconds()
|
||||
beforeSeconds := truncateTs.Sub(firstTs).Seconds()
|
||||
pctEventsToKeep := (totalSeconds - beforeSeconds) / totalSeconds
|
||||
return truncateTs, lastTs, int(float64(count) * pctEventsToKeep)
|
||||
secondsToKeep := totalSeconds
|
||||
|
||||
// if firstTs is before beginTruncateTs. Truncate to beginTruncateTs.
|
||||
// else set beginTruncateTs to firstTs
|
||||
if firstTs.Before(beginTruncateTs) {
|
||||
secondsToKeep = secondsToKeep - beginTruncateTs.Sub(firstTs).Seconds()
|
||||
} else {
|
||||
beginTruncateTs = firstTs
|
||||
}
|
||||
|
||||
// if lastTs is after endTruncateTs. Truncate to endTruncateTs
|
||||
// else set endTruncateTs to lastTs
|
||||
if lastTs.After(endTruncateTs) {
|
||||
secondsToKeep = secondsToKeep - lastTs.Sub(endTruncateTs).Seconds()
|
||||
} else {
|
||||
endTruncateTs = lastTs
|
||||
}
|
||||
|
||||
pctEventsToKeep := secondsToKeep / totalSeconds
|
||||
return beginTruncateTs, endTruncateTs, int(float64(count) * pctEventsToKeep)
|
||||
}
|
||||
|
|
|
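// A minimal usage sketch of adjustForAvailablePartitions, assuming the semantics of the
// function above; the base timestamp here is a placeholder, not taken from the change.
// With events spanning 10 hours, minPartitionEndTime at +3h and maxPartitionEndTime at +4h,
// only the one hour inside [beginTruncateTs, endTruncateTs] is kept, so 100 events become 10
// (the same numbers appear in the tests further down).
//
//   base := time.Now()
//   begin, end, kept := adjustForAvailablePartitions(
//       base, base.Add(10*time.Hour), 100,
//       base.Add(3*time.Hour), // minPartitionEndTime
//       base.Add(3*time.Hour), // maxPartitionStartTime
//       base.Add(4*time.Hour)) // maxPartitionEndTime
//   // kept == 10, begin == base.Add(3h), end == base.Add(4h)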
@ -231,7 +231,9 @@ func addEventCount(t *testing.T, tables typed.Tables, timeStamp *timestamp.Times
|
|||
involvedObject, err := kubeextractor.ExtractInvolvedObject(watchRec.Payload)
|
||||
assert.Nil(t, err)
|
||||
|
||||
metadata := &kubeextractor.KubeMetadata{Name: "someName", Namespace: "someNamespace"}
|
||||
err = tables.Db().Update(func(txn badgerwrap.Txn) error {
|
||||
updateKubeWatchTable(tables, txn, &watchRec, metadata, true)
|
||||
// For dedupe to work we need a record written to the watch table
|
||||
err2 := updateEventCountTable(tables, txn, &watchRec, &resourceMetadata, &involvedObject, someMaxLookback)
|
||||
if err2 != nil {
|
||||
|
@ -399,16 +401,102 @@ func Test_computeEventsDiff_PartiallyOverlapping(t *testing.T) {
|
|||
assert.Equal(t, someEventTs4, t2)
|
||||
}
|
||||
|
||||
func Test_adjustForMaxLookback_ShortEventNoChange(t *testing.T) {
|
||||
first, last, count := adjustForMaxLookback(someEventTs3, someEventTs4, 100, someEventTs1)
|
||||
assert.Equal(t, someEventTs3, first)
|
||||
assert.Equal(t, someEventTs4, last)
|
||||
assert.Equal(t, 100, count)
|
||||
func Test_adjustForAvailablePartitions_NonOverlapping(t *testing.T) {
|
||||
//func adjustForAvailablePartitions(firstTs time.Time, lastTs time.Time, count int, minPartitionEndTime time.Time, maxPartitionStartTime time.Time, maxPartitionEndTime time.Time)
|
||||
|
||||
// No Overlap, EventCountEndTime is before minPartitionEndTime
|
||||
var eventCountStartTime = someEventTs1
|
||||
var eventCountEndTime = eventCountStartTime.Add(1 * time.Hour)
|
||||
var minPartitionEndTime = eventCountStartTime.Add(10 * time.Hour)
|
||||
var maxPartitionEndTime = eventCountStartTime.Add(12 * time.Hour)
|
||||
var maxPartitionStartTime = maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
|
||||
beginTS, endTS, count := adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 0, count)
|
||||
assert.Equal(t, minPartitionEndTime, beginTS)
|
||||
assert.Equal(t, maxPartitionEndTime, endTS)
|
||||
|
||||
// No Overlap, EventCountStartTime is after maxPartitionEndTime
|
||||
eventCountStartTime = someEventTs1
|
||||
eventCountEndTime = eventCountStartTime.Add(1 * time.Hour)
|
||||
minPartitionEndTime = eventCountStartTime.Add(-10 * time.Hour)
|
||||
maxPartitionEndTime = eventCountStartTime.Add(-12 * time.Hour)
|
||||
maxPartitionStartTime = maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
beginTS, endTS, count = adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 0, count)
|
||||
assert.Equal(t, minPartitionEndTime, beginTS)
|
||||
assert.Equal(t, maxPartitionEndTime, endTS)
|
||||
}
|
||||
|
||||
func Test_adjustForMaxLookback_LongEventGetsTruncated(t *testing.T) {
|
||||
first, last, count := adjustForMaxLookback(someEventTs1, someEventTs4, 1000, someEventTs3)
|
||||
assert.Equal(t, someEventTs3, first)
|
||||
assert.Equal(t, someEventTs4, last)
|
||||
assert.Equal(t, 333, count)
|
||||
func Test_adjustForAvailablePartitions_Overlapping(t *testing.T) {
|
||||
|
||||
// Overlap, EventCountStartTime is before minPartitionEndTime and EventCountEndTime is after maxPartitionEndTime
|
||||
eventCountStartTime := someEventTs1
|
||||
eventCountEndTime := eventCountStartTime.Add(10 * time.Hour)
|
||||
minPartitionEndTime := eventCountStartTime.Add(3 * time.Hour)
|
||||
maxPartitionEndTime := eventCountStartTime.Add(5 * time.Hour)
|
||||
maxPartitionStartTime := maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
beginTS, endTS, count := adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 20, count)
|
||||
assert.Equal(t, minPartitionEndTime, beginTS)
|
||||
assert.Equal(t, maxPartitionEndTime, endTS)
|
||||
|
||||
// Overlap, EventCountStartTime is between minPartitionEndTime and maxPartition and also EventCountEndTime is after maxPartitionEndTime
|
||||
eventCountStartTime = someEventTs1
|
||||
eventCountEndTime = eventCountStartTime.Add(10 * time.Hour)
|
||||
minPartitionEndTime = eventCountStartTime.Add(-3 * time.Hour)
|
||||
maxPartitionEndTime = eventCountStartTime.Add(5 * time.Hour)
|
||||
maxPartitionStartTime = maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
beginTS, endTS, count = adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 50, count)
|
||||
assert.Equal(t, eventCountStartTime, beginTS)
|
||||
assert.Equal(t, maxPartitionEndTime, endTS)
|
||||
|
||||
// Overlap, EventCountStartTime is before minPartitionEndTime but EventCountEndTime is between minPartitionEndTime and maxPartitionEndTime
|
||||
eventCountStartTime = someEventTs1
|
||||
eventCountEndTime = eventCountStartTime.Add(10 * time.Hour)
|
||||
minPartitionEndTime = eventCountStartTime.Add(2 * time.Hour)
|
||||
maxPartitionEndTime = eventCountStartTime.Add(15 * time.Hour)
|
||||
maxPartitionStartTime = maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
beginTS, endTS, count = adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 80, count)
|
||||
assert.Equal(t, minPartitionEndTime, beginTS)
|
||||
assert.Equal(t, eventCountEndTime, endTS)
|
||||
|
||||
// Overlap, Both EventCountStart and end time are between minPartitionEndTime and maxPartitionEndTime
|
||||
eventCountStartTime = someEventTs1
|
||||
eventCountEndTime = eventCountStartTime.Add(10 * time.Hour)
|
||||
minPartitionEndTime = eventCountStartTime.Add(-2 * time.Hour)
|
||||
maxPartitionEndTime = eventCountStartTime.Add(15 * time.Hour)
|
||||
maxPartitionStartTime = maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
beginTS, endTS, count = adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 100, count)
|
||||
assert.Equal(t, eventCountStartTime, beginTS)
|
||||
assert.Equal(t, eventCountEndTime, endTS)
|
||||
|
||||
// Two extra cases to confirm that event count is spread on the last partition, but when
// there is only one partition the event count is spread to it.
// Overlap, EventCount start and end are outside of min and max partitions.
// There are two partitions and event count is spread to only one
|
||||
eventCountStartTime = someEventTs1
|
||||
eventCountEndTime = eventCountStartTime.Add(10 * time.Hour)
|
||||
minPartitionEndTime = eventCountStartTime.Add(3 * time.Hour)
|
||||
maxPartitionEndTime = eventCountStartTime.Add(4 * time.Hour)
|
||||
maxPartitionStartTime = maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
beginTS, endTS, count = adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 10, count)
|
||||
assert.Equal(t, minPartitionEndTime, beginTS)
|
||||
assert.Equal(t, maxPartitionEndTime, endTS)
|
||||
|
||||
// Overlap, EventCount start and end are outside of min and max partitions.
// There is only one partition and event count is still spread to it.
|
||||
eventCountStartTime = someEventTs1
|
||||
eventCountEndTime = eventCountStartTime.Add(10 * time.Hour)
|
||||
minPartitionEndTime = eventCountStartTime.Add(3 * time.Hour)
|
||||
maxPartitionEndTime = eventCountStartTime.Add(3 * time.Hour)
|
||||
maxPartitionStartTime = maxPartitionEndTime.Add(-1 * time.Hour)
|
||||
beginTS, endTS, count = adjustForAvailablePartitions(eventCountStartTime, eventCountEndTime, 100, minPartitionEndTime, maxPartitionStartTime, maxPartitionEndTime)
|
||||
assert.Equal(t, 10, count)
|
||||
assert.Equal(t, maxPartitionStartTime, beginTS)
|
||||
assert.Equal(t, maxPartitionEndTime, endTS)
|
||||
}
|
||||
|
|
|
@ -66,7 +66,9 @@ type SloopConfig struct {
BadgerNumOfCompactors int `json:"badgerNumOfCompactors"`
BadgerNumL0Tables int `json:"badgerNumLevelZeroTables"`
BadgerNumL0TablesStall int `json:"badgerNumLevelZeroTables"`
BadgerSyncWrites bool `json:"badgerBadgerSyncWrites"`
BadgerSyncWrites bool `json:"badgerSyncWrites"`
BadgerVLogFileIOMapping bool `json:"badgerVLogFileIOMapping"`
EnableDeleteKeys bool `json:"enableDeleteKeys"`
}

func registerFlags(fs *flag.FlagSet, config *SloopConfig) {
@ -94,7 +96,7 @@ func registerFlags(fs *flag.FlagSet, config *SloopConfig) {
fs.StringVar(&config.ApiServerHost, "apiserver-host", "", "Kubernetes API server endpoint")
fs.BoolVar(&config.WatchCrds, "watch-crds", true, "Watch for activity for CRDs")
fs.StringVar(&config.RestoreDatabaseFile, "restore-database-file", "", "Restore database from backup file into current context.")
fs.Float64Var(&config.BadgerDiscardRatio, "badger-discard-ratio", 0.1, "Badger value log GC uses this value to decide if it wants to compact a vlog file. Smaller values free more disk space but use more computing resources")
fs.Float64Var(&config.BadgerDiscardRatio, "badger-discard-ratio", 0.99, "Badger value log GC uses this value to decide if it wants to compact a vlog file. The lower the discard ratio, the more !badger!move keys are created, and the more !badger!move keys there are, the more the size on disk keeps growing over time.")
fs.Float64Var(&config.ThresholdForGC, "gc-threshold", 0.8, "Threshold for GC to start garbage collecting")
fs.DurationVar(&config.BadgerVLogGCFreq, "badger-vlog-gc-freq", time.Minute*1, "Frequency of running badger's ValueLogGC")
fs.Int64Var(&config.BadgerMaxTableSize, "badger-max-table-size", 0, "Max LSM table size in bytes. 0 = use badger default")
@ -102,13 +104,15 @@ func registerFlags(fs *flag.FlagSet, config *SloopConfig) {
fs.IntVar(&config.BadgerLevSizeMultiplier, "badger-level-size-multiplier", 0, "The ratio between the maximum sizes of contiguous levels in the LSM. 0 = use badger default")
fs.BoolVar(&config.BadgerKeepL0InMemory, "badger-keep-l0-in-memory", true, "Keeps all level 0 tables in memory for faster writes and compactions")
fs.Int64Var(&config.BadgerVLogFileSize, "badger-vlog-file-size", 0, "Max size in bytes per value log file. 0 = use badger default")
fs.UintVar(&config.BadgerVLogMaxEntries, "badger-vlog-max-entries", 0, "Max number of entries per value log file. 0 = use badger default")
fs.UintVar(&config.BadgerVLogMaxEntries, "badger-vlog-max-entries", 200000, "Max number of entries per value log file. 0 = use badger default")
fs.BoolVar(&config.BadgerUseLSMOnlyOptions, "badger-use-lsm-only-options", true, "Sets a higher valueThreshold so values would be collocated with the LSM tree, reducing vlog disk usage")
fs.BoolVar(&config.BadgerEnableEventLogging, "badger-enable-event-logging", false, "Turns on badger event logging")
fs.IntVar(&config.BadgerNumOfCompactors, "badger-number-of-compactors", 0, "Number of compactors for badger")
fs.IntVar(&config.BadgerNumL0Tables, "badger-number-of-level-zero-tables", 0, "Number of level zero tables for badger")
fs.IntVar(&config.BadgerNumL0TablesStall, "badger-number-of-zero-tables-stall", 0, "Number of level zero tables that, once reached, causes the DB to stall until compaction succeeds")
fs.BoolVar(&config.BadgerSyncWrites, "badger-sync-writes", true, "Sync writes ensures writes are synced to disk if set to true")
fs.BoolVar(&config.EnableDeleteKeys, "enable-delete-keys", false, "Use delete prefixes instead of dropPrefix for GC")
fs.BoolVar(&config.BadgerVLogFileIOMapping, "badger-vlog-fileIO-mapping", false, "Indicates which file loading mode should be used for the value log data; in memory-constrained environments setting this to true is recommended")
}

// This will first check if a config file is specified on cmd line using a temporary flagSet
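// A minimal sketch of how the enable-delete-keys flag is meant to switch GC strategy,
// assuming the deletePartition behavior shown later in this change. cleanupPartition and
// its parameters are hypothetical names used only for illustration; DeleteKeysWithPrefix
// and DropPrefix are the calls used elsewhere in this change.
func cleanupPartition(db badgerwrap.DB, prefix string, keysInPartition uint64, enableDeleteKeys bool, batchSize int) error {
	if enableDeleteKeys {
		// Delete keys in batches under the prefix.
		err, _, _ := common.DeleteKeysWithPrefix(prefix, db, batchSize, keysInPartition)
		return err
	}
	// Drop the whole prefix in one shot.
	if err := db.DropPrefix([]byte(prefix)); err != nil {
		return err
	}
	// Internal !badger!move keys shadow the dropped prefix and must be dropped too (dgraph-io/badger#1288).
	return db.DropPrefix([]byte("!badger!move" + prefix))
}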
|
|
|
@ -72,6 +72,7 @@ func RealMain() error {
|
|||
BadgerSyncWrites: conf.BadgerSyncWrites,
|
||||
BadgerLevelOneSize: conf.BadgerLevelOneSize,
|
||||
BadgerLevSizeMultiplier: conf.BadgerLevSizeMultiplier,
|
||||
BadgerVLogFileIOMapping: conf.BadgerVLogFileIOMapping,
|
||||
}
|
||||
db, err := untyped.OpenStore(factory, storeConfig)
|
||||
if err != nil {
|
||||
|
@ -132,6 +133,7 @@ func RealMain() error {
|
|||
BadgerVLogGCFreq: conf.BadgerVLogGCFreq,
|
||||
DeletionBatchSize: conf.DeletionBatchSize,
|
||||
GCThreshold: conf.ThresholdForGC,
|
||||
EnableDeleteKeys: conf.EnableDeleteKeys,
|
||||
}
|
||||
storemgr = storemanager.NewStoreManager(tables, storeCfg, fs)
|
||||
storemgr.Start()
|
||||
|
|
|
@ -22,6 +22,7 @@ type Tables interface {
|
|||
GetMinAndMaxPartition() (bool, string, string, error)
|
||||
GetTableNames() []string
|
||||
GetTables() []interface{}
|
||||
GetMinAndMaxPartitionWithTxn(badgerwrap.Txn) (bool, string, string)
|
||||
}
|
||||
|
||||
type MinMaxPartitionsGetter interface {
|
||||
|
@ -67,31 +68,40 @@ func (t *tablesImpl) Db() badgerwrap.DB {
|
|||
}
|
||||
|
||||
func (t *tablesImpl) GetMinAndMaxPartition() (bool, string, string, error) {
|
||||
allPartitions := []string{}
|
||||
var ok bool
|
||||
var minPartition string
|
||||
var maxPartition string
|
||||
err := t.db.View(func(txn badgerwrap.Txn) error {
|
||||
for _, table := range t.GetTables() {
|
||||
coerced, canCoerce := table.(MinMaxPartitionsGetter)
|
||||
if !canCoerce {
|
||||
glog.Errorf("Expected type to implement GetMinMaxPartitions but failed")
|
||||
continue
|
||||
}
|
||||
ok, minPar, maxPar := coerced.GetMinMaxPartitions(txn)
|
||||
if ok {
|
||||
allPartitions = append(allPartitions, minPar, maxPar)
|
||||
}
|
||||
}
|
||||
ok, minPartition, maxPartition = t.GetMinAndMaxPartitionWithTxn(txn)
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return false, "", "", err
|
||||
}
|
||||
|
||||
return ok, minPartition, maxPartition, nil
|
||||
}
|
||||
|
||||
func (t *tablesImpl) GetMinAndMaxPartitionWithTxn(txn badgerwrap.Txn) (bool, string, string) {
|
||||
allPartitions := []string{}
|
||||
for _, table := range t.GetTables() {
|
||||
coerced, canCoerce := table.(MinMaxPartitionsGetter)
|
||||
if !canCoerce {
|
||||
glog.Errorf("Expected type to implement GetMinMaxPartitions but failed")
|
||||
continue
|
||||
}
|
||||
ok, minPar, maxPar := coerced.GetMinMaxPartitions(txn)
|
||||
if ok {
|
||||
allPartitions = append(allPartitions, minPar, maxPar)
|
||||
}
|
||||
}
|
||||
if len(allPartitions) == 0 {
|
||||
return false, "", "", nil
|
||||
return false, "", ""
|
||||
}
|
||||
|
||||
sort.Strings(allPartitions)
|
||||
return true, allPartitions[0], allPartitions[len(allPartitions)-1], nil
|
||||
return true, allPartitions[0], allPartitions[len(allPartitions)-1]
|
||||
}
|
||||
|
||||
func (t *tablesImpl) GetTableNames() []string {
|
||||
|
|
|
@ -29,7 +29,7 @@ type DB interface {
|
|||
Tables(withKeysCount bool) []badger.TableInfo
|
||||
Backup(w io.Writer, since uint64) (uint64, error)
|
||||
// DropAll() error
|
||||
// Flatten(workers int) error
|
||||
Flatten(workers int) error
|
||||
// GetMergeOperator(key []byte, f MergeFunc, dur time.Duration) *MergeOperator
|
||||
// GetSequence(key []byte, bandwidth uint64) (*Sequence, error)
|
||||
// KeySplits(prefix []byte) []string
|
||||
|
|
|
@ -80,6 +80,10 @@ func (b *BadgerDb) Backup(w io.Writer, since uint64) (uint64, error) {
|
|||
return b.db.Backup(w, since)
|
||||
}
|
||||
|
||||
func (b *BadgerDb) Flatten(workers int) error {
|
||||
return b.db.Flatten(workers)
|
||||
}
|
||||
|
||||
func (b *BadgerDb) Load(r io.Reader, maxPendingWrites int) error {
|
||||
return b.db.Load(r, maxPendingWrites)
|
||||
}
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
package badgerwrap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
@ -79,10 +78,6 @@ func (b *MockDb) DropPrefix(prefix []byte) error {
|
|||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if len(b.data) == 0 {
|
||||
return fmt.Errorf("enable to delete prefix: %s from empty table", string(prefix))
|
||||
}
|
||||
|
||||
for key, _ := range b.data {
|
||||
exists := strings.HasPrefix(key, string(prefix))
|
||||
if exists {
|
||||
|
@ -110,6 +105,10 @@ func (b *MockDb) Tables(withKeysCount bool) []badger.TableInfo {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *MockDb) Flatten(workers int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *MockDb) Backup(w io.Writer, since uint64) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ package untyped
|
|||
import (
|
||||
"fmt"
|
||||
badger "github.com/dgraph-io/badger/v2"
|
||||
"github.com/dgraph-io/badger/v2/options"
|
||||
"github.com/golang/glog"
|
||||
"github.com/salesforce/sloop/pkg/sloop/store/untyped/badgerwrap"
|
||||
"os"
|
||||
|
@ -31,6 +32,7 @@ type Config struct {
|
|||
BadgerSyncWrites bool
|
||||
BadgerLevelOneSize int64
|
||||
BadgerLevSizeMultiplier int
|
||||
BadgerVLogFileIOMapping bool
|
||||
}
|
||||
|
||||
func OpenStore(factory badgerwrap.Factory, config *Config) (badgerwrap.DB, error) {
|
||||
|
@ -78,6 +80,8 @@ func OpenStore(factory badgerwrap.Factory, config *Config) (badgerwrap.DB, error
|
|||
opts = opts.WithNumLevelZeroTablesStall(config.BadgerNumL0TablesStall)
|
||||
}
|
||||
|
||||
opts.WithSyncWrites(config.BadgerSyncWrites)
|
||||
|
||||
if config.BadgerLevelOneSize != 0 {
|
||||
opts = opts.WithLevelOneSize(config.BadgerLevelOneSize)
|
||||
}
|
||||
|
@ -86,13 +90,21 @@ func OpenStore(factory badgerwrap.Factory, config *Config) (badgerwrap.DB, error
|
|||
opts = opts.WithLevelSizeMultiplier(config.BadgerLevSizeMultiplier)
|
||||
}
|
||||
|
||||
if config.BadgerVLogFileIOMapping {
|
||||
opts = opts.WithValueLogLoadingMode(options.FileIO)
|
||||
}
|
||||
|
||||
opts = opts.WithSyncWrites(config.BadgerSyncWrites)
|
||||
|
||||
// https://github.com/dgraph-io/badger/issues/1228
|
||||
opts = opts.WithNumVersionsToKeep(0)
|
||||
|
||||
db, err := factory.Open(opts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("badger.OpenStore failed with: %v", err)
|
||||
}
|
||||
|
||||
db.Flatten(5)
|
||||
glog.Infof("BadgerDB Options: %+v", opts)
|
||||
|
||||
partitionDuration = config.ConfigPartitionDuration
|
||||
|
|
|
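// A condensed sketch of the badger v2 options this change ends up applying, assuming
// defaults for everything not shown; the data path is a placeholder. WithNumVersionsToKeep(0)
// is what lets DropPrefix actually reclaim space (dgraph-io/badger#1228), and Flatten at
// startup compacts leftover levels after a restart.
//
//   opts := badger.DefaultOptions("/tmp/sloop-data").
//       WithSyncWrites(false).
//       WithValueLogLoadingMode(options.FileIO).
//       WithNumVersionsToKeep(0)
//   db, err := badger.Open(opts)
//   if err != nil {
//       return err
//   }
//   _ = db.Flatten(5) // 5 worker goroutines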
@ -5,6 +5,7 @@ import (
|
|||
"github.com/golang/glog"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/salesforce/sloop/pkg/sloop/common"
|
||||
"github.com/salesforce/sloop/pkg/sloop/store/untyped/badgerwrap"
|
||||
"github.com/spf13/afero"
|
||||
"os"
|
||||
|
@ -29,6 +30,7 @@ var (
|
|||
metricCleanedBadgerLsmSizeMb = promauto.NewGauge(prometheus.GaugeOpts{Name: "sloop_delta_aftergc_badger_lsmsizemb"})
|
||||
metricCleanedBadgerVLogFileCount = promauto.NewGauge(prometheus.GaugeOpts{Name: "sloop_delta_aftergc_badger_vlogfilecount"})
|
||||
metricCleanedBadgerVLogSizeMb = promauto.NewGauge(prometheus.GaugeOpts{Name: "sloop_delta_aftergc_badger_vlogsizemb"})
|
||||
metricTotalKeysCount = promauto.NewGauge(prometheus.GaugeOpts{Name: "sloop_total_key_count"})
|
||||
)
|
||||
|
||||
type storeStats struct {
|
||||
|
@ -40,6 +42,7 @@ type storeStats struct {
|
|||
DiskVlogFileCount int
|
||||
LevelToKeyCount map[int]uint64
|
||||
LevelToTableCount map[int]int
|
||||
TotalKeyCount uint64
|
||||
}
|
||||
|
||||
func generateStats(storeRoot string, db badgerwrap.DB, fs *afero.Afero) *storeStats {
|
||||
|
@ -58,7 +61,7 @@ func generateStats(storeRoot string, db badgerwrap.DB, fs *afero.Afero) *storeSt
|
|||
ret.DiskLsmBytes = int64(extByteCount[sstExt])
|
||||
ret.DiskVlogFileCount = extFileCount[vlogExt]
|
||||
ret.DiskVlogBytes = int64(extByteCount[vlogExt])
|
||||
|
||||
ret.TotalKeyCount = common.GetTotalKeyCount(db, "")
|
||||
tables := db.Tables(true)
|
||||
for _, table := range tables {
|
||||
glog.V(2).Infof("BadgerDB TABLE id=%v keycount=%v level=%v left=%q right=%q", table.ID, table.KeyCount, table.Level, string(table.Left), string(table.Right))
|
||||
|
@ -77,6 +80,9 @@ func getDirSizeRecursive(root string, fs *afero.Afero) (uint64, map[string]int,
|
|||
var extByteCount = make(map[string]uint64)
|
||||
|
||||
err := fs.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
if info == nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
totalSize += uint64(info.Size())
|
||||
ext := filepath.Ext(path)
|
||||
|
@ -104,6 +110,7 @@ func emitMetrics(stats *storeStats) {
|
|||
metricBadgerLsmSizeMb.Set(float64(stats.DiskLsmBytes / 1024 / 1024))
|
||||
metricBadgerVLogFileCount.Set(float64(stats.DiskVlogFileCount))
|
||||
metricBadgerVLogSizeMb.Set(float64(stats.DiskVlogBytes / 1024 / 1024))
|
||||
metricTotalKeysCount.Set(float64(stats.TotalKeyCount))
|
||||
}
|
||||
|
||||
func emitGCMetrics(stats *storeStats) {
|
||||
|
|
|
@ -15,7 +15,6 @@ import (
|
|||
"github.com/salesforce/sloop/pkg/sloop/common"
|
||||
"github.com/salesforce/sloop/pkg/sloop/store/typed"
|
||||
"github.com/salesforce/sloop/pkg/sloop/store/untyped"
|
||||
"github.com/salesforce/sloop/pkg/sloop/store/untyped/badgerwrap"
|
||||
"github.com/spf13/afero"
|
||||
"math"
|
||||
"sync"
|
||||
|
@ -49,6 +48,7 @@ type Config struct {
|
|||
BadgerVLogGCFreq time.Duration
|
||||
DeletionBatchSize int
|
||||
GCThreshold float64
|
||||
EnableDeleteKeys bool
|
||||
}
|
||||
|
||||
type StoreManager struct {
|
||||
|
@ -101,7 +101,7 @@ func (sm *StoreManager) gcLoop() {
|
|||
metricGcRunCount.Inc()
|
||||
before := time.Now()
|
||||
metricGcRunning.Set(1)
|
||||
cleanUpPerformed, numOfDeletedKeys, numOfKeysToDelete, err := doCleanup(sm.tables, sm.config.TimeLimit, sm.config.SizeLimitBytes, sm.stats, sm.config.DeletionBatchSize, sm.config.GCThreshold)
|
||||
cleanUpPerformed, numOfDeletedKeys, numOfKeysToDelete, err := doCleanup(sm.tables, sm.config.TimeLimit, sm.config.SizeLimitBytes, sm.stats, sm.config.DeletionBatchSize, sm.config.GCThreshold, sm.config.EnableDeleteKeys)
|
||||
metricGcCleanUpPerformed.Set(common.BoolToFloat(cleanUpPerformed))
|
||||
metricGcDeletedNumberOfKeys.Set(float64(numOfDeletedKeys))
|
||||
metricGcNumberOfKeysToDelete.Set(float64(numOfKeysToDelete))
|
||||
|
@ -141,7 +141,7 @@ func (sm *StoreManager) vlogGcLoop() {
|
|||
metricValueLogGcRunning.Set(0)
|
||||
metricValueLogGcRunCount.Add(1)
|
||||
metricValueLogGcLatency.Set(time.Since(before).Seconds())
|
||||
glog.Infof("RunValueLogGC(%v) run took %v and returned %q", sm.config.BadgerDiscardRatio, time.Since(before), err)
|
||||
glog.Infof("RunValueLogGC(%v) run took %v and returned '%v'", sm.config.BadgerDiscardRatio, time.Since(before), err)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
@ -173,14 +173,53 @@ func (sm *StoreManager) refreshStats() *storeStats {
|
|||
return sm.stats
|
||||
}
|
||||
|
||||
func doCleanup(tables typed.Tables, timeLimit time.Duration, sizeLimitBytes int, stats *storeStats, deletionBatchSize int, gcThreshold float64) (bool, int64, int64, error) {
|
||||
func doCleanup(tables typed.Tables, timeLimit time.Duration, sizeLimitBytes int, stats *storeStats, deletionBatchSize int, gcThreshold float64, enableDeletePrefix bool) (bool, int64, int64, error) {
|
||||
anyCleanupPerformed := false
|
||||
var totalNumOfDeletedKeys int64 = 0
|
||||
var totalNumOfKeysToDelete int64 = 0
|
||||
partitionsToDelete, partitionsInfoMap := getPartitionsToDelete(tables, timeLimit, sizeLimitBytes, stats.DiskSizeBytes, gcThreshold)
|
||||
|
||||
beforeGCTime := time.Now()
|
||||
for _, partitionToDelete := range partitionsToDelete {
|
||||
partitionInfo := partitionsInfoMap[partitionToDelete]
|
||||
numOfDeletedKeysForPrefix, numOfKeysToDeleteForPrefix, errMessages := deletePartition(partitionToDelete, tables, deletionBatchSize, enableDeletePrefix, partitionInfo)
|
||||
anyCleanupPerformed = true
|
||||
if len(errMessages) != 0 {
|
||||
var errMsg string
|
||||
for _, er := range errMessages {
|
||||
errMsg += er + ","
|
||||
}
|
||||
return false, totalNumOfDeletedKeys, totalNumOfKeysToDelete, fmt.Errorf(errMsg)
|
||||
}
|
||||
|
||||
glog.Infof("Removed number of keys so far: %v ", totalNumOfDeletedKeys)
|
||||
totalNumOfDeletedKeys += int64(numOfDeletedKeysForPrefix)
|
||||
totalNumOfKeysToDelete += int64(numOfKeysToDeleteForPrefix)
|
||||
|
||||
}
|
||||
|
||||
elapsed := time.Since(beforeGCTime)
|
||||
glog.Infof("Deletion/dropPrefix of prefixes took %v:", elapsed)
|
||||
|
||||
if enableDeletePrefix {
|
||||
beforeDropPrefix := time.Now()
|
||||
glog.Infof("Deleted %d keys", totalNumOfDeletedKeys)
|
||||
// dropping the empty prefix to force compaction after keys have been deleted
|
||||
err := tables.Db().DropPrefix([]byte{})
|
||||
glog.Infof("Drop prefix took %v with error: %v", time.Since(beforeDropPrefix), err)
|
||||
}
|
||||
return anyCleanupPerformed, totalNumOfDeletedKeys, totalNumOfKeysToDelete, nil
|
||||
}
|
||||
|
||||
func getMinAndMaxPartitionsAndSetMetrics(tables typed.Tables) (bool, string, string) {
|
||||
ok, minPartition, maxPartition, err := tables.GetMinAndMaxPartition()
|
||||
if err != nil {
|
||||
return false, 0, 0, fmt.Errorf("failed to get min partition : %s, max partition: %s, err:%v", minPartition, maxPartition, err)
|
||||
glog.Errorf("failed to get min partition : %s, max partition: %s, err:%v", minPartition, maxPartition, err)
|
||||
return false, "", ""
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return false, 0, 0, nil
|
||||
return false, "", ""
|
||||
}
|
||||
|
||||
minPartitionAge, err := untyped.GetAgeOfPartitionInHours(minPartition)
|
||||
|
@ -193,72 +232,72 @@ func doCleanup(tables typed.Tables, timeLimit time.Duration, sizeLimitBytes int,
|
|||
metricAgeOfMaximumPartition.Set(maxPartitionAge)
|
||||
}
|
||||
|
||||
var totalNumOfDeletedKeys int64 = 0
|
||||
var totalNumOfKeysToDelete int64 = 0
|
||||
anyCleanupPerformed := false
|
||||
minPartitionStartTime, err := untyped.GetTimeForPartition(minPartition)
|
||||
if err != nil {
|
||||
return false, 0, 0, err
|
||||
}
|
||||
|
||||
var numOfKeysToDeleteForFileSizeCondition int64 = 0
|
||||
isFileSizeConditionMet, garbageCollectionRatio := cleanUpFileSizeCondition(stats, sizeLimitBytes, gcThreshold)
|
||||
|
||||
if isFileSizeConditionMet {
|
||||
numOfKeysToDeleteForFileSizeCondition = getNumberOfKeysToDelete(tables.Db(), garbageCollectionRatio)
|
||||
}
|
||||
|
||||
beforeGCTime := time.Now()
|
||||
for cleanUpTimeCondition(minPartition, maxPartition, timeLimit) || numOfKeysToDeleteForFileSizeCondition > 0 {
|
||||
|
||||
numOfDeletedKeysforPrefix, numOfKeysToDeleteForPrefix, errMessages := deletePartition(minPartition, tables, deletionBatchSize)
|
||||
totalNumOfDeletedKeys += int64(numOfDeletedKeysforPrefix)
|
||||
totalNumOfKeysToDelete += int64(numOfKeysToDeleteForPrefix)
|
||||
anyCleanupPerformed = true
|
||||
minPartitionStartTime = minPartitionStartTime.Add(untyped.GetPartitionDuration())
|
||||
minPartition = untyped.GetPartitionId(minPartitionStartTime)
|
||||
|
||||
minPartitionAge, err := untyped.GetAgeOfPartitionInHours(minPartition)
|
||||
if err != nil || minPartitionAge < 0 {
|
||||
return false, totalNumOfDeletedKeys, totalNumOfKeysToDelete, fmt.Errorf("minimum partition age: %f cannot be less than zero", minPartitionAge)
|
||||
}
|
||||
|
||||
metricAgeOfMinimumPartition.Set(minPartitionAge)
|
||||
if len(errMessages) != 0 {
|
||||
var errMsg string
|
||||
for _, er := range errMessages {
|
||||
errMsg += er + ","
|
||||
}
|
||||
return false, totalNumOfDeletedKeys, totalNumOfKeysToDelete, fmt.Errorf(errMsg)
|
||||
}
|
||||
|
||||
glog.Infof("Deleted Number of keys so far: %v ", totalNumOfDeletedKeys)
|
||||
if numOfKeysToDeleteForFileSizeCondition > totalNumOfDeletedKeys {
|
||||
numOfKeysToDeleteForFileSizeCondition -= totalNumOfDeletedKeys
|
||||
} else {
|
||||
// Deleted number of keys is greater or equal. We have reached the required deletion.
|
||||
numOfKeysToDeleteForFileSizeCondition = 0
|
||||
}
|
||||
|
||||
glog.Infof("Remaining number of keys to delete: %v ", numOfKeysToDeleteForFileSizeCondition)
|
||||
}
|
||||
|
||||
elapsed := time.Since(beforeGCTime)
|
||||
glog.Infof("Deletion of prefixes took %v and removed %d keys with error: %v", elapsed, totalNumOfDeletedKeys, err)
|
||||
|
||||
beforeDropPrefix := time.Now()
|
||||
|
||||
// dropping prefix to force compression
|
||||
err = tables.Db().DropPrefix([]byte{})
|
||||
glog.Infof("Drop prefix took %v with error: %v", time.Since(beforeDropPrefix), err)
|
||||
return anyCleanupPerformed, totalNumOfDeletedKeys, totalNumOfKeysToDelete, nil
|
||||
return true, minPartition, maxPartition
|
||||
}
|
||||
|
||||
func deletePartition(minPartition string, tables typed.Tables, deletionBatchSize int) (int, int, []string) {
|
||||
totalNumOfDeletedKeysforPrefix := 0
|
||||
totalNumOfKeysToDeleteForPrefix := 0
|
||||
numOfDeletedKeysforPrefix := 0
|
||||
numOfKeysToDeleteForPrefix := 0
|
||||
func getPartitionsToDeleteWhenSizeConditionHasBeenMet(sizeLimitBytes int, diskSizeBytes int64, gcThreshold float64, totalKeysCount uint64, partitionMap map[string]*common.PartitionInfo, sortedPartitionsList []string) []string {
|
||||
var partitionsToDelete []string
|
||||
var keysToBeCollected uint64 = 0
|
||||
garbageCollectionRatio := getGarbageCollectionRatio(float64(diskSizeBytes), sizeLimitBytes, gcThreshold)
|
||||
numOfKeysToDeleteForFileSizeCondition := getNumberOfKeysToDelete(garbageCollectionRatio, totalKeysCount)
|
||||
index := 0
|
||||
for keysToBeCollected < numOfKeysToDeleteForFileSizeCondition {
|
||||
keysToBeCollected += partitionMap[sortedPartitionsList[index]].TotalKeyCount
|
||||
partitionsToDelete = append(partitionsToDelete, sortedPartitionsList[index])
|
||||
index++
|
||||
}
|
||||
|
||||
return partitionsToDelete
|
||||
}
|
||||
|
||||
func getPartitionsToDelete(tables typed.Tables, timeLimit time.Duration, sizeLimitBytes int, diskSizeBytes int64, gcThreshold float64) ([]string, map[string]*common.PartitionInfo) {
|
||||
|
||||
ok, minPartition, maxPartition := getMinAndMaxPartitionsAndSetMetrics(tables)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// check if size condition has been met
|
||||
sizeConditionMet := hasFilesOnDiskExceededThreshold(diskSizeBytes, sizeLimitBytes, gcThreshold)
|
||||
|
||||
needCleanUp := sizeConditionMet || cleanUpTimeCondition(minPartition, maxPartition, timeLimit)
|
||||
if !needCleanUp {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
partitionMap, totalKeysCount := common.GetPartitionsInfo(tables.Db())
|
||||
var partitionsToDelete []string
|
||||
sortedPartitionsList := common.GetSortedPartitionIDs(partitionMap)
|
||||
|
||||
if sizeConditionMet {
|
||||
partitionsToDelete = getPartitionsToDeleteWhenSizeConditionHasBeenMet(sizeLimitBytes, diskSizeBytes, gcThreshold, totalKeysCount, partitionMap, sortedPartitionsList)
|
||||
}
|
||||
|
||||
// if all the partitions have to be cleaned up there is no need to further check for time condition
|
||||
if len(partitionsToDelete) == len(sortedPartitionsList) {
|
||||
return partitionsToDelete, partitionMap
|
||||
}
|
||||
|
||||
// Check whether the time condition adds more partitions beyond those already collected for the size condition
|
||||
currentLastPartitionToDeleteIndex := len(partitionsToDelete)
|
||||
for currentLastPartitionToDeleteIndex < len(sortedPartitionsList) && cleanUpTimeCondition(sortedPartitionsList[currentLastPartitionToDeleteIndex], maxPartition, timeLimit) {
|
||||
partitionsToDelete = append(partitionsToDelete, sortedPartitionsList[currentLastPartitionToDeleteIndex])
|
||||
currentLastPartitionToDeleteIndex++
|
||||
}
|
||||
|
||||
minPartitionAge, err := untyped.GetAgeOfPartitionInHours(sortedPartitionsList[currentLastPartitionToDeleteIndex])
|
||||
if err == nil {
|
||||
metricAgeOfMinimumPartition.Set(minPartitionAge)
|
||||
}
|
||||
|
||||
return partitionsToDelete, partitionMap
|
||||
}
|
||||
|
||||
func deletePartition(minPartition string, tables typed.Tables, deletionBatchSize int, enableDeleteKeys bool, partitionInfo *common.PartitionInfo) (uint64, uint64, []string) {
|
||||
var totalNumOfDeletedKeysForPrefix uint64 = 0
|
||||
var totalNumOfKeysToDeleteForPrefix uint64 = 0
|
||||
var numOfDeletedKeysForPrefix uint64 = 0
|
||||
var numOfKeysToDeleteForPrefix uint64 = 0
|
||||
|
||||
partStart, partEnd, err := untyped.GetTimeRangeForPartition(minPartition)
|
||||
glog.Infof("GC removing partition %q with data from %v to %v (err %v)", minPartition, partStart, partEnd, err)
|
||||
|
@ -266,19 +305,32 @@ func deletePartition(minPartition string, tables typed.Tables, deletionBatchSize
|
|||
for _, tableName := range tables.GetTableNames() {
|
||||
prefix := fmt.Sprintf("/%s/%s", tableName, minPartition)
|
||||
start := time.Now()
|
||||
err, numOfDeletedKeysforPrefix, numOfKeysToDeleteForPrefix = common.DeleteKeysWithPrefix([]byte(prefix), tables.Db(), deletionBatchSize)
|
||||
metricGcDeletedNumberOfKeysByTable.WithLabelValues(fmt.Sprintf("%v", tableName)).Set(float64(numOfDeletedKeysforPrefix))
|
||||
numberOfKeysToRemove := partitionInfo.TableNameToKeyCountMap[tableName]
|
||||
if enableDeleteKeys {
|
||||
err, numOfDeletedKeysForPrefix, numOfKeysToDeleteForPrefix = common.DeleteKeysWithPrefix(prefix, tables.Db(), deletionBatchSize, numberOfKeysToRemove)
|
||||
metricGcDeletedNumberOfKeysByTable.WithLabelValues(fmt.Sprintf("%v", tableName)).Set(float64(numOfDeletedKeysForPrefix))
|
||||
} else {
|
||||
err = tables.Db().DropPrefix([]byte(prefix))
|
||||
|
||||
// !badger!move keys for the given prefix should also be cleaned up. For details: https://github.com/dgraph-io/badger/issues/1288
|
||||
err = tables.Db().DropPrefix([]byte("!badger!move" + prefix))
|
||||
|
||||
// the number of deleted keys equals the number of keys to delete for dropPrefix, since the tables are locked while prefixes are dropped
|
||||
numOfDeletedKeysForPrefix = numberOfKeysToRemove
|
||||
numOfKeysToDeleteForPrefix = numberOfKeysToRemove
|
||||
}
|
||||
|
||||
elapsed := time.Since(start)
|
||||
glog.Infof("Call to DeleteKeysWithPrefix(%v) took %v and removed %d keys with error: %v", prefix, elapsed, numOfDeletedKeysforPrefix, err)
|
||||
glog.Infof("Call to DropPrefix(%v) took %v and removed %d keys with error: %v", prefix, elapsed, numOfDeletedKeysForPrefix, err)
|
||||
if err != nil {
|
||||
errMessages = append(errMessages, fmt.Sprintf("failed to cleanup with min key: %s, elapsed: %v,err: %v,", prefix, elapsed, err))
|
||||
}
|
||||
|
||||
totalNumOfDeletedKeysforPrefix += numOfDeletedKeysforPrefix
|
||||
totalNumOfDeletedKeysForPrefix += numOfDeletedKeysForPrefix
|
||||
totalNumOfKeysToDeleteForPrefix += numOfKeysToDeleteForPrefix
|
||||
}
|
||||
|
||||
return totalNumOfDeletedKeysforPrefix, totalNumOfKeysToDeleteForPrefix, errMessages
|
||||
return totalNumOfDeletedKeysForPrefix, totalNumOfKeysToDeleteForPrefix, errMessages
|
||||
}
|
||||
|
||||
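// A small sketch of the prefixes handled per table for one garbage-collected partition,
// assuming the "/<table>/<partition>" key layout built above; "eventcount" is one of the
// real table names and somePartitionId is a placeholder. DropPrefix is issued twice:
// once for the sloop prefix and once for its internal !badger!move shadow
// (dgraph-io/badger#1288), which otherwise is never cleaned up.
//
//   prefix := fmt.Sprintf("/%s/%s", "eventcount", somePartitionId)
//   _ = tables.Db().DropPrefix([]byte(prefix))
//   _ = tables.Db().DropPrefix([]byte("!badger!move" + prefix))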
func cleanUpTimeCondition(minPartition string, maxPartition string, timeLimit time.Duration) bool {
|
||||
|
@ -303,25 +355,39 @@ func cleanUpTimeCondition(minPartition string, maxPartition string, timeLimit ti
|
|||
return false
|
||||
}
|
||||
|
||||
func cleanUpFileSizeCondition(stats *storeStats, sizeLimitBytes int, gcThreshold float64) (bool, float64) {
|
||||
|
||||
// gcThreshold is the threshold that, when reached, triggers garbage collection. We want to proactively start GC when the size limit is about to be hit.
|
||||
sizeThreshold := gcThreshold * float64(sizeLimitBytes)
|
||||
currentDiskSize := float64(stats.DiskSizeBytes)
|
||||
if currentDiskSize > sizeThreshold {
|
||||
glog.Infof("Start cleaning up because current file size: %v exceeds file size threshold: %v", stats.DiskSizeBytes, sizeThreshold)
|
||||
|
||||
garbageCollectionRatio := (currentDiskSize - sizeThreshold) / currentDiskSize
|
||||
return true, garbageCollectionRatio
|
||||
func cleanUpFileSizeCondition(stats *storeStats, sizeLimitBytes int, gcThreshold float64, enableDeleteKeys bool, numOfKeysToDeleteForFileSizeCondition int64) bool {
|
||||
if enableDeleteKeys {
|
||||
return numOfKeysToDeleteForFileSizeCondition > 0
|
||||
} else {
|
||||
return hasFilesOnDiskExceededThreshold(stats.DiskSizeBytes, sizeLimitBytes, gcThreshold)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Can not clean up, disk size: %v is not exceeding size limit: %v yet", stats.DiskSizeBytes, uint64(sizeLimitBytes))
|
||||
return false, 0.0
|
||||
}
|
||||
|
||||
func getNumberOfKeysToDelete(db badgerwrap.DB, garbageCollectionRatio float64) int64 {
|
||||
totalKeyCount := float64(common.GetTotalKeyCount(db))
|
||||
metricTotalNumberOfKeys.Set(totalKeyCount)
|
||||
func hasFilesOnDiskExceededThreshold(diskSizeBytes int64, sizeLimitBytes int, gcThreshold float64) bool {
|
||||
|
||||
// gcThreshold is the threshold that, when reached, triggers garbage collection. We want to proactively start GC when the size limit is about to be hit.
|
||||
sizeThreshold := gcThreshold * float64(sizeLimitBytes)
|
||||
currentDiskSize := float64(diskSizeBytes)
|
||||
if currentDiskSize > sizeThreshold {
|
||||
glog.Infof("Start cleaning up because current file size: %v exceeds file size threshold: %v", diskSizeBytes, sizeThreshold)
|
||||
return true
|
||||
}
|
||||
glog.V(2).Infof("Can not clean up, disk size: %v is not exceeding size limit: %v yet", diskSizeBytes, uint64(sizeLimitBytes))
|
||||
return false
|
||||
}
|
||||
|
||||
func getGarbageCollectionRatio(currentDiskSize float64, sizeLimitBytes int, gcThreshold float64) float64 {
|
||||
sizeThreshold := gcThreshold * float64(sizeLimitBytes)
|
||||
if currentDiskSize > sizeThreshold {
|
||||
|
||||
garbageCollectionRatio := (currentDiskSize - sizeThreshold) / currentDiskSize
|
||||
return garbageCollectionRatio
|
||||
}
|
||||
return 0.0
|
||||
}
|
||||
|
||||
func getNumberOfKeysToDelete(garbageCollectionRatio float64, totalKeyCount uint64) uint64 {
|
||||
metricTotalNumberOfKeys.Set(float64(totalKeyCount))
|
||||
|
||||
if garbageCollectionRatio <= 0 || garbageCollectionRatio > 1 {
|
||||
// print float here and below
|
||||
|
@ -329,6 +395,6 @@ func getNumberOfKeysToDelete(db badgerwrap.DB, garbageCollectionRatio float64) i
|
|||
return 0
|
||||
}
|
||||
|
||||
keysToDelete := garbageCollectionRatio * totalKeyCount
|
||||
return int64(math.Ceil(keysToDelete))
|
||||
keysToDelete := garbageCollectionRatio * float64(totalKeyCount)
|
||||
return uint64(math.Ceil(keysToDelete))
|
||||
}
|
||||
|
|
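// Worked example of the size-based GC math above, using the same numbers as the tests
// below: with 1000 bytes on disk, a 900 byte size limit and a 0.9 threshold, the size
// threshold is 0.9 * 900 = 810, so the ratio is (1000 - 810) / 1000 = 0.19 and, for
// 1000 total keys, ceil(0.19 * 1000) = 190 keys are slated for deletion.
//
//   ratio := getGarbageCollectionRatio(1000, 900, 0.9) // 0.19
//   keys := getNumberOfKeysToDelete(ratio, 1000)       // 190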
|
@ -37,9 +37,12 @@ func Test_cleanUpFileSizeCondition_True(t *testing.T) {
|
|||
DiskSizeBytes: 10,
|
||||
}
|
||||
|
||||
flag, ratio := cleanUpFileSizeCondition(stats, 5, 1)
|
||||
flag := cleanUpFileSizeCondition(stats, 5, 1, true, 1000)
|
||||
assert.True(t, flag)
|
||||
assert.Equal(t, 0.5, ratio)
|
||||
|
||||
flag = cleanUpFileSizeCondition(stats, 5, 1, false, 0)
|
||||
assert.True(t, flag)
|
||||
|
||||
}
|
||||
|
||||
func Test_cleanUpFileSizeCondition_False(t *testing.T) {
|
||||
|
@ -47,9 +50,11 @@ func Test_cleanUpFileSizeCondition_False(t *testing.T) {
|
|||
DiskSizeBytes: 10,
|
||||
}
|
||||
|
||||
flag, ratio := cleanUpFileSizeCondition(stats, 100, 0.8)
|
||||
flag := cleanUpFileSizeCondition(stats, 100, 0.8, true, 0)
|
||||
assert.False(t, flag)
|
||||
|
||||
flag = cleanUpFileSizeCondition(stats, 100, 0.8, false, 0)
|
||||
assert.False(t, flag)
|
||||
assert.Equal(t, 0.0, ratio)
|
||||
}
|
||||
|
||||
func Test_cleanUpTimeCondition(t *testing.T) {
|
||||
|
@ -77,7 +82,7 @@ func help_get_db(t *testing.T) badgerwrap.DB {
|
|||
key1 := typed.NewWatchTableKey(partitionId, someKind+"a", someNamespace, someName, someTs).String()
|
||||
key2 := typed.NewResourceSummaryKey(someTs, someKind+"b", someNamespace, someName, someUid).String()
|
||||
key3 := typed.NewEventCountKey(someTs, someKind+"c", someNamespace, someName, someUid).String()
|
||||
key4 := typed.NewWatchActivityKey(untyped.GetPartitionId(someTs), someKind+"d", someNamespace, someName, someUid).String()
|
||||
key4 := typed.NewWatchActivityKey(partitionId, someKind+"d", someNamespace, someName, someUid).String()
|
||||
|
||||
wtval := &typed.KubeWatchResult{Kind: someKind}
|
||||
rtval := &typed.ResourceSummary{DeletedAtEnd: false}
|
||||
|
@ -124,7 +129,7 @@ func Test_doCleanup_true(t *testing.T) {
|
|||
DiskSizeBytes: 10,
|
||||
}
|
||||
|
||||
flag, _, _, err := doCleanup(tables, time.Hour, 2, stats, 10, 1)
|
||||
flag, _, _, err := doCleanup(tables, time.Hour, 2, stats, 10, 1, false)
|
||||
assert.True(t, flag)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
@ -137,25 +142,63 @@ func Test_doCleanup_false(t *testing.T) {
|
|||
DiskSizeBytes: 10,
|
||||
}
|
||||
|
||||
flag, _, _, err := doCleanup(tables, time.Hour, 1000, stats, 10, 1)
|
||||
flag, _, _, err := doCleanup(tables, time.Hour, 1000, stats, 10, 1, false)
|
||||
assert.False(t, flag)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func Test_getNumberOfKeysToDelete_Success(t *testing.T) {
|
||||
func Test_getPartitionsToDelete(t *testing.T) {
|
||||
db := help_get_db(t)
|
||||
keysToDelete := getNumberOfKeysToDelete(db, 0.5)
|
||||
assert.Equal(t, int64(2), keysToDelete)
|
||||
tables := typed.NewTableList(db)
|
||||
|
||||
partitionsToDelete, _ := getPartitionsToDelete(tables, time.Hour, 2, 10, 0.9)
|
||||
assert.Equal(t, len(partitionsToDelete), 1)
|
||||
|
||||
partitionsToDelete, _ = getPartitionsToDelete(tables, time.Hour, 20, 10, 0.9)
|
||||
assert.Equal(t, len(partitionsToDelete), 0)
|
||||
}
|
||||
|
||||
func Test_getGarbageCollectionRatio(t *testing.T) {
|
||||
ratio := getGarbageCollectionRatio(1000, 900, 0.9)
|
||||
assert.Equal(t, 0.19, ratio)
|
||||
|
||||
ratio = getGarbageCollectionRatio(1000, 900, 1)
|
||||
assert.Equal(t, 0.1, ratio)
|
||||
|
||||
ratio = getGarbageCollectionRatio(900, 1000, 0.9)
|
||||
assert.Equal(t, 0.0, ratio)
|
||||
}
|
||||
|
||||
func Test_hasFilesOnDiskExceededThreshold(t *testing.T) {
|
||||
hasExceeded := hasFilesOnDiskExceededThreshold(1000, 1000, 0.9)
|
||||
assert.True(t, hasExceeded)
|
||||
|
||||
hasExceeded = hasFilesOnDiskExceededThreshold(1100, 1000, 1)
|
||||
assert.True(t, hasExceeded)
|
||||
|
||||
hasExceeded = hasFilesOnDiskExceededThreshold(900, 1000, 0.9)
|
||||
assert.False(t, hasExceeded)
|
||||
}
|
||||
|
||||
func Test_getNumberOfKeysToDelete(t *testing.T) {
|
||||
numKeysToDelete := getNumberOfKeysToDelete(0, 1000)
|
||||
assert.Equal(t, uint64(0), numKeysToDelete)
|
||||
|
||||
numKeysToDelete = getNumberOfKeysToDelete(0.1, 1000)
|
||||
assert.Equal(t, uint64(100), numKeysToDelete)
|
||||
}
|
||||
|
||||
func Test_getNumberOfKeysToDelete_Success(t *testing.T) {
|
||||
keysToDelete := getNumberOfKeysToDelete(0.5, 4)
|
||||
assert.Equal(t, uint64(2), keysToDelete)
|
||||
}
|
||||
|
||||
func Test_getNumberOfKeysToDelete_Failure(t *testing.T) {
|
||||
db := help_get_db(t)
|
||||
keysToDelete := getNumberOfKeysToDelete(db, 0)
|
||||
assert.Equal(t, int64(0), keysToDelete)
|
||||
keysToDelete := getNumberOfKeysToDelete(0, 4)
|
||||
assert.Equal(t, uint64(0), keysToDelete)
|
||||
}
|
||||
|
||||
func Test_getNumberOfKeysToDelete_TestCeiling(t *testing.T) {
|
||||
db := help_get_db(t)
|
||||
keysToDelete := getNumberOfKeysToDelete(db, 0.33)
|
||||
assert.Equal(t, int64(2), keysToDelete)
|
||||
keysToDelete := getNumberOfKeysToDelete(0.33, 4)
|
||||
assert.Equal(t, uint64(2), keysToDelete)
|
||||
}
|
||||
|
|
|
@ -28,7 +28,14 @@ For full license text, see LICENSE.txt file in the repo root or https://opensour
|
|||
|
||||
<table border="1">
|
||||
<tr><td>Total keys</td><td>{{.TotalKeys}}</td></tr>
|
||||
<tr><td>Total Sloop Keys</td><td><pre>{{.TotalSloopKeys}}</pre></td></tr>
|
||||
<tr><td>Total Estimated Size</td><td><pre>{{.TotalEstimatedSize}}</pre></td></tr>
|
||||
<tr><td>Deleted Keys</td><td><pre>{{.DeletedKeys}}</pre></td></tr>
|
||||
<tr><td>Total Internal Keys</td><td><pre>{{.TotalInternalKeys}}</pre></td></tr>
|
||||
<tr><td>Total Internal Keys Size</td><td><pre>{{.TotalInternalKeysSize}}</pre></td></tr>
|
||||
<tr><td>Total Internal Head Keys</td><td><pre>{{.TotalHeadKeys}}</pre></td></tr>
|
||||
<tr><td>Total Internal Move Keys</td><td><pre>{{.TotalMoveKeys}}</pre></td></tr>
|
||||
<tr><td>Total Internal Discard Keys</td><td><pre>{{.TotalDiscardKeys}}</pre></td></tr>
|
||||
</table>
|
||||
|
||||
<br/>
|
||||
|
|
|
@ -22,6 +22,7 @@ For full license text, see LICENSE.txt file in the repo root or https://opensour
|
|||
<option value="ressum">ressum</option>
|
||||
<option value="eventcount">eventcount</option>
|
||||
<option value="watchactivity">watchactivity</option>
|
||||
<option value="internal">internal</option>
|
||||
</select><br><br>
|
||||
|
||||
<label for="keymatch">Key RegEx Filter:</label><br>
|
||||
|
@ -33,13 +34,19 @@ For full license text, see LICENSE.txt file in the repo root or https://opensour
|
|||
<input type="submit">
|
||||
</form>
|
||||
</td></tr></table>
|
||||
<br/>
|
||||
<br/><br/>
|
||||
|
||||
<table border="1">
|
||||
<tr><td>Total Keys Matched</td><td>{{.KeysMatched}}</td></tr>
|
||||
<tr><td>Total Size of Matched Keys</td><td><pre>{{.TotalSize}}</pre></td></tr>
|
||||
<tr><td>Total Keys Searched</td><td><pre>{{.TotalKeys}}</pre></td></tr>
|
||||
</table>
|
||||
<br/><br/><br/>
|
||||
<b>Key List</b>:<br/>
|
||||
<ol>
|
||||
{{range .}}
|
||||
<li><a href='/debug/view?k={{.}}'>{{.}}</a>
|
||||
{{end}}
|
||||
{{range $key, $value := .Keys}}
|
||||
<li><a href='/debug/view?k={{.}}'>{{.}}</a>
|
||||
{{end}}
|
||||
</ol>
|
||||
</body>
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ For full license text, see LICENSE.txt file in the repo root or https://opensour
|
|||
<th>Left</th>
|
||||
<th>Right</th>
|
||||
<th>ID</th>
|
||||
<th>Size</th>
|
||||
</tr>
|
||||
{{range .}}
|
||||
<tr>
|
||||
|
@ -29,6 +30,7 @@ For full license text, see LICENSE.txt file in the repo root or https://opensour
|
|||
<td>{{.LeftKey}}</td>
|
||||
<td>{{.RightKey}}</td>
|
||||
<td>{{.ID}}</td>
|
||||
<td>{{.Size}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</table>
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type keyView struct {
|
||||
|
@ -123,24 +124,36 @@ func listKeysHandler(tables typed.Tables) http.HandlerFunc {
|
|||
var keys []string
|
||||
|
||||
count := 0
|
||||
totalCount := 0
|
||||
var totalSize int64 = 0
|
||||
err = tables.Db().View(func(txn badgerwrap.Txn) error {
|
||||
keyPrefix := "/" + table + "/"
|
||||
keyPrefix := ""
|
||||
if table != "internal" {
|
||||
keyPrefix = "/" + table + "/"
|
||||
}
|
||||
|
||||
iterOpt := badger.DefaultIteratorOptions
|
||||
iterOpt.Prefix = []byte(keyPrefix)
|
||||
iterOpt.AllVersions = true
|
||||
iterOpt.InternalAccess = true
|
||||
itr := txn.NewIterator(iterOpt)
|
||||
defer itr.Close()
|
||||
|
||||
for itr.Seek([]byte(keyPrefix)); itr.ValidForPrefix([]byte(keyPrefix)); itr.Next() {
|
||||
// TODO: Investigate if Seek() can be used instead of rewind
|
||||
for itr.Rewind(); itr.ValidForPrefix([]byte(keyPrefix)); itr.Next() {
|
||||
totalCount++
|
||||
thisKey := string(itr.Item().Key())
|
||||
if keyRegEx.MatchString(thisKey) {
|
||||
keys = append(keys, thisKey)
|
||||
count += 1
|
||||
totalSize += itr.Item().EstimatedSize()
|
||||
if count >= maxRows {
|
||||
glog.Infof("Reached max rows: %v", maxRows)
|
||||
glog.Infof("Number of rows : %v has reached max rows: %v", count, maxRows)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -155,8 +168,12 @@ func listKeysHandler(tables typed.Tables) http.HandlerFunc {
|
|||
logWebError(err, "failed to parse template", request, writer)
|
||||
return
|
||||
}
|
||||
|
||||
err = t.ExecuteTemplate(writer, debugListKeysTemplateFile, keys)
|
||||
var result keysData
|
||||
result.Keys = keys
|
||||
result.TotalKeys = totalCount
|
||||
result.TotalSize = totalSize
|
||||
result.KeysMatched = count
|
||||
err = t.ExecuteTemplate(writer, debugListKeysTemplateFile, result)
|
||||
if err != nil {
|
||||
logWebError(err, "Template.ExecuteTemplate failed", request, writer)
|
||||
return
|
||||
|
@ -172,28 +189,24 @@ type sloopKeyInfo struct {
|
|||
AverageSize int64
|
||||
}
|
||||
|
||||
type sloopKey struct {
|
||||
TableName string
|
||||
PartitionID string
|
||||
}
|
||||
|
||||
type histogram struct {
|
||||
HistogramMap map[sloopKey]*sloopKeyInfo
|
||||
TotalKeys int
|
||||
DeletedKeys int
|
||||
HistogramMap map[common.SloopKey]*sloopKeyInfo
|
||||
TotalKeys int
|
||||
TotalSloopKeys int
|
||||
TotalEstimatedSize int64
|
||||
DeletedKeys int
|
||||
TotalInternalKeys int
|
||||
TotalInternalKeysSize int64
|
||||
TotalHeadKeys int
|
||||
TotalMoveKeys int
|
||||
TotalDiscardKeys int
|
||||
}
|
||||
|
||||
// returns TableName, PartitionId, error.
|
||||
func parseSloopKey(item badgerwrap.Item) (string, string, error) {
|
||||
key := item.Key()
|
||||
err, parts := common.ParseKey(string(key))
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
var tableName = parts[1]
|
||||
var partitionId = parts[2]
|
||||
return tableName, partitionId, nil
|
||||
type keysData struct {
|
||||
Keys []string
|
||||
TotalKeys int
|
||||
TotalSize int64
|
||||
KeysMatched int
|
||||
}
|
||||
|
||||
func histogramHandler(tables typed.Tables) http.HandlerFunc {
|
||||
|
@ -210,39 +223,61 @@ func histogramHandler(tables typed.Tables) http.HandlerFunc {
|
|||
iterOpt := badger.DefaultIteratorOptions
|
||||
iterOpt.Prefix = []byte(prefix)
|
||||
iterOpt.PrefetchValues = false
|
||||
iterOpt.AllVersions = true
|
||||
iterOpt.InternalAccess = true
|
||||
itr := txn.NewIterator(iterOpt)
|
||||
defer itr.Close()
|
||||
|
||||
totalKeys := 0
|
||||
var totalEstimatedSize int64 = 0
|
||||
var totalInternalKeysSize int64 = 0
|
||||
totalDeletedExpiredKeys := 0
|
||||
var sloopMap = make(map[sloopKey]*sloopKeyInfo)
|
||||
totalInternalKeys := 0
|
||||
totalMoveKeys := 0
|
||||
totalHeadKeys := 0
|
||||
totalDiscardKeys := 0
|
||||
totalSloopKeys := 0
|
||||
var sloopMap = make(map[common.SloopKey]*sloopKeyInfo)
|
||||
for itr.Rewind(); itr.Valid(); itr.Next() {
|
||||
item := itr.Item()
|
||||
tableName, partitionId, err := parseSloopKey(item)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse information about key: %x",
|
||||
item.Key())
|
||||
}
|
||||
size := item.EstimatedSize()
|
||||
totalEstimatedSize += size
|
||||
totalKeys++
|
||||
|
||||
if item.IsDeletedOrExpired() {
|
||||
totalDeletedExpiredKeys++
|
||||
}
|
||||
|
||||
size := item.EstimatedSize()
|
||||
sloopKey := sloopKey{tableName, partitionId}
|
||||
if sloopMap[sloopKey] == nil {
|
||||
sloopMap[sloopKey] = &sloopKeyInfo{size, size, 1, size, size}
|
||||
if strings.HasPrefix(string(item.Key()), "!badger") {
|
||||
totalInternalKeys++
|
||||
totalInternalKeysSize += item.EstimatedSize()
|
||||
if strings.HasPrefix(string(item.Key()), "!badger!head") {
|
||||
totalHeadKeys++
|
||||
} else if strings.HasPrefix(string(item.Key()), "!badger!move") {
|
||||
totalMoveKeys++
|
||||
} else if strings.HasPrefix(string(item.Key()), "!badger!discard") {
|
||||
totalDiscardKeys++
|
||||
}
|
||||
} else {
|
||||
sloopMap[sloopKey].TotalKeys++
|
||||
sloopMap[sloopKey].TotalSize += size
|
||||
sloopMap[sloopKey].AverageSize = sloopMap[sloopKey].TotalSize / sloopMap[sloopKey].TotalKeys
|
||||
if size < sloopMap[sloopKey].MinimumSize {
|
||||
sloopMap[sloopKey].MinimumSize = size
|
||||
totalSloopKeys++
|
||||
sloopKey, err := common.GetSloopKey(item)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse information about key: %x",
|
||||
item.Key())
|
||||
}
|
||||
|
||||
if size > sloopMap[sloopKey].MaximumSize {
|
||||
sloopMap[sloopKey].MaximumSize = size
|
||||
if sloopMap[sloopKey] == nil {
|
||||
sloopMap[sloopKey] = &sloopKeyInfo{size, size, 1, size, size}
|
||||
} else {
|
||||
sloopMap[sloopKey].TotalKeys++
|
||||
sloopMap[sloopKey].TotalSize += size
|
||||
sloopMap[sloopKey].AverageSize = sloopMap[sloopKey].TotalSize / sloopMap[sloopKey].TotalKeys
|
||||
if size < sloopMap[sloopKey].MinimumSize {
|
||||
sloopMap[sloopKey].MinimumSize = size
|
||||
}
|
||||
|
||||
if size > sloopMap[sloopKey].MaximumSize {
|
||||
sloopMap[sloopKey].MaximumSize = size
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -250,6 +285,13 @@ func histogramHandler(tables typed.Tables) http.HandlerFunc {
|
|||
result.TotalKeys = totalKeys
|
||||
result.DeletedKeys = totalDeletedExpiredKeys
|
||||
result.HistogramMap = sloopMap
|
||||
result.TotalDiscardKeys = totalDiscardKeys
|
||||
result.TotalEstimatedSize = totalEstimatedSize
|
||||
result.TotalHeadKeys = totalHeadKeys
|
||||
result.TotalInternalKeys = totalInternalKeys
|
||||
result.TotalMoveKeys = totalMoveKeys
|
||||
result.TotalInternalKeysSize = totalInternalKeysSize
|
||||
result.TotalSloopKeys = totalSloopKeys
|
||||
return nil
|
||||
})
|
||||
|
||||
|
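// A minimal sketch of how the debug pages walk badger's internal keyspace, assuming the
// badgerwrap.Txn iterator used above; the counters mirror what the histogram handler
// collects for !badger!head, !badger!move and !badger!discard keys.
//
//   iterOpt := badger.DefaultIteratorOptions
//   iterOpt.PrefetchValues = false
//   iterOpt.AllVersions = true
//   iterOpt.InternalAccess = true
//   itr := txn.NewIterator(iterOpt)
//   defer itr.Close()
//   moveKeys := 0
//   for itr.Rewind(); itr.Valid(); itr.Next() {
//       if strings.HasPrefix(string(itr.Item().Key()), "!badger!move") {
//           moveKeys++
//       }
//   }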
@ -311,6 +353,7 @@ type badgerTableInfo struct {
|
|||
RightKey string
|
||||
KeyCount uint64
|
||||
ID uint64
|
||||
Size uint64
|
||||
}
|
||||
|
||||
func debugBadgerTablesHandler(db badgerwrap.DB) http.HandlerFunc {
|
||||
|
@ -328,6 +371,7 @@ func debugBadgerTablesHandler(db badgerwrap.DB) http.HandlerFunc {
|
|||
RightKey: string(table.Right),
|
||||
KeyCount: table.KeyCount,
|
||||
ID: table.ID,
|
||||
Size: table.EstimatedSz,
|
||||
}
|
||||
data = append(data, thisTable)
|
||||
}
|
||||
|
|