Brad Rydzewski 2016-04-18 21:22:23 -07:00
Parent 00714d350b
Commit 609e1040f9
335 changed files with 204525 additions and 6 deletions

31
.drone.yml Normal file

@@ -0,0 +1,31 @@
debug: true
build:
image: golang:1.5
environment:
- GO15VENDOREXPERIMENT=1
commands:
- make deps
- make gen
- make test
- make test_mysql
- make build
compose:
mysql:
image: mysql:5.6.27
environment:
- MYSQL_DATABASE=test
- MYSQL_ALLOW_EMPTY_PASSWORD=yes
#publish:
# docker:
# username: drone
# password: $$DOCKER_PASS
# email: $$DOCKER_EMAIL
# repo: lgtm/lgtm
# tag:
# - "latest"
# - "1.0.0"
# when:
# branch: master

13
.gitignore vendored Normal file

@@ -0,0 +1,13 @@
lgtm
*.sqlite
*.db
*.txt
*_gen.go
*.out
.DS_Store
.env
bindata.go
web/react/node_modules
web/static/files/script.js
#web/static/files/*.css

16
Dockerfile Normal file

@@ -0,0 +1,16 @@
# Build the lgtm executable on an x64 Linux host:
#
# go build --ldflags '-extldflags "-static"' -o lgtm
#
# Build the docker image:
#
# docker build --rm=true -t lgtm/lgtm .
FROM centurylink/ca-certs
EXPOSE 8000
ENV DATABASE_DRIVER=sqlite3
ENV DATABASE_DATASOURCE=/var/lib/lgtm/lgtm.sqlite
ADD lgtm /lgtm
ENTRYPOINT ["/lgtm"]

202
LICENSE Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
Makefile Normal file

@@ -0,0 +1,25 @@
PACKAGES = $(shell go list ./... | grep -v /vendor/)
all: build
deps:
go get -u github.com/jteeuwen/go-bindata/...
go get -u github.com/elazarl/go-bindata-assetfs/...
go get -u github.com/vektra/mockery/...
gen:
go generate github.com/bradrydzewski/lgtm/web/static
go generate github.com/bradrydzewski/lgtm/web/template
go generate github.com/bradrydzewski/lgtm/notifier
go generate github.com/bradrydzewski/lgtm/remote
go generate github.com/bradrydzewski/lgtm/store/migration
go generate github.com/bradrydzewski/lgtm/store
build:
go build --ldflags '-extldflags "-static" -X github.com/drone/drone/version.VersionDev=$(CI_BUILD_NUMBER)' -o lgtm
test:
@for PKG in $(PACKAGES); do go test -cover -coverprofile $$GOPATH/src/$$PKG/coverage.out $$PKG; done;
test_mysql:
DATABASE_DRIVER="mysql" DATABASE_DATASOURCE="root@tcp(127.0.0.1:3306)/test?parseTime=true" go test -v -cover github.com/bradrydzewski/lgtm/store/datastore


@@ -1,7 +1,2 @@
![lgtm](https://avatars3.githubusercontent.com/u/16172772?v=3&s=200)
LGTM is a simple pull request approval system using GitHub protected branches and MAINTAINERS files. For more information, see https://lgtm.co/docs
# lgtm
Discuss and file LGTM issues here
Join our online chat at https://gitter.im/lgtmco/lgtm

88
api/maintainer.go Normal file

@@ -0,0 +1,88 @@
package api
import (
"github.com/bradrydzewski/lgtm/cache"
"github.com/bradrydzewski/lgtm/model"
"github.com/bradrydzewski/lgtm/remote"
"github.com/bradrydzewski/lgtm/router/middleware/session"
"github.com/bradrydzewski/lgtm/store"
log "github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
)
// GetMaintainer gets the MAINTAINERS configuration file.
func GetMaintainer(c *gin.Context) {
var (
owner = c.Param("owner")
name = c.Param("repo")
user = session.User(c)
)
repo, err := store.GetRepoOwnerName(c, owner, name)
if err != nil {
log.Errorf("Error getting repository %s. %s", name, err)
c.AbortWithStatus(404)
return
}
file, err := remote.GetContents(c, user, repo, "MAINTAINERS")
if err != nil {
log.Debugf("no MAINTAINERS file for %s. Checking for team members.", repo.Slug)
members, merr := cache.GetMembers(c, user, repo.Owner)
if merr != nil {
log.Errorf("Error getting repository %s. %s", repo.Slug, err)
log.Errorf("Error getting org members %s. %s", repo.Owner, merr)
c.String(404, "MAINTAINERS file not found. %s", err)
return
} else {
log.Printf("found %v members", len(members))
for _, member := range members {
file = append(file, member.Login...)
file = append(file, '\n')
}
}
}
maintainer, err := model.ParseMaintainer(file)
if err != nil {
log.Errorf("Error parsing MAINTAINERS file for %s. %s", repo.Slug, err)
c.String(500, "Error parsing MAINTAINERS file. %s.", err)
return
}
c.JSON(200, maintainer)
}
// GetMaintainerOrg gets the MAINTAINERS configuration file and returns
// a subset of the file with members belonging to the specified organization.
func GetMaintainerOrg(c *gin.Context) {
var (
owner = c.Param("owner")
name = c.Param("repo")
team = c.Param("org")
user = session.User(c)
)
repo, err := store.GetRepoOwnerName(c, owner, name)
if err != nil {
log.Errorf("Error getting repository %s. %s", name, err)
c.AbortWithStatus(404)
return
}
file, err := remote.GetContents(c, user, repo, "MAINTAINERS")
if err != nil {
log.Errorf("Error getting repository %s. %s", repo.Slug, err)
c.String(404, "MAINTAINERS file not found. %s", err)
return
}
maintainer, err := model.ParseMaintainer(file)
if err != nil {
log.Errorf("Error parsing MAINTAINERS file for %s. %s", repo.Slug, err)
c.String(500, "Error parsing MAINTAINERS file. %s.", err)
return
}
subset, err := model.FromOrg(maintainer, team)
if err != nil {
log.Errorf("Error getting subset of MAINTAINERS file for %s/%s. %s", repo.Slug, team, err)
c.String(500, "Error getting subset of MAINTAINERS file. %s.", err)
return
}
c.JSON(200, subset)
}

145
api/repos.go Normal file

@@ -0,0 +1,145 @@
package api
import (
"fmt"
"github.com/bradrydzewski/lgtm/cache"
"github.com/bradrydzewski/lgtm/model"
"github.com/bradrydzewski/lgtm/remote"
"github.com/bradrydzewski/lgtm/router/middleware/session"
"github.com/bradrydzewski/lgtm/shared/httputil"
"github.com/bradrydzewski/lgtm/shared/token"
"github.com/bradrydzewski/lgtm/store"
log "github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
)
// GetRepos gets the active repository list.
func GetRepos(c *gin.Context) {
user := session.User(c)
repos, err := cache.GetRepos(c, user)
if err != nil {
log.Errorf("Error getting remote repository list. %s", err)
c.String(500, "Error getting remote repository list")
return
}
// copy the slice since we are going to mutate it and don't
// want any nasty data races if the slice came from the cache.
repoc := make([]*model.Repo, len(repos))
copy(repoc, repos)
repom, err := store.GetRepoIntersectMap(c, repos)
if err != nil {
log.Errorf("Error getting active repository list. %s", err)
c.String(500, "Error getting active repository list")
return
}
// merges the slice of active and remote repositories favoring
// and swapping in local repository information when possible.
for i, repo := range repoc {
repo_, ok := repom[repo.Slug]
if ok {
repoc[i] = repo_
}
}
c.IndentedJSON(200, repoc)
}
// GetRepo gets the repository by slug.
func GetRepo(c *gin.Context) {
var (
owner = c.Param("owner")
name = c.Param("repo")
)
repo, err := store.GetRepoOwnerName(c, owner, name)
if err != nil {
log.Errorf("Error getting repository %s. %s", name, err)
c.String(404, "Error getting repository %s", name)
return
}
c.JSON(200, repo)
}
// PostRepo activates a new repository.
func PostRepo(c *gin.Context) {
var (
owner = c.Param("owner")
name = c.Param("repo")
user = session.User(c)
)
// verify repo doesn't already exist
if _, err := store.GetRepoOwnerName(c, owner, name); err == nil {
c.String(409, "Error activating a repository that is already active.")
return
}
repo, err := remote.GetRepo(c, user, owner, name)
if err != nil {
c.String(404, "Error finding repository in GitHub. %s")
return
}
repo.UserID = user.ID
repo.Secret = model.Rand()
// creates a token to authorize the link callback url
t := token.New(token.HookToken, repo.Slug)
sig, err := t.Sign(repo.Secret)
if err != nil {
c.String(500, "Error activating repository. %s")
return
}
// create the hook callback url
link := fmt.Sprintf(
"%s/hook?access_token=%s",
httputil.GetURL(c.Request),
sig,
)
err = remote.SetHook(c, user, repo, link)
if err != nil {
c.String(500, "Error creating hook. %s", err)
return
}
err = store.CreateRepo(c, repo)
if err != nil {
c.String(500, "Error activating the repository. %s", err)
return
}
c.IndentedJSON(200, repo)
}
// DeleteRepo deletes a repository configuration.
func DeleteRepo(c *gin.Context) {
var (
owner = c.Param("owner")
name = c.Param("repo")
user = session.User(c)
)
repo, err := store.GetRepoOwnerName(c, owner, name)
if err != nil {
log.Errorf("Error getting repository %s. %s", name, err)
c.AbortWithStatus(404)
return
}
err = store.DeleteRepo(c, repo)
if err != nil {
log.Errorf("Error deleting repository %s. %s", name, err)
c.AbortWithStatus(500)
return
}
link := fmt.Sprintf(
"%s/hook",
httputil.GetURL(c.Request),
)
err = remote.DelHook(c, user, repo, link)
if err != nil {
log.Errorf("Error deleting repository hook for %s. %s", name, err)
}
c.String(200, "")
}

26
api/teams.go Normal file

@@ -0,0 +1,26 @@
package api
import (
"github.com/bradrydzewski/lgtm/cache"
"github.com/bradrydzewski/lgtm/model"
"github.com/bradrydzewski/lgtm/router/middleware/session"
log "github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
)
// GetTeams gets the list of user teams.
func GetTeams(c *gin.Context) {
user := session.User(c)
teams, err := cache.GetTeams(c, user)
if err != nil {
log.Errorf("Error getting team list. %s", err)
c.String(500, "Error getting team list")
return
}
teams = append(teams, &model.Team{
Login: user.Login,
Avatar: user.Avatar,
})
c.JSON(200, teams)
}

12
api/users.go Normal file

@@ -0,0 +1,12 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/bradrydzewski/lgtm/router/middleware/session"
)
// GetUser gets the currently authenticated user.
func GetUser(c *gin.Context) {
c.JSON(200, session.User(c))
}

35
cache/cache.go vendored Normal file

@@ -0,0 +1,35 @@
package cache
//go:generate mockery -name Cache -output mock -case=underscore
import (
"time"
"github.com/koding/cache"
"golang.org/x/net/context"
)
type Cache interface {
Get(string) (interface{}, error)
Set(string, interface{}) error
}
func Get(c context.Context, key string) (interface{}, error) {
return FromContext(c).Get(key)
}
func Set(c context.Context, key string, value interface{}) error {
return FromContext(c).Set(key, value)
}
// Default creates an in-memory cache with the default
// 30 minute expiration period.
func Default() Cache {
return NewTTL(time.Minute * 30)
}
// NewTTL returns an in-memory cache with the specified
// ttl expiration period.
func NewTTL(t time.Duration) Cache {
return cache.NewMemoryWithTTL(t)
}

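A minimal usage sketch, assuming the vendored gin.Context satisfies the x/net/context interface in the same way cache_test.go below relies on; it attaches the default 30-minute cache to a request context and reads a value back:

package main

import (
	"fmt"

	"github.com/bradrydzewski/lgtm/cache"
	"github.com/gin-gonic/gin"
)

func main() {
	// attach the default in-memory cache (30 minute TTL) to the context,
	// mirroring the setup in cache_test.go.
	c := new(gin.Context)
	cache.ToContext(c, cache.Default())

	// write through the package-level helpers, then read the value back.
	cache.Set(c, "user:octocat", "cached value")
	if v, err := cache.Get(c, "user:octocat"); err == nil {
		fmt.Println(v) // prints "cached value"
	}
}
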
34
cache/cache_test.go vendored Normal file

@@ -0,0 +1,34 @@
package cache
import (
"testing"
"github.com/franela/goblin"
"github.com/gin-gonic/gin"
)
func TestCache(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Cache", func() {
var c *gin.Context
g.BeforeEach(func() {
c = new(gin.Context)
ToContext(c, Default())
})
g.It("Should set and get an item", func() {
Set(c, "foo", "bar")
v, e := Get(c, "foo")
g.Assert(v).Equal("bar")
g.Assert(e == nil).IsTrue()
})
g.It("Should return nil when item not found", func() {
v, e := Get(c, "foo")
g.Assert(v == nil).IsTrue()
g.Assert(e == nil).IsFalse()
})
})
}

23
cache/context.go vendored Normal file

@@ -0,0 +1,23 @@
package cache
import (
"golang.org/x/net/context"
)
const key = "cache"
// Setter defines a context that enables setting values.
type Setter interface {
Set(string, interface{})
}
// FromContext returns the Cache associated with this context.
func FromContext(c context.Context) Cache {
return c.Value(key).(Cache)
}
// ToContext adds the Cache to this context if it supports
// the Setter interface.
func ToContext(c Setter, cache Cache) {
c.Set(key, cache)
}

95
cache/helper.go vendored Normal file

@@ -0,0 +1,95 @@
package cache
import (
"fmt"
"golang.org/x/net/context"
"github.com/bradrydzewski/lgtm/model"
"github.com/bradrydzewski/lgtm/remote"
)
// GetRepos returns the list of user repositories from the cache
// associated with the current context.
func GetRepos(c context.Context, user *model.User) ([]*model.Repo, error) {
key := fmt.Sprintf("repos:%s",
user.Login,
)
// if we fetch from the cache we can return immediately
val, err := FromContext(c).Get(key)
if err == nil {
return val.([]*model.Repo), nil
}
// else we try to grab from the remote system and
// populate our cache.
repos, err := remote.GetRepos(c, user)
if err != nil {
return nil, err
}
FromContext(c).Set(key, repos)
return repos, nil
}
// GetTeams returns the list of user teams from the cache
// associated with the current context.
func GetTeams(c context.Context, user *model.User) ([]*model.Team, error) {
key := fmt.Sprintf("teams:%s",
user.Login,
)
// if we fetch from the cache we can return immediately
val, err := FromContext(c).Get(key)
if err == nil {
return val.([]*model.Team), nil
}
// else we try to grab from the remote system and
// populate our cache.
teams, err := remote.GetTeams(c, user)
if err != nil {
return nil, err
}
FromContext(c).Set(key, teams)
return teams, nil
}
// GetPerm returns the user permissions for the named repository from the
// cache associated with the current context.
func GetPerm(c context.Context, user *model.User, owner, name string) (*model.Perm, error) {
key := fmt.Sprintf("perms:%s:%s/%s",
user.Login,
owner,
name,
)
// if we fetch from the cache we can return immediately
val, err := FromContext(c).Get(key)
if err == nil {
return val.(*model.Perm), nil
}
// else we try to grab from the remote system and
// populate our cache.
perm, err := remote.GetPerm(c, user, owner, name)
if err != nil {
return nil, err
}
FromContext(c).Set(key, perm)
return perm, nil
}
// GetMembers returns the team members from the cache.
func GetMembers(c context.Context, user *model.User, team string) ([]*model.Member, error) {
key := fmt.Sprintf("members:%s",
team,
)
// if we fetch from the cache we can return immediately
val, err := FromContext(c).Get(key)
if err == nil {
return val.([]*model.Member), nil
}
// else we try to grab from the remote system and
// populate our cache.
members, err := remote.GetMembers(c, user, team)
if err != nil {
return nil, err
}
FromContext(c).Set(key, members)
return members, nil
}

157
cache/helper_test.go vendored Normal file

@@ -0,0 +1,157 @@
package cache
import (
"errors"
"fmt"
"testing"
"github.com/bradrydzewski/lgtm/model"
"github.com/bradrydzewski/lgtm/remote"
"github.com/bradrydzewski/lgtm/remote/mock"
"github.com/franela/goblin"
"github.com/gin-gonic/gin"
)
func TestHelper(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Cache helpers", func() {
var c *gin.Context
var r *mock.Remote
g.BeforeEach(func() {
c = new(gin.Context)
ToContext(c, Default())
r = new(mock.Remote)
remote.ToContext(c, r)
})
g.It("Should get permissions from remote", func() {
r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(fakePerm, nil).Once()
p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name)
g.Assert(p).Equal(fakePerm)
g.Assert(err).Equal(nil)
})
g.It("Should get permissions from cache", func() {
key := fmt.Sprintf("perms:%s:%s/%s",
fakeUser.Login,
fakeRepo.Owner,
fakeRepo.Name,
)
Set(c, key, fakePerm)
r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once()
p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name)
g.Assert(p).Equal(fakePerm)
g.Assert(err).Equal(nil)
})
g.It("Should get permissions error", func() {
r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once()
p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name)
g.Assert(p == nil).IsTrue()
g.Assert(err).Equal(fakeErr)
})
g.It("Should set and get repos", func() {
r.On("GetRepos", fakeUser).Return(fakeRepos, nil).Once()
p, err := GetRepos(c, fakeUser)
g.Assert(p).Equal(fakeRepos)
g.Assert(err).Equal(nil)
})
g.It("Should get repos", func() {
key := fmt.Sprintf("repos:%s",
fakeUser.Login,
)
Set(c, key, fakeRepos)
r.On("GetRepos", fakeUser).Return(nil, fakeErr).Once()
p, err := GetRepos(c, fakeUser)
g.Assert(p).Equal(fakeRepos)
g.Assert(err).Equal(nil)
})
g.It("Should get repos error", func() {
r.On("GetRepos", fakeUser).Return(nil, fakeErr).Once()
p, err := GetRepos(c, fakeUser)
g.Assert(p == nil).IsTrue()
g.Assert(err).Equal(fakeErr)
})
g.It("Should set and get teams", func() {
r.On("GetTeams", fakeUser).Return(fakeTeams, nil).Once()
p, err := GetTeams(c, fakeUser)
g.Assert(p).Equal(fakeTeams)
g.Assert(err).Equal(nil)
})
g.It("Should get teams", func() {
key := fmt.Sprintf("teams:%s",
fakeUser.Login,
)
Set(c, key, fakeTeams)
r.On("GetTeams", fakeUser).Return(nil, fakeErr).Once()
p, err := GetTeams(c, fakeUser)
g.Assert(p).Equal(fakeTeams)
g.Assert(err).Equal(nil)
})
g.It("Should get team error", func() {
r.On("GetTeams", fakeUser).Return(nil, fakeErr).Once()
p, err := GetTeams(c, fakeUser)
g.Assert(p == nil).IsTrue()
g.Assert(err).Equal(fakeErr)
})
g.It("Should set and get members", func() {
r.On("GetMembers", fakeUser, "drone").Return(fakeMembers, nil).Once()
p, err := GetMembers(c, fakeUser, "drone")
g.Assert(p).Equal(fakeMembers)
g.Assert(err).Equal(nil)
})
g.It("Should get members", func() {
key := "members:drone"
Set(c, key, fakeMembers)
r.On("GetMembers", fakeUser, "drone").Return(nil, fakeErr).Once()
p, err := GetMembers(c, fakeUser, "drone")
g.Assert(p).Equal(fakeMembers)
g.Assert(err).Equal(nil)
})
g.It("Should get member error", func() {
r.On("GetMembers", fakeUser, "drone").Return(nil, fakeErr).Once()
p, err := GetMembers(c, fakeUser, "drone")
g.Assert(p == nil).IsTrue()
g.Assert(err).Equal(fakeErr)
})
})
}
var (
fakeErr = errors.New("Not Found")
fakeUser = &model.User{Login: "octocat"}
fakePerm = &model.Perm{Pull: true, Push: true, Admin: true}
fakeRepo = &model.Repo{Owner: "octocat", Name: "Hello-World"}
fakeRepos = []*model.Repo{
{Owner: "octocat", Name: "Hello-World"},
{Owner: "octocat", Name: "hello-world"},
{Owner: "octocat", Name: "Spoon-Knife"},
}
fakeTeams = []*model.Team{
{Login: "drone"},
{Login: "docker"},
}
fakeMembers = []*model.Member{
{Login: "octocat"},
}
)

42
cache/mock/cache.go vendored Normal file

@@ -0,0 +1,42 @@
package mock
import "github.com/bradrydzewski/lgtm/cache"
import "github.com/stretchr/testify/mock"
type Cache struct {
mock.Mock
}
func (_m *Cache) Get(_a0 string) (interface{}, error) {
ret := _m.Called(_a0)
var r0 interface{}
if rf, ok := ret.Get(0).(func(string) interface{}); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(interface{})
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Cache) Set(_a0 string, _a1 interface{}) error {
ret := _m.Called(_a0, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(string, interface{}) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}

50
main.go Normal file

@@ -0,0 +1,50 @@
package main
import (
"net/http"
"time"
"github.com/bradrydzewski/lgtm/router"
"github.com/bradrydzewski/lgtm/router/middleware"
"github.com/Sirupsen/logrus"
"github.com/gin-gonic/contrib/ginrus"
"github.com/ianschenck/envflag"
_ "github.com/joho/godotenv/autoload"
)
var (
addr = envflag.String("SERVER_ADDR", ":8000", "")
cert = envflag.String("SERVER_CERT", "", "")
key = envflag.String("SERVER_KEY", "", "")
debug = envflag.Bool("DEBUG", false, "")
)
func main() {
envflag.Parse()
if *debug {
logrus.SetLevel(logrus.DebugLevel)
} else {
logrus.SetLevel(logrus.WarnLevel)
}
handler := router.Load(
ginrus.Ginrus(logrus.StandardLogger(), time.RFC3339, true),
middleware.Version,
middleware.Store(),
middleware.Remote(),
middleware.Cache(),
)
if *cert != "" {
logrus.Fatal(
http.ListenAndServeTLS(*addr, *cert, *key, handler),
)
} else {
logrus.Fatal(
http.ListenAndServe(*addr, handler),
)
}
}

6
model/comment.go Normal file

@@ -0,0 +1,6 @@
package model
type Comment struct {
Author string
Body string
}

47
model/config.go Normal file

@@ -0,0 +1,47 @@
package model
import (
"regexp"
"github.com/BurntSushi/toml"
)
type Config struct {
Approvals int `json:"approvals" toml:"approvals"`
Pattern string `json:"pattern" toml:"pattern"`
SelfApprovalOff bool `json:"self_approval_off" toml:"self_approval_off"`
re *regexp.Regexp
}
// ParseConfig parses a project's .lgtm file.
func ParseConfig(data []byte) (*Config, error) {
return ParseConfigStr(string(data))
}
// ParseConfigStr parses a project's .lgtm file in string format.
func ParseConfigStr(data string) (*Config, error) {
c := new(Config)
_, err := toml.Decode(data, c)
if err != nil {
return nil, err
}
if c.Approvals == 0 {
c.Approvals = 2
}
if len(c.Pattern) == 0 {
c.Pattern = "(?i)LGTM"
}
c.re, err = regexp.Compile(c.Pattern)
return c, err
}
// IsMatch returns true if the text matches the regular
// expression pattern.
func (c *Config) IsMatch(text string) bool {
if c.re == nil {
// this should never happen
return false
}
return c.re.MatchString(text)
}

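A short usage sketch (the .lgtm contents are hypothetical) illustrating the defaults applied above, namely two required approvals and the case-insensitive "(?i)LGTM" pattern:

package main

import (
	"fmt"

	"github.com/bradrydzewski/lgtm/model"
)

func main() {
	// hypothetical .lgtm file that only overrides the approval count;
	// the pattern falls back to the default "(?i)LGTM".
	conf, err := model.ParseConfigStr(`approvals = 1`)
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.Approvals)             // 1
	fmt.Println(conf.IsMatch("LGTM"))       // true
	fmt.Println(conf.IsMatch("lgtm :+1:"))  // true, pattern is case-insensitive
	fmt.Println(conf.IsMatch("needs work")) // false
}
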
7
model/hook.go Normal file

@@ -0,0 +1,7 @@
package model
type Hook struct {
Repo *Repo
Issue *Issue
Comment *Comment
}

7
model/issue.go Normal file

@@ -0,0 +1,7 @@
package model
type Issue struct {
Number int
Title string
Author string
}

190
model/maintainer.go Normal file

@@ -0,0 +1,190 @@
package model
import (
"bufio"
"bytes"
"fmt"
"regexp"
"strings"
"github.com/BurntSushi/toml"
)
// Person represents an individual in the MAINTAINERS file.
type Person struct {
Name string `json:"name" toml:"name"`
Email string `json:"email" toml:"email"`
Login string `json:"login" toml:"login"`
}
// Org represents a group, team or subset of users.
type Org struct {
People []string `json:"people" toml:"people"`
}
// Maintainer represents a MAINTAINERS file.
type Maintainer struct {
People map[string]*Person `json:"people" toml:"people"`
Org map[string]*Org `json:"org" toml:"org"`
}
// ParseMaintainer parses a project's MAINTAINERS file and returns
// the list of maintainers.
func ParseMaintainer(data []byte) (*Maintainer, error) {
return ParseMaintainerStr(string(data))
}
// ParseMaintainerStr parses a project's MAINTAINERS file in string
// format and returns the list of maintainers.
func ParseMaintainerStr(data string) (*Maintainer, error) {
m, err := parseMaintainerToml(data)
if err != nil {
m, err = parseMaintainerText(data)
if err != nil {
return nil, err
}
}
return m, nil
}
// FromOrg returns a new Maintainer file with a subset of people
// that are part of the specified org.
func FromOrg(from *Maintainer, name string) (*Maintainer, error) {
m := new(Maintainer)
m.Org = map[string]*Org{}
m.People = map[string]*Person{}
var members []string
switch {
case from.Org == nil:
return nil, fmt.Errorf("No organization section")
case from.People == nil:
return nil, fmt.Errorf("No people section")
case len(from.People) == 0:
return nil, fmt.Errorf("No people section")
}
org, ok := from.Org[name]
if !ok {
return nil, fmt.Errorf("No organization section for %s", name)
}
for _, login := range org.People {
person, ok := from.People[login]
if !ok {
continue
}
m.People[login] = person
members = append(members, person.Login)
}
m.Org["core"] = &Org{members}
return m, nil
}
func parseMaintainerToml(data string) (*Maintainer, error) {
m := new(Maintainer)
_, err := toml.Decode(data, m)
if err != nil {
return nil, err
}
if m.People == nil {
return nil, fmt.Errorf("Invalid Toml format. Missing people section.")
}
// if the person is defined in the file, but the Login field is
// empty, we can use the map key as the Login value. This is mainly
// here to support Docker projects, which use the GitHub username as the
// map key instead of setting an explicit Login field.
for k, v := range m.People {
if len(v.Login) == 0 {
v.Login = k
}
}
return m, nil
}
func parseMaintainerText(data string) (*Maintainer, error) {
m := new(Maintainer)
m.People = map[string]*Person{}
buf := bytes.NewBufferString(data)
reader := bufio.NewReader(buf)
for {
line, _, err := reader.ReadLine()
if err != nil {
break
}
item := parseln(string(line))
if len(item) == 0 {
continue
}
person := parseLogin(item)
if person == nil {
person = parseLoginMeta(item)
}
if person == nil {
person = parseLoginEmail(item)
}
if person == nil {
return nil, fmt.Errorf("Invalid file format.")
}
m.People[person.Login] = person
}
return m, nil
}
func parseln(s string) string {
if s == "" || string(s[0]) == "#" {
return ""
}
index := strings.Index(s, " #")
if index > -1 {
s = strings.TrimSpace(s[0:index])
}
return s
}
// regular expression determines if a line in the maintainers
// file only has the single GitHub username and no other metadata.
var reLogin = regexp.MustCompile("^\\w[\\w-]+$")
// regular expression determines if a line in the maintainers
// file has the username and metadata.
var reLoginMeta = regexp.MustCompile("(.+) <(.+)> \\(@(.+)\\)")
// regular expression determines if a line in the maintainers
// file has the username and email.
var reLoginEmail = regexp.MustCompile("(.+) <(.+)>")
func parseLoginMeta(line string) *Person {
matches := reLoginMeta.FindStringSubmatch(line)
if len(matches) != 4 {
return nil
}
return &Person{
Name: strings.TrimSpace(matches[1]),
Email: strings.TrimSpace(matches[2]),
Login: strings.TrimSpace(matches[3]),
}
}
func parseLoginEmail(line string) *Person {
matches := reLoginEmail.FindStringSubmatch(line)
if len(matches) != 3 {
return nil
}
return &Person{
Login: strings.TrimSpace(matches[1]),
Email: strings.TrimSpace(matches[2]),
}
}
func parseLogin(line string) *Person {
line = strings.TrimSpace(line)
if !reLogin.MatchString(line) {
return nil
}
return &Person{
Login: line,
}
}

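A usage sketch follows (the file contents are hypothetical, but the formats are the ones exercised in maintainer_test.go below); ParseMaintainerStr first tries the TOML form and falls back to the plain-text forms:

package main

import (
	"fmt"

	"github.com/bradrydzewski/lgtm/model"
)

func main() {
	// hypothetical MAINTAINERS file in the "Name <email> (@login)" format;
	// the plain-login and TOML formats parse to the same structure.
	file := `
Brad Rydzewski <brad.rydzewski@mail.com> (@bradrydzewski)
Matt Norris <matt.norris@mail.com> (@mattnorris)
`
	m, err := model.ParseMaintainerStr(file)
	if err != nil {
		panic(err)
	}
	for login, person := range m.People {
		fmt.Println(login, person.Email)
	}
}
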
76
model/maintainer_test.go Normal file

@@ -0,0 +1,76 @@
package model
import "testing"
func TestParseMaintainer(t *testing.T) {
var files = []string{maintainerFile, maintainerFileEmail, maintainerFileSimple, maintainerFileMixed, maintainerFileToml}
for _, file := range files {
parsed, err := ParseMaintainerStr(file)
if err != nil {
t.Error(err)
return
}
if len(parsed.People) != len(people) {
t.Errorf("Wanted %d maintainers, got %d", len(people), len(parsed.People))
return
}
for _, want := range people {
got, ok := parsed.People[want.Login]
if !ok {
t.Errorf("Wanted user %s in file", want.Login)
} else if want.Login != got.Login {
t.Errorf("Wanted login %s, got %s", want.Login, got.Login)
}
}
}
}
var people = []Person{
{Login: "bradrydzewski"},
{Login: "mattnorris"},
}
var maintainerFile = `
Brad Rydzewski <brad.rydzewski@mail.com> (@bradrydzewski)
Matt Norris <matt.norris@mail.com> (@mattnorris)
`
var maintainerFileEmail = `
bradrydzewski <brad.rydzewski@mail.com>
mattnorris <matt.norris@mail.com>
`
// simple format with usernames only. includes
// spaces and comments.
var maintainerFileSimple = `
bradrydzewski
mattnorris`
// mixed format with plain usernames and full name/email/login metadata.
var maintainerFileMixed = `
bradrydzewski
Matt Norris <matt.norris@mail.com> (@mattnorris)
`
// advanced toml format for the maintainers file.
var maintainerFileToml = `
[org]
[org.core]
people = [
"mattnorris",
"bradrydzewski",
]
[people]
[people.bradrydzewski]
name = "Brad Rydzewski"
email = "brad.rydzewski@mail.com"
login = "bradrydzewski"
[people.mattnorris]
name = "Matt Norris"
email = "matt.norris@mail.com"
login = "mattnorris"
`

18
model/repo.go Normal file

@@ -0,0 +1,18 @@
package model
type Repo struct {
ID int64 `json:"id,omitempty" meddler:"repo_id,pk"`
UserID int64 `json:"-" meddler:"repo_user_id"`
Owner string `json:"owner" meddler:"repo_owner"`
Name string `json:"name" meddler:"repo_name"`
Slug string `json:"slug" meddler:"repo_slug"`
Link string `json:"link_url" meddler:"repo_link"`
Private bool `json:"private" meddler:"repo_private"`
Secret string `json:"-" meddler:"repo_secret"`
}
type Perm struct {
Pull bool
Push bool
Admin bool
}

10
model/team.go Normal file

@@ -0,0 +1,10 @@
package model
type Team struct {
Login string `json:"login"`
Avatar string `json:"avatar"`
}
type Member struct {
Login string `json:"login"`
}

10
model/user.go Normal file

@@ -0,0 +1,10 @@
package model
type User struct {
ID int64 `json:"id" meddler:"user_id,pk"`
Login string `json:"login" meddler:"user_login"`
Email string `json:"email" meddler:"user_email"`
Token string `json:"-" meddler:"user_token"`
Avatar string `json:"avatar" meddler:"user_avatar"`
Secret string `json:"-" meddler:"user_secret"`
}

35
model/util.go Normal file

@@ -0,0 +1,35 @@
package model
import (
"crypto/rand"
"io"
)
// standard characters allowed in token string.
var chars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
// default token length
var length = 32
// Rand generates a random 32-character string.
func Rand() string {
b := make([]byte, length)
r := make([]byte, length+(length/4)) // storage for random bytes.
clen := byte(len(chars))
maxrb := byte(256 - (256 % len(chars)))
i := 0
for {
io.ReadFull(rand.Reader, r)
for _, c := range r {
if c >= maxrb {
// Skip this number to avoid modulo bias.
continue
}
b[i] = chars[c%clen]
i++
if i == length {
return string(b)
}
}
}
}

21
notifier/context.go Normal file

@@ -0,0 +1,21 @@
package notifier
import "golang.org/x/net/context"
const key = "sender"
// Setter defines a context that enables setting values.
type Setter interface {
Set(string, interface{})
}
// FromContext returns the Sender associated with this context.
func FromContext(c context.Context) Sender {
return c.Value(key).(Sender)
}
// ToContext adds the Sender to this context if it supports
// the Setter interface.
func ToContext(c Setter, s Sender) {
c.Set(key, s)
}


@@ -0,0 +1,4 @@
package github
// TODO(bradrydzewski) ability to notify users of a new pull request requiring
// approval by commenting on the GitHub pull request.


@@ -0,0 +1,4 @@
package hipchat
// TODO(bradrydzewski) ability to notify users of a new pull request requiring
// approval by sending a notification to a hipchat room.

18
notifier/sender.go Normal file

@@ -0,0 +1,18 @@
package notifier
import "golang.org/x/net/context"
//go:generate mockery -name Notifier -output mock -case=underscore
// Sender defines a notification provider that is capable of sending out
// notifications to a list of maintainers or reviewers. An example provider
// might be a Slack or GitHub bot.
type Sender interface {
Send(*Notification) error
}
// Send sends a notification to the list of maintainers indicating a commit is
// ready for their review and possible approval.
func Send(c context.Context, n *Notification) error {
return FromContext(c).Send(n)
}

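To illustrate the interface, here is a hypothetical Sender implementation (logSender is not part of this commit); the Slack, HipChat, and GitHub notifier stubs in this commit would plug in the same way, using the Notification types defined in notifier/types.go below:

package main

import (
	"fmt"

	"github.com/bradrydzewski/lgtm/notifier"
)

// logSender is a hypothetical Sender that only prints notifications;
// a real provider would deliver them to a chat room or pull request.
type logSender struct{}

func (s *logSender) Send(n *notifier.Notification) error {
	for _, r := range n.Reviewers {
		fmt.Printf("notify %s: %s needs review (%s)\n", r.Login, n.Commit.Repo, n.Commit.Link)
	}
	return nil
}

func main() {
	var s notifier.Sender = &logSender{}
	s.Send(&notifier.Notification{
		Reviewers: []*notifier.Reviewer{{Login: "octocat"}},
		Commit: &notifier.Commit{
			Repo: "octocat/Hello-World",
			Link: "https://github.com/octocat/Hello-World/pull/1",
		},
	})
}
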
4
notifier/slack/slack.go Normal file

@@ -0,0 +1,4 @@
package slack
// TODO(bradrydzewski) ability to notify users of a new pull request requiring
// approval by sending a notification to a slack room.

24
notifier/types.go Normal file

@@ -0,0 +1,24 @@
package notifier
// Notification represents a notification that we are sending to a list of
// maintainers indicating a commit is ready for their review and, hopefully,
// approval.
type Notification struct {
Reviewers []*Reviewer
Commit *Commit
}
// Reviewer represents a repository maintainer or contributor that is being
// notified of a commit to review.
type Reviewer struct {
Login string
Email string
}
// Commit represents the commit for which we are notifying the maintainers.
type Commit struct {
Repo string
Message string
Author string
Link string
}

21
remote/context.go Normal file

@@ -0,0 +1,21 @@
package remote
import "golang.org/x/net/context"
const key = "remote"
// Setter defines a context that enables setting values.
type Setter interface {
Set(string, interface{})
}
// FromContext returns the Remote client associated with this context.
func FromContext(c context.Context) Remote {
return c.Value(key).(Remote)
}
// ToContext adds the Remote client to this context if it supports
// the Setter interface.
func ToContext(c Setter, client Remote) {
c.Set(key, client)
}

146
remote/github/client.go Normal file

@@ -0,0 +1,146 @@
package github
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"golang.org/x/oauth2"
)
const (
pathLogin = "%slogin?access_token=%s"
pathUser = "%sapi/user"
pathRepos = "%sapi/user/repos"
pathRepo = "%sapi/repos/%s"
pathConf = "%sapi/repos/%s/maintainers"
pathBranch = "%srepos/%s/%s/branches/%s"
)
type Client struct {
client *http.Client
base string // base url
}
// NewClient returns a client at the specified url.
func NewClient(uri string) *Client {
return &Client{http.DefaultClient, uri}
}
// NewClientToken returns a client at the specified url that
// authenticates all outbound requests with the given token.
func NewClientToken(uri, token string) *Client {
config := new(oauth2.Config)
auther := config.Client(oauth2.NoContext, &oauth2.Token{AccessToken: token})
return &Client{auther, uri}
}
// SetClient sets the default http client. This should be
// used in conjunction with golang.org/x/oauth2 to
// authenticate requests to the server.
func (c *Client) SetClient(client *http.Client) {
c.client = client
}
func (c *Client) Branch(owner, name, branch string) (*Branch, error) {
out := new(Branch)
uri := fmt.Sprintf(pathBranch, c.base, owner, name, branch)
err := c.get(uri, out)
return out, err
}
func (c *Client) BranchProtect(owner, name, branch string, in *Branch) error {
uri := fmt.Sprintf(pathBranch, c.base, owner, name, branch)
return c.patch(uri, in, nil)
}
//
// http request helper functions
//
// helper function for making an http GET request.
func (c *Client) get(rawurl string, out interface{}) error {
return c.do(rawurl, "GET", nil, out)
}
// helper function for making an http POST request.
func (c *Client) post(rawurl string, in, out interface{}) error {
return c.do(rawurl, "POST", in, out)
}
// helper function for making an http PUT request.
func (c *Client) put(rawurl string, in, out interface{}) error {
return c.do(rawurl, "PUT", in, out)
}
// helper function for making an http PATCH request.
func (c *Client) patch(rawurl string, in, out interface{}) error {
return c.do(rawurl, "PATCH", in, out)
}
// helper function for making an http DELETE request.
func (c *Client) delete(rawurl string) error {
return c.do(rawurl, "DELETE", nil, nil)
}
// helper function to make an http request
func (c *Client) do(rawurl, method string, in, out interface{}) error {
// executes the http request and returns the body as
// an io.ReadCloser
body, err := c.stream(rawurl, method, in, out)
if err != nil {
return err
}
defer body.Close()
// if a json response is expected, parse and return
// the json response.
if out != nil {
return json.NewDecoder(body).Decode(out)
}
return nil
}
// helper function to stream an http request
func (c *Client) stream(rawurl, method string, in, out interface{}) (io.ReadCloser, error) {
uri, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
// if we are posting or putting data, we need to
// write it to the body of the request.
var buf io.ReadWriter
if in != nil {
buf = new(bytes.Buffer)
err := json.NewEncoder(buf).Encode(in)
if err != nil {
return nil, err
}
}
// creates a new http request to the GitHub API.
req, err := http.NewRequest(method, uri.String(), buf)
if err != nil {
return nil, err
}
if in != nil {
req.Header.Set("Content-Type", "application/json")
}
req.Header.Set("Accept", "application/vnd.github.loki-preview+json")
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode > http.StatusPartialContent {
defer resp.Body.Close()
out, _ := ioutil.ReadAll(resp.Body)
return nil, fmt.Errorf(string(out))
}
return resp.Body, nil
}

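For context, a minimal sketch of calling this client directly; the endpoint, token, and repository names are placeholders, and SetHook in remote/github/github.go below performs the same protected-branch setup against the repository's default branch:

package main

import (
	"fmt"

	"github.com/bradrydzewski/lgtm/remote/github"
)

func main() {
	// placeholder API endpoint and token; SetHook builds the same Branch
	// payload to require the approvals/lgtm status check.
	client := github.NewClientToken("https://api.github.com/", "<oauth2-token>")

	in := new(github.Branch)
	in.Protection.Enabled = true
	in.Protection.Checks.Enforcement = "non_admins"
	in.Protection.Checks.Contexts = []string{"approvals/lgtm"}

	if err := client.BranchProtect("octocat", "Hello-World", "master", in); err != nil {
		fmt.Println("error protecting branch:", err)
	}
}
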
332
remote/github/github.go Normal file

@@ -0,0 +1,332 @@
package github
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/bradrydzewski/lgtm/model"
"github.com/bradrydzewski/lgtm/shared/httputil"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
// name of the status message posted to GitHub
const context = "approvals/lgtm"
type Github struct {
URL string
API string
Client string
Secret string
Scopes []string
}
func (g *Github) GetUser(res http.ResponseWriter, req *http.Request) (*model.User, error) {
var config = &oauth2.Config{
ClientID: g.Client,
ClientSecret: g.Secret,
RedirectURL: fmt.Sprintf("%s/login", httputil.GetURL(req)),
Endpoint: oauth2.Endpoint{
AuthURL: fmt.Sprintf("%s/login/oauth/authorize", g.URL),
TokenURL: fmt.Sprintf("%s/login/oauth/access_token", g.URL),
},
Scopes: g.Scopes,
}
// get the oauth code from the incoming request. if no code is present
// redirect the user to GitHub login to retrieve a code.
var code = req.FormValue("code")
if len(code) == 0 {
state := fmt.Sprintln(time.Now().Unix())
http.Redirect(res, req, config.AuthCodeURL(state), http.StatusSeeOther)
return nil, nil
}
// exchanges the oauth2 code for an access token
token, err := config.Exchange(oauth2.NoContext, code)
if err != nil {
return nil, fmt.Errorf("Error exchanging token. %s", err)
}
// get the currently authenticated user details for the access token
client := setupClient(g.API, token.AccessToken)
user, _, err := client.Users.Get("")
if err != nil {
return nil, fmt.Errorf("Error fetching user. %s", err)
}
return &model.User{
Login: *user.Login,
Token: token.AccessToken,
Avatar: *user.AvatarURL,
}, nil
}
func (g *Github) GetUserToken(token string) (string, error) {
client := setupClient(g.API, token)
user, _, err := client.Users.Get("")
if err != nil {
return "", fmt.Errorf("Error fetching user. %s", err)
}
return *user.Login, nil
}
func (g *Github) GetTeams(user *model.User) ([]*model.Team, error) {
client := setupClient(g.API, user.Token)
orgs, _, err := client.Organizations.List("", &github.ListOptions{PerPage: 100})
if err != nil {
return nil, fmt.Errorf("Error fetching teams. %s", err)
}
teams := []*model.Team{}
for _, org := range orgs {
team := model.Team{
Login: *org.Login,
Avatar: *org.AvatarURL,
}
teams = append(teams, &team)
}
return teams, nil
}
func (g *Github) GetMembers(user *model.User, team string) ([]*model.Member, error) {
client := setupClient(g.API, user.Token)
teams, _, err := client.Organizations.ListTeams(team, &github.ListOptions{PerPage: 100})
if err != nil {
return nil, fmt.Errorf("Error accessing team list. %s", err)
}
var id int
for _, team := range teams {
if strings.ToLower(*team.Name) == "maintainers" {
id = *team.ID
break
}
}
if id == 0 {
return nil, fmt.Errorf("Error finding approvers team. %s", err)
}
opts := github.OrganizationListTeamMembersOptions{}
opts.PerPage = 100
teammates, _, err := client.Organizations.ListTeamMembers(id, &opts)
if err != nil {
return nil, fmt.Errorf("Error fetching team members. %s", err)
}
var members []*model.Member
for _, teammate := range teammates {
members = append(members, &model.Member{
Login: *teammate.Login,
})
}
return members, nil
}
func (g *Github) GetRepo(user *model.User, owner, name string) (*model.Repo, error) {
client := setupClient(g.API, user.Token)
repo_, _, err := client.Repositories.Get(owner, name)
if err != nil {
return nil, fmt.Errorf("Error fetching repository. %s", err)
}
return &model.Repo{
Owner: owner,
Name: name,
Slug: *repo_.FullName,
Link: *repo_.HTMLURL,
Private: *repo_.Private,
}, nil
}
func (g *Github) GetPerm(user *model.User, owner, name string) (*model.Perm, error) {
client := setupClient(g.API, user.Token)
repo, _, err := client.Repositories.Get(owner, name)
if err != nil {
return nil, fmt.Errorf("Error fetching repository. %s", err)
}
m := &model.Perm{}
m.Admin = (*repo.Permissions)["admin"]
m.Push = (*repo.Permissions)["push"]
m.Pull = (*repo.Permissions)["pull"]
return m, nil
}
func (g *Github) GetRepos(u *model.User) ([]*model.Repo, error) {
client := setupClient(g.API, u.Token)
all, err := GetUserRepos(client)
if err != nil {
return nil, err
}
repos := []*model.Repo{}
for _, repo := range all {
// only list repositories that I can admin
if repo.Permissions == nil || (*repo.Permissions)["admin"] == false {
continue
}
repos = append(repos, &model.Repo{
Owner: *repo.Owner.Login,
Name: *repo.Name,
Slug: *repo.FullName,
Link: *repo.HTMLURL,
Private: *repo.Private,
})
}
return repos, nil
}
func (g *Github) SetHook(user *model.User, repo *model.Repo, link string) error {
client := setupClient(g.API, user.Token)
repo_, _, err := client.Repositories.Get(repo.Owner, repo.Name)
if err != nil {
return err
}
old, err := GetHook(client, repo.Owner, repo.Name, link)
if err == nil && old != nil {
client.Repositories.DeleteHook(repo.Owner, repo.Name, *old.ID)
}
_, err = CreateHook(client, repo.Owner, repo.Name, link)
if err != nil {
log.Debugf("Error creating the webhook at %s. %s", link, err)
return err
}
in := new(Branch)
in.Protection.Enabled = true
in.Protection.Checks.Enforcement = "non_admins"
in.Protection.Checks.Contexts = []string{context}
client_ := NewClientToken(g.API, user.Token)
err = client_.BranchProtect(repo.Owner, repo.Name, *repo_.DefaultBranch, in)
if err != nil {
if g.URL == "https://github.com" {
return err
}
log.Warnf("Error configuring protected branch for %s/%s@%s. %s", repo.Owner, repo.Name, *repo_.DefaultBranch, err)
}
return nil
}
func (g *Github) DelHook(user *model.User, repo *model.Repo, link string) error {
client := setupClient(g.API, user.Token)
hook, err := GetHook(client, repo.Owner, repo.Name, link)
if err != nil {
return err
} else if hook == nil {
return nil
}
_, err = client.Repositories.DeleteHook(repo.Owner, repo.Name, *hook.ID)
if err != nil {
return err
}
repo_, _, err := client.Repositories.Get(repo.Owner, repo.Name)
if err != nil {
return err
}
client_ := NewClientToken(g.API, user.Token)
branch, _ := client_.Branch(repo.Owner, repo.Name, *repo_.DefaultBranch)
if len(branch.Protection.Checks.Contexts) == 0 {
return nil
}
checks := []string{}
for _, check := range branch.Protection.Checks.Contexts {
if check != context {
checks = append(checks, check)
}
}
branch.Protection.Checks.Contexts = checks
return client_.BranchProtect(repo.Owner, repo.Name, *repo_.DefaultBranch, branch)
}
func (g *Github) GetComments(u *model.User, r *model.Repo, num int) ([]*model.Comment, error) {
client := setupClient(g.API, u.Token)
opts := github.IssueListCommentsOptions{Direction: "desc", Sort: "created"}
opts.PerPage = 100
comments_, _, err := client.Issues.ListComments(r.Owner, r.Name, num, &opts)
if err != nil {
return nil, err
}
comments := []*model.Comment{}
for _, comment := range comments_ {
comments = append(comments, &model.Comment{
Author: *comment.User.Login,
Body: *comment.Body,
})
}
return comments, nil
}
func (g *Github) GetContents(u *model.User, r *model.Repo, path string) ([]byte, error) {
client := setupClient(g.API, u.Token)
content, _, _, err := client.Repositories.GetContents(r.Owner, r.Name, path, nil)
if err != nil {
return nil, err
}
return content.Decode()
}
func (g *Github) SetStatus(u *model.User, r *model.Repo, num int, ok bool) error {
client := setupClient(g.API, u.Token)
pr, _, err := client.PullRequests.Get(r.Owner, r.Name, num)
if err != nil {
return err
}
status := "pending"
desc := "this commit is pending approval"
if ok {
status = "success"
desc = "this commit looks good"
}
data := github.RepoStatus{
Context: github.String(context),
State: github.String(status),
Description: github.String(desc),
}
_, _, err = client.Repositories.CreateStatus(r.Owner, r.Name, *pr.Head.SHA, &data)
return err
}
func (g *Github) GetHook(r *http.Request) (*model.Hook, error) {
// only process comment hooks
if r.Header.Get("X-Github-Event") != "issue_comment" {
return nil, nil
}
data := commentHook{}
err := json.NewDecoder(r.Body).Decode(&data)
if err != nil {
return nil, err
}
if len(data.Issue.PullRequest.Link) == 0 {
return nil, nil
}
hook := new(model.Hook)
hook.Issue = new(model.Issue)
hook.Issue.Number = data.Issue.Number
hook.Issue.Author = data.Issue.User.Login
hook.Repo = new(model.Repo)
hook.Repo.Owner = data.Repository.Owner.Login
hook.Repo.Name = data.Repository.Name
hook.Repo.Slug = data.Repository.FullName
hook.Comment = new(model.Comment)
hook.Comment.Body = data.Comment.Body
hook.Comment.Author = data.Comment.User.Login
return hook, nil
}

52
remote/github/types.go Normal file

@@ -0,0 +1,52 @@
package github
type Error struct {
Message string `json:"message"`
}
func (e Error) Error() string { return e.Message }
func (e Error) String() string { return e.Message }
type Branch struct {
Protection struct {
Enabled bool `json:"enabled"`
Checks struct {
Enforcement string `json:"enforcement_level"`
Contexts []string `json:"contexts"`
} `json:"required_status_checks"`
} `json:"protection"`
}
// commentHook represents a subset of the issue_comment payload.
type commentHook struct {
Issue struct {
Link string `json:"html_url"`
Number int `json:"number"`
User struct {
Login string `json:"login"`
} `json:"user"`
PullRequest struct {
Link string `json:"html_url"`
} `json:"pull_request"`
} `json:"issue"`
Comment struct {
Body string `json:"body"`
User struct {
Login string `json:"login"`
} `json:"user"`
} `json:"comment"`
Repository struct {
Name string `json:"name"`
FullName string `json:"full_name"`
Desc string `json:"description"`
Private bool `json:"private"`
Owner struct {
Login string `json:"login"`
Type string `json:"type"`
Avatar string `json:"avatar_url"`
} `json:"owner"`
} `json:"repository"`
}

109
remote/github/utils.go Normal file

@@ -0,0 +1,109 @@
package github
import (
"fmt"
"net/url"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
func setupClient(rawurl, accessToken string) *github.Client {
token := oauth2.Token{AccessToken: accessToken}
source := oauth2.StaticTokenSource(&token)
client := oauth2.NewClient(oauth2.NoContext, source)
github := github.NewClient(client)
github.BaseURL, _ = url.Parse(rawurl)
return github
}
// GetHook is a helper function that retrieves a hook by
// hostname. To do this, it will retrieve a list of all hooks
// and iterate through the list.
func GetHook(client *github.Client, owner, name, rawurl string) (*github.Hook, error) {
hooks, _, err := client.Repositories.ListHooks(owner, name, nil)
if err != nil {
return nil, err
}
newurl, err := url.Parse(rawurl)
if err != nil {
fmt.Println("error parsing new hook url", rawurl, err)
return nil, err
}
for _, hook := range hooks {
hookurl, ok := hook.Config["url"].(string)
if !ok {
continue
}
oldurl, err := url.Parse(hookurl)
if err != nil {
fmt.Println("error parsing old hook url", hookurl, err)
continue
}
if newurl.Host == oldurl.Host {
return &hook, nil
}
}
return nil, nil
}
func DeleteHook(client *github.Client, owner, name, url string) error {
hook, err := GetHook(client, owner, name, url)
if err != nil {
return err
}
if hook == nil {
return nil
}
_, err = client.Repositories.DeleteHook(owner, name, *hook.ID)
return err
}
// CreateHook is a helper function that creates an issue_comment webhook
// for the specified repository.
func CreateHook(client *github.Client, owner, name, url string) (*github.Hook, error) {
var hook = new(github.Hook)
hook.Name = github.String("web")
hook.Events = []string{"issue_comment"}
hook.Config = map[string]interface{}{}
hook.Config["url"] = url
hook.Config["content_type"] = "json"
created, _, err := client.Repositories.CreateHook(owner, name, hook)
return created, err
}
// GetFile is a helper function that retrieves a file from
// GitHub and returns its contents as a byte slice.
func GetFile(client *github.Client, owner, name, path, ref string) ([]byte, error) {
var opts = new(github.RepositoryContentGetOptions)
opts.Ref = ref
content, _, _, err := client.Repositories.GetContents(owner, name, path, opts)
if err != nil {
return nil, err
}
return content.Decode()
}
// GetUserRepos is a helper function that returns a list of
// all user repositories. Paginated results are aggregated into
// a single list.
func GetUserRepos(client *github.Client) ([]github.Repository, error) {
var repos []github.Repository
var opts = github.RepositoryListOptions{}
opts.PerPage = 100
opts.Page = 1
// loop through user repository list
for opts.Page > 0 {
list, resp, err := client.Repositories.List("", &opts)
if err != nil {
return nil, err
}
repos = append(repos, list...)
// advance to the next page; NextPage is zero when there are no more pages
opts.Page = resp.NextPage
}
return repos, nil
}
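
Because setupClient is unexported, this hedged usage sketch lives in the same package; the access token, repository, hook URL, and MAINTAINERS path are placeholders, not values taken from the commit.

```go
package github

import (
	"fmt"
	"log"
)

// exampleUtilsUsage is illustrative only: it builds a client, registers the
// issue_comment webhook, and fetches a file at a ref.
func exampleUtilsUsage() {
	client := setupClient("https://api.github.com/", "oauth-access-token")

	// register the issue_comment webhook that will feed the hook endpoint
	if _, err := CreateHook(client, "octocat", "hello-world", "https://lgtm.example.com/hook"); err != nil {
		log.Fatalln("create hook:", err)
	}

	// fetch the MAINTAINERS file at the tip of master
	raw, err := GetFile(client, "octocat", "hello-world", "MAINTAINERS", "master")
	if err != nil {
		log.Fatalln("get file:", err)
	}
	fmt.Printf("%s\n", raw)
}
```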

255
remote/mock/remote.go Normal file

@ -0,0 +1,255 @@
package mock
import "github.com/stretchr/testify/mock"
import "net/http"
import "github.com/bradrydzewski/lgtm/model"
type Remote struct {
mock.Mock
}
func (_m *Remote) GetUser(_a0 http.ResponseWriter, _a1 *http.Request) (*model.User, error) {
ret := _m.Called(_a0, _a1)
var r0 *model.User
if rf, ok := ret.Get(0).(func(http.ResponseWriter, *http.Request) *model.User); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.User)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(http.ResponseWriter, *http.Request) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) GetUserToken(_a0 string) (string, error) {
ret := _m.Called(_a0)
var r0 string
if rf, ok := ret.Get(0).(func(string) string); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) GetTeams(_a0 *model.User) ([]*model.Team, error) {
ret := _m.Called(_a0)
var r0 []*model.Team
if rf, ok := ret.Get(0).(func(*model.User) []*model.Team); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Team)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) GetMembers(_a0 *model.User, _a1 string) ([]*model.Member, error) {
ret := _m.Called(_a0, _a1)
var r0 []*model.Member
if rf, ok := ret.Get(0).(func(*model.User, string) []*model.Member); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Member)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) GetRepo(_a0 *model.User, _a1 string, _a2 string) (*model.Repo, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 *model.Repo
if rf, ok := ret.Get(0).(func(*model.User, string, string) *model.Repo); ok {
r0 = rf(_a0, _a1, _a2)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Repo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, string, string) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) GetPerm(_a0 *model.User, _a1 string, _a2 string) (*model.Perm, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 *model.Perm
if rf, ok := ret.Get(0).(func(*model.User, string, string) *model.Perm); ok {
r0 = rf(_a0, _a1, _a2)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Perm)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, string, string) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) GetRepos(_a0 *model.User) ([]*model.Repo, error) {
ret := _m.Called(_a0)
var r0 []*model.Repo
if rf, ok := ret.Get(0).(func(*model.User) []*model.Repo); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Repo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) SetHook(_a0 *model.User, _a1 *model.Repo, _a2 string) error {
ret := _m.Called(_a0, _a1, _a2)
var r0 error
if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, string) error); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Error(0)
}
return r0
}
func (_m *Remote) DelHook(_a0 *model.User, _a1 *model.Repo, _a2 string) error {
ret := _m.Called(_a0, _a1, _a2)
var r0 error
if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, string) error); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Error(0)
}
return r0
}
func (_m *Remote) GetComments(_a0 *model.User, _a1 *model.Repo, _a2 int) ([]*model.Comment, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 []*model.Comment
if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, int) []*model.Comment); ok {
r0 = rf(_a0, _a1, _a2)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Comment)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, *model.Repo, int) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) GetContents(_a0 *model.User, _a1 *model.Repo, _a2 string) ([]byte, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 []byte
if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, string) []byte); ok {
r0 = rf(_a0, _a1, _a2)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, *model.Repo, string) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Remote) SetStatus(_a0 *model.User, _a1 *model.Repo, _a2 int, _a3 bool) error {
ret := _m.Called(_a0, _a1, _a2, _a3)
var r0 error
if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, int, bool) error); ok {
r0 = rf(_a0, _a1, _a2, _a3)
} else {
r0 = ret.Error(0)
}
return r0
}
func (_m *Remote) GetHook(r *http.Request) (*model.Hook, error) {
ret := _m.Called(r)
var r0 *model.Hook
if rf, ok := ret.Get(0).(func(*http.Request) *model.Hook); ok {
r0 = rf(r)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Hook)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*http.Request) error); ok {
r1 = rf(r)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
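
This mock is generated by mockery for the Remote interface. A small sketch (not part of the commit) of how a test might drive it, using testify's standard On/Return/AssertExpectations pattern; the test name and stubbed values are invented.

```go
package mock_test

import (
	"net/http"
	"testing"

	"github.com/bradrydzewski/lgtm/model"
	"github.com/bradrydzewski/lgtm/remote/mock"
)

// TestGetHookStub programs an expectation, calls the mocked method, and
// asserts the expectation was met.
func TestGetHookStub(t *testing.T) {
	req, _ := http.NewRequest("POST", "/hook", nil)

	remote := new(mock.Remote)
	remote.On("GetHook", req).Return(&model.Hook{}, nil)

	hook, err := remote.GetHook(req)
	if err != nil || hook == nil {
		t.Fatalf("expected stubbed hook, got %v, %v", hook, err)
	}
	remote.AssertExpectations(t)
}
```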

118
remote/remote.go Normal file

@ -0,0 +1,118 @@
package remote
//go:generate mockery -name Remote -output mock -case=underscore
import (
"net/http"
"github.com/bradrydzewski/lgtm/model"
"golang.org/x/net/context"
)
type Remote interface {
// GetUser authenticates a user with the remote system.
GetUser(http.ResponseWriter, *http.Request) (*model.User, error)
// GetUserToken authenticates a user with the remote system using
// the remote system's OAuth token.
GetUserToken(string) (string, error)
// GetTeams gets a team list from the remote system.
GetTeams(*model.User) ([]*model.Team, error)
// GetMembers gets a team member list from the remote system.
GetMembers(*model.User, string) ([]*model.Member, error)
// GetRepo gets a repository from the remote system.
GetRepo(*model.User, string, string) (*model.Repo, error)
// GetPerm gets a repository permission from the remote system.
GetPerm(*model.User, string, string) (*model.Perm, error)
// GetRepos gets a repository list from the remote system.
GetRepos(*model.User) ([]*model.Repo, error)
// SetHook adds a webhook to the remote repository.
SetHook(*model.User, *model.Repo, string) error
// DelHook deletes a webhook from the remote repository.
DelHook(*model.User, *model.Repo, string) error
// GetComments gets pull request comments from the remote system.
GetComments(*model.User, *model.Repo, int) ([]*model.Comment, error)
// GetContents gets the file contents from the remote system.
GetContents(*model.User, *model.Repo, string) ([]byte, error)
// SetStatus adds or updates the pull request status in the remote system.
SetStatus(*model.User, *model.Repo, int, bool) error
// GetHook gets the hook from the http Request.
GetHook(r *http.Request) (*model.Hook, error)
}
// GetUser authenticates a user with the remote system.
func GetUser(c context.Context, w http.ResponseWriter, r *http.Request) (*model.User, error) {
return FromContext(c).GetUser(w, r)
}
// GetUserToken authenticates a user with the remote system using
// the remote system's OAuth token.
func GetUserToken(c context.Context, token string) (string, error) {
return FromContext(c).GetUserToken(token)
}
// GetTeams gets a team list from the remote system.
func GetTeams(c context.Context, u *model.User) ([]*model.Team, error) {
return FromContext(c).GetTeams(u)
}
// GetMembers gets a team members list from the remote system.
func GetMembers(c context.Context, u *model.User, team string) ([]*model.Member, error) {
return FromContext(c).GetMembers(u, team)
}
// GetRepo gets a repository from the remote system.
func GetRepo(c context.Context, u *model.User, owner, name string) (*model.Repo, error) {
return FromContext(c).GetRepo(u, owner, name)
}
// GetPerm gets a repository permission from the remote system.
func GetPerm(c context.Context, u *model.User, owner, name string) (*model.Perm, error) {
return FromContext(c).GetPerm(u, owner, name)
}
// GetRepos gets a repository list from the remote system.
func GetRepos(c context.Context, u *model.User) ([]*model.Repo, error) {
return FromContext(c).GetRepos(u)
}
// GetComments gets pull request comments from the remote system.
func GetComments(c context.Context, u *model.User, r *model.Repo, num int) ([]*model.Comment, error) {
return FromContext(c).GetComments(u, r, num)
}
// GetContents gets the file contents from the remote system.
func GetContents(c context.Context, u *model.User, r *model.Repo, path string) ([]byte, error) {
return FromContext(c).GetContents(u, r, path)
}
// SetHook adds a webhook to the remote repository.
func SetHook(c context.Context, u *model.User, r *model.Repo, hook string) error {
return FromContext(c).SetHook(u, r, hook)
}
// DelHook deletes a webhook from the remote repository.
func DelHook(c context.Context, u *model.User, r *model.Repo, hook string) error {
return FromContext(c).DelHook(u, r, hook)
}
// SetStatus adds or updates the pull request status in the remote system.
func SetStatus(c context.Context, u *model.User, r *model.Repo, num int, ok bool) error {
return FromContext(c).SetStatus(u, r, num, ok)
}
// GetHook gets the hook from the http Request.
func GetHook(c context.Context, r *http.Request) (*model.Hook, error) {
return FromContext(c).GetHook(r)
}
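
The package-level helpers above resolve the Remote implementation from the request context. The sketch below is not the actual web.Hook handler wired into the router; it only illustrates how these helpers compose on the hook path, assuming (as the other handlers in this commit do) that the gin context can be passed as the context argument, and that repo.UserID matches the int64 that store.GetUser expects.

```go
package web

import (
	"github.com/bradrydzewski/lgtm/remote"
	"github.com/bradrydzewski/lgtm/store"
	"github.com/gin-gonic/gin"
)

// hookExample is an illustrative sketch, not the real handler.
func hookExample(c *gin.Context) {
	hook, err := remote.GetHook(c, c.Request)
	if err != nil || hook == nil {
		// not an issue_comment event, or not a pull request comment
		c.String(200, "ignored")
		return
	}

	repo, err := store.GetRepoSlug(c, hook.Repo.Slug)
	if err != nil {
		c.String(404, "Not Found")
		return
	}

	// the status is reported with the credentials of the user who
	// activated the repository in lgtm
	user, err := store.GetUser(c, repo.UserID)
	if err != nil {
		c.String(404, "Not Found")
		return
	}

	// approved would normally come from matching "lgtm" comments against
	// the maintainers list; that logic is outside this sketch.
	approved := false
	if err := remote.SetStatus(c, user, repo, hook.Issue.Number, approved); err != nil {
		c.String(500, "error setting status")
		return
	}
	c.String(200, "ok")
}
```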

22
remote/types.go Normal file

@ -0,0 +1,22 @@
package remote
// Account represents a user or team account.
type Account struct {
Login string `json:"login"`
Avatar string `json:"avatar"`
Kind string `json:"type"`
}
// Issue represents an issue or pull request.
type Issue struct {
Number int `json:"issue"`
Title string `json:"title"`
Author string `json:"author"`
}
// Comment represents a user comment on an issue
// or pull request.
type Comment struct {
Author string `json:"author"`
Body string `json:"body"`
}


@ -0,0 +1,56 @@
package access
import (
log "github.com/Sirupsen/logrus"
"github.com/bradrydzewski/lgtm/cache"
"github.com/bradrydzewski/lgtm/router/middleware/session"
"github.com/gin-gonic/gin"
)
func RepoAdmin(c *gin.Context) {
var (
owner = c.Param("owner")
name = c.Param("repo")
user = session.User(c)
)
perm, err := cache.GetPerm(c, user, owner, name)
if err != nil {
log.Errorf("Cannot find repository %s/%s. %s", owner, name, err)
c.String(404, "Not Found")
c.Abort()
return
}
if !perm.Admin {
log.Errorf("User %s does not have Admin access to repository %s/%s", user.Login, owner, name)
c.String(403, "Insufficient privileges")
c.Abort()
return
}
log.Debugf("User %s granted Admin access to %s/%s", user.Login, owner, name)
c.Next()
}
func RepoPull(c *gin.Context) {
var (
owner = c.Param("owner")
name = c.Param("repo")
user = session.User(c)
)
perm, err := cache.GetPerm(c, user, owner, name)
if err != nil {
log.Errorf("Cannot find repository %s/%s. %s", owner, name, err)
c.String(404, "Not Found")
c.Abort()
return
}
if !perm.Pull {
log.Errorf("User %s does not have Pull access to repository %s/%s", user.Login, owner, name)
c.String(404, "Not Found")
c.Abort()
return
}
log.Debugf("User %s granted Pull access to %s/%s", user.Login, owner, name)
c.Next()
}


@ -0,0 +1,22 @@
package middleware
import (
"time"
"github.com/bradrydzewski/lgtm/cache"
"github.com/gin-gonic/gin"
"github.com/ianschenck/envflag"
)
var (
ttl = envflag.Duration("CACHE_TTL", time.Minute*15, "")
)
func Cache() gin.HandlerFunc {
cache_ := cache.NewTTL(*ttl)
return func(c *gin.Context) {
c.Set("cache", cache_)
c.Next()
}
}


@ -0,0 +1,44 @@
package header
import (
"net/http"
"time"
"github.com/gin-gonic/gin"
)
// NoCache is a middleware function that appends headers
// to prevent the client from caching the HTTP response.
func NoCache(c *gin.Context) {
c.Header("Cache-Control", "no-cache, no-store, max-age=0, must-revalidate, value")
c.Header("Expires", "Thu, 01 Jan 1970 00:00:00 GMT")
c.Header("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
c.Next()
}
// Options is a middleware function that appends headers
// to OPTIONS requests and then aborts, exiting the middleware
// chain and ending the request.
func Options(c *gin.Context) {
if c.Request.Method != "OPTIONS" {
c.Next()
} else {
c.Header("Access-Control-Allow-Methods", "GET,POST,PUT,PATCH,DELETE,OPTIONS")
c.Header("Access-Control-Allow-Headers", "Authorization")
c.Header("Allow", "HEAD,GET,POST,PUT,PATCH,DELETE,OPTIONS")
c.Header("Content-Type", "application/json")
c.AbortWithStatus(200)
}
}
// Secure is a middleware function that appends security
// and resource access headers.
func Secure(c *gin.Context) {
c.Header("Access-Control-Allow-Origin", "*")
c.Header("X-Frame-Options", "DENY")
c.Header("X-Content-Type-Options", "nosniff")
c.Header("X-XSS-Protection", "1; mode=block")
if c.Request.TLS != nil {
c.Header("Strict-Transport-Security", "max-age=31536000")
}
}


@ -0,0 +1,41 @@
package middleware
import (
"strings"
"github.com/bradrydzewski/lgtm/remote/github"
"github.com/gin-gonic/gin"
"github.com/ianschenck/envflag"
)
const (
DefaultURL = "https://github.com"
DefaultAPI = "https://api.github.com/"
DefaultScope = "user:email,read:org,public_repo"
)
var (
server = envflag.String("GITHUB_URL", DefaultURL, "")
client = envflag.String("GITHUB_CLIENT", "", "")
secret = envflag.String("GITHUB_SECRET", "", "")
scope = envflag.String("GITHUB_SCOPE", DefaultScope, "")
)
func Remote() gin.HandlerFunc {
remote := &github.Github{
API: DefaultAPI,
URL: *server,
Client: *client,
Secret: *secret,
Scopes: strings.Split(*scope, ","),
}
if remote.URL != DefaultURL {
remote.URL = strings.TrimSuffix(remote.URL, "/")
remote.API = remote.URL + "/api/v3/"
}
return func(c *gin.Context) {
c.Set("remote", remote)
c.Next()
}
}


@ -0,0 +1,66 @@
package session
import (
"net/http"
"github.com/bradrydzewski/lgtm/model"
"github.com/bradrydzewski/lgtm/shared/token"
"github.com/bradrydzewski/lgtm/store"
"github.com/gin-gonic/gin"
)
func User(c *gin.Context) *model.User {
v, ok := c.Get("user")
if !ok {
return nil
}
u, ok := v.(*model.User)
if !ok {
return nil
}
return u
}
func UserMust(c *gin.Context) {
user := User(c)
switch {
case user == nil:
c.AbortWithStatus(http.StatusUnauthorized)
// c.HTML(http.StatusUnauthorized, "401.html", gin.H{})
default:
c.Next()
}
}
func SetUser(c *gin.Context) {
var user *model.User
// authenticates the user via an authentication cookie
// or an auth token.
t, err := token.ParseRequest(c.Request, func(t *token.Token) (string, error) {
var err error
user, err = store.GetUserLogin(c, t.Text)
return user.Secret, err
})
if err == nil {
c.Set("user", user)
// if this is a session token (i.e. not the API token)
// this means the user is accessing with a web browser,
// so we should implement CSRF protection measures.
if t.Kind == token.SessToken {
err = token.CheckCsrf(c.Request, func(t *token.Token) (string, error) {
return user.Secret, nil
})
// if csrf token validation fails, exit immediately
// with a not authorized error.
if err != nil {
c.AbortWithStatus(http.StatusUnauthorized)
return
}
}
}
c.Next()
}


@ -0,0 +1,21 @@
package middleware
import (
"github.com/bradrydzewski/lgtm/store/datastore"
"github.com/gin-gonic/gin"
"github.com/ianschenck/envflag"
)
var (
driver = envflag.String("DATABASE_DRIVER", "sqlite3", "")
datasource = envflag.String("DATABASE_DATASOURCE", "lgtm.sqlite", "")
)
func Store() gin.HandlerFunc {
store := datastore.New(*driver, *datasource)
return func(c *gin.Context) {
c.Set("store", store)
c.Next()
}
}


@ -0,0 +1,14 @@
package middleware
import (
"github.com/bradrydzewski/lgtm/version"
"github.com/gin-gonic/gin"
)
// Version is a middleware function that appends the LGTM version information
// to the HTTP response. This is intended for debugging and troubleshooting.
func Version(c *gin.Context) {
c.Header("X-LGTM-VERSION", version.Version)
c.Next()
}

45
router/router.go Normal file

@ -0,0 +1,45 @@
package router
import (
"net/http"
"github.com/bradrydzewski/lgtm/api"
"github.com/bradrydzewski/lgtm/router/middleware/access"
"github.com/bradrydzewski/lgtm/router/middleware/header"
"github.com/bradrydzewski/lgtm/router/middleware/session"
"github.com/bradrydzewski/lgtm/web"
"github.com/bradrydzewski/lgtm/web/static"
"github.com/bradrydzewski/lgtm/web/template"
"github.com/gin-gonic/gin"
)
func Load(middleware ...gin.HandlerFunc) http.Handler {
e := gin.New()
e.Use(gin.Recovery())
e.SetHTMLTemplate(template.Template())
e.StaticFS("/static", static.FileSystem())
e.Use(header.NoCache)
e.Use(header.Options)
e.Use(header.Secure)
e.Use(middleware...)
e.Use(session.SetUser)
e.GET("/api/user", session.UserMust, api.GetUser)
e.GET("/api/user/teams", session.UserMust, api.GetTeams)
e.GET("/api/user/repos", session.UserMust, api.GetRepos)
e.GET("/api/repos/:owner/:repo", session.UserMust, access.RepoPull, api.GetRepo)
e.POST("/api/repos/:owner/:repo", session.UserMust, access.RepoAdmin, api.PostRepo)
e.DELETE("/api/repos/:owner/:repo", session.UserMust, access.RepoAdmin, api.DeleteRepo)
e.GET("/api/repos/:owner/:repo/maintainers", session.UserMust, access.RepoPull, api.GetMaintainer)
e.GET("/api/repos/:owner/:repo/maintainers/:org", session.UserMust, access.RepoPull, api.GetMaintainerOrg)
e.POST("/hook", web.Hook)
e.GET("/login", web.Login)
e.POST("/login", web.LoginToken)
e.GET("/logout", web.Logout)
e.NoRoute(web.Index)
return e
}

112
shared/httputil/httputil.go Normal file

@ -0,0 +1,112 @@
package httputil
import (
"net/http"
"strings"
)
// IsHttps is a helper function that evaluates the http.Request
// and returns true if the request uses HTTPS. It is able to detect,
// using the X-Forwarded-Proto header, if the original request was HTTPS and
// routed through a reverse proxy with SSL termination.
func IsHttps(r *http.Request) bool {
switch {
case r.URL.Scheme == "https":
return true
case r.TLS != nil:
return true
case strings.HasPrefix(r.Proto, "HTTPS"):
return true
case r.Header.Get("X-Forwarded-Proto") == "https":
return true
default:
return false
}
}
// GetScheme is a helper function that evaluates the http.Request
// and returns the scheme, HTTP or HTTPS. It is able to detect,
// using the X-Forwarded-Proto header, if the original request was HTTPS
// and routed through a reverse proxy with SSL termination.
func GetScheme(r *http.Request) string {
switch {
case r.URL.Scheme == "https":
return "https"
case r.TLS != nil:
return "https"
case strings.HasPrefix(r.Proto, "HTTPS"):
return "https"
case r.Header.Get("X-Forwarded-Proto") == "https":
return "https"
default:
return "http"
}
}
// GetHost is a helper function that evaluates the http.Request
// and returns the hostname. It is able to detect, using headers
// such as X-Forwarded-For and X-Host, the original hostname when
// routed through a reverse proxy.
func GetHost(r *http.Request) string {
switch {
case len(r.Host) != 0:
return r.Host
case len(r.URL.Host) != 0:
return r.URL.Host
case len(r.Header.Get("X-Forwarded-For")) != 0:
return r.Header.Get("X-Forwarded-For")
case len(r.Header.Get("X-Host")) != 0:
return r.Header.Get("X-Host")
case len(r.Header.Get("XFF")) != 0:
return r.Header.Get("XFF")
case len(r.Header.Get("X-Real-IP")) != 0:
return r.Header.Get("X-Real-IP")
default:
return "localhost:8080"
}
}
// GetURL is a helper function that evaluates the http.Request
// and returns the URL as a string. Only the scheme + hostname
// are included; the path is excluded.
func GetURL(r *http.Request) string {
return GetScheme(r) + "://" + GetHost(r)
}
// GetCookie retrieves the named cookie value from the request.
func GetCookie(r *http.Request, name string) (value string) {
cookie, err := r.Cookie(name)
if err != nil {
return
}
value = cookie.Value
return
}
// SetCookie writes the cookie value.
func SetCookie(w http.ResponseWriter, r *http.Request, name, value string) {
cookie := http.Cookie{
Name: name,
Value: value,
Path: "/",
Domain: r.URL.Host,
HttpOnly: true,
Secure: IsHttps(r),
MaxAge: 2147483647, // the cookie value (token) is responsible for expiration
}
http.SetCookie(w, &cookie)
}
// DelCookie deletes a cookie.
func DelCookie(w http.ResponseWriter, r *http.Request, name string) {
cookie := http.Cookie{
Name: name,
Value: "deleted",
Path: "/",
Domain: r.URL.Host,
MaxAge: -1,
}
http.SetCookie(w, &cookie)
}
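
A small illustrative handler (not in the commit) tying these helpers together. The "user_sess" name matches the cookie that shared/token.ParseRequest reads back; the signed value is a placeholder.

```go
package httputil

import (
	"fmt"
	"net/http"
)

// exampleLogin is illustrative only.
func exampleLogin(w http.ResponseWriter, r *http.Request, signed string) {
	// Secure is set automatically when the request arrived over HTTPS,
	// either directly or via an X-Forwarded-Proto aware proxy.
	SetCookie(w, r, "user_sess", signed)

	// GetURL reconstructs the externally visible scheme://host, which is
	// handy for building redirect URLs behind a reverse proxy.
	fmt.Fprintln(w, "redirect base:", GetURL(r)+"/login")
}
```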

27
shared/server/server.go Normal file

@ -0,0 +1,27 @@
package server
import (
"net/http"
log "github.com/Sirupsen/logrus"
)
type Server struct {
Addr string
Cert string
Key string
}
func (s *Server) Run(handler http.Handler) {
log.Infof("starting server %s", s.Addr)
if len(s.Cert) != 0 {
log.Fatal(
http.ListenAndServeTLS(s.Addr, s.Cert, s.Key, handler),
)
} else {
log.Fatal(
http.ListenAndServe(s.Addr, handler),
)
}
}
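
The entry point that wires Server to the router is not part of this diff, so the sketch below is only a guess at the composition, built from the packages shown above; the middleware set, order, and address are assumptions.

```go
package main

import (
	"github.com/bradrydzewski/lgtm/router"
	"github.com/bradrydzewski/lgtm/router/middleware"
	"github.com/bradrydzewski/lgtm/shared/server"
)

func main() {
	// compose the middleware defined in this commit into the router
	handler := router.Load(
		middleware.Version,
		middleware.Store(),
		middleware.Remote(),
		middleware.Cache(),
	)

	// serve on :8000; Cert and Key would enable TLS instead
	s := server.Server{Addr: ":8000"}
	s.Run(handler)
}
```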

130
shared/token/token.go Normal file

@ -0,0 +1,130 @@
package token
import (
"fmt"
"net/http"
"github.com/dgrijalva/jwt-go"
)
type SecretFunc func(*Token) (string, error)
const (
UserToken = "user"
SessToken = "sess"
HookToken = "hook"
CsrfToken = "csrf"
)
// Default algorithm used to sign JWT tokens.
const SignerAlgo = "HS256"
type Token struct {
Kind string
Text string
}
func Parse(raw string, fn SecretFunc) (*Token, error) {
token := &Token{}
parsed, err := jwt.Parse(raw, keyFunc(token, fn))
if err != nil {
return nil, err
} else if !parsed.Valid {
return nil, jwt.ValidationError{}
}
return token, nil
}
func ParseRequest(r *http.Request, fn SecretFunc) (*Token, error) {
// first we attempt to get the token from the
// Authorization header.
var token = r.Header.Get("Authorization")
if len(token) != 0 {
fmt.Sscanf(token, "Bearer %s", &token)
return Parse(token, fn)
}
// then we attempt to get the token from the
// access_token url query parameter
token = r.FormValue("access_token")
if len(token) != 0 {
return Parse(token, fn)
}
// and finally we attempt to get the token from
// the user session cookie
cookie, err := r.Cookie("user_sess")
if err != nil {
return nil, err
}
return Parse(cookie.Value, fn)
}
func CheckCsrf(r *http.Request, fn SecretFunc) error {
// GET and OPTIONS requests are always
// allowed, without CSRF checks.
switch r.Method {
case "GET", "OPTIONS":
return nil
}
// parse the raw CSRF token value and validate
raw := r.Header.Get("X-CSRF-TOKEN")
_, err := Parse(raw, fn)
return err
}
func New(kind, text string) *Token {
return &Token{Kind: kind, Text: text}
}
// Sign signs the token using the given secret hash
// and returns the string value.
func (t *Token) Sign(secret string) (string, error) {
return t.SignExpires(secret, 0)
}
// SignExpires signs the token using the given secret hash
// with an expiration date.
func (t *Token) SignExpires(secret string, exp int64) (string, error) {
token := jwt.New(jwt.SigningMethodHS256)
token.Claims["type"] = t.Kind
token.Claims["text"] = t.Text
if exp > 0 {
token.Claims["exp"] = float64(exp)
}
return token.SignedString([]byte(secret))
}
func keyFunc(token *Token, fn SecretFunc) jwt.Keyfunc {
return func(t *jwt.Token) (interface{}, error) {
// validate the correct algorithm is being used
if t.Method.Alg() != SignerAlgo {
return nil, jwt.ErrSignatureInvalid
}
// extract the token kind and cast to
// the expected type.
kindv, ok := t.Claims["type"]
if !ok {
return nil, jwt.ValidationError{}
}
token.Kind, _ = kindv.(string)
// extract the token value and cast to
// the expected type.
textv, ok := t.Claims["text"]
if !ok {
return nil, jwt.ValidationError{}
}
token.Text, _ = textv.(string)
// invoke the callback function to retrieve
// the secret key used to verify
secret, err := fn(token)
return []byte(secret), err
}
}
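
A quick round-trip sketch (not in the commit) for the API above: New, Sign with a per-user secret, then Parse with a SecretFunc returning the same secret. The login and secret are placeholders for values the server would read from the store.

```go
package token

import "fmt"

// exampleRoundTrip signs and re-parses a token.
func exampleRoundTrip() error {
	secret := "per-user-secret"

	t := New(UserToken, "octocat")
	signed, err := t.Sign(secret)
	if err != nil {
		return err
	}

	parsed, err := Parse(signed, func(_ *Token) (string, error) {
		// in the server this callback looks up the secret for the login
		// carried in the token; here it is a constant.
		return secret, nil
	})
	if err != nil {
		return err
	}
	fmt.Println(parsed.Kind, parsed.Text) // user octocat
	return nil
}
```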

22
store/context.go Normal file

@ -0,0 +1,22 @@
package store
import (
"golang.org/x/net/context"
)
const key = "store"
// Setter defines a context that enables setting values.
type Setter interface {
Set(string, interface{})
}
// FromContext returns the Store associated with this context.
func FromContext(c context.Context) Store {
return c.Value(key).(Store)
}
// ToContext adds the Store to this context if it supports the Setter interface.
func ToContext(c Setter, store Store) {
c.Set(key, store)
}


@ -0,0 +1,117 @@
package datastore
import (
"database/sql"
"os"
"time"
"github.com/bradrydzewski/lgtm/store"
"github.com/bradrydzewski/lgtm/store/migration"
"github.com/Sirupsen/logrus"
_ "github.com/go-sql-driver/mysql"
_ "github.com/mattn/go-sqlite3"
"github.com/rubenv/sql-migrate"
"github.com/russross/meddler"
)
type datastore struct {
*sql.DB
}
// New creates a database connection for the given driver and datasource
// and returns a new Store.
func New(driver, config string) store.Store {
db := Open(driver, config)
return From(db)
}
// From returns a Store using an existing database connection.
func From(db *sql.DB) store.Store {
return &datastore{db}
}
// Open opens a new database connection with the specified
// driver and connection string and returns the database handle.
func Open(driver, config string) *sql.DB {
db, err := sql.Open(driver, config)
if err != nil {
logrus.Errorln(err)
logrus.Fatalln("database connection failed")
}
if driver == "mysql" {
// per issue https://github.com/go-sql-driver/mysql/issues/257
db.SetMaxIdleConns(0)
}
setupMeddler(driver)
logrus.Debugf("Driver %s", driver)
logrus.Debugf("Data Source %s", config)
if err := pingDatabase(db); err != nil {
logrus.Errorln(err)
logrus.Fatalln("database ping attempts failed")
}
if err := setupDatabase(driver, db); err != nil {
logrus.Errorln(err)
logrus.Fatalln("migration failed")
}
return db
}
// openTest opens a new database connection for testing purposes.
// The database driver and connection string are provided by
// environment variables, with fallback to in-memory sqlite.
func openTest() *sql.DB {
var (
driver = "sqlite3"
config = ":memory:"
)
if os.Getenv("DATABASE_DRIVER") != "" {
driver = os.Getenv("DATABASE_DRIVER")
config = os.Getenv("DATABASE_DATASOURCE")
}
return Open(driver, config)
}
// helper function to ping the database with retries (up to thirty
// attempts, one second apart) to ensure a connection can be established
// before we proceed with the database setup and migration.
func pingDatabase(db *sql.DB) (err error) {
for i := 0; i < 30; i++ {
err = db.Ping()
if err == nil {
return
}
logrus.Infof("database ping failed. retry in 1s")
time.Sleep(time.Second)
}
return
}
// helper function to set up the database by performing
// automated database migration steps.
func setupDatabase(driver string, db *sql.DB) error {
var migrations = &migrate.AssetMigrationSource{
Asset: migration.Asset,
AssetDir: migration.AssetDir,
Dir: driver,
}
_, err := migrate.Exec(db, driver, migrations, migrate.Up)
return err
}
// helper function to set up the meddler default driver
// based on the selected driver name.
func setupMeddler(driver string) {
switch driver {
case "sqlite3":
meddler.Default = meddler.SQLite
case "mysql":
meddler.Default = meddler.MySQL
case "postgres":
meddler.Default = meddler.PostgreSQL
}
}

91
store/datastore/repos.go Normal file

@ -0,0 +1,91 @@
package datastore
import (
"fmt"
"strings"
"github.com/bradrydzewski/lgtm/model"
"github.com/russross/meddler"
)
func (db *datastore) GetRepo(id int64) (*model.Repo, error) {
var repo = new(model.Repo)
var err = meddler.Load(db, repoTable, repo, id)
return repo, err
}
func (db *datastore) GetRepoSlug(slug string) (*model.Repo, error) {
var repo = new(model.Repo)
var err = meddler.QueryRow(db, repo, repoSlugQuery, slug)
return repo, err
}
func (db *datastore) GetRepoMulti(slug ...string) ([]*model.Repo, error) {
var repos = []*model.Repo{}
var instr, params = toList(slug)
var stmt = fmt.Sprintf(repoListQuery, instr)
var err = meddler.QueryAll(db, &repos, stmt, params...)
return repos, err
}
func (db *datastore) GetRepoOwner(owner string) ([]*model.Repo, error) {
var repos = []*model.Repo{}
var err = meddler.QueryAll(db, &repos, repoOwnerQuery, owner)
return repos, err
}
func (db *datastore) CreateRepo(repo *model.Repo) error {
return meddler.Insert(db, repoTable, repo)
}
func (db *datastore) UpdateRepo(repo *model.Repo) error {
return meddler.Update(db, repoTable, repo)
}
func (db *datastore) DeleteRepo(repo *model.Repo) error {
var _, err = db.Exec(repoDeleteStmt, repo.ID)
return err
}
// toList converts a slice of strings into a comma-separated placeholder
// string and the matching argument list for a SQL IN clause. The input is
// capped at 990 items to stay under sqlite3's default limit of 999 bound
// parameters.
func toList(items []string) (string, []interface{}) {
var size = len(items)
if size > 990 {
size = 990
items = items[:990]
}
var qs = make([]string, size, size)
var in = make([]interface{}, size, size)
for i, item := range items {
qs[i] = "?"
in[i] = item
}
return strings.Join(qs, ","), in
}
const repoTable = "repos"
const repoSlugQuery = `
SELECT *
FROM repos
WHERE repo_slug = ?
LIMIT 1;
`
const repoOwnerQuery = `
SELECT *
FROM repos
WHERE repo_owner = ?
`
const repoListQuery = `
SELECT *
FROM repos
WHERE repo_slug IN (%s)
ORDER BY repo_slug
`
const repoDeleteStmt = `
DELETE FROM repos
WHERE repo_id = ?
`


@ -0,0 +1,154 @@
package datastore
import (
"testing"
"github.com/bradrydzewski/lgtm/model"
"github.com/franela/goblin"
)
func Test_repostore(t *testing.T) {
db := openTest()
defer db.Close()
s := From(db)
g := goblin.Goblin(t)
g.Describe("Repo", func() {
// before each test be sure to purge the repos and
// users table data from the database.
g.BeforeEach(func() {
db.Exec("DELETE FROM repos")
db.Exec("DELETE FROM users")
})
g.It("Should Set a Repo", func() {
repo := model.Repo{
UserID: 1,
Slug: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
}
err1 := s.CreateRepo(&repo)
err2 := s.UpdateRepo(&repo)
getrepo, err3 := s.GetRepo(repo.ID)
g.Assert(err1 == nil).IsTrue()
g.Assert(err2 == nil).IsTrue()
g.Assert(err3 == nil).IsTrue()
g.Assert(repo.ID).Equal(getrepo.ID)
})
g.It("Should Add a Repo", func() {
repo := model.Repo{
UserID: 1,
Slug: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
}
err := s.CreateRepo(&repo)
g.Assert(err == nil).IsTrue()
g.Assert(repo.ID != 0).IsTrue()
})
g.It("Should Get a Repo by ID", func() {
repo := model.Repo{
UserID: 1,
Slug: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
Link: "https://github.com/octocat/hello-world",
Private: true,
}
s.CreateRepo(&repo)
getrepo, err := s.GetRepo(repo.ID)
g.Assert(err == nil).IsTrue()
g.Assert(repo.ID).Equal(getrepo.ID)
g.Assert(repo.UserID).Equal(getrepo.UserID)
g.Assert(repo.Owner).Equal(getrepo.Owner)
g.Assert(repo.Name).Equal(getrepo.Name)
g.Assert(repo.Private).Equal(getrepo.Private)
g.Assert(repo.Link).Equal(getrepo.Link)
})
g.It("Should Get a Repo by Slug", func() {
repo := model.Repo{
UserID: 1,
Slug: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
}
s.CreateRepo(&repo)
getrepo, err := s.GetRepoSlug(repo.Slug)
g.Assert(err == nil).IsTrue()
g.Assert(repo.ID).Equal(getrepo.ID)
g.Assert(repo.UserID).Equal(getrepo.UserID)
g.Assert(repo.Owner).Equal(getrepo.Owner)
g.Assert(repo.Name).Equal(getrepo.Name)
})
g.It("Should Get a Multiple Repos", func() {
repo1 := &model.Repo{
UserID: 1,
Owner: "foo",
Name: "bar",
Slug: "foo/bar",
}
repo2 := &model.Repo{
UserID: 2,
Owner: "octocat",
Name: "fork-knife",
Slug: "octocat/fork-knife",
}
repo3 := &model.Repo{
UserID: 2,
Owner: "octocat",
Name: "hello-world",
Slug: "octocat/hello-world",
}
s.CreateRepo(repo1)
s.CreateRepo(repo2)
s.CreateRepo(repo3)
repos, err := s.GetRepoMulti("octocat/fork-knife", "octocat/hello-world")
g.Assert(err == nil).IsTrue()
g.Assert(len(repos)).Equal(2)
g.Assert(repos[0].ID).Equal(repo2.ID)
g.Assert(repos[1].ID).Equal(repo3.ID)
})
g.It("Should Delete a Repo", func() {
repo := model.Repo{
UserID: 1,
Slug: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
}
s.CreateRepo(&repo)
_, err1 := s.GetRepo(repo.ID)
err2 := s.DeleteRepo(&repo)
_, err3 := s.GetRepo(repo.ID)
g.Assert(err1 == nil).IsTrue()
g.Assert(err2 == nil).IsTrue()
g.Assert(err3 == nil).IsFalse()
})
g.It("Should Enforce Unique Repo Name", func() {
repo1 := model.Repo{
UserID: 1,
Slug: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
}
repo2 := model.Repo{
UserID: 2,
Slug: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
}
err1 := s.CreateRepo(&repo1)
err2 := s.CreateRepo(&repo2)
g.Assert(err1 == nil).IsTrue()
g.Assert(err2 == nil).IsFalse()
})
})
}

57
store/datastore/users.go Normal file

@ -0,0 +1,57 @@
package datastore
import (
"github.com/bradrydzewski/lgtm/model"
"github.com/russross/meddler"
)
func (db *datastore) GetUser(id int64) (*model.User, error) {
var usr = new(model.User)
var err = meddler.Load(db, userTable, usr, id)
return usr, err
}
func (db *datastore) GetUserLogin(login string) (*model.User, error) {
var usr = new(model.User)
var err = meddler.QueryRow(db, usr, userLoginQuery, login)
return usr, err
}
func (db *datastore) CreateUser(user *model.User) error {
return meddler.Insert(db, userTable, user)
}
func (db *datastore) UpdateUser(user *model.User) error {
return meddler.Update(db, userTable, user)
}
func (db *datastore) DeleteUser(user *model.User) error {
var _, err = db.Exec(userDeleteStmt, user.ID)
return err
}
const userTable = "users"
const userLoginQuery = `
SELECT *
FROM users
WHERE user_login=?
LIMIT 1
`
const userListQuery = `
SELECT *
FROM users
ORDER BY user_login ASC
`
const userCountQuery = `
SELECT count(1)
FROM users
`
const userDeleteStmt = `
DELETE FROM users
WHERE user_id=?
`


@ -0,0 +1,115 @@
package datastore
import (
"testing"
"github.com/bradrydzewski/lgtm/model"
"github.com/franela/goblin"
)
func Test_userstore(t *testing.T) {
db := openTest()
defer db.Close()
s := From(db)
g := goblin.Goblin(t)
g.Describe("User", func() {
// before each test be sure to purge the users
// table data from the database.
g.BeforeEach(func() {
db.Exec("DELETE FROM users")
})
g.It("Should Update a User", func() {
user := model.User{
Login: "joe",
Email: "foo@bar.com",
Token: "e42080dddf012c718e476da161d21ad5",
}
err1 := s.CreateUser(&user)
err2 := s.UpdateUser(&user)
getuser, err3 := s.GetUser(user.ID)
g.Assert(err1 == nil).IsTrue()
g.Assert(err2 == nil).IsTrue()
g.Assert(err3 == nil).IsTrue()
g.Assert(user.ID).Equal(getuser.ID)
})
g.It("Should Add a new User", func() {
user := model.User{
Login: "joe",
Email: "foo@bar.com",
Token: "e42080dddf012c718e476da161d21ad5",
}
err := s.CreateUser(&user)
g.Assert(err == nil).IsTrue()
g.Assert(user.ID != 0).IsTrue()
})
g.It("Should Get a User", func() {
user := model.User{
Login: "joe",
Token: "f0b461ca586c27872b43a0685cbc2847",
Secret: "976f22a5eef7caacb7e678d6c52f49b1",
Email: "foo@bar.com",
Avatar: "b9015b0857e16ac4d94a0ffd9a0b79c8",
}
s.CreateUser(&user)
getuser, err := s.GetUser(user.ID)
g.Assert(err == nil).IsTrue()
g.Assert(user.ID).Equal(getuser.ID)
g.Assert(user.Login).Equal(getuser.Login)
g.Assert(user.Token).Equal(getuser.Token)
g.Assert(user.Secret).Equal(getuser.Secret)
g.Assert(user.Email).Equal(getuser.Email)
g.Assert(user.Avatar).Equal(getuser.Avatar)
})
g.It("Should Get a User By Login", func() {
user := model.User{
Login: "joe",
Email: "foo@bar.com",
Token: "e42080dddf012c718e476da161d21ad5",
}
s.CreateUser(&user)
getuser, err := s.GetUserLogin(user.Login)
g.Assert(err == nil).IsTrue()
g.Assert(user.ID).Equal(getuser.ID)
g.Assert(user.Login).Equal(getuser.Login)
})
g.It("Should Enforce Unique User Login", func() {
user1 := model.User{
Login: "joe",
Email: "foo@bar.com",
Token: "e42080dddf012c718e476da161d21ad5",
}
user2 := model.User{
Login: "joe",
Email: "foo@bar.com",
Token: "ab20g0ddaf012c744e136da16aa21ad9",
}
err1 := s.CreateUser(&user1)
err2 := s.CreateUser(&user2)
g.Assert(err1 == nil).IsTrue()
g.Assert(err2 == nil).IsFalse()
})
g.It("Should Del a User", func() {
user := model.User{
Login: "joe",
Email: "foo@bar.com",
Token: "e42080dddf012c718e476da161d21ad5",
}
s.CreateUser(&user)
_, err1 := s.GetUser(user.ID)
err2 := s.DeleteUser(&user)
_, err3 := s.GetUser(user.ID)
g.Assert(err1 == nil).IsTrue()
g.Assert(err2 == nil).IsTrue()
g.Assert(err3 == nil).IsFalse()
})
})
}


@ -0,0 +1,3 @@
package migration
//go:generate go-bindata -pkg migration -o migration_gen.go sqlite3/ mysql/


@ -0,0 +1,33 @@
-- +migrate Up
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
,user_login VARCHAR(255)
,user_token VARCHAR(255)
,user_email VARCHAR(255)
,user_avatar VARCHAR(1024)
,user_secret VARCHAR(255)
,UNIQUE(user_login)
);
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
,repo_user_id INTEGER
,repo_owner VARCHAR(255)
,repo_name VARCHAR(255)
,repo_slug VARCHAR(255)
,repo_link VARCHAR(1024)
,repo_private BOOLEAN
,repo_secret VARCHAR(255)
,UNIQUE(repo_slug)
);
CREATE INDEX ix_repo_owner ON repos (repo_owner);
CREATE INDEX ix_repo_user_id ON repos (repo_user_id);
-- +migrate Down
DROP TABLE repos;
DROP TABLE users;


@ -0,0 +1,33 @@
-- +migrate Up
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT
,user_login TEXT
,user_token TEXT
,user_email TEXT
,user_avatar TEXT
,user_secret TEXT
,UNIQUE(user_login)
);
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
,repo_user_id INTEGER
,repo_owner TEXT
,repo_name TEXT
,repo_slug TEXT
,repo_link TEXT
,repo_private BOOLEAN
,repo_secret TEXT
,UNIQUE(repo_slug)
);
CREATE INDEX IF NOT EXISTS ix_repo_owner ON repos (repo_owner);
CREATE INDEX IF NOT EXISTS ix_repo_user_id ON repos (repo_user_id);
-- +migrate Down
DROP TABLE repos;
DROP TABLE users;

33
store/mock/store.go Normal file

@ -0,0 +1,33 @@
package mock
import "github.com/bradrydzewski/lgtm/store"
import "github.com/stretchr/testify/mock"
type Store struct {
mock.Mock
}
func (_m *Store) Users() store.UserStore {
ret := _m.Called()
var r0 store.UserStore
if rf, ok := ret.Get(0).(func() store.UserStore); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(store.UserStore)
}
return r0
}
func (_m *Store) Repos() store.RepoStore {
ret := _m.Called()
var r0 store.RepoStore
if rf, ok := ret.Get(0).(func() store.RepoStore); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(store.RepoStore)
}
return r0
}

139
store/store.go Normal file

@ -0,0 +1,139 @@
package store
import (
"path"
"github.com/bradrydzewski/lgtm/model"
"golang.org/x/net/context"
)
//go:generate mockery -name Store -output mock -case=underscore
// Store defines a data storage abstraction for managing structured data
// in the system.
type Store interface {
// GetUser gets a user by unique ID.
GetUser(int64) (*model.User, error)
// GetUserLogin gets a user by unique Login name.
GetUserLogin(string) (*model.User, error)
// CreateUser creates a new user account.
CreateUser(*model.User) error
// UpdateUser updates a user account.
UpdateUser(*model.User) error
// DeleteUser deletes a user account.
DeleteUser(*model.User) error
// GetRepo gets a repo by unique ID.
GetRepo(int64) (*model.Repo, error)
// GetRepoSlug gets a repo by its full name.
GetRepoSlug(string) (*model.Repo, error)
// GetRepoMulti gets a list of multiple repos by their full name.
GetRepoMulti(...string) ([]*model.Repo, error)
// GetRepoOwner gets a repository list by owner.
GetRepoOwner(string) ([]*model.Repo, error)
// CreateRepo creates a new repository.
CreateRepo(*model.Repo) error
// UpdateRepo updates a user repository.
UpdateRepo(*model.Repo) error
// DeleteRepo deletes a user repository.
DeleteRepo(*model.Repo) error
}
// GetUser gets a user by unique ID.
func GetUser(c context.Context, id int64) (*model.User, error) {
return FromContext(c).GetUser(id)
}
// GetUserLogin gets a user by unique Login name.
func GetUserLogin(c context.Context, login string) (*model.User, error) {
return FromContext(c).GetUserLogin(login)
}
// CreateUser creates a new user account.
func CreateUser(c context.Context, user *model.User) error {
return FromContext(c).CreateUser(user)
}
// UpdateUser updates a user account.
func UpdateUser(c context.Context, user *model.User) error {
return FromContext(c).UpdateUser(user)
}
// DeleteUser deletes a user account.
func DeleteUser(c context.Context, user *model.User) error {
return FromContext(c).DeleteUser(user)
}
// GetRepo gets a repo by unique ID.
func GetRepo(c context.Context, id int64) (*model.Repo, error) {
return FromContext(c).GetRepo(id)
}
// GetRepoSlug gets a repo by its full name.
func GetRepoSlug(c context.Context, slug string) (*model.Repo, error) {
return FromContext(c).GetRepoSlug(slug)
}
// GetRepoOwnerName gets a repo by its owner and name.
func GetRepoOwnerName(c context.Context, owner, name string) (*model.Repo, error) {
return GetRepoSlug(c, path.Join(owner, name))
}
// GetRepoMulti gets a list of multiple repos by their full name.
func GetRepoMulti(c context.Context, slug ...string) ([]*model.Repo, error) {
return FromContext(c).GetRepoMulti(slug...)
}
// GetRepoOwner gets a repo list by account.
func GetRepoOwner(c context.Context, owner string) ([]*model.Repo, error) {
return FromContext(c).GetRepoOwner(owner)
}
// GetRepoIntersect returns the subset of the given repositories that
// also exist in the store.
func GetRepoIntersect(c context.Context, repos []*model.Repo) ([]*model.Repo, error) {
slugs := make([]string, len(repos))
for i, repo := range repos {
slugs[i] = repo.Slug
}
return GetRepoMulti(c, slugs...)
}
// GetRepoIntersectMap returns the intersection of the given repositories
// and the store as a map, where the key is the repository slug and the
// value is the repository struct.
func GetRepoIntersectMap(c context.Context, repos []*model.Repo) (map[string]*model.Repo, error) {
repos, err := GetRepoIntersect(c, repos)
if err != nil {
return nil, err
}
set := make(map[string]*model.Repo, len(repos))
for _, repo := range repos {
set[repo.Slug] = repo
}
return set, nil
}
// CreateRepo creates a new repository.
func CreateRepo(c context.Context, repo *model.Repo) error {
return FromContext(c).CreateRepo(repo)
}
// UpdateRepo updates a user repository.
func UpdateRepo(c context.Context, repo *model.Repo) error {
return FromContext(c).UpdateRepo(repo)
}
// DeleteRepo deletes a user repository.
func DeleteRepo(c context.Context, repo *model.Repo) error {
return FromContext(c).DeleteRepo(repo)
}
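
As an illustration of the intersect helpers (not part of the commit), the sketch below filters a user's remote repositories down to the ones already stored in lgtm. It assumes, as the other handlers in this commit do, that the gin context is usable as the context argument; the handler name is invented.

```go
package api

import (
	"github.com/bradrydzewski/lgtm/model"
	"github.com/bradrydzewski/lgtm/remote"
	"github.com/bradrydzewski/lgtm/router/middleware/session"
	"github.com/bradrydzewski/lgtm/store"
	"github.com/gin-gonic/gin"
)

// listReposExample is illustrative only.
func listReposExample(c *gin.Context) {
	user := session.User(c)

	// every repository the user can see on the remote system
	all, err := remote.GetRepos(c, user)
	if err != nil {
		c.String(500, "error fetching repository list")
		return
	}

	// the subset already activated in lgtm, keyed by slug
	active, err := store.GetRepoIntersectMap(c, all)
	if err != nil {
		c.String(500, "error fetching repository list")
		return
	}

	enabled := []*model.Repo{}
	for _, repo := range all {
		if _, ok := active[repo.Slug]; ok {
			enabled = append(enabled, repo)
		}
	}
	c.JSON(200, enabled)
}
```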

3
vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored Normal file

@ -0,0 +1,3 @@
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)

14
vendor/github.com/BurntSushi/toml/COPYING generated vendored Normal file

@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

19
vendor/github.com/BurntSushi/toml/Makefile generated vendored Normal file

@ -0,0 +1,19 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

220
vendor/github.com/BurntSushi/toml/README.md generated vendored Normal file

@ -0,0 +1,220 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/mojombo/toml
Compatible with TOML version
[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
Documentation: http://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.

492
vendor/github.com/BurntSushi/toml/decode.go generated vendored Normal file

@ -0,0 +1,492 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
var e = fmt.Errorf
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, rvalue(v))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("Unsupported type '%s'.", rv.Kind())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("Unsupported type '%s'.", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
return mismatch(rv, "map", mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return e("Type mismatch for '%s.%s': %s",
rv.Type().String(), f.name, err)
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("Field '%s.%s' is unexported, and therefore cannot "+
"be loaded with reflection.", rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
return badtype("slice", data)
}
sliceLen := datav.Len()
if rv.IsNil() {
rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("Value '%d' is out of range for int8.", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("Value '%d' is out of range for int16.", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("Value '%d' is out of range for int32.", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("Value '%d' is out of range for uint8.", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("Value '%d' is out of range for uint16.", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("Value '%d' is out of range for uint32.", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
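Similarly, an editor's sketch of the TextUnmarshaler path handled by unifyText: any field type with an UnmarshalText method can parse itself from a primitive TOML value. The logLevel type and its accepted values are hypothetical.
// Editor's sketch: hypothetical type satisfying TextUnmarshaler.
type logLevel int

func (l *logLevel) UnmarshalText(text []byte) error {
	switch string(text) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown log level %q", text)
	}
	return nil
}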
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanAddr() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("Expected %s but found '%T'.", expected, data)
}
func mismatch(user reflect.Value, expected string, data interface{}) error {
return e("Type mismatch for %s. Expected %s but found '%T'.",
user.Type().String(), expected, data)
}

122
vendor/github.com/BurntSushi/toml/decode_meta.go generated vendored Normal file

@ -0,0 +1,122 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically. e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
} else {
return k[i]
}
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}
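An editor's sketch (not part of the vendored file) of how the MetaData accessors above are typically used after decoding; the struct and TOML literal are hypothetical.
// Editor's sketch: inspecting MetaData after a Decode call.
func exampleMetaData() {
	var conf struct {
		Owner struct{ Name string }
	}
	blob := "[owner]\nname = \"anyone\"\ndob = 1979-05-27T07:32:00Z\n"
	md, err := Decode(blob, &conf)
	if err != nil {
		return
	}
	_ = md.IsDefined("owner", "name") // true
	_ = md.Type("owner", "dob")       // "Datetime"
	_ = md.Undecoded()                // keys with no matching field, here owner.dob
}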

27
vendor/github.com/BurntSushi/toml/doc.go generated vendored Normal file

@ -0,0 +1,27 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/mojombo/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml

551
vendor/github.com/BurntSushi/toml/encode.go generated vendored Normal file

@ -0,0 +1,551 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"can't encode array with mixed element types")
errArrayNilElement = errors.New(
"can't encode array with nil element")
errNonString = errors.New(
"can't encode a map with non-string key type")
errAnonNonStruct = errors.New(
"can't encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"TOML array element can't contain a table")
errNoKey = errors.New(
"top-level values must be a Go map or struct")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
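An editor's sketch of driving the Encoder above; the server struct, its tags, and the four-space indent are hypothetical choices.
// Editor's sketch: encoding a hypothetical struct to an io.Writer.
func exampleEncode(w io.Writer) error {
	type server struct {
		Host string `toml:"host"`
		Port int    `toml:"port"`
	}
	enc := NewEncoder(w)
	enc.Indent = "    " // override the default two-space indent
	return enc.Encode(server{Host: "localhost", Port: 8000})
	// Writes:
	//   host = "localhost"
	//   port = 8000
}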
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("Unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("Unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal point with at least one
// digit on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
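An editor's sketch of the array-of-tables case that eArrayOfTables renders as repeated [[key]] headers; the point and doc types are hypothetical.
// Editor's sketch: a slice of structs becomes a TOML array of tables.
func exampleArrayOfTables(w io.Writer) error {
	type point struct{ X, Y int }
	type doc struct {
		Points []point `toml:"points"`
	}
	return NewEncoder(w).Encode(doc{Points: []point{{1, 2}, {3, 4}}})
	// Writes (with the default two-space indent):
	//   [[points]]
	//     X = 1
	//     Y = 2
	//
	//   [[points]]
	//     X = 3
	//     Y = 4
}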
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra new line between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" {
continue
}
frv := rv.Field(i)
if f.Anonymous {
frv := eindirect(frv)
t := frv.Type()
if t.Kind() != reflect.Struct {
encPanic(errAnonNonStruct)
}
addFields(t, frv, f.Index)
} else if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
keyName := sft.Tag.Get("toml")
if keyName == "-" {
continue
}
if keyName == "" {
keyName = sft.Name
}
keyName, opts := getOptions(keyName)
if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
continue
} else if _, ok := opts["omitzero"]; ok && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found (for example, when the
// value is nil). It is used, among other things, to determine whether the
// types of array elements are mixed, which is forbidden.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
} else {
return tomlArray
}
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
func getOptions(keyName string) (string, map[string]struct{}) {
opts := make(map[string]struct{})
ss := strings.Split(keyName, ",")
name := ss[0]
if len(ss) > 1 {
for _, opt := range ss {
opts[opt] = struct{}{}
}
}
return name, opts
}
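An editor's sketch of how the struct tag options parsed by getOptions and consumed in eStruct affect output; the profile type and key names are hypothetical.
// Editor's sketch: tag forms recognized by the encoder above.
type profile struct {
	Name     string `toml:"name"`             // renamed key
	Password string `toml:"-"`                // never encoded
	Bio      string `toml:"bio,omitempty"`    // skipped when empty (see isEmpty below)
	Retries  int    `toml:"retries,omitzero"` // skipped when zero (see isZero below)
}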
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if rv.Int() == 0 {
return true
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if rv.Uint() == 0 {
return true
}
case reflect.Float32, reflect.Float64:
if rv.Float() == 0.0 {
return true
}
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.String:
if len(strings.TrimSpace(rv.String())) == 0 {
return true
}
case reflect.Array, reflect.Slice, reflect.Map:
if rv.Len() == 0 {
return true
}
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

19
vendor/github.com/BurntSushi/toml/encoding_types.go generated vendored Normal file

@ -0,0 +1,19 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go generated vendored Normal file

@ -0,0 +1,18 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

874
vendor/github.com/BurntSushi/toml/lex.go generated vendored Normal file

@ -0,0 +1,874 @@
package toml
import (
"fmt"
"strings"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
)
const (
eof = 0
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
arrayValTerm = ','
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
width int
line int
state stateFn
items chan item
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input + "\n",
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
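An editor's sketch of driving the lexer from inside the package, which is essentially what the parser's next() does; the input literal is hypothetical.
// Editor's sketch: pulling items out of the lexer until EOF or an error.
func exampleLex() {
	lx := lex("answer = 42 # a comment\n")
	for {
		it := lx.nextItem()
		if it.typ == itemEOF || it.typ == itemError {
			return
		}
		fmt.Printf("line %d: %s %q\n", it.line, it.typ, it.val)
	}
}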
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop.")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.pos >= len(lx.input) {
lx.width = 0
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.pos += lx.width
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only once per call of next.
func (lx *lexer) backup() {
lx.pos -= lx.width
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (new lines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("Unexpected EOF.")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a new line. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a new line for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.ignore()
return lexTop
}
return lx.errorf("Expected a top-level item to end with a new line, "+
"comment or EOF, but got %q instead.", r)
}
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("Expected end of table array name delimiter %q, "+
"but got %q instead.", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("Unexpected end of table name. (Table names cannot " +
"be empty.)")
case r == tableSep:
return lx.errorf("Unexpected table separator. (Table names cannot " +
"be empty.)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
case isWhitespace(r):
return lexTableNameStart
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareTableName
case r == tableSep || r == tableEnd:
lx.backup()
lx.emitTrim(itemText)
return lexTableNameEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
}
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
"instead.", r)
}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("Unexpected key separator %q.", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.emitTrim(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emitTrim(itemText)
return lexKeyEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("Expected key separator %q, but got %q instead.",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT new lines.
// In array syntax, the array states are responsible for ignoring new
// lines.
r := lx.next()
if isWhitespace(r) {
return lexSkip(lx, lexValue)
}
switch {
case r == arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case r == stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case r == rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case r == 't':
return lexTrue
case r == 'f':
return lexFalse
case r == '-':
return lexNumberStart
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
case r == '.': // special error case, be kind to users
return lx.errorf("Floats must start with a digit, not '.'.")
}
return lx.errorf("Expected value but found %q instead.", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and new lines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == arrayValTerm:
return lx.errorf("Unexpected array value terminator %q.",
arrayValTerm)
case r == arrayEnd:
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
// it ignores whitespace and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == arrayValTerm:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf("Expected an array value terminator %q or an array "+
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
}
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
// just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == '\\':
return lexMultilineStringEscape
case r == stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
lx.next()
return lexMultilineString
} else {
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b':
fallthrough
case 't':
fallthrough
case 'n':
fallthrough
case 'f':
fallthrough
case 'r':
fallthrough
case '"':
fallthrough
case '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("Invalid escape character %q. Only the following "+
"escape characters are allowed: "+
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
"\\uXXXX and \\UXXXXXXXX.", r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
"but got '%s' instead.", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
"but got '%s' instead.", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either a (positive) integer, float or
// datetime. It assumes that NO negative sign has been consumed.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.")
} else {
return lx.errorf("Expected a digit but got %q.", r)
}
}
return lexNumberOrDate
}
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
switch {
case r == '-':
if lx.pos-lx.start != 5 {
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
}
return lexDateAfterYear
case isDigit(r):
return lexNumberOrDate
case r == '.':
return lexFloatStart
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
// It assumes that "YYYY-" has already been consumed.
func lexDateAfterYear(lx *lexer) stateFn {
formats := []rune{
// positions marked '0' must be digits;
// everything else must match the rune exactly.
'0', '0', '-', '0', '0',
'T',
'0', '0', ':', '0', '0', ':', '0', '0',
'Z',
}
for _, f := range formats {
r := lx.next()
if f == '0' {
if !isDigit(r) {
return lx.errorf("Expected digit in ISO8601 datetime, "+
"but found %q instead.", r)
}
} else if f != r {
return lx.errorf("Expected %q in ISO8601 datetime, "+
"but found %q instead.", f, r)
}
}
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that
// a negative sign has already been read, but that *no* digits have been
// consumed. lexNumberStart will move to the appropriate integer or float
// states.
func lexNumberStart(lx *lexer) stateFn {
// we MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.")
} else {
return lx.errorf("Expected a digit but got %q.", r)
}
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
switch {
case isDigit(r):
return lexNumber
case r == '.':
return lexFloatStart
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloatStart starts the consumption of digits of a float after a '.'.
// Namely, at least one digit is required.
func lexFloatStart(lx *lexer) stateFn {
r := lx.next()
if !isDigit(r) {
return lx.errorf("Floats must have a digit after the '.', but got "+
"%q instead.", r)
}
return lexFloat
}
// lexFloat consumes the digits of a float after a '.'.
// Assumes that one digit has been consumed after a '.' already.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexConst consumes the remainder of s (that is, s[1:]). It assumes that
// s[0] has already been consumed.
func lexConst(lx *lexer, s string) stateFn {
for i := range s[1:] {
if r := lx.next(); r != rune(s[i+1]) {
return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
s[:i]+string(r))
}
}
return nil
}
// lexTrue consumes the "rue" in "true". It assumes that 't' has already
// been consumed.
func lexTrue(lx *lexer) stateFn {
if fn := lexConst(lx, "true"); fn != nil {
return fn
}
lx.emit(itemBool)
return lx.pop()
}
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
// been consumed.
func lexFalse(lx *lexer) stateFn {
if fn := lexConst(lx, "false"); fn != nil {
return fn
}
lx.emit(itemBool)
return lx.pop()
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first new line character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString:
return "String"
case itemRawString:
return "String"
case itemMultilineString:
return "String"
case itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}

498
vendor/github.com/BurntSushi/toml/parse.go generated vendored Normal file

@ -0,0 +1,498 @@
package toml
import (
"fmt"
"log"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
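An editor's sketch of calling parse directly, roughly what the package's Decode entry point does before the reflection-based unification; the TOML literal is hypothetical.
// Editor's sketch: parse produces a generic mapping plus per-key TOML types.
func exampleParse() {
	p, err := parse("answer = 42\n")
	if err != nil {
		return
	}
	fmt.Println(p.mapping["answer"]) // int64(42)
	fmt.Println(p.types["answer"])   // Integer
}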
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// keyString returns a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
num, err := strconv.ParseInt(it.val, 10, 64)
if err != nil {
// See comment below for floats describing why we make a
// distinction between a bug and a user error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
num, err := strconv.ParseFloat(it.val, 64)
if err != nil {
// Distinguish float values. Normally, it'd be a bug if the lexer
// provides an invalid float, but it's possible that the float is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
//
// This is also true for integers.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.bug("Expected float value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
if err != nil {
p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, and will
// account for implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
// BUG(burntsushi)
// I honestly don't understand how this works. I can't seem
// to find a way to make this fail. I figured this would fail on invalid
// UTF-8 characters like U+DCFF, but it doesn't.
if !utf8.ValidString(string(rune(hex))) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

1
vendor/github.com/BurntSushi/toml/session.vim сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

91
vendor/github.com/BurntSushi/toml/type_check.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,91 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be moving
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}

241
vendor/github.com/BurntSushi/toml/type_fields.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,241 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
name := sf.Tag.Get("toml")
if name == "-" {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}

21
vendor/github.com/Sirupsen/logrus/LICENSE сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

377
vendor/github.com/Sirupsen/logrus/README.md сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,377 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
```
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
```go
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(&logrus_airbrake.AirbrakeHook{})
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"os"
"github.com/Sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus, using any of the `printf`-family functions should be
seen as a hint that you should add a field. However, you can still use the
`printf`-family functions with Logrus.
#### Hooks
You can add hooks for logging levels. For example, to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
multiple places simultaneously, e.g. syslog.
```go
// Not the real implementation of the Airbrake hook. Just a simple sample.
import (
log "github.com/Sirupsen/logrus"
)
func init() {
log.AddHook(new(AirbrakeHook))
}
type AirbrakeHook struct{}
// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
// the fields for the entry. See the Fields section of the README.
func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
err := airbrake.Notify(entry.Data["error"].(error))
if err != nil {
log.WithFields(log.Fields{
"source": "airbrake",
"endpoint": airbrake.Endpoint,
}).Info("Failed to send error to Airbrake")
}
return nil
}
// `Levels()` returns a slice of `Levels` the hook is fired for.
func (hook *AirbrakeHook) Levels() []log.Level {
return []log.Level{
log.ErrorLevel,
log.FatalLevel,
log.PanicLevel,
}
}
```
Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
```go
import (
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
"github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
log.AddHook(new(logrus_airbrake.AirbrakeHook))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
Send errors to an exception tracking service compatible with the Airbrake API.
Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
Send errors to the Papertrail hosted logging service via UDP.
* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
Send errors to remote syslog server.
Uses standard library `log/syslog` behind the scenes.
* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
Send errors to a channel in hipchat.
* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
Send logs to Loggly (https://www.loggly.com/)
* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus)
Hook for Slack chat.
* [`github.com/wercker/journalhook`](https://github.com/wercker/journalhook).
Hook for logging to `systemd-journald`.
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
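For example, a minimal sketch that wires the level to the environment with `ParseLevel`; the `LOG_LEVEL` variable name is a hypothetical convention, not part of Logrus:
```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func init() {
	// LOG_LEVEL is a made-up convention; adjust to your own.
	if lvl, err := log.ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
		log.SetLevel(lvl)
	} else {
		log.SetLevel(log.InfoLevel) // fall back to the default
	}
}

func main() {
	log.Debug("only printed when LOG_LEVEL=debug")
	log.Info("printed at the default info level")
}
```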
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
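A rough illustration of how the automatic fields combine with your own (the timestamp below is made up; actual output will differ):
```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{})
	log.WithFields(log.Fields{"animal": "walrus"}).Info("A walrus appears")
	// Prints something like:
	// {"animal":"walrus","level":"info","msg":"A walrus appears","time":"2014-03-10T19:57:38-04:00"}
}
```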
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:
```go
import (
log "github.com/Sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
Third party logging formatters:
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
[godoc]: https://godoc.org/github.com/Sirupsen/logrus

252
vendor/github.com/Sirupsen/logrus/entry.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,252 @@
package logrus
import (
"bytes"
"fmt"
"io"
"os"
"time"
)
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
if err != nil {
return "", err
}
return reader.String(), err
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{}
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
func (entry *Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
os.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}

188
vendor/github.com/Sirupsen/logrus/exported.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,188 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

44
vendor/github.com/Sirupsen/logrus/formatter.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,44 @@
package logrus
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user-provided level. Instead, with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
_, ok := data["time"]
if ok {
data["fields.time"] = data["time"]
}
_, ok = data["msg"]
if ok {
data["fields.msg"] = data["msg"]
}
_, ok = data["level"]
if ok {
data["fields.level"] = data["level"]
}
}

34
vendor/github.com/Sirupsen/logrus/hooks.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,34 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type levelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks levelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}

32
vendor/github.com/Sirupsen/logrus/json_formatter.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
package logrus
import (
"encoding/json"
"fmt"
"time"
)
type JSONFormatter struct{}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
if err, ok := v.(error); ok {
data[k] = err.Error()
} else {
data[k] = v
}
}
prefixFieldClashes(data)
data["time"] = entry.Time.Format(time.RFC3339)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

161
vendor/github.com/Sirupsen/logrus/logger.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,161 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stdout`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks levelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in a debug or verbose environment.
Level Level
// Used to sync writing to the log.
mu sync.Mutex
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(levelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stdout,
Formatter: new(TextFormatter),
Hooks: make(levelHooks),
Level: InfoLevel,
}
}
// Adds a field to the log entry. Note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
NewEntry(logger).Debugf(format, args...)
}
func (logger *Logger) Infof(format string, args ...interface{}) {
NewEntry(logger).Infof(format, args...)
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
NewEntry(logger).Warnf(format, args...)
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
NewEntry(logger).Warnf(format, args...)
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
NewEntry(logger).Errorf(format, args...)
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
NewEntry(logger).Fatalf(format, args...)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
NewEntry(logger).Panicf(format, args...)
}
func (logger *Logger) Debug(args ...interface{}) {
NewEntry(logger).Debug(args...)
}
func (logger *Logger) Info(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Warn(args ...interface{}) {
NewEntry(logger).Warn(args...)
}
func (logger *Logger) Warning(args ...interface{}) {
NewEntry(logger).Warn(args...)
}
func (logger *Logger) Error(args ...interface{}) {
NewEntry(logger).Error(args...)
}
func (logger *Logger) Fatal(args ...interface{}) {
NewEntry(logger).Fatal(args...)
}
func (logger *Logger) Panic(args ...interface{}) {
NewEntry(logger).Panic(args...)
}
func (logger *Logger) Debugln(args ...interface{}) {
NewEntry(logger).Debugln(args...)
}
func (logger *Logger) Infoln(args ...interface{}) {
NewEntry(logger).Infoln(args...)
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
}
func (logger *Logger) Warnln(args ...interface{}) {
NewEntry(logger).Warnln(args...)
}
func (logger *Logger) Warningln(args ...interface{}) {
NewEntry(logger).Warnln(args...)
}
func (logger *Logger) Errorln(args ...interface{}) {
NewEntry(logger).Errorln(args...)
}
func (logger *Logger) Fatalln(args ...interface{}) {
NewEntry(logger).Fatalln(args...)
}
func (logger *Logger) Panicln(args ...interface{}) {
NewEntry(logger).Panicln(args...)
}

94
vendor/github.com/Sirupsen/logrus/logrus.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,94 @@
package logrus
import (
"fmt"
"log"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch lvl {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var _ StdLogger = &log.Logger{}
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}

12
vendor/github.com/Sirupsen/logrus/terminal_darwin.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

20
vendor/github.com/Sirupsen/logrus/terminal_freebsd.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,20 @@
/*
Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}

12
vendor/github.com/Sirupsen/logrus/terminal_linux.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios

21
vendor/github.com/Sirupsen/logrus/terminal_notwindows.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,21 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd
package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if stdout's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

8
vendor/github.com/Sirupsen/logrus/terminal_openbsd.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

27
vendor/github.com/Sirupsen/logrus/terminal_windows.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,27 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if stdout's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}

145
vendor/github.com/Sirupsen/logrus/text_formatter.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,145 @@
package logrus
import (
"bytes"
"fmt"
"regexp"
"sort"
"strings"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
noQuoteNeeded *regexp.Regexp
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. Useful when output is redirected to a logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
b := &bytes.Buffer{}
prefixFieldClashes(entry.Data)
isColored := (f.ForceColors || isTerminal) && !f.DisableColors
if isColored {
f.printColored(b, entry, keys)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
}
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "msg", entry.Message)
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(time.RFC3339), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
}
}
// Note: despite its name, needsQuoting returns true when the text is safe to
// print unquoted (only letters, digits, '-' and '.'); appendKeyValue uses this
// to decide between the %s and %q verbs.
func needsQuoting(text string) bool {
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return false
}
}
return true
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
switch value.(type) {
case string:
if needsQuoting(value.(string)) {
fmt.Fprintf(b, "%v=%s ", key, value)
} else {
fmt.Fprintf(b, "%v=%q ", key, value)
}
case error:
if needsQuoting(value.(error).Error()) {
fmt.Fprintf(b, "%v=%s ", key, value)
} else {
fmt.Fprintf(b, "%v=%q ", key, value)
}
default:
fmt.Fprintf(b, "%v=%v ", key, value)
}
}

31
vendor/github.com/Sirupsen/logrus/writer.go сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,31 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
logger.Print(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}

8
vendor/github.com/dgrijalva/jwt-go/LICENSE сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
Copyright (c) 2012 Dave Grijalva
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

59
vendor/github.com/dgrijalva/jwt-go/README.md сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,59 @@
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html)
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
## What the heck is a JWT?
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
## What's in the box?
This library supports the parsing and verification as well as the generation and signing of JWTs. Currently supported signing algorithms are RSA256 and HMAC SHA256, though hooks are present for adding your own.
## Parse and Verify
Parsing and verifying tokens is pretty straightforward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key was used.
```go
token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) {
// Don't forget to validate the alg is what you expect:
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return myLookupKey(token.Header["kid"])
})
if err == nil && token.Valid {
deliverGoodness("!")
} else {
deliverUtterRejection(":(")
}
```
## Create a token
```go
// Create the token
token := jwt.New(jwt.SigningMethodHS256)
// Set some claims
token.Claims["foo"] = "bar"
token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix()
// Sign and get the complete encoded token as a string
tokenString, err := token.SignedString(mySigningKey)
```
## Project Status & Versioning
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more http centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b).

54
vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,54 @@
## `jwt-go` Version History
#### 2.2.0
* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
#### 2.1.0
Backwards compatible API change that was missed in 2.0.0.
* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
#### 2.0.0
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
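For illustration, with a hypothetical `lookupKey` helper that returns a `[]byte` key, the only change on the caller's side is the widened return type of the callback:

```go
// 1.x callback signature (no longer compiles against 2.x):
//   func(t *jwt.Token) ([]byte, error)
//
// 2.x callback signature; returning a []byte key is still accepted:
token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
	return lookupKey(t.Header["kid"]) // hypothetical helper returning ([]byte, error)
})
```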
* **Compatibility Breaking Changes**
* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
* `KeyFunc` now returns `interface{}` instead of `[]byte`
* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodHS256`
* Added public package global `SigningMethodHS384`
* Added public package global `SigningMethodHS512`
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodRS256`
* Added public package global `SigningMethodRS384`
* Added public package global `SigningMethodRS512`
* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
* Refactored the RSA implementation to be easier to read
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
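The new PEM helpers take raw PEM bytes and hand back `*rsa.PrivateKey` / `*rsa.PublicKey` values that can be passed straight to the RSA signing methods. A short sketch (the key file path is made up):

```go
pemBytes, err := ioutil.ReadFile("keys/sample_key.pem") // hypothetical path
if err != nil {
	log.Fatal(err)
}
privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
if err != nil {
	log.Fatal(err)
}
token := jwt.New(jwt.SigningMethodRS256)
tokenString, err := token.SignedString(privateKey)
```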
#### 1.0.2
* Fixed bug in parsing public keys from certificates
* Added more tests around the parsing of keys for RS256
* Code refactoring in RS256 implementation. No functional changes
#### 1.0.1
* Fixed panic if RS256 signing method was passed an invalid key
#### 1.0.0
* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
* Supports RS256 and HS256 signing methods

4
vendor/github.com/dgrijalva/jwt-go/doc.go generated vendored Normal file
@@ -0,0 +1,4 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt

84
vendor/github.com/dgrijalva/jwt-go/hmac.go generated vendored Normal file
@@ -0,0 +1,84 @@
package jwt
import (
"crypto"
"crypto/hmac"
"errors"
)
// Implements the HMAC-SHA family of signing methods (HS256, HS384, HS512).
// Expects key type of []byte for both signing and validation.
type SigningMethodHMAC struct {
Name string
Hash crypto.Hash
}
// Specific instances for HS256 and company
var (
SigningMethodHS256 *SigningMethodHMAC
SigningMethodHS384 *SigningMethodHMAC
SigningMethodHS512 *SigningMethodHMAC
ErrSignatureInvalid = errors.New("signature is invalid")
)
func init() {
// HS256
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
return SigningMethodHS256
})
// HS384
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
return SigningMethodHS384
})
// HS512
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
return SigningMethodHS512
})
}
// Alg returns the name of this signing method, as it appears in the token's
// "alg" header (e.g. "HS256").
func (m *SigningMethodHMAC) Alg() string {
return m.Name
}
// Verify checks the signature against the signing string using the HMAC key.
// Returns nil if the signature is valid. Key must be []byte.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
if keyBytes, ok := key.([]byte); ok {
var sig []byte
var err error
if sig, err = DecodeSegment(signature); err == nil {
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
if !hmac.Equal(sig, hasher.Sum(nil)) {
err = ErrSignatureInvalid
}
}
return err
}
return ErrInvalidKey
}
// Implements the Sign method from SigningMethod for this signing method.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
return EncodeSegment(hasher.Sum(nil)), nil
}
return "", ErrInvalidKey
}

Some files were not shown because too many files changed in this diff.