Mirror of https://github.com/golang/dep.git

Merge pull request #1559 from sdboyer/v040-release-prep

v0.4.0 release prep

Commit 6d95d0d6ac

CHANGELOG.md
@@ -1,4 +1,12 @@
# v0.4.0 (Unreleased)
# (next version)

NEW FEATURES:

BUG FIXES:

IMPROVEMENTS:

# v0.4.0

NEW FEATURES:

* Add support for importing from [glock](https://github.com/robfig/glock) based projects. ([#1422](https://github.com/golang/dep/pull/1422))
@@ -2,3 +2,7 @@
[[constraint]]
  name = "github.com/sdboyer/deptestdos"
  version = "2.0.0"

[prune]
  go-tests = true
  unused-packages = true
@@ -6,6 +6,6 @@ title: Getting Started

Welcome! This is documentation for dep, the "official experiment" dependency management tool for the Go language. Dep is a tool intended primarily for use by developers, to support the work of actually writing and shipping code. It is _not_ intended for end users who are installing Go software - that's what `go get` does.

This site has both guides and reference documents. The guides are practical explanations of how to actually do things with dep, whereas the reference material provides deeper dives on specific topics. Of particular note is the [glossary](#glossary.md) - if you're unfamiliar with terminology used in this documentation, make sure to check there!
This site has both guides and reference documents. The guides are practical explanations of how to actually do things with dep, whereas the reference material provides deeper dives on specific topics. Of particular note is the [glossary](glossary.md) - if you're unfamiliar with terminology used in this documentation, make sure to check there!

After [installing dep](installation.md), if you're using it for the first time, check out [Creating a New Project](new-project.md). Or, if you have an existing Go project that you want to convert to dep, [Migrating to Dep](migrating.md) is probably the place to start.
@@ -33,12 +33,11 @@ unset IFS
if [ ${#files[@]} -gt 0 ]; then
    go build ./cmd/dep
    ./dep ensure -vendor-only
    ./dep prune
    # Let's see if the working directory is clean
    diffs="$(git status --porcelain -- vendor Gopkg.toml Gopkg.lock 2>/dev/null)"
    if [ "$diffs" ]; then
        {
            echo 'The contents of vendor differ after "dep ensure && dep prune":'
            echo 'The contents of vendor differ after "dep ensure":'
            echo
            echo "$diffs"
            echo
@@ -1,24 +0,0 @@
language: go

go:
  - 1.6
  - 1.7
  - tip

# Setting sudo access to false will let Travis CI use containers rather than
# VMs to run the tests. For more details see:
# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
sudo: false

script:
  - make setup
  - make test

notifications:
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/06e3328629952dabe3e0
    on_success: change  # options: [always|never|change] default: always
    on_failure: always  # options: [always|never|change] default: always
    on_start: never     # options: [always|never|change] default: always
@@ -1,17 +0,0 @@
# Release 1.x.x (xxxx-xx-xx)

- Issue #9: Speed up version comparison performance (thanks @sdboyer)
- Issue #8: Added benchmarks (thanks @sdboyer)

# Release 1.1.0 (2015-03-11)

- Issue #2: Implemented validation to provide reasons a version failed a
  constraint.

# Release 1.0.1 (2015-12-31)

- Fixed #1: * constraint failing on valid versions.

# Release 1.0.0 (2015-10-20)

- Initial release
@@ -1,36 +0,0 @@
.PHONY: setup
setup:
	go get -u gopkg.in/alecthomas/gometalinter.v1
	gometalinter.v1 --install

.PHONY: test
test: validate lint
	@echo "==> Running tests"
	go test -v

.PHONY: validate
validate:
	@echo "==> Running static validations"
	@gometalinter.v1 \
	  --disable-all \
	  --enable deadcode \
	  --severity deadcode:error \
	  --enable gofmt \
	  --enable gosimple \
	  --enable ineffassign \
	  --enable misspell \
	  --enable vet \
	  --tests \
	  --vendor \
	  --deadline 60s \
	  ./... || exit_code=1

.PHONY: lint
lint:
	@echo "==> Running linters"
	@gometalinter.v1 \
	  --disable-all \
	  --enable golint \
	  --vendor \
	  --deadline 60s \
	  ./... || :
@@ -1,146 +0,0 @@
# SemVer

The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:

* Parse semantic versions
* Sort semantic versions
* Check if a semantic version fits within a set of constraints
* Optionally work with a `v` prefix

[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.png)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](http://goreportcard.com/badge/Masterminds/semver)](http://goreportcard.com/report/Masterminds/semver)

## Parsing Semantic Versions

To parse a semantic version use the `NewVersion` function. For example,

    v, err := semver.NewVersion("1.2.3-beta.1+build345")

If there is an error the version wasn't parseable. The version object has methods to get the parts of the version, compare it to other versions, convert the version back into a string, and get the original string. For more details please see the [documentation](https://godoc.org/github.com/Masterminds/semver).

## Sorting Semantic Versions

A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) package from the standard library. For example,

    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
    vs := make([]*semver.Version, len(raw))
    for i, r := range raw {
        v, err := semver.NewVersion(r)
        if err != nil {
            t.Errorf("Error parsing version: %s", err)
        }

        vs[i] = v
    }

    sort.Sort(semver.Collection(vs))

## Checking Version Constraints

Checking a version against version constraints is one of the most featureful parts of the package.

    c, err := semver.NewConstraint(">= 1.2.3")
    if err != nil {
        // Handle constraint not being parseable.
    }

    v, err := semver.NewVersion("1.3")
    if err != nil {
        // Handle version not being parseable.
    }
    // Check if the version meets the constraints. The a variable will be true.
    a := c.Check(v)

## Basic Comparisons

There are two elements to the comparisons. First, a comparison string is a list of comma-separated AND comparisons. These groups are then separated by `||`, which acts as an OR. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a version that is greater than or equal to 1.2 and less than 3.0.0, or is greater than or equal to 4.2.3.

The basic comparisons are:

* `=`: equal (aliased to no operator)
* `!=`: not equal
* `>`: greater than
* `<`: less than
* `>=`: greater than or equal to
* `<=`: less than or equal to
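To make the compound form above concrete, here is a minimal sketch (using only the `NewConstraint`, `NewVersion`, and `Check` calls already shown in this README; the specific version strings are illustrative) of how a version is accepted if it satisfies any one of the `||`-separated groups:

    // Hypothetical usage sketch: the constraint string comes from the example above.
    c, err := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
    if err != nil {
        // Handle constraint not being parseable.
    }

    v1, _ := semver.NewVersion("2.5.0") // satisfies ">= 1.2, < 3.0.0"
    v2, _ := semver.NewVersion("4.3.0") // satisfies ">= 4.2.3"
    v3, _ := semver.NewVersion("3.5.0") // satisfies neither group

    fmt.Println(c.Check(v1), c.Check(v2), c.Check(v3)) // true true false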
## Hyphen Range Comparisons

There are multiple methods to handle ranges, and the first is hyphen ranges. These look like:

* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`

## Wildcards In Comparisons

The `x`, `X`, and `*` characters can be used as a wildcard character. This works for all comparison operators. When used on the `=` operator it falls back to the patch level comparison (see tilde below). For example,

* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
* `>= 1.2.x` is equivalent to `>= 1.2.0`
* `<= 2.x` is equivalent to `< 3`
* `*` is equivalent to `>= 0.0.0`

## Tilde Range Comparisons (Patch)

The tilde (`~`) comparison operator is for patch level ranges when a minor version is specified and major level changes when the minor number is missing. For example,

* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
* `~1` is equivalent to `>= 1, < 2`
* `~2.3` is equivalent to `>= 2.3, < 2.4`
* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
* `~1.x` is equivalent to `>= 1, < 2`

## Caret Range Comparisons (Major)

The caret (`^`) comparison operator is for major level changes. This is useful when comparing API versions, as a major change is API breaking. For example,

* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
* `^2.3` is equivalent to `>= 2.3, < 3`
* `^2.x` is equivalent to `>= 2.0.0, < 3`
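As a rough illustration of how the range operators above differ in practice (again a sketch, using only the `NewConstraint`, `NewVersion`, and `Check` calls documented earlier; the chosen version is illustrative), compare a tilde, a caret, and a wildcard constraint against the same version:

    // Hypothetical usage sketch: tilde is patch-level, caret is major-level, x is a wildcard.
    v, _ := semver.NewVersion("1.4.7")

    tilde, _ := semver.NewConstraint("~1.2.3") // >= 1.2.3, < 1.3.0
    caret, _ := semver.NewConstraint("^1.2.3") // >= 1.2.3, < 2.0.0
    wild, _ := semver.NewConstraint("1.x")     // >= 1.0.0, < 2.0.0

    fmt.Println(tilde.Check(v)) // false: 1.4.7 is outside the 1.2 patch range
    fmt.Println(caret.Check(v)) // true: still within major version 1
    fmt.Println(wild.Check(v))  // true: any 1.x.y version matches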
# Validation

In addition to testing a version against a constraint, a version can be validated against a constraint. When validation fails a slice of errors containing why a version didn't meet the constraint is returned. For example,

    c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
    if err != nil {
        // Handle constraint not being parseable.
    }

    v, err := semver.NewVersion("1.3")
    if err != nil {
        // Handle version not being parseable.
    }

    // Validate a version against a constraint.
    a, msgs := c.Validate(v)
    // a is false
    for _, m := range msgs {
        fmt.Println(m)

        // Loops over the errors which would read
        // "1.3 is greater than 1.2.3"
        // "1.3 is less than 1.4"
    }

# Contribute

If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) or [create a pull request](https://github.com/Masterminds/semver/pulls).
@@ -1,44 +0,0 @@
version: build-{build}.{branch}

clone_folder: C:\gopath\src\github.com\Masterminds\semver
shallow_clone: true

environment:
  GOPATH: C:\gopath

platform:
  - x64

install:
  - go version
  - go env
  - go get -u gopkg.in/alecthomas/gometalinter.v1
  - set PATH=%PATH%;%GOPATH%\bin
  - gometalinter.v1.exe --install

build_script:
  - go install -v ./...

test_script:
  - "gometalinter.v1 \
      --disable-all \
      --enable deadcode \
      --severity deadcode:error \
      --enable gofmt \
      --enable gosimple \
      --enable ineffassign \
      --enable misspell \
      --enable vet \
      --tests \
      --vendor \
      --deadline 60s \
      ./... || cmd /C EXIT 0"
  - "gometalinter.v1 \
      --disable-all \
      --enable golint \
      --vendor \
      --deadline 60s \
      ./... || cmd /C EXIT 0"
  - go test -v

deploy: off
@ -1,259 +0,0 @@
|
|||
package semver
|
||||
|
||||
import "testing"
|
||||
|
||||
func init() {
|
||||
// disable constraint and version creation caching
|
||||
CacheConstraints = false
|
||||
CacheVersions = false
|
||||
}
|
||||
|
||||
var (
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMax: true,
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(2, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
rc3 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc4 = rangeConstraint{
|
||||
min: newV(1, 7, 0),
|
||||
max: newV(4, 0, 0),
|
||||
}
|
||||
rc5 = rangeConstraint{
|
||||
min: newV(2, 7, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
rc6 = rangeConstraint{
|
||||
min: newV(3, 0, 1),
|
||||
max: newV(3, 0, 4),
|
||||
}
|
||||
rc7 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(1, 2, 0),
|
||||
}
|
||||
// Two fully non-overlapping unions
|
||||
u1 = rc1.Union(rc7)
|
||||
u2 = rc5.Union(rc6)
|
||||
)
|
||||
|
||||
/* Constraint creation benchmarks */
|
||||
|
||||
func benchNewConstraint(c string, b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NewConstraint(c)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNewConstraintUnary(b *testing.B) {
|
||||
benchNewConstraint("=2.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewConstraintTilde(b *testing.B) {
|
||||
benchNewConstraint("~2.0.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewConstraintCaret(b *testing.B) {
|
||||
benchNewConstraint("^2.0.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewConstraintWildcard(b *testing.B) {
|
||||
benchNewConstraint("1.x", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewConstraintRange(b *testing.B) {
|
||||
benchNewConstraint(">=2.1.x, <3.1.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewConstraintUnion(b *testing.B) {
|
||||
benchNewConstraint("~2.0.0 || =3.1.0", b)
|
||||
}
|
||||
|
||||
/* Validate benchmarks, including fails */
|
||||
|
||||
func benchValidateVersion(c, v string, b *testing.B) {
|
||||
version, _ := NewVersion(v)
|
||||
constraint, _ := NewConstraint(c)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
constraint.Matches(version)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionUnary(b *testing.B) {
|
||||
benchValidateVersion("=2.0", "2.0.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionUnaryFail(b *testing.B) {
|
||||
benchValidateVersion("=2.0", "2.0.1", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionTilde(b *testing.B) {
|
||||
benchValidateVersion("~2.0.0", "2.0.5", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionTildeFail(b *testing.B) {
|
||||
benchValidateVersion("~2.0.0", "1.0.5", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionCaret(b *testing.B) {
|
||||
benchValidateVersion("^2.0.0", "2.1.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionCaretFail(b *testing.B) {
|
||||
benchValidateVersion("^2.0.0", "4.1.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionWildcard(b *testing.B) {
|
||||
benchValidateVersion("1.x", "1.4.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionWildcardFail(b *testing.B) {
|
||||
benchValidateVersion("1.x", "2.4.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionRange(b *testing.B) {
|
||||
benchValidateVersion(">=2.1.x, <3.1.0", "2.4.5", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionRangeFail(b *testing.B) {
|
||||
benchValidateVersion(">=2.1.x, <3.1.0", "1.4.5", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionUnion(b *testing.B) {
|
||||
benchValidateVersion("~2.0.0 || =3.1.0", "3.1.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkValidateVersionUnionFail(b *testing.B) {
|
||||
benchValidateVersion("~2.0.0 || =3.1.0", "3.1.1", b)
|
||||
}
|
||||
|
||||
/* Version creation benchmarks */
|
||||
|
||||
func benchNewVersion(v string, b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NewVersion(v)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNewVersionSimple(b *testing.B) {
|
||||
benchNewVersion("1.0.0", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewVersionPre(b *testing.B) {
|
||||
benchNewVersion("1.0.0-alpha", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewVersionMeta(b *testing.B) {
|
||||
benchNewVersion("1.0.0+metadata", b)
|
||||
}
|
||||
|
||||
func BenchmarkNewVersionMetaDash(b *testing.B) {
|
||||
benchNewVersion("1.0.0+metadata-dash", b)
|
||||
}
|
||||
|
||||
/* Union benchmarks */
|
||||
|
||||
func BenchmarkAdjacentRangeUnion(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Union(rc1, rc2)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAdjacentRangeUnionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rc1.Union(rc2)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDisjointRangeUnion(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Union(rc2, rc3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDisjointRangeUnionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rc2.Union(rc3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkOverlappingRangeUnion(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Union(rc1, rc4)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkOverlappingRangeUnionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rc1.Union(rc4)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnionUnion(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Union(u1, u2)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnionUnionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
u1.Union(u2)
|
||||
}
|
||||
}
|
||||
|
||||
/* Intersection benchmarks */
|
||||
|
||||
func BenchmarkSubsetRangeIntersection(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Intersection(rc2, rc4)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSubsetRangeIntersectionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rc2.Intersect(rc4)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDisjointRangeIntersection(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Intersection(rc2, rc3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDisjointRangeIntersectionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rc2.Intersect(rc3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkOverlappingRangeIntersection(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Intersection(rc1, rc4)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkOverlappingRangeIntersectionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rc1.Intersect(rc4)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnionIntersection(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Intersection(u1, u2)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnionIntersectionMethod(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
u1.Intersect(u2)
|
||||
}
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
package semver
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCollection(t *testing.T) {
|
||||
raw := []string{
|
||||
"1.2.3",
|
||||
"1.0",
|
||||
"1.3",
|
||||
"2",
|
||||
"0.4.2",
|
||||
}
|
||||
|
||||
vs := make([]Version, len(raw))
|
||||
for i, r := range raw {
|
||||
v, err := NewVersion(r)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
vs[i] = v
|
||||
}
|
||||
|
||||
sort.Sort(Collection(vs))
|
||||
|
||||
e := []string{
|
||||
"0.4.2",
|
||||
"1.0.0",
|
||||
"1.2.3",
|
||||
"1.3.0",
|
||||
"2.0.0",
|
||||
}
|
||||
|
||||
a := make([]string, len(vs))
|
||||
for i, v := range vs {
|
||||
a[i] = v.String()
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(a, e) {
|
||||
t.Error("Sorting Collection failed")
|
||||
}
|
||||
}
|
|
@ -1,712 +0,0 @@
|
|||
package semver
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParseConstraint(t *testing.T) {
|
||||
tests := []struct {
|
||||
in string
|
||||
c Constraint
|
||||
err bool
|
||||
}{
|
||||
{"*", Any(), false},
|
||||
{">= 1.2", rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
includeMin: true,
|
||||
}, false},
|
||||
{"1.0", newV(1, 0, 0), false},
|
||||
{"foo", nil, true},
|
||||
{"<= 1.2", rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: newV(1, 2, 0),
|
||||
includeMax: true,
|
||||
}, false},
|
||||
{"=< 1.2", rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: newV(1, 2, 0),
|
||||
includeMax: true,
|
||||
}, false},
|
||||
{"=> 1.2", rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
includeMin: true,
|
||||
}, false},
|
||||
{"v1.2", newV(1, 2, 0), false},
|
||||
{"=1.5", newV(1, 5, 0), false},
|
||||
{"> 1.3", rangeConstraint{
|
||||
min: newV(1, 3, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
}, false},
|
||||
{"< 1.4.1", rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: newV(1, 4, 1),
|
||||
}, false},
|
||||
{"~1.1.0", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: newV(1, 2, 0),
|
||||
includeMin: true,
|
||||
includeMax: false,
|
||||
}, false},
|
||||
{"^1.1.0", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
includeMax: false,
|
||||
}, false},
|
||||
{"^1.1.0-12-abc123", rangeConstraint{
|
||||
min: Version{major: 1, minor: 1, patch: 0, pre: "12-abc123"},
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
includeMax: false,
|
||||
}, false},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
c, err := parseConstraint(tc.in, false)
|
||||
if tc.err && err == nil {
|
||||
t.Errorf("Expected error for %s didn't occur", tc.in)
|
||||
} else if !tc.err && err != nil {
|
||||
t.Errorf("Unexpected error %q for %s", err, tc.in)
|
||||
}
|
||||
|
||||
// If an error was expected continue the loop and don't try the other
|
||||
// tests as they will cause errors.
|
||||
if tc.err {
|
||||
continue
|
||||
}
|
||||
|
||||
if !constraintEq(tc.c, c) {
|
||||
t.Errorf("%q produced constraint %q, but expected %q", tc.in, c, tc.c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func constraintEq(c1, c2 Constraint) bool {
|
||||
switch tc1 := c1.(type) {
|
||||
case any:
|
||||
if _, ok := c2.(any); !ok {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
case none:
|
||||
if _, ok := c2.(none); !ok {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
case Version:
|
||||
if tc2, ok := c2.(Version); ok {
|
||||
return tc1.Equal(tc2)
|
||||
}
|
||||
return false
|
||||
case rangeConstraint:
|
||||
if tc2, ok := c2.(rangeConstraint); ok {
|
||||
if len(tc1.excl) != len(tc2.excl) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !tc1.minIsZero() {
|
||||
if !(tc1.includeMin == tc2.includeMin && tc1.min.Equal(tc2.min)) {
|
||||
return false
|
||||
}
|
||||
} else if !tc2.minIsZero() {
|
||||
return false
|
||||
}
|
||||
|
||||
if !tc1.maxIsInf() {
|
||||
if !(tc1.includeMax == tc2.includeMax && tc1.max.Equal(tc2.max)) {
|
||||
return false
|
||||
}
|
||||
} else if !tc2.maxIsInf() {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, e := range tc1.excl {
|
||||
if !e.Equal(tc2.excl[k]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
case unionConstraint:
|
||||
if tc2, ok := c2.(unionConstraint); ok {
|
||||
if len(tc1) != len(tc2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, c := range tc1 {
|
||||
if !constraintEq(c, tc2[k]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
panic("unknown type")
|
||||
}
|
||||
|
||||
// newV is a helper to create a new Version object.
|
||||
func newV(major, minor, patch uint64) Version {
|
||||
return Version{
|
||||
major: major,
|
||||
minor: minor,
|
||||
patch: patch,
|
||||
}
|
||||
}
|
||||
|
||||
func TestConstraintCheck(t *testing.T) {
|
||||
tests := []struct {
|
||||
constraint string
|
||||
version string
|
||||
check bool
|
||||
}{
|
||||
{"= 2.0", "1.2.3", false},
|
||||
{"= 2.0", "2.0.0", true},
|
||||
{"4.1", "4.1.0", true},
|
||||
{"!=4.1", "4.1.0", false},
|
||||
{"!=4.1", "5.1.0", true},
|
||||
{">1.1", "4.1.0", true},
|
||||
{">1.1", "1.1.0", false},
|
||||
{"<1.1", "0.1.0", true},
|
||||
{"<1.1", "1.1.0", false},
|
||||
{"<1.1", "1.1.1", false},
|
||||
{">=1.1", "4.1.0", true},
|
||||
{">=1.1", "1.1.0", true},
|
||||
{">=1.1", "0.0.9", false},
|
||||
{"<=1.1", "0.1.0", true},
|
||||
{"<=1.1", "1.1.0", true},
|
||||
{"<=1.1", "1.1.1", false},
|
||||
{"<=1.1-alpha1", "1.1", false},
|
||||
{"<=2.x", "3.0.0", false},
|
||||
{"<=2.x", "2.9.9", true},
|
||||
{"<2.x", "2.0.0", false},
|
||||
{"<2.x", "1.9.9", true},
|
||||
{">=2.x", "3.0.0", true},
|
||||
{">=2.x", "2.9.9", true},
|
||||
{">=2.x", "1.9.9", false},
|
||||
{">2.x", "3.0.0", true},
|
||||
{">2.x", "2.9.9", false},
|
||||
{">2.x", "1.9.9", false},
|
||||
{"<=2.x-alpha2", "3.0.0-alpha3", false},
|
||||
{"<=2.0.0", "2.0.0-alpha1", false},
|
||||
{">2.x-beta1", "3.0.0-alpha2", false},
|
||||
{"^2.0.0", "3.0.0-alpha2", false},
|
||||
{"^2.0.0", "2.0.0-alpha1", false},
|
||||
{"^2.1.0-alpha1", "2.1.0-alpha2", true}, // allow prerelease match within same major/minor/patch
|
||||
{"^2.1.0-alpha1", "2.1.1-alpha2", false}, // but ONLY within same major/minor/patch
|
||||
{"^2.1.0-alpha3", "2.1.0-alpha2", false}, // still respect prerelease ordering
|
||||
{"^2.0.0", "2.0.0-alpha2", false}, // and only if the min has a prerelease
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
if testing.Verbose() {
|
||||
t.Logf("Testing if %q allows %q", tc.constraint, tc.version)
|
||||
}
|
||||
c, err := parseConstraint(tc.constraint, false)
|
||||
if err != nil {
|
||||
t.Errorf("err: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
v, err := NewVersion(tc.version)
|
||||
if err != nil {
|
||||
t.Errorf("err: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
a := c.Matches(v) == nil
|
||||
if a != tc.check {
|
||||
if tc.check {
|
||||
t.Errorf("%q should have matched %q", tc.constraint, tc.version)
|
||||
} else {
|
||||
t.Errorf("%q should not have matched %q", tc.constraint, tc.version)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewConstraint(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
c Constraint
|
||||
err bool
|
||||
}{
|
||||
{">= 1.1", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
includeMin: true,
|
||||
}, false},
|
||||
{"2.0", newV(2, 0, 0), false},
|
||||
{">= bar", nil, true},
|
||||
{"^1.1.0", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
}, false},
|
||||
{">= 1.2.3, < 2.0 || => 3.0, < 4", unionConstraint{
|
||||
rangeConstraint{
|
||||
min: newV(1, 2, 3),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
},
|
||||
rangeConstraint{
|
||||
min: newV(3, 0, 0),
|
||||
max: newV(4, 0, 0),
|
||||
includeMin: true,
|
||||
},
|
||||
}, false},
|
||||
{"3-4 || => 1.0, < 2", Union(
|
||||
rangeConstraint{
|
||||
min: newV(3, 0, 0),
|
||||
max: newV(4, 0, 0),
|
||||
includeMin: true,
|
||||
includeMax: true,
|
||||
},
|
||||
rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
},
|
||||
), false},
|
||||
// demonstrates union compression
|
||||
{"3-4 || => 3.0, < 4", rangeConstraint{
|
||||
min: newV(3, 0, 0),
|
||||
max: newV(4, 0, 0),
|
||||
includeMin: true,
|
||||
includeMax: true,
|
||||
}, false},
|
||||
{">=1.1.0, <2.0.0", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
includeMax: false,
|
||||
}, false},
|
||||
{"!=1.4.0", rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: Version{special: infiniteVersion},
|
||||
excl: []Version{
|
||||
newV(1, 4, 0),
|
||||
},
|
||||
}, false},
|
||||
{">=1.1.0, !=1.4.0", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
includeMin: true,
|
||||
excl: []Version{
|
||||
newV(1, 4, 0),
|
||||
},
|
||||
}, false},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
c, err := NewConstraint(tc.input)
|
||||
if tc.err && err == nil {
|
||||
t.Errorf("expected but did not get error for: %s", tc.input)
|
||||
continue
|
||||
} else if !tc.err && err != nil {
|
||||
t.Errorf("unexpectederror for input %s: %s", tc.input, err)
|
||||
continue
|
||||
}
|
||||
if tc.err {
|
||||
continue
|
||||
}
|
||||
|
||||
if !constraintEq(tc.c, c) {
|
||||
t.Errorf("%q produced constraint %q, but expected %q", tc.input, c, tc.c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewConstraintIC(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
c Constraint
|
||||
err bool
|
||||
}{
|
||||
{"=2.0", newV(2, 0, 0), false},
|
||||
{"= 2.0", newV(2, 0, 0), false},
|
||||
{"1.1.0", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
}, false},
|
||||
{"1.1", rangeConstraint{
|
||||
min: newV(1, 1, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
}, false},
|
||||
{"v1.1.0-12-abc123", rangeConstraint{
|
||||
min: Version{major: 1, minor: 1, patch: 0, pre: "12-abc123"},
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
includeMax: false,
|
||||
}, false},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
c, err := NewConstraintIC(tc.input)
|
||||
if tc.err && err == nil {
|
||||
t.Errorf("expected but did not get error for: %s", tc.input)
|
||||
continue
|
||||
} else if !tc.err && err != nil {
|
||||
t.Errorf("unexpectederror for input %s: %s", tc.input, err)
|
||||
continue
|
||||
}
|
||||
if tc.err {
|
||||
continue
|
||||
}
|
||||
|
||||
if !constraintEq(tc.c, c) {
|
||||
t.Errorf("%q produced constraint %q, but expected %q", tc.input, c, tc.c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConstraintsCheck(t *testing.T) {
|
||||
tests := []struct {
|
||||
constraint string
|
||||
version string
|
||||
check bool
|
||||
}{
|
||||
{"*", "1.2.3", true},
|
||||
{"~0.0.0", "1.2.3", false},
|
||||
{"0.x.x", "1.2.3", false},
|
||||
{"0.0.x", "1.2.3", false},
|
||||
{"~0.0.0", "0.1.9", false},
|
||||
{"~0.0.0", "0.0.9", true},
|
||||
{"^0.0.0", "0.0.9", true},
|
||||
{"^0.0.0", "0.1.9", false}, // caret behaves like tilde below 1.0.0
|
||||
{"= 2.0", "1.2.3", false},
|
||||
{"= 2.0", "2.0.0", true},
|
||||
{"4.1", "4.1.0", true},
|
||||
{"4.1.x", "4.1.3", true},
|
||||
{"1.x", "1.4", true},
|
||||
{"!=4.1", "4.1.0", false},
|
||||
{"!=4.1", "5.1.0", true},
|
||||
{"!=4.x", "5.1.0", true},
|
||||
{"!=4.x", "4.1.0", false},
|
||||
{"!=4.1.x", "4.2.0", true},
|
||||
{"!=4.2.x", "4.2.3", false},
|
||||
{">1.1", "4.1.0", true},
|
||||
{">1.1", "1.1.0", false},
|
||||
{"<1.1", "0.1.0", true},
|
||||
{"<1.1", "1.1.0", false},
|
||||
{"<1.1", "1.1.1", false},
|
||||
{"<1.x", "1.1.1", false},
|
||||
{"<1.x", "0.9.1", true},
|
||||
{"<1.x", "2.1.1", false},
|
||||
{"<1.1.x", "1.2.1", false},
|
||||
{"<1.1.x", "1.1.500", false},
|
||||
{"<1.1.x", "1.0.500", true},
|
||||
{"<1.2.x", "1.1.1", true},
|
||||
{">=1.1", "4.1.0", true},
|
||||
{">=1.1", "1.1.0", true},
|
||||
{">=1.1", "0.0.9", false},
|
||||
{"<=1.1", "0.1.0", true},
|
||||
{"<=1.1", "1.1.0", true},
|
||||
{"<=1.x", "1.1.0", true},
|
||||
{"<=2.x", "3.1.0", false},
|
||||
{"<=1.1", "1.1.1", false},
|
||||
{"<=1.1.x", "1.2.500", false},
|
||||
{">1.1, <2", "1.1.1", true},
|
||||
{">1.1, <3", "4.3.2", false},
|
||||
{">=1.1, <2, !=1.2.3", "1.2.3", false},
|
||||
{">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true},
|
||||
{">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true},
|
||||
{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", false},
|
||||
{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false},
|
||||
{"1.1 - 2", "1.1.1", true},
|
||||
{"1.1-3", "4.3.2", false},
|
||||
{"^1.1", "1.1.1", true},
|
||||
{"^1.1", "4.3.2", false},
|
||||
{"^1.x", "1.1.1", true},
|
||||
{"^2.x", "1.1.1", false},
|
||||
{"^1.x", "2.1.1", false},
|
||||
{"~*", "2.1.1", true},
|
||||
{"~1.x", "2.1.1", false},
|
||||
{"~1.x", "1.3.5", true},
|
||||
{"~1.x", "1.4", true},
|
||||
{"~1.1", "1.1.1", true},
|
||||
{"~1.2.3", "1.2.5", true},
|
||||
{"~1.2.3", "1.2.2", false},
|
||||
{"~1.2.3", "1.3.2", false},
|
||||
{"~1.1", "1.2.3", false},
|
||||
{"~1.3", "2.4.5", false},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
c, err := NewConstraint(tc.constraint)
|
||||
if err != nil {
|
||||
t.Errorf("err: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
v, err := NewVersion(tc.version)
|
||||
if err != nil {
|
||||
t.Errorf("err: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
a := c.Matches(v) == nil
|
||||
if a != tc.check {
|
||||
if a {
|
||||
t.Errorf("Input %q produced constraint %q; should not have admitted %q, but did", tc.constraint, c, tc.version)
|
||||
} else {
|
||||
t.Errorf("Input %q produced constraint %q; should have admitted %q, but did not", tc.constraint, c, tc.version)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBidirectionalSerialization(t *testing.T) {
|
||||
tests := []struct {
|
||||
io string
|
||||
eq bool
|
||||
}{
|
||||
{"*", true}, // any
|
||||
{"~0.0.0", false}, // tildes expand into ranges
|
||||
{"=2.0", false}, // abbreviated versions print as full
|
||||
{"4.1.x", false}, // wildcards expand into ranges
|
||||
{">= 1.1.0", false}, // does not produce spaces on ranges
|
||||
{"4.1.0", true},
|
||||
{"!=4.1.0", true},
|
||||
{">=1.1.0", true},
|
||||
{">1.0.0, <=1.1.0", true},
|
||||
{"<=1.1.0", true},
|
||||
{">=1.1.7, <1.3.0", true}, // tilde width
|
||||
{">=1.1.0, <=2.0.0", true}, // no unary op on lte max
|
||||
{">1.1.3, <2.0.0", true}, // no unary op on gt min
|
||||
{">1.1.0, <=2.0.0", true}, // no unary op on gt min and lte max
|
||||
{">=1.1.0, <=1.2.0", true}, // no unary op on lte max
|
||||
{">1.1.1, <1.2.0", true}, // no unary op on gt min
|
||||
{">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max
|
||||
{">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max
|
||||
{">=0.1.7, <1.0.0", true}, // caret shifting below 1.0.0
|
||||
{">=0.1.7, <0.3.0", true}, // caret shifting width below 1.0.0
|
||||
}
|
||||
|
||||
for _, fix := range tests {
|
||||
c, err := NewConstraint(fix.io)
|
||||
if err != nil {
|
||||
t.Errorf("Valid constraint string produced unexpected error: %s", err)
|
||||
}
|
||||
|
||||
eq := fix.io == c.String()
|
||||
if eq != fix.eq {
|
||||
if eq {
|
||||
t.Errorf("Constraint %q should not have reproduced input string %q, but did", c, fix.io)
|
||||
} else {
|
||||
t.Errorf("Constraint should have reproduced input string %q, but instead produced %q", fix.io, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBidirectionalSerializationIC(t *testing.T) {
|
||||
tests := []struct {
|
||||
io string
|
||||
eq bool
|
||||
}{
|
||||
{"*", true}, // any
|
||||
{"=2.0.0", true}, // versions retain leading =
|
||||
{"2.0.0", true}, // (no) caret in, (no) caret out
|
||||
}
|
||||
|
||||
for _, fix := range tests {
|
||||
c, err := NewConstraintIC(fix.io)
|
||||
if err != nil {
|
||||
t.Errorf("Valid constraint string produced unexpected error: %s", err)
|
||||
}
|
||||
|
||||
eq := fix.io == c.ImpliedCaretString()
|
||||
if eq != fix.eq {
|
||||
if eq {
|
||||
t.Errorf("Constraint %q should not have reproduced input string %q, but did", c, fix.io)
|
||||
} else {
|
||||
t.Errorf("Constraint should have reproduced input string %q, but instead produced %q", fix.io, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreferUnaryOpForm(t *testing.T) {
|
||||
tests := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{">=0.1.7, <0.2.0", "^0.1.7"}, // caret shifting below 1.0.0
|
||||
{">=1.1.0, <2.0.0", "^1.1.0"},
|
||||
{">=1.1.0, <2.0.0, !=1.2.3", "^1.1.0, !=1.2.3"},
|
||||
}
|
||||
|
||||
for _, fix := range tests {
|
||||
c, err := NewConstraint(fix.in)
|
||||
if err != nil {
|
||||
t.Errorf("Valid constraint string produced unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if fix.out != c.String() {
|
||||
t.Errorf("Constraint %q was not transformed into expected output string %q", fix.in, fix.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRewriteRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
c string
|
||||
nc string
|
||||
}{
|
||||
{"2-3", ">= 2, <= 3"},
|
||||
{"2-3, 2-3", ">= 2, <= 3,>= 2, <= 3"},
|
||||
{"2-3, 4.0.0-5.1", ">= 2, <= 3,>= 4.0.0, <= 5.1"},
|
||||
{"v2-3, 2-3", "v2-3,>= 2, <= 3"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
o := rewriteRange(tc.c)
|
||||
|
||||
if o != tc.nc {
|
||||
t.Errorf("Range %s rewritten incorrectly as '%s'", tc.c, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsX(t *testing.T) {
|
||||
tests := []struct {
|
||||
t string
|
||||
c bool
|
||||
}{
|
||||
{"A", false},
|
||||
{"%", false},
|
||||
{"X", true},
|
||||
{"x", true},
|
||||
{"*", true},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
a := isX(tc.t)
|
||||
if a != tc.c {
|
||||
t.Errorf("Function isX error on %s", tc.t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnionErr(t *testing.T) {
|
||||
u1 := Union(
|
||||
rangeConstraint{
|
||||
min: newV(3, 0, 0),
|
||||
max: newV(4, 0, 0),
|
||||
includeMin: true,
|
||||
includeMax: true,
|
||||
},
|
||||
rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
},
|
||||
)
|
||||
fail := u1.Matches(newV(2, 5, 0))
|
||||
failstr := `2.5.0 is greater than or equal to the maximum of ^1.0.0
|
||||
2.5.0 is less than the minimum of >=3.0.0, <=4.0.0`
|
||||
if fail.Error() != failstr {
|
||||
t.Errorf("Did not get expected failure message from union, got %q", fail)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsSuperset(t *testing.T) {
|
||||
rc := []rangeConstraint{
|
||||
{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMin: true,
|
||||
},
|
||||
{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 1, 0),
|
||||
},
|
||||
{
|
||||
min: Version{special: zeroVersion},
|
||||
max: newV(1, 10, 0),
|
||||
},
|
||||
{
|
||||
min: newV(2, 0, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
},
|
||||
{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 0, 0),
|
||||
includeMax: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range rc {
|
||||
|
||||
// Superset comparison is not strict, so a range should always be a superset
|
||||
// of itself.
|
||||
if !c.isSupersetOf(c) {
|
||||
t.Errorf("Ranges should be supersets of themselves; %s indicated it was not", c)
|
||||
}
|
||||
}
|
||||
|
||||
pairs := []struct{ l, r rangeConstraint }{
|
||||
{
|
||||
// ensures lte is handled correctly (min side)
|
||||
l: rc[0],
|
||||
r: rc[1],
|
||||
},
|
||||
{
|
||||
// ensures nil on min side works well
|
||||
l: rc[0],
|
||||
r: rc[2],
|
||||
},
|
||||
{
|
||||
// ensures nil on max side works well
|
||||
l: rc[0],
|
||||
r: rc[3],
|
||||
},
|
||||
{
|
||||
// ensures nils on both sides work well
|
||||
l: rc[2],
|
||||
r: rc[3],
|
||||
},
|
||||
{
|
||||
// ensures gte is handled correctly (max side)
|
||||
l: rc[2],
|
||||
r: rc[4],
|
||||
},
|
||||
}
|
||||
|
||||
for _, p := range pairs {
|
||||
if p.l.isSupersetOf(p.r) {
|
||||
t.Errorf("%s is not a superset of %s", p.l, p.r)
|
||||
}
|
||||
if p.r.isSupersetOf(p.l) {
|
||||
t.Errorf("%s is not a superset of %s", p.r, p.l)
|
||||
}
|
||||
}
|
||||
|
||||
rc[1].max.minor = 0
|
||||
|
||||
if !rc[0].isSupersetOf(rc[1]) {
|
||||
t.Errorf("%s is a superset of %s", rc[0], rc[1])
|
||||
}
|
||||
rc[1].includeMax = true
|
||||
if rc[1].isSupersetOf(rc[0]) {
|
||||
t.Errorf("%s is not a superset of %s", rc[1], rc[0])
|
||||
}
|
||||
rc[0].includeMin = false
|
||||
if !rc[1].isSupersetOf(rc[0]) {
|
||||
t.Errorf("%s is a superset of %s", rc[1], rc[0])
|
||||
}
|
||||
|
||||
// isSupersetOf ignores excludes, so even though this would make rc[1] not a
|
||||
// superset of rc[0] anymore, it should still say it is.
|
||||
rc[1].excl = []Version{
|
||||
newV(1, 5, 0),
|
||||
}
|
||||
|
||||
if !rc[1].isSupersetOf(rc[0]) {
|
||||
t.Errorf("%s is still a superset of %s, because isSupersetOf is supposed to ignore excluded versions", rc[1], rc[0])
|
||||
}
|
||||
}
|
|
@ -1,932 +0,0 @@
|
|||
package semver
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestIntersection(t *testing.T) {
|
||||
var actual Constraint
|
||||
rc1 := rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
|
||||
if actual = Intersection(); !IsNone(actual) {
|
||||
t.Errorf("Intersection of nothing should always produce None; got %q", actual)
|
||||
}
|
||||
|
||||
if actual = Intersection(rc1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Intersection of one item should always return that item; got %q", actual)
|
||||
}
|
||||
|
||||
if actual = Intersection(rc1, None()); !IsNone(actual) {
|
||||
t.Errorf("Intersection of anything with None should always produce None; got %q", actual)
|
||||
}
|
||||
|
||||
if actual = Intersection(rc1, Any()); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Intersection of anything with Any should return self; got %q", actual)
|
||||
}
|
||||
|
||||
v1 := newV(1, 5, 0)
|
||||
if actual = Intersection(rc1, v1); !constraintEq(actual, v1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, v1)
|
||||
}
|
||||
|
||||
rc2 := rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
result := rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
|
||||
if actual = Intersection(rc1, rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
u1 := unionConstraint{
|
||||
rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(3, 0, 0),
|
||||
},
|
||||
newV(3, 1, 0),
|
||||
}
|
||||
|
||||
if actual = Intersection(u1, rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = Intersection(rc1, newV(2, 0, 5), u1); !IsNone(actual) {
|
||||
t.Errorf("First two are disjoint, should have gotten None but got %q", actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRangeIntersection(t *testing.T) {
|
||||
var actual Constraint
|
||||
// Test magic cases
|
||||
rc1 := rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Intersection of anything with Any should return self; got %q", actual)
|
||||
}
|
||||
if actual = rc1.Intersect(None()); !IsNone(actual) {
|
||||
t.Errorf("Intersection of anything with None should always produce None; got %q", actual)
|
||||
}
|
||||
|
||||
// Test single version cases
|
||||
|
||||
// single v, in range
|
||||
v1 := newV(1, 5, 0)
|
||||
|
||||
if actual = rc1.Intersect(v1); !constraintEq(actual, v1) {
|
||||
t.Errorf("Intersection of version with matching range should return the version; got %q", actual)
|
||||
}
|
||||
|
||||
// now exclude just that version
|
||||
rc1.excl = []Version{v1}
|
||||
if actual = rc1.Intersect(v1); !IsNone(actual) {
|
||||
t.Errorf("Intersection of version with range having specific exclude for that version should produce None; got %q", actual)
|
||||
}
|
||||
|
||||
// and, of course, none if the version is out of range
|
||||
v2 := newV(0, 5, 0)
|
||||
if actual = rc1.Intersect(v2); !IsNone(actual) {
|
||||
t.Errorf("Intersection of version with non-matching range should produce None; got %q", actual)
|
||||
}
|
||||
|
||||
// Test basic overlap case
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 := rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
result := rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// And with includes
|
||||
rc1.includeMin = true
|
||||
rc1.includeMax = true
|
||||
rc2.includeMin = true
|
||||
rc2.includeMax = true
|
||||
result.includeMin = true
|
||||
result.includeMax = true
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Overlaps with nils
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
result = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// And with includes
|
||||
rc1.includeMin = true
|
||||
rc2.includeMax = true
|
||||
result.includeMin = true
|
||||
result.includeMax = true
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Test superset overlap case
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
result = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Make sure irrelevant includes don't leak in
|
||||
rc2.includeMin = true
|
||||
rc2.includeMax = true
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// But relevant includes get used
|
||||
rc1.includeMin = true
|
||||
rc1.includeMax = true
|
||||
result.includeMin = true
|
||||
result.includeMax = true
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Test disjoint case
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(1, 6, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(2, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, None())
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, None())
|
||||
}
|
||||
|
||||
// Test disjoint at gt/lt boundary (non-adjacent)
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(2, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, None())
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, None())
|
||||
}
|
||||
|
||||
// Now, just have them touch at a single version
|
||||
rc1.includeMax = true
|
||||
rc2.includeMin = true
|
||||
|
||||
vresult := newV(2, 0, 0)
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, vresult) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, vresult)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, vresult) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, vresult)
|
||||
}
|
||||
|
||||
// Test excludes in intersection range
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
excl: []Version{
|
||||
newV(1, 6, 0),
|
||||
},
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
|
||||
// Test excludes not in intersection range
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
excl: []Version{
|
||||
newV(1, 1, 0),
|
||||
},
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
|
||||
// Test min, and greater min
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
includeMin: true,
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Test max, and lesser max
|
||||
rc1 = rangeConstraint{
|
||||
max: newV(1, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
max: newV(1, 5, 0),
|
||||
}
|
||||
result = rangeConstraint{
|
||||
max: newV(1, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Ensure pure excludes come through as they should
|
||||
rc1 = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: Version{special: infiniteVersion},
|
||||
excl: []Version{
|
||||
newV(1, 6, 0),
|
||||
},
|
||||
}
|
||||
|
||||
rc2 = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: Version{special: infiniteVersion},
|
||||
excl: []Version{
|
||||
newV(1, 6, 0),
|
||||
newV(1, 7, 0),
|
||||
},
|
||||
}
|
||||
|
||||
if actual = Any().Intersect(rc1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc2)
|
||||
}
|
||||
|
||||
// TODO test the pre-release special range stuff
|
||||
}
|
||||
|
||||
func TestRangeUnion(t *testing.T) {
|
||||
var actual Constraint
|
||||
// Test magic cases
|
||||
rc1 := rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
if actual = rc1.Union(Any()); !IsAny(actual) {
|
||||
t.Errorf("Union of anything with Any should always produce Any; got %q", actual)
|
||||
}
|
||||
if actual = rc1.Union(None()); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Union of anything with None should return self; got %q", actual)
|
||||
}
|
||||
|
||||
// Test single version cases
|
||||
|
||||
// single v, in range
|
||||
v1 := newV(1, 5, 0)
|
||||
|
||||
if actual = rc1.Union(v1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Union of version with matching range should return the range; got %q", actual)
|
||||
}
|
||||
|
||||
// now exclude just that version
|
||||
rc2 := rc1.dup()
|
||||
rc2.excl = []Version{v1}
|
||||
if actual = rc2.Union(v1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Union of version with range having specific exclude for that version should produce the range without that exclude; got %q", actual)
|
||||
}
|
||||
|
||||
// and a union if the version is not within the range
|
||||
v2 := newV(0, 5, 0)
|
||||
uresult := unionConstraint{v2, rc1}
|
||||
if actual = rc1.Union(v2); !constraintEq(actual, uresult) {
|
||||
t.Errorf("Union of version with non-matching range should produce a unionConstraint with those two; got %q", actual)
|
||||
}
|
||||
|
||||
// union with version at the min should ensure "oreq"
|
||||
v2 = newV(1, 0, 0)
|
||||
rc3 := rc1
|
||||
rc3.includeMin = true
|
||||
|
||||
if actual = rc1.Union(v2); !constraintEq(actual, rc3) {
|
||||
t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual)
|
||||
}
|
||||
if actual = v2.Union(rc1); !constraintEq(actual, rc3) {
|
||||
t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual)
|
||||
}
|
||||
|
||||
// same at max end
|
||||
v2 = newV(2, 0, 0)
|
||||
rc3.includeMin = false
|
||||
rc3.includeMax = true
|
||||
|
||||
if actual = rc1.Union(v2); !constraintEq(actual, rc3) {
|
||||
t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual)
|
||||
}
|
||||
if actual = v2.Union(rc1); !constraintEq(actual, rc3) {
|
||||
t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual)
|
||||
}
|
||||
|
||||
// Test basic overlap case
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
result := rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// And with includes
|
||||
rc1.includeMin = true
|
||||
rc1.includeMax = true
|
||||
rc2.includeMin = true
|
||||
rc2.includeMax = true
|
||||
result.includeMin = true
|
||||
result.includeMax = true
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Overlaps with nils
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, Any()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, Any())
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, Any()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, Any())
|
||||
}
|
||||
|
||||
// Just one nil in overlap
|
||||
rc1.max = newV(2, 0, 0)
|
||||
result = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
rc1.max = Version{special: infiniteVersion}
|
||||
rc2.min = newV(1, 5, 0)
|
||||
result = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: Version{special: infiniteVersion},
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Test superset overlap case
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc2)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc2)
|
||||
}
|
||||
|
||||
// Test disjoint case
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(1, 6, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(2, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
uresult = unionConstraint{rc1, rc2}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, uresult) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, uresult)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, uresult) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, uresult)
|
||||
}
|
||||
|
||||
// Test disjoint at gt/lt boundary (non-adjacent)
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(2, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
uresult = unionConstraint{rc1, rc2}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, uresult) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, uresult)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, uresult) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, uresult)
|
||||
}
|
||||
|
||||
// Now, just have them touch at a single version
|
||||
rc1.includeMax = true
|
||||
rc2.includeMin = true
|
||||
result = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// And top-adjacent at that version
|
||||
rc2.includeMin = false
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
// And bottom-adjacent at that version
|
||||
rc1.includeMax = false
|
||||
rc2.includeMin = true
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
|
||||
// Test excludes in overlapping range
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
excl: []Version{
|
||||
newV(1, 6, 0),
|
||||
},
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc2)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc2)
|
||||
}
|
||||
|
||||
// Test excludes not in non-overlapping range
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
excl: []Version{
|
||||
newV(1, 1, 0),
|
||||
},
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc2)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc2)
|
||||
}
|
||||
|
||||
// Ensure pure excludes come through as they should
|
||||
rc1 = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: Version{special: infiniteVersion},
|
||||
excl: []Version{
|
||||
newV(1, 6, 0),
|
||||
},
|
||||
}
|
||||
|
||||
rc2 = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: Version{special: infiniteVersion},
|
||||
excl: []Version{
|
||||
newV(1, 6, 0),
|
||||
newV(1, 7, 0),
|
||||
},
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
|
||||
rc1 = rangeConstraint{
|
||||
min: Version{special: zeroVersion},
|
||||
max: Version{special: infiniteVersion},
|
||||
excl: []Version{
|
||||
newV(1, 5, 0),
|
||||
},
|
||||
}
|
||||
|
||||
if actual = rc1.Union(rc2); !constraintEq(actual, Any()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, Any())
|
||||
}
|
||||
if actual = rc2.Union(rc1); !constraintEq(actual, Any()) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, Any())
|
||||
}
|
||||
|
||||
// TODO test the pre-release special range stuff
|
||||
}
|
||||
|
||||
func TestUnionIntersection(t *testing.T) {
|
||||
var actual Constraint
|
||||
// magic first
|
||||
u1 := unionConstraint{
|
||||
newV(1, 1, 0),
|
||||
newV(1, 2, 0),
|
||||
newV(1, 3, 0),
|
||||
}
|
||||
if actual = u1.Intersect(Any()); !constraintEq(actual, u1) {
|
||||
t.Errorf("Intersection of anything with Any should return self; got %s", actual)
|
||||
}
|
||||
if actual = u1.Intersect(None()); !IsNone(actual) {
|
||||
t.Errorf("Intersection of anything with None should always produce None; got %s", actual)
|
||||
}
|
||||
if u1.MatchesAny(None()) {
|
||||
t.Errorf("Can't match any when intersected with None")
|
||||
}
|
||||
|
||||
// intersect of unions with single versions
|
||||
v1 := newV(1, 1, 0)
|
||||
if actual = u1.Intersect(v1); !constraintEq(actual, v1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, v1)
|
||||
}
|
||||
if actual = v1.Intersect(u1); !constraintEq(actual, v1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, v1)
|
||||
}
|
||||
|
||||
// intersect of range with union of versions
|
||||
u1 = unionConstraint{
|
||||
newV(1, 1, 0),
|
||||
newV(1, 2, 0),
|
||||
newV(1, 3, 0),
|
||||
}
|
||||
rc1 := rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
|
||||
if actual = u1.Intersect(rc1); !constraintEq(actual, u1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, u1)
|
||||
}
|
||||
if actual = rc1.Intersect(u1); !constraintEq(actual, u1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, u1)
|
||||
}
|
||||
|
||||
u2 := unionConstraint{
|
||||
newV(1, 1, 0),
|
||||
newV(1, 2, 0),
|
||||
}
|
||||
|
||||
if actual = u1.Intersect(u2); !constraintEq(actual, u2) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, u2)
|
||||
}
|
||||
|
||||
// Overlapping sub/supersets
|
||||
rc1 = rangeConstraint{
|
||||
min: newV(1, 5, 0),
|
||||
max: newV(1, 6, 0),
|
||||
}
|
||||
rc2 := rangeConstraint{
|
||||
min: newV(2, 0, 0),
|
||||
max: newV(3, 0, 0),
|
||||
}
|
||||
rc3 = rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc4 := rangeConstraint{
|
||||
min: newV(2, 5, 0),
|
||||
max: newV(2, 6, 0),
|
||||
}
|
||||
u1 = unionConstraint{rc1, rc2}
|
||||
u2 = unionConstraint{rc3, rc4}
|
||||
ur := unionConstraint{rc1, rc4}
|
||||
|
||||
if actual = u1.Intersect(u2); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
if actual = u2.Intersect(u1); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
|
||||
// Ensure excludes carry as they should
|
||||
rc1.excl = []Version{newV(1, 5, 5)}
|
||||
u1 = unionConstraint{rc1, rc2}
|
||||
ur = unionConstraint{rc1, rc4}
|
||||
|
||||
if actual = u1.Intersect(u2); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
if actual = u2.Intersect(u1); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnionUnion(t *testing.T) {
|
||||
var actual Constraint
|
||||
// magic first
|
||||
u1 := unionConstraint{
|
||||
newV(1, 1, 0),
|
||||
newV(1, 2, 0),
|
||||
newV(1, 3, 0),
|
||||
}
|
||||
if actual = u1.Union(Any()); !IsAny(actual) {
|
||||
t.Errorf("Union of anything with Any should always return Any; got %s", actual)
|
||||
}
|
||||
if actual = u1.Union(None()); !constraintEq(actual, u1) {
|
||||
t.Errorf("Union of anything with None should always return self; got %s", actual)
|
||||
}
|
||||
|
||||
// union of uc with single versions
|
||||
// already present
|
||||
v1 := newV(1, 2, 0)
|
||||
if actual = u1.Union(v1); !constraintEq(actual, u1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, u1)
|
||||
}
|
||||
if actual = v1.Union(u1); !constraintEq(actual, u1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, u1)
|
||||
}
|
||||
|
||||
// not present
|
||||
v2 := newV(1, 4, 0)
|
||||
ur := append(u1, v2)
|
||||
if actual = u1.Union(v2); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
if actual = v2.Union(u1); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
|
||||
// union of uc with uc, all versions
|
||||
u2 := unionConstraint{
|
||||
newV(1, 3, 0),
|
||||
newV(1, 4, 0),
|
||||
newV(1, 5, 0),
|
||||
}
|
||||
ur = unionConstraint{
|
||||
newV(1, 1, 0),
|
||||
newV(1, 2, 0),
|
||||
newV(1, 3, 0),
|
||||
newV(1, 4, 0),
|
||||
newV(1, 5, 0),
|
||||
}
|
||||
|
||||
if actual = u1.Union(u2); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
if actual = u2.Union(u1); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
|
||||
// union that should compress versions into range
|
||||
rc1 := rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
|
||||
if actual = u1.Union(rc1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
if actual = rc1.Union(u1); !constraintEq(actual, rc1) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, rc1)
|
||||
}
|
||||
|
||||
rc1.max = newV(1, 4, 5)
|
||||
u3 := append(u2, newV(1, 7, 0))
|
||||
ur = unionConstraint{
|
||||
rc1,
|
||||
newV(1, 5, 0),
|
||||
newV(1, 7, 0),
|
||||
}
|
||||
|
||||
if actual = u3.Union(rc1); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
if actual = rc1.Union(u3); !constraintEq(actual, ur) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, ur)
|
||||
}
|
||||
}
|
||||
|
||||
// Most version stuff got tested by range and/or union b/c most tests were
|
||||
// repeated bidirectionally (set operations are commutative; testing in pairs
|
||||
// helps us catch any situation where we fail to maintain that invariant)
|
||||
func TestVersionSetOps(t *testing.T) {
|
||||
var actual Constraint
|
||||
|
||||
v1 := newV(1, 0, 0)
|
||||
|
||||
if actual = v1.Intersect(v1); !constraintEq(actual, v1) {
|
||||
t.Errorf("Version intersected with itself should be itself, got %q", actual)
|
||||
}
|
||||
if !v1.MatchesAny(v1) {
|
||||
t.Errorf("MatchesAny should work with a version against itself")
|
||||
}
|
||||
|
||||
v2 := newV(2, 0, 0)
|
||||
if actual = v1.Intersect(v2); !IsNone(actual) {
|
||||
t.Errorf("Versions should only intersect with themselves, got %q", actual)
|
||||
}
|
||||
if v1.MatchesAny(v2) {
|
||||
t.Errorf("MatchesAny should not work when combined with anything other than itself")
|
||||
}
|
||||
|
||||
result := unionConstraint{v1, v2}
|
||||
|
||||
if actual = v1.Union(v1); !constraintEq(actual, v1) {
|
||||
t.Errorf("Version union with itself should return self, got %q", actual)
|
||||
}
|
||||
|
||||
if actual = v1.Union(v2); !constraintEq(actual, result) {
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
if actual = v1.Union(v2); !constraintEq(actual, result) {
|
||||
// Duplicate just to make sure ordering works right
|
||||
t.Errorf("Got constraint %q, but expected %q", actual, result)
|
||||
}
|
||||
}
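
The bidirectional pattern used throughout these tests could be factored into a small helper; a minimal sketch follows. It assumes only the `Constraint` type and the `constraintEq` helper already used in this file; `assertCommutes` itself is hypothetical and not part of the original code.

    // assertCommutes is a hypothetical helper illustrating the invariant the
    // comment above describes: a set operation applied in either order must
    // yield equivalent constraints.
    func assertCommutes(t *testing.T, ab, ba Constraint, op string) {
        if !constraintEq(ab, ba) {
            t.Errorf("%s is not commutative: %q vs %q", op, ab, ba)
        }
    }

    // Example usage inside a test:
    //   assertCommutes(t, v1.Union(v2), v2.Union(v1), "Union")
    //   assertCommutes(t, v1.Intersect(v2), v2.Intersect(v1), "Intersect")
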
|
||||
|
||||
func TestAreAdjacent(t *testing.T) {
|
||||
rc1 := rangeConstraint{
|
||||
min: newV(1, 0, 0),
|
||||
max: newV(2, 0, 0),
|
||||
}
|
||||
rc2 := rangeConstraint{
|
||||
min: newV(1, 2, 0),
|
||||
max: newV(2, 2, 0),
|
||||
}
|
||||
|
||||
if areAdjacent(rc1, rc2) {
|
||||
t.Errorf("Ranges overlap, should not indicate as adjacent")
|
||||
}
|
||||
|
||||
rc2 = rangeConstraint{
|
||||
min: newV(2, 0, 0),
|
||||
}
|
||||
|
||||
if areAdjacent(rc1, rc2) {
|
||||
t.Errorf("Ranges are non-overlapping and non-adjacent, but reported as adjacent")
|
||||
}
|
||||
|
||||
rc2.includeMin = true
|
||||
|
||||
if !areAdjacent(rc1, rc2) {
|
||||
t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent")
|
||||
}
|
||||
|
||||
rc1.includeMax = true
|
||||
|
||||
if areAdjacent(rc1, rc2) {
|
||||
t.Errorf("Ranges are overlapping at a single version, but reported as adjacent")
|
||||
}
|
||||
|
||||
rc2.includeMin = false
|
||||
if !areAdjacent(rc1, rc2) {
|
||||
t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent")
|
||||
}
|
||||
}
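
For reference, the adjacency rule these cases assert can be summarised as: two ranges are adjacent when the upper bound of one equals the lower bound of the other and exactly one of the two touching endpoints is inclusive. Below is a sketch of that predicate; it is an illustration of the expected behaviour (using only the `min`/`max`/`includeMin`/`includeMax` fields and `Version.Compare` seen in this file), not necessarily the package's actual implementation, and `sketchAreAdjacent` is a hypothetical name.

    // sketchAreAdjacent restates the behaviour TestAreAdjacent expects from
    // areAdjacent, for a lower range l and an upper range r.
    func sketchAreAdjacent(l, r rangeConstraint) bool {
        if l.max.Compare(r.min) != 0 {
            // The ranges do not touch at all.
            return false
        }
        // If both endpoints include the shared version the ranges overlap at
        // that version; if neither does, that version falls into a gap. Only
        // exactly one inclusive endpoint makes the ranges adjacent.
        return l.includeMax != r.includeMin
    }
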
|
|
@ -1,310 +0,0 @@
|
|||
package semver
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
version string
|
||||
err bool
|
||||
}{
|
||||
{"1.2.3", false},
|
||||
{"v1.2.3", false},
|
||||
{"1.0", false},
|
||||
{"v1.0", false},
|
||||
{"1", false},
|
||||
{"v1", false},
|
||||
{"1.2.beta", true},
|
||||
{"v1.2.beta", true},
|
||||
{"foo", true},
|
||||
{"1.2-5", false},
|
||||
{"v1.2-5", false},
|
||||
{"1.2-beta.5", false},
|
||||
{"v1.2-beta.5", false},
|
||||
{"\n1.2", true},
|
||||
{"\nv1.2", true},
|
||||
{"1.2.0-x.Y.0+metadata", false},
|
||||
{"v1.2.0-x.Y.0+metadata", false},
|
||||
{"1.2.0-x.Y.0+metadata-width-hypen", false},
|
||||
{"v1.2.0-x.Y.0+metadata-width-hypen", false},
|
||||
{"1.2.3-rc1-with-hypen", false},
|
||||
{"v1.2.3-rc1-with-hypen", false},
|
||||
{"1.2.3.4", true},
|
||||
{"v1.2.3.4", true},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
_, err := NewVersion(tc.version)
|
||||
if tc.err && err == nil {
|
||||
t.Fatalf("expected error for version: %s", tc.version)
|
||||
} else if !tc.err && err != nil {
|
||||
t.Fatalf("error for version %s: %s", tc.version, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOriginal(t *testing.T) {
|
||||
tests := []string{
|
||||
"1.2.3",
|
||||
"v1.2.3",
|
||||
"1.0",
|
||||
"v1.0",
|
||||
"1",
|
||||
"v1",
|
||||
"1.2-5",
|
||||
"v1.2-5",
|
||||
"1.2-beta.5",
|
||||
"v1.2-beta.5",
|
||||
"1.2.0-x.Y.0+metadata",
|
||||
"v1.2.0-x.Y.0+metadata",
|
||||
"1.2.0-x.Y.0+metadata-width-hypen",
|
||||
"v1.2.0-x.Y.0+metadata-width-hypen",
|
||||
"1.2.3-rc1-with-hypen",
|
||||
"v1.2.3-rc1-with-hypen",
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
v, err := NewVersion(tc)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version %s", tc)
|
||||
}
|
||||
|
||||
o := v.Original()
|
||||
if o != tc {
|
||||
t.Errorf("Error retrieving originl. Expected '%s' but got '%s'", tc, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParts(t *testing.T) {
|
||||
v, err := NewVersion("1.2.3-beta.1+build.123")
|
||||
if err != nil {
|
||||
t.Error("Error parsing version 1.2.3-beta.1+build.123")
|
||||
}
|
||||
|
||||
if v.Major() != 1 {
|
||||
t.Error("Major() returning wrong value")
|
||||
}
|
||||
if v.Minor() != 2 {
|
||||
t.Error("Minor() returning wrong value")
|
||||
}
|
||||
if v.Patch() != 3 {
|
||||
t.Error("Patch() returning wrong value")
|
||||
}
|
||||
if v.Prerelease() != "beta.1" {
|
||||
t.Error("Prerelease() returning wrong value")
|
||||
}
|
||||
if v.Metadata() != "build.123" {
|
||||
t.Error("Metadata() returning wrong value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
tests := []struct {
|
||||
version string
|
||||
expected string
|
||||
}{
|
||||
{"1.2.3", "1.2.3"},
|
||||
{"v1.2.3", "1.2.3"},
|
||||
{"1.0", "1.0.0"},
|
||||
{"v1.0", "1.0.0"},
|
||||
{"1", "1.0.0"},
|
||||
{"v1", "1.0.0"},
|
||||
{"1.2-5", "1.2.0-5"},
|
||||
{"v1.2-5", "1.2.0-5"},
|
||||
{"1.2-beta.5", "1.2.0-beta.5"},
|
||||
{"v1.2-beta.5", "1.2.0-beta.5"},
|
||||
{"1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"},
|
||||
{"v1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"},
|
||||
{"1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"},
|
||||
{"v1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"},
|
||||
{"1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"},
|
||||
{"v1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
v, err := NewVersion(tc.version)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version %s", tc)
|
||||
}
|
||||
|
||||
s := v.String()
|
||||
if s != tc.expected {
|
||||
t.Errorf("Error generating string. Expected '%s' but got '%s'", tc.expected, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompare(t *testing.T) {
|
||||
tests := []struct {
|
||||
v1 string
|
||||
v2 string
|
||||
expected int
|
||||
}{
|
||||
{"1.2.3", "1.5.1", -1},
|
||||
{"2.2.3", "1.5.1", 1},
|
||||
{"2.2.3", "2.2.2", 1},
|
||||
{"3.2-beta", "3.2-beta", 0},
|
||||
{"1.3", "1.1.4", 1},
|
||||
{"4.2", "4.2-beta", 1},
|
||||
{"4.2-beta", "4.2", -1},
|
||||
{"4.2-alpha", "4.2-beta", -1},
|
||||
{"4.2-alpha", "4.2-alpha", 0},
|
||||
{"4.2-beta.2", "4.2-beta.1", 1},
|
||||
{"4.2-beta2", "4.2-beta1", 1},
|
||||
{"4.2-beta", "4.2-beta.2", -1},
|
||||
{"4.2-beta", "4.2-beta.foo", 1},
|
||||
{"4.2-beta.2", "4.2-beta", 1},
|
||||
{"4.2-beta.foo", "4.2-beta", -1},
|
||||
{"1.2+bar", "1.2+baz", 0},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
v1, err := NewVersion(tc.v1)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
v2, err := NewVersion(tc.v2)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
a := v1.Compare(v2)
|
||||
e := tc.expected
|
||||
if a != e {
|
||||
t.Errorf(
|
||||
"Comparison of '%s' and '%s' failed. Expected '%d', got '%d'",
|
||||
tc.v1, tc.v2, e, a,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// One-off tests for special version comparisons
|
||||
zero := Version{special: zeroVersion}
|
||||
inf := Version{special: infiniteVersion}
|
||||
|
||||
if zero.Compare(inf) != -1 {
|
||||
t.Error("Zero version should always be less than infinite version")
|
||||
}
|
||||
if zero.Compare(zero) != 0 {
|
||||
t.Error("Zero version should equal itself")
|
||||
}
|
||||
if inf.Compare(zero) != 1 {
|
||||
t.Error("Infinite version should always be greater than zero version")
|
||||
}
|
||||
if inf.Compare(inf) != 0 {
|
||||
t.Error("Infinite version should equal itself")
|
||||
}
|
||||
|
||||
// Need to work vs. a normal version, too.
|
||||
v := Version{}
|
||||
|
||||
if zero.Compare(v) != -1 {
|
||||
t.Error("Zero version should always be less than any normal version")
|
||||
}
|
||||
if inf.Compare(v) != 1 {
|
||||
t.Error("Infinite version should always be greater than any normal version")
|
||||
}
|
||||
}
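
A minimal illustrative sketch of the two precedence rules the table above encodes (a pre-release sorts below its release; build metadata is ignored): it assumes only `NewVersion` and `Compare` from this package, that `fmt` is imported, and `sketchPrecedence` is a hypothetical name.

    func sketchPrecedence() {
        release, _ := NewVersion("4.2")
        beta, _ := NewVersion("4.2-beta")
        bar, _ := NewVersion("1.2+bar")
        baz, _ := NewVersion("1.2+baz")

        fmt.Println(beta.Compare(release)) // -1: a pre-release is lower than its release
        fmt.Println(bar.Compare(baz))      // 0: build metadata does not affect precedence
    }
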
|
||||
|
||||
func TestLessThan(t *testing.T) {
|
||||
tests := []struct {
|
||||
v1 string
|
||||
v2 string
|
||||
expected bool
|
||||
}{
|
||||
{"1.2.3", "1.5.1", true},
|
||||
{"2.2.3", "1.5.1", false},
|
||||
{"3.2-beta", "3.2-beta", false},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
v1, err := NewVersion(tc.v1)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
v2, err := NewVersion(tc.v2)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
a := v1.LessThan(v2)
|
||||
e := tc.expected
|
||||
if a != e {
|
||||
t.Errorf(
|
||||
"Comparison of '%s' and '%s' failed. Expected '%t', got '%t'",
|
||||
tc.v1, tc.v2, e, a,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGreaterThan(t *testing.T) {
|
||||
tests := []struct {
|
||||
v1 string
|
||||
v2 string
|
||||
expected bool
|
||||
}{
|
||||
{"1.2.3", "1.5.1", false},
|
||||
{"2.2.3", "1.5.1", true},
|
||||
{"3.2-beta", "3.2-beta", false},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
v1, err := NewVersion(tc.v1)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
v2, err := NewVersion(tc.v2)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
a := v1.GreaterThan(v2)
|
||||
e := tc.expected
|
||||
if a != e {
|
||||
t.Errorf(
|
||||
"Comparison of '%s' and '%s' failed. Expected '%t', got '%t'",
|
||||
tc.v1, tc.v2, e, a,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
tests := []struct {
|
||||
v1 string
|
||||
v2 string
|
||||
expected bool
|
||||
}{
|
||||
{"1.2.3", "1.5.1", false},
|
||||
{"2.2.3", "1.5.1", false},
|
||||
{"3.2-beta", "3.2-beta", true},
|
||||
{"3.2-beta+foo", "3.2-beta+bar", true},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
v1, err := NewVersion(tc.v1)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
v2, err := NewVersion(tc.v2)
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing version: %s", err)
|
||||
}
|
||||
|
||||
a := v1.Equal(v2)
|
||||
e := tc.expected
|
||||
if a != e {
|
||||
t.Errorf(
|
||||
"Comparison of '%s' and '%s' failed. Expected '%t', got '%t'",
|
||||
tc.v1, tc.v2, e, a,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
@ -1,29 +0,0 @@
language: go

go:
  - 1.6
  - 1.7
  - 1.8
  - tip

before_script:
  - git version
  - svn --version

# Setting sudo access to false will let Travis CI use containers rather than
# VMs to run the tests. For more details see:
#   - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
#   - http://docs.travis-ci.com/user/workers/standard-infrastructure/
sudo: false

script:
  - make setup
  - make test

notifications:
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/06e3328629952dabe3e0
    on_success: change  # options: [always|never|change] default: always
    on_failure: always  # options: [always|never|change] default: always
    on_start: never     # options: [always|never|change] default: always
@ -1,148 +0,0 @@
|
|||
# 1.11.1 (2017-04-28)
|
||||
|
||||
## Fixed
|
||||
- #76: Fix submodule handling for Windows (thanks @m0j0hn)
|
||||
|
||||
# 1.11.0 (2017-03-23)
|
||||
|
||||
## Added
|
||||
- #65: Exposed CmdFromDir function (thanks @erizocosmico)
|
||||
|
||||
## Changed
|
||||
- #69: Updated testing for Go 1.8
|
||||
|
||||
## Fixed
|
||||
- #64: Testing fatal error if bzr not installed (thanks @kevinburke)
|
||||
|
||||
# 1.10.2 (2017-01-24)
|
||||
|
||||
## Fixed
|
||||
- #63: Remove extra quotes in submodule export (thanks @dt)
|
||||
|
||||
# 1.10.1 (2017-01-18)
|
||||
|
||||
## Fixed
|
||||
- #62: Added windows testing via appveyor and fixed issues under windows.
|
||||
|
||||
# 1.10.0 (2017-01-09)
|
||||
|
||||
## Added
|
||||
- #60: Handle Git submodules (thanks @sdboyer)
|
||||
- #61: Add gometalinter to testing
|
||||
|
||||
# 1.9.0 (2016-11-18)
|
||||
|
||||
## Added
|
||||
- #50: Auto-detect remotes with file:// prefix.
|
||||
- #59: Testing against Go 1.7
|
||||
|
||||
## Changed
|
||||
- Removed auto-detection for Google Code as the service is deprecated
|
||||
- Added auto-detection of git.openstack.org
|
||||
|
||||
## Fixed
|
||||
- #53: Git not fetching tags off branch
|
||||
|
||||
# 1.8.0 (2016-06-29)
|
||||
|
||||
## Added
|
||||
- #43: Detect when tool (e.g., git, svn, etc) not installed
|
||||
- #49: Detect access denied and not found situations
|
||||
|
||||
## Changed
|
||||
- #48: Updated Go Report Gard url to new format
|
||||
- Refactored SVN handling to detect when not in a top level directory
|
||||
- Updating tagging to v[SemVer] structure for compatibility with other tools.
|
||||
|
||||
## Fixed
|
||||
- #45: Fixed hg's update method so that it pulls from remote before updates
|
||||
|
||||
# 1.7.0 (2016-05-05)
|
||||
|
||||
- Adds a glide.yaml file with some limited information.
|
||||
- Implements #37: Ability to export source as a directory.
|
||||
- Implements #36: Get current version-ish with Current method. This returns
|
||||
a branch (if on tip) or equivalent tip, a tag if on a tag, or a revision if
|
||||
on an individual revision. Note, the tip of branch is VCS specific so usage
|
||||
may require detecting VCS type.
|
||||
|
||||
# 1.6.1 (2016-04-27)
|
||||
|
||||
- Fixed #30: tags from commit should not have ^{} appended (seen in git)
|
||||
- Fixed #29: isDetachedHead fails with non-english locales (git)
|
||||
- Fixed #33: Access denied and not found http errors causing xml parsing errors
|
||||
|
||||
# 1.6.0 (2016-04-18)
|
||||
|
||||
- Issue #26: Added Init method to initialize a repo at the local location
|
||||
(thanks tony).
|
||||
- Issue #19: Added method to retrieve tags for a commit.
|
||||
- Issue #24: Reworked errors returned from common methods. Now differing
|
||||
VCS implementations return the same errors. The original VCS specific error
|
||||
is available on the error. See the docs for more details.
|
||||
- Issue #25: Export the function RunFromDir which runs VCS commands from the
|
||||
root of the local directory. This is useful for those that want to build and
|
||||
extend on top of the vcs package (thanks tony).
|
||||
- Issue #22: Added Ping command to test if remote location is present and
|
||||
accessible.
|
||||
|
||||
# 1.5.1 (2016-03-23)
|
||||
|
||||
- Fixing bug parsing some Git commit dates.
|
||||
|
||||
# 1.5.0 (2016-03-22)
|
||||
|
||||
- Add Travis CI testing for Go 1.6.
|
||||
- Issue #17: Add CommitInfo method allowing for a common way to get commit
|
||||
metadata from all VCS.
|
||||
- Autodetect types that have git@ or hg@ users.
|
||||
- Autodetect git+ssh, bzr+ssh, git, and svn+ssh scheme urls.
|
||||
- On Bitbucket for ssh style URLs retrieve the type from the URL. This allows
|
||||
for private repo type detection.
|
||||
- Issue #14: Autodetect ssh/scp style urls (thanks chonthu).
|
||||
|
||||
# 1.4.1 (2016-03-07)
|
||||
|
||||
- Fixes #16: some windows situations are unable to create parent directory.
|
||||
|
||||
# 1.4.0 (2016-02-15)
|
||||
|
||||
- Adding support for IBM JazzHub.
|
||||
|
||||
# 1.3.1 (2016-01-27)
|
||||
|
||||
- Issue #12: Failed to checkout Bzr repo when parent directory didn't
|
||||
exist (thanks cyrilleverrier).
|
||||
|
||||
# 1.3.0 (2015-11-09)
|
||||
|
||||
- Issue #9: Added Date method to get the date/time of latest commit (thanks kamilchm).
|
||||
|
||||
# 1.2.0 (2015-10-29)
|
||||
|
||||
- Adding IsDirty method to detect a checkout with uncommitted changes.
|
||||
|
||||
# 1.1.4 (2015-10-28)
|
||||
|
||||
- Fixed #8: Git IsReference not detecting branches that have not been checked
|
||||
out yet.
|
||||
|
||||
# 1.1.3 (2015-10-21)
|
||||
|
||||
- Fixing issue where there are multiple go-import statements for go redirects
|
||||
|
||||
# 1.1.2 (2015-10-20)
|
||||
|
||||
- Fixes #7: hg not checking out code when Get is called
|
||||
|
||||
# 1.1.1 (2015-10-20)
|
||||
|
||||
- Issue #6: Allow VCS commands to be run concurrently.
|
||||
|
||||
# 1.1.0 (2015-10-19)
|
||||
|
||||
- #5: Added output of failed command to returned errors.
|
||||
|
||||
# 1.0.0 (2015-10-06)
|
||||
|
||||
- Initial release.
|
|
@ -1,41 +0,0 @@
.PHONY: setup
setup:
	go get -u gopkg.in/alecthomas/gometalinter.v1
	gometalinter.v1 --install

.PHONY: test
test: validate lint
	@echo "==> Running tests"
	go test -v

.PHONY: validate
validate:
	# misspell finds the word adresář (used in bzr.go) as a misspelling of
	# address. It finds adres. An issue has been filed at
	# https://github.com/client9/misspell/issues/99. In the meantime adding
	# adres to the ignore list.
	@echo "==> Running static validations"
	@gometalinter.v1 \
	  --disable-all \
	  --linter "misspell:misspell -i adres -j 1 {path}/*.go:PATH:LINE:COL:MESSAGE" \
	  --enable deadcode \
	  --severity deadcode:error \
	  --enable gofmt \
	  --enable gosimple \
	  --enable ineffassign \
	  --enable misspell \
	  --enable vet \
	  --tests \
	  --vendor \
	  --deadline 60s \
	  ./... || exit_code=1

.PHONY: lint
lint:
	@echo "==> Running linters"
	@gometalinter.v1 \
	  --disable-all \
	  --enable golint \
	  --vendor \
	  --deadline 60s \
	  ./... || :
@ -1,48 +0,0 @@
# VCS Repository Management for Go

Manage repos in varying version control systems with ease through a common
interface.

[![Build Status](https://travis-ci.org/Masterminds/vcs.svg)](https://travis-ci.org/Masterminds/vcs) [![GoDoc](https://godoc.org/github.com/Masterminds/vcs?status.png)](https://godoc.org/github.com/Masterminds/vcs) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/vcs)](https://goreportcard.com/report/github.com/Masterminds/vcs)
[![Build status](https://ci.appveyor.com/api/projects/status/vg3cjc561q2trobm?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/vcs)

## Quick Usage

Quick usage:

    remote := "https://github.com/Masterminds/vcs"
    local, _ := ioutil.TempDir("", "go-vcs")
    repo, err := NewRepo(remote, local)

In this case `NewRepo` will detect the VCS is Git and return a `GitRepo`. All of
the repos implement the `Repo` interface with a common set of features between
them.
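
Continuing from the snippet above, a rough sketch of how that common interface is typically exercised (error handling abbreviated; `Get`, `Update`, and `Version` are the interface methods the tests in this changeset rely on, and the standard `log` and `fmt` packages are assumed to be imported):

    // Clone the repository if it is not on disk yet, bring it up to date,
    // then report the currently checked out revision.
    if err = repo.Get(); err != nil {
        log.Fatalf("unable to clone: %s", err)
    }
    if err = repo.Update(); err != nil {
        log.Fatalf("unable to update: %s", err)
    }
    rev, err := repo.Version()
    if err != nil {
        log.Fatalf("unable to read version: %s", err)
    }
    fmt.Println("checked out revision:", rev)
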

## Supported VCS

Git, SVN, Bazaar (Bzr), and Mercurial (Hg) are currently supported. They each
have their own type (e.g., `GitRepo`) that follow a simple naming pattern. Each
type implements the `Repo` interface and has a constructor (e.g., `NewGitRepo`).
The constructors have the same signature as `NewRepo`.

## Features

- Clone or checkout a repository depending on the version control system.
- Pull updates to a repository.
- Get the currently checked out commit id.
- Checkout a commit id, branch, or tag (depending on the availability in the VCS).
- Get a list of tags and branches in the VCS.
- Check if a string value is a valid reference within the VCS.
- More...

For more details see [the documentation](https://godoc.org/github.com/Masterminds/vcs).

## Motivation

The package `golang.org/x/tools/go/vcs` provides some valuable functionality
for working with packages in repositories in varying source control management
systems. That package, while useful and well tested, is designed with a specific
purpose in mind. Our uses went beyond the scope of that package. To implement
our scope we built a package that went beyond the functionality and scope
of `golang.org/x/tools/go/vcs`.
@ -1,26 +0,0 @@

version: build-{build}.{branch}

clone_folder: C:\gopath\src\github.com\Masterminds\vcs
shallow_clone: true

environment:
  GOPATH: C:\gopath

platform:
  - x64

install:
  - go version
  - go env
  - choco install -y bzr
  - set PATH=C:\Program Files (x86)\Bazaar;%PATH%
  - bzr --version

build_script:
  - go install -v ./...

test_script:
  - go test -v

deploy: off
@ -1,328 +0,0 @@
|
|||
package vcs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"time"
|
||||
//"log"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Canary test to ensure BzrRepo implements the Repo interface.
|
||||
var _ Repo = &BzrRepo{}
|
||||
|
||||
// To verify bzr is working we perform integration testing
|
||||
// with a known bzr service. Due to the long time of repeatedly checking out
|
||||
// repos these tests are structured to work together.
|
||||
|
||||
func TestBzr(t *testing.T) {
|
||||
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewBzrRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if repo.Vcs() != Bzr {
|
||||
t.Error("Bzr is detecting the wrong type")
|
||||
}
|
||||
|
||||
// Check the basic getters.
|
||||
if repo.Remote() != "https://launchpad.net/govcstestbzrrepo" {
|
||||
t.Error("Remote not set properly")
|
||||
}
|
||||
if repo.LocalPath() != tempDir+"/govcstestbzrrepo" {
|
||||
t.Error("Local disk location not set properly")
|
||||
}
|
||||
|
||||
//Logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
// Do an initial clone.
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unable to clone Bzr repo. Err was %s", err)
|
||||
}
|
||||
|
||||
// Verify Bzr repo is a Bzr repo
|
||||
if !repo.CheckLocal() {
|
||||
t.Error("Problem checking out repo or Bzr CheckLocal is not working")
|
||||
}
|
||||
|
||||
// Test internal lookup mechanism used outside of Bzr specific functionality.
|
||||
ltype, err := DetectVcsFromFS(tempDir + "/govcstestbzrrepo")
|
||||
if err != nil {
|
||||
t.Error("detectVcsFromFS unable to Bzr repo")
|
||||
}
|
||||
if ltype != Bzr {
|
||||
t.Errorf("detectVcsFromFS detected %s instead of Bzr type", ltype)
|
||||
}
|
||||
|
||||
// Test NewRepo on existing checkout. This should simply provide a working
|
||||
// instance without error based on looking at the local directory.
|
||||
nrepo, nrerr := NewRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
// Verify the right object is returned. It will check the local repo type.
|
||||
if !nrepo.CheckLocal() {
|
||||
t.Error("Wrong version returned from NewRepo")
|
||||
}
|
||||
|
||||
v, err := repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Bzr Current: %s", err)
|
||||
}
|
||||
if v != "-1" {
|
||||
t.Errorf("Current failed to detect Bzr on tip of branch. Got version: %s", v)
|
||||
}
|
||||
|
||||
err = repo.UpdateVersion("2")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to update Bzr repo version. Err was %s", err)
|
||||
}
|
||||
|
||||
// Use Version to verify we are on the right version.
|
||||
v, err = repo.Version()
|
||||
if v != "2" {
|
||||
t.Error("Error checking checked out Bzr version")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err = repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Bzr Current: %s", err)
|
||||
}
|
||||
if v != "2" {
|
||||
t.Errorf("Current failed to detect Bzr on rev 2 of branch. Got version: %s", v)
|
||||
}
|
||||
|
||||
// Use Date to verify we are on the right commit.
|
||||
d, err := repo.Date()
|
||||
if d.Format(longForm) != "2015-07-31 09:50:42 -0400" {
|
||||
t.Error("Error checking checked out Bzr commit date")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Perform an update.
|
||||
err = repo.Update()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err = repo.Version()
|
||||
if v != "3" {
|
||||
t.Error("Error checking checked out Bzr version")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
tags, err := repo.Tags()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if tags[0] != "1.0.0" {
|
||||
t.Error("Bzr tags is not reporting the correct version")
|
||||
}
|
||||
|
||||
tags, err = repo.TagsFromCommit("2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 0 {
|
||||
t.Error("Bzr is incorrectly returning tags for a commit")
|
||||
}
|
||||
|
||||
tags, err = repo.TagsFromCommit("3")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 1 || tags[0] != "1.0.0" {
|
||||
t.Error("Bzr is incorrectly returning tags for a commit")
|
||||
}
|
||||
|
||||
branches, err := repo.Branches()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(branches) != 0 {
|
||||
t.Error("Bzr is incorrectly returning branches")
|
||||
}
|
||||
|
||||
if !repo.IsReference("1.0.0") {
|
||||
t.Error("Bzr is reporting a reference is not one")
|
||||
}
|
||||
|
||||
if repo.IsReference("foo") {
|
||||
t.Error("Bzr is reporting a non-existent reference is one")
|
||||
}
|
||||
|
||||
if repo.IsDirty() {
|
||||
t.Error("Bzr incorrectly reporting dirty")
|
||||
}
|
||||
|
||||
ci, err := repo.CommitInfo("3")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if ci.Commit != "3" {
|
||||
t.Error("Bzr.CommitInfo wrong commit id")
|
||||
}
|
||||
if ci.Author != "Matt Farina <matt@mattfarina.com>" {
|
||||
t.Error("Bzr.CommitInfo wrong author")
|
||||
}
|
||||
if ci.Message != "Updated Readme with pointer." {
|
||||
t.Error("Bzr.CommitInfo wrong message")
|
||||
}
|
||||
ti, err := time.Parse(time.RFC1123Z, "Fri, 31 Jul 2015 09:51:37 -0400")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !ti.Equal(ci.Date) {
|
||||
t.Error("Bzr.CommitInfo wrong date")
|
||||
}
|
||||
|
||||
_, err = repo.CommitInfo("asdfasdfasdf")
|
||||
if err != ErrRevisionUnavailable {
|
||||
t.Error("Bzr didn't return expected ErrRevisionUnavailable")
|
||||
}
|
||||
|
||||
tempDir2, err := ioutil.TempDir("", "go-vcs-bzr-tests-export")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temp directory: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
exportDir := filepath.Join(tempDir2, "src")
|
||||
|
||||
err = repo.ExportDir(exportDir)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to export Bzr repo. Err was %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, "Readme.md"))
|
||||
if err != nil {
|
||||
t.Errorf("Error checking exported file in Bzr: %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs())))
|
||||
if err != nil {
|
||||
if found := os.IsNotExist(err); !found {
|
||||
t.Errorf("Error checking exported metadata in Bzr: %s", err)
|
||||
}
|
||||
} else {
|
||||
t.Error("Error checking Bzr metadata. It exists.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBzrCheckLocal(t *testing.T) {
|
||||
// Verify repo.CheckLocal fails for non-Bzr directories.
|
||||
// TestBzr is already checking on a valid repo
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, _ := NewBzrRepo("", tempDir)
|
||||
if repo.CheckLocal() {
|
||||
t.Error("Bzr CheckLocal does not identify non-Bzr location")
|
||||
}
|
||||
|
||||
// Test NewRepo when there's no local. This should simply provide a working
|
||||
// instance without error based on looking at the remote location.
|
||||
_, nrerr := NewRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBzrPing(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewBzrRepo("https://launchpad.net/govcstestbzrrepo", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping := repo.Ping()
|
||||
if !ping {
|
||||
t.Error("Bzr unable to ping working repo")
|
||||
}
|
||||
|
||||
repo, err = NewBzrRepo("https://launchpad.net/ihopethisneverexistsbecauseitshouldnt", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping = repo.Ping()
|
||||
if ping {
|
||||
t.Error("Bzr got a ping response from when it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBzrInit(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests")
|
||||
repoDir := tempDir + "/repo"
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewBzrRepo(repoDir, repoDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = repo.Init()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err := repo.Version()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if v != "0" {
|
||||
t.Errorf("Bzr Init returns wrong version: %s", v)
|
||||
}
|
||||
}
|
|
@ -1,36 +0,0 @@
package vcs

import (
	"errors"
	"testing"
)

func TestNewRemoteError(t *testing.T) {
	base := errors.New("Foo error")
	out := "This is a test"
	msg := "remote error msg"

	e := NewRemoteError(msg, base, out)

	switch e.(type) {
	case *RemoteError:
		// This is the right error type
	default:
		t.Error("Wrong error type returned from NewRemoteError")
	}
}

func TestNewLocalError(t *testing.T) {
	base := errors.New("Foo error")
	out := "This is a test"
	msg := "local error msg"

	e := NewLocalError(msg, base, out)

	switch e.(type) {
	case *LocalError:
		// This is the right error type
	default:
		t.Error("Wrong error type returned from NewLocalError")
	}
}
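
As a usage note (not part of the original file), callers can recover the wrapped error and the captured command output from these error types with a plain type assertion. A hedged sketch, mirroring the `dumplocal` helper in the Git tests later in this diff; `describeLocal` is a hypothetical name and `fmt` is assumed to be imported:

    // describeLocal shows how *LocalError exposes the original error and the
    // command output it captured via Original() and Out().
    func describeLocal(err error) string {
        if lerr, ok := err.(*LocalError); ok {
            return fmt.Sprintf("msg: %s, orig: %s, out: %s", lerr.Error(), lerr.Original(), lerr.Out())
        }
        return err.Error()
    }
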
|
|
@ -1,599 +0,0 @@
|
|||
package vcs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"time"
|
||||
//"log"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Canary test to ensure GitRepo implements the Repo interface.
|
||||
var _ Repo = &GitRepo{}
|
||||
|
||||
// To verify git is working we perform integration testing
|
||||
// with a known git service.
|
||||
|
||||
func TestGit(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-git-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewGitRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if repo.Vcs() != Git {
|
||||
t.Error("Git is detecting the wrong type")
|
||||
}
|
||||
|
||||
// Check the basic getters.
|
||||
if repo.Remote() != "https://github.com/Masterminds/VCSTestRepo" {
|
||||
t.Error("Remote not set properly")
|
||||
}
|
||||
if repo.LocalPath() != tempDir+"/VCSTestRepo" {
|
||||
t.Error("Local disk location not set properly")
|
||||
}
|
||||
|
||||
//Logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
// Do an initial clone.
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unable to clone Git repo. Err was %s", err)
|
||||
}
|
||||
|
||||
// Verify Git repo is a Git repo
|
||||
if !repo.CheckLocal() {
|
||||
t.Error("Problem checking out repo or Git CheckLocal is not working")
|
||||
}
|
||||
|
||||
// Test internal lookup mechanism used outside of Git specific functionality.
|
||||
ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo")
|
||||
if err != nil {
|
||||
t.Error("detectVcsFromFS unable to Git repo")
|
||||
}
|
||||
if ltype != Git {
|
||||
t.Errorf("detectVcsFromFS detected %s instead of Git type", ltype)
|
||||
}
|
||||
|
||||
// Test NewRepo on existing checkout. This should simply provide a working
|
||||
// instance without error based on looking at the local directory.
|
||||
nrepo, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
// Verify the right object is returned. It will check the local repo type.
|
||||
if !nrepo.CheckLocal() {
|
||||
t.Error("Wrong version returned from NewRepo")
|
||||
}
|
||||
|
||||
// Perform an update.
|
||||
err = repo.Update()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err := repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Git Current: %s", err)
|
||||
}
|
||||
if v != "master" {
|
||||
t.Errorf("Current failed to detect Git on tip of master. Got version: %s", v)
|
||||
}
|
||||
|
||||
// Set the version using the short hash.
|
||||
err = repo.UpdateVersion("806b07b")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to update Git repo version. Err was %s", err)
|
||||
}
|
||||
|
||||
// Once a ref has been checked out the repo is in a detached head state.
|
||||
// Trying to pull in an update in this state will cause an error. Update
|
||||
// should cleanly handle this. Pulling on a branch (tested elsewhere) and
|
||||
// skipping that here.
|
||||
err = repo.Update()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Use Version to verify we are on the right version.
|
||||
v, err = repo.Version()
|
||||
if v != "806b07b08faa21cfbdae93027904f80174679402" {
|
||||
t.Error("Error checking checked out Git version")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err = repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Git Current for ref: %s", err)
|
||||
}
|
||||
if v != "806b07b08faa21cfbdae93027904f80174679402" {
|
||||
t.Errorf("Current failed to detect Git on ref of branch. Got version: %s", v)
|
||||
}
|
||||
|
||||
// Use Date to verify we are on the right commit.
|
||||
d, err := repo.Date()
|
||||
if d.Format(longForm) != "2015-07-29 09:46:39 -0400" {
|
||||
t.Error("Error checking checked out Git commit date")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Verify that we can set the version something other than short hash
|
||||
err = repo.UpdateVersion("master")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to update Git repo version. Err was %s", err)
|
||||
}
|
||||
err = repo.UpdateVersion("806b07b08faa21cfbdae93027904f80174679402")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to update Git repo version. Err was %s", err)
|
||||
}
|
||||
v, err = repo.Version()
|
||||
if v != "806b07b08faa21cfbdae93027904f80174679402" {
|
||||
t.Error("Error checking checked out Git version")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
tags, err := repo.Tags()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
var hasRelTag bool
|
||||
var hasOffMasterTag bool
|
||||
|
||||
for _, tv := range tags {
|
||||
if tv == "1.0.0" {
|
||||
hasRelTag = true
|
||||
} else if tv == "off-master-tag" {
|
||||
hasOffMasterTag = true
|
||||
}
|
||||
}
|
||||
|
||||
if !hasRelTag {
|
||||
t.Error("Git tags unable to find release tag on master")
|
||||
}
|
||||
if !hasOffMasterTag {
|
||||
t.Error("Git tags did not fetch tags not on master")
|
||||
}
|
||||
|
||||
tags, err = repo.TagsFromCommit("74dd547545b7df4aa285bcec1b54e2b76f726395")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 0 {
|
||||
t.Error("Git is incorrectly returning tags for a commit")
|
||||
}
|
||||
|
||||
tags, err = repo.TagsFromCommit("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 1 || tags[0] != "1.0.0" {
|
||||
t.Error("Git is incorrectly returning tags for a commit")
|
||||
}
|
||||
|
||||
branches, err := repo.Branches()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
// The branches should be HEAD, master, other, and test.
|
||||
if branches[3] != "test" {
|
||||
t.Error("Git is incorrectly returning branches")
|
||||
}
|
||||
|
||||
if !repo.IsReference("1.0.0") {
|
||||
t.Error("Git is reporting a reference is not one")
|
||||
}
|
||||
|
||||
if repo.IsReference("foo") {
|
||||
t.Error("Git is reporting a non-existent reference is one")
|
||||
}
|
||||
|
||||
if repo.IsDirty() {
|
||||
t.Error("Git incorrectly reporting dirty")
|
||||
}
|
||||
|
||||
ci, err := repo.CommitInfo("806b07b08faa21cfbdae93027904f80174679402")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if ci.Commit != "806b07b08faa21cfbdae93027904f80174679402" {
|
||||
t.Error("Git.CommitInfo wrong commit id")
|
||||
}
|
||||
if ci.Author != "Matt Farina <matt@mattfarina.com>" {
|
||||
t.Error("Git.CommitInfo wrong author")
|
||||
}
|
||||
if ci.Message != "Update README.md" {
|
||||
t.Error("Git.CommitInfo wrong message")
|
||||
}
|
||||
ti, err := time.Parse(time.RFC1123Z, "Wed, 29 Jul 2015 09:46:39 -0400")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !ti.Equal(ci.Date) {
|
||||
t.Error("Git.CommitInfo wrong date")
|
||||
}
|
||||
|
||||
_, err = repo.CommitInfo("asdfasdfasdf")
|
||||
if err != ErrRevisionUnavailable {
|
||||
t.Error("Git didn't return expected ErrRevisionUnavailable")
|
||||
}
|
||||
|
||||
tempDir2, err := ioutil.TempDir("", "go-vcs-git-tests-export")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temp directory: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
exportDir := filepath.Join(tempDir2, "src")
|
||||
|
||||
err = repo.ExportDir(exportDir)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to export Git repo. Err was %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, "README.md"))
|
||||
if err != nil {
|
||||
t.Errorf("Error checking exported file in Git: %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs())))
|
||||
if err != nil {
|
||||
if found := os.IsNotExist(err); !found {
|
||||
t.Errorf("Error checking exported metadata in Git: %s", err)
|
||||
}
|
||||
} else {
|
||||
t.Error("Error checking Git metadata. It exists.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitCheckLocal(t *testing.T) {
|
||||
// Verify repo.CheckLocal fails for non-Git directories.
|
||||
// TestGit is already checking on a valid repo
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-git-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, _ := NewGitRepo("", tempDir)
|
||||
if repo.CheckLocal() {
|
||||
t.Error("Git CheckLocal does not identify non-Git location")
|
||||
}
|
||||
|
||||
// Test NewRepo when there's no local. This should simply provide a working
|
||||
// instance without error based on looking at the remote location.
|
||||
_, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitPing(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-git-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewGitRepo("https://github.com/Masterminds/VCSTestRepo", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping := repo.Ping()
|
||||
if !ping {
|
||||
t.Error("Git unable to ping working repo")
|
||||
}
|
||||
|
||||
repo, err = NewGitRepo("https://github.com/Masterminds/ihopethisneverexistsbecauseitshouldnt", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping = repo.Ping()
|
||||
if ping {
|
||||
t.Error("Git got a ping response from when it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitInit(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-git-tests")
|
||||
repoDir := tempDir + "/repo"
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewGitRepo(repoDir, repoDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = repo.Init()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
_, err = repo.RunFromDir("git", "status")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitSubmoduleHandling(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-git-submodule-tests")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
dumplocal := func(err error) string {
|
||||
if terr, ok := err.(*LocalError); ok {
|
||||
return fmt.Sprintf("msg: %s\norig: %s\nout: %s", terr.Error(), terr.Original(), terr.Out())
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
subdirExists := func(dir ...string) bool {
|
||||
_, err := os.Stat(filepath.Join(append([]string{tempDir}, dir...)...))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Initial clone should get version with two submodules, each of which have
|
||||
// their own submodule
|
||||
repo, err := NewGitRepo("https://github.com/sdboyer/subm", tempDir)
|
||||
if err != nil {
|
||||
t.Fatal(dumplocal(err))
|
||||
}
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to clone Git repo. Err was %s", dumplocal(err))
|
||||
}
|
||||
|
||||
// Verify we are on the right version.
|
||||
v, err := repo.Version()
|
||||
if v != "18e3a5f6fc7f6d577e732e7a5ab2caf990efbf8f" {
|
||||
t.Fatalf("did not start from expected rev, tests could fail - bailing out (got %s)", v)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(dumplocal(err))
|
||||
}
|
||||
|
||||
if !subdirExists("subm1", ".git") {
|
||||
t.Fatal("subm1 submodule does not exist on initial clone/checkout")
|
||||
}
|
||||
if !subdirExists("subm1", "dep-test", ".git") {
|
||||
t.Fatal("dep-test submodule nested under subm1 does not exist on initial clone/checkout")
|
||||
}
|
||||
|
||||
if !subdirExists("subm-again", ".git") {
|
||||
t.Fatal("subm-again submodule does not exist on initial clone/checkout")
|
||||
}
|
||||
if !subdirExists("subm-again", "dep-test", ".git") {
|
||||
t.Fatal("dep-test submodule nested under subm-again does not exist on initial clone/checkout")
|
||||
}
|
||||
|
||||
// Now switch to version with no submodules, make sure they all go away
|
||||
err = repo.UpdateVersion("e677f82015f72ac1c8fafa66b5463163b3597af2")
|
||||
if err != nil {
|
||||
t.Fatalf("checking out needed version failed with err: %s", dumplocal(err))
|
||||
}
|
||||
|
||||
if subdirExists("subm1") {
|
||||
t.Fatal("checking out version without submodule did not clean up immediate submodules")
|
||||
}
|
||||
if subdirExists("subm1", "dep-test") {
|
||||
t.Fatal("checking out version without submodule did not clean up nested submodules")
|
||||
}
|
||||
if subdirExists("subm-again") {
|
||||
t.Fatal("checking out version without submodule did not clean up immediate submodules")
|
||||
}
|
||||
if subdirExists("subm-again", "dep-test") {
|
||||
t.Fatal("checking out version without submodule did not clean up nested submodules")
|
||||
}
|
||||
|
||||
err = repo.UpdateVersion("aaf7aa1bc4c3c682cc530eca8f80417088ee8540")
|
||||
if err != nil {
|
||||
t.Fatalf("checking out needed version failed with err: %s", dumplocal(err))
|
||||
}
|
||||
|
||||
if !subdirExists("subm1", ".git") {
|
||||
t.Fatal("checking out version with immediate submodule did not set up git subrepo")
|
||||
}
|
||||
|
||||
err = repo.UpdateVersion("6cc4669af468f3b4f16e7e96275ad01ade5b522f")
|
||||
if err != nil {
|
||||
t.Fatalf("checking out needed version failed with err: %s", dumplocal(err))
|
||||
}
|
||||
|
||||
if !subdirExists("subm1", "dep-test", ".git") {
|
||||
t.Fatal("checking out version with nested submodule did not set up nested git subrepo")
|
||||
}
|
||||
|
||||
err = repo.UpdateVersion("aaf7aa1bc4c3c682cc530eca8f80417088ee8540")
|
||||
if err != nil {
|
||||
t.Fatalf("checking out needed version failed with err: %s", dumplocal(err))
|
||||
}
|
||||
|
||||
if subdirExists("subm1", "dep-test") {
|
||||
t.Fatal("rolling back to version without nested submodule did not clean up the nested submodule")
|
||||
}
|
||||
|
||||
err = repo.UpdateVersion("18e3a5f6fc7f6d577e732e7a5ab2caf990efbf8f")
|
||||
if err != nil {
|
||||
t.Fatalf("checking out needed version failed with err: %s", dumplocal(err))
|
||||
}
|
||||
|
||||
if !subdirExists("subm1", ".git") {
|
||||
t.Fatal("subm1 submodule does not exist after switch from other commit")
|
||||
}
|
||||
if !subdirExists("subm1", "dep-test", ".git") {
|
||||
t.Fatal("dep-test submodule nested under subm1 does not exist after switch from other commit")
|
||||
}
|
||||
|
||||
if !subdirExists("subm-again", ".git") {
|
||||
t.Fatal("subm-again submodule does not exist after switch from other commit")
|
||||
}
|
||||
if !subdirExists("subm-again", "dep-test", ".git") {
|
||||
t.Fatal("dep-test submodule nested under subm-again does not exist after switch from other commit")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGitSubmoduleHandling2(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-git-submodule-tests2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewGitRepo("https://github.com/cloudfoundry/sonde-go", tempDir+"/VCSTestRepo2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if repo.Vcs() != Git {
|
||||
t.Error("Git is detecting the wrong type")
|
||||
}
|
||||
|
||||
// Check the basic getters.
|
||||
if repo.Remote() != "https://github.com/cloudfoundry/sonde-go" {
|
||||
t.Error("Remote not set properly")
|
||||
}
|
||||
if repo.LocalPath() != tempDir+"/VCSTestRepo2" {
|
||||
t.Error("Local disk location not set properly")
|
||||
}
|
||||
|
||||
//Logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
// Do an initial clone.
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unable to clone Git repo. Err was %s", err)
|
||||
}
|
||||
|
||||
// Verify Git repo is a Git repo
|
||||
if !repo.CheckLocal() {
|
||||
t.Error("Problem checking out repo or Git CheckLocal is not working")
|
||||
}
|
||||
|
||||
// Test internal lookup mechanism used outside of Git specific functionality.
|
||||
ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo2")
|
||||
if err != nil {
|
||||
t.Error("detectVcsFromFS unable to Git repo")
|
||||
}
|
||||
if ltype != Git {
|
||||
t.Errorf("detectVcsFromFS detected %s instead of Git type", ltype)
|
||||
}
|
||||
|
||||
// Test NewRepo on existing checkout. This should simply provide a working
|
||||
// instance without error based on looking at the local directory.
|
||||
nrepo, nrerr := NewRepo("https://github.com/cloudfoundry/sonde-go", tempDir+"/VCSTestRepo2")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
// Verify the right object is returned. It will check the local repo type.
|
||||
if !nrepo.CheckLocal() {
|
||||
t.Error("Wrong version returned from NewRepo")
|
||||
}
|
||||
|
||||
// Perform an update.
|
||||
err = repo.Update()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err := repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Git Current: %s", err)
|
||||
}
|
||||
if v != "master" {
|
||||
t.Errorf("Current failed to detect Git on tip of master. Got version: %s", v)
|
||||
}
|
||||
|
||||
|
||||
tempDir2, err := ioutil.TempDir("", "go-vcs-git-tests-export")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temp directory: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
exportDir := filepath.Join(tempDir2, "src")
|
||||
|
||||
err = repo.ExportDir(exportDir)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to export Git repo. Err was %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, "README.md"))
|
||||
if err != nil {
|
||||
t.Errorf("Error checking exported file in Git: %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, "definitions", "README.md"))
|
||||
if err != nil {
|
||||
t.Errorf("Error checking exported file in Git: %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs())))
|
||||
if err != nil {
|
||||
if found := os.IsNotExist(err); !found {
|
||||
t.Errorf("Error checking exported metadata in Git: %s", err)
|
||||
}
|
||||
} else {
|
||||
t.Error("Error checking Git metadata. It exists.")
|
||||
}
|
||||
}
|
|
@@ -1,8 +0,0 @@
|
|||
package: github.com/Masterminds/vcs
|
||||
homepage: https://github.com/Masterminds/vcs
|
||||
license: MIT
|
||||
owners:
|
||||
- name: Matt Farina
|
||||
email: matt@mattfarina.com
|
||||
homepage: https://www.mattfarina.com/
|
||||
import: []
|
|
@@ -1,332 +0,0 @@
|
|||
package vcs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
//"log"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Canary test to ensure HgRepo implements the Repo interface.
|
||||
var _ Repo = &HgRepo{}
|
||||
|
||||
// To verify hg is working we perform integration testing
|
||||
// with a known hg service.
|
||||
|
||||
func TestHg(t *testing.T) {
|
||||
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewHgRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if repo.Vcs() != Hg {
|
||||
t.Error("Hg is detecting the wrong type")
|
||||
}
|
||||
|
||||
// Check the basic getters.
|
||||
if repo.Remote() != "https://bitbucket.org/mattfarina/testhgrepo" {
|
||||
t.Error("Remote not set properly")
|
||||
}
|
||||
if repo.LocalPath() != tempDir+"/testhgrepo" {
|
||||
t.Error("Local disk location not set properly")
|
||||
}
|
||||
|
||||
//Logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
// Do an initial clone.
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unable to clone Hg repo. Err was %s", err)
|
||||
}
|
||||
|
||||
// Verify Hg repo is a Hg repo
|
||||
if !repo.CheckLocal() {
|
||||
t.Error("Problem checking out repo or Hg CheckLocal is not working")
|
||||
}
|
||||
|
||||
// Test internal lookup mechanism used outside of Hg specific functionality.
|
||||
ltype, err := DetectVcsFromFS(tempDir + "/testhgrepo")
|
||||
if err != nil {
|
||||
t.Error("detectVcsFromFS unable to Hg repo")
|
||||
}
|
||||
if ltype != Hg {
|
||||
t.Errorf("detectVcsFromFS detected %s instead of Hg type", ltype)
|
||||
}
|
||||
|
||||
// Test NewRepo on existing checkout. This should simply provide a working
|
||||
// instance without error based on looking at the local directory.
|
||||
nrepo, nrerr := NewRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
// Verify the right object is returned. It will check the local repo type.
|
||||
if !nrepo.CheckLocal() {
|
||||
t.Error("Wrong version returned from NewRepo")
|
||||
}
|
||||
|
||||
v, err := repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Hg Current: %s", err)
|
||||
}
|
||||
if v != "default" {
|
||||
t.Errorf("Current failed to detect Hg on tip of default. Got version: %s", v)
|
||||
}
|
||||
|
||||
// Set the version using the short hash.
|
||||
err = repo.UpdateVersion("a5494ba2177f")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to update Hg repo version. Err was %s", err)
|
||||
}
|
||||
|
||||
// Use Version to verify we are on the right version.
|
||||
v, err = repo.Version()
|
||||
if v != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" {
|
||||
t.Errorf("Error checking checked out Hg version: %s", v)
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err = repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Hg Current for ref: %s", err)
|
||||
}
|
||||
if v != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" {
|
||||
t.Errorf("Current failed to detect Hg on ref of branch. Got version: %s", v)
|
||||
}
|
||||
|
||||
// Use Date to verify we are on the right commit.
|
||||
d, err := repo.Date()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if d.Format(longForm) != "2015-07-30 16:14:08 -0400" {
|
||||
t.Error("Error checking checked out Hg commit date. Got wrong date:", d)
|
||||
}
|
||||
|
||||
// Perform an update.
|
||||
err = repo.Update()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err = repo.Version()
|
||||
if v != "9c6ccbca73e8a1351c834f33f57f1f7a0329ad35" {
|
||||
t.Errorf("Error checking checked out Hg version: %s", v)
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
tags, err := repo.Tags()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if tags[1] != "1.0.0" {
|
||||
t.Error("Hg tags is not reporting the correct version")
|
||||
}
|
||||
|
||||
tags, err = repo.TagsFromCommit("a5494ba2177f")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 0 {
|
||||
t.Error("Hg is incorrectly returning tags for a commit")
|
||||
}
|
||||
|
||||
tags, err = repo.TagsFromCommit("d680e82228d2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 1 || tags[0] != "1.0.0" {
|
||||
t.Error("Hg is incorrectly returning tags for a commit")
|
||||
}
|
||||
|
||||
branches, err := repo.Branches()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
// The test branch should be the first branch returned.
|
||||
if branches[0] != "test" {
|
||||
t.Error("Hg is incorrectly returning branches")
|
||||
}
|
||||
|
||||
if !repo.IsReference("1.0.0") {
|
||||
t.Error("Hg is reporting a reference is not one")
|
||||
}
|
||||
|
||||
if !repo.IsReference("test") {
|
||||
t.Error("Hg is reporting a reference is not one")
|
||||
}
|
||||
|
||||
if repo.IsReference("foo") {
|
||||
t.Error("Hg is reporting a non-existent reference is one")
|
||||
}
|
||||
|
||||
if repo.IsDirty() {
|
||||
t.Error("Hg incorrectly reporting dirty")
|
||||
}
|
||||
|
||||
ci, err := repo.CommitInfo("a5494ba2177f")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if ci.Commit != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" {
|
||||
t.Error("Hg.CommitInfo wrong commit id")
|
||||
}
|
||||
if ci.Author != "Matt Farina <matt@mattfarina.com>" {
|
||||
t.Error("Hg.CommitInfo wrong author")
|
||||
}
|
||||
if ci.Message != "A commit" {
|
||||
t.Error("Hg.CommitInfo wrong message")
|
||||
}
|
||||
|
||||
ti := time.Unix(1438287248, 0)
|
||||
if !ti.Equal(ci.Date) {
|
||||
t.Error("Hg.CommitInfo wrong date")
|
||||
}
|
||||
|
||||
_, err = repo.CommitInfo("asdfasdfasdf")
|
||||
if err != ErrRevisionUnavailable {
|
||||
t.Error("Hg didn't return expected ErrRevisionUnavailable")
|
||||
}
|
||||
|
||||
tempDir2, err := ioutil.TempDir("", "go-vcs-hg-tests-export")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temp directory: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
exportDir := filepath.Join(tempDir2, "src")
|
||||
|
||||
err = repo.ExportDir(exportDir)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to export Hg repo. Err was %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, "Readme.md"))
|
||||
if err != nil {
|
||||
t.Errorf("Error checking exported file in Hg: %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs())))
|
||||
if err != nil {
|
||||
if found := os.IsNotExist(err); !found {
|
||||
t.Errorf("Error checking exported metadata in Hg: %s", err)
|
||||
}
|
||||
} else {
|
||||
t.Error("Error checking Hg metadata. It exists.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHgCheckLocal(t *testing.T) {
|
||||
// Verify repo.CheckLocal fails for non-Hg directories.
|
||||
// TestHg is already checking on a valid repo
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, _ := NewHgRepo("", tempDir)
|
||||
if repo.CheckLocal() {
|
||||
t.Error("Hg CheckLocal does not identify non-Hg location")
|
||||
}
|
||||
|
||||
// Test NewRepo when there's no local. This should simply provide a working
|
||||
// instance without error based on looking at the remote location.
|
||||
_, nrerr := NewRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHgPing(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewHgRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping := repo.Ping()
|
||||
if !ping {
|
||||
t.Error("Hg unable to ping working repo")
|
||||
}
|
||||
|
||||
repo, err = NewHgRepo("https://bitbucket.org/mattfarina/ihopethisneverexistsbecauseitshouldnt", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping = repo.Ping()
|
||||
if ping {
|
||||
t.Error("Hg got a ping response from when it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHgInit(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests")
|
||||
repoDir := tempDir + "/repo"
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewHgRepo(repoDir, repoDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = repo.Init()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err := repo.Version()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !strings.HasPrefix(v, "000000") {
|
||||
t.Errorf("Hg Init reporting wrong initial version: %s", v)
|
||||
}
|
||||
}
|
|
@@ -1,74 +0,0 @@
|
|||
package vcs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func ExampleNewRepo() {
|
||||
remote := "https://github.com/Masterminds/vcs"
|
||||
local, _ := ioutil.TempDir("", "go-vcs")
|
||||
repo, _ := NewRepo(remote, local)
|
||||
// Returns: instance of GitRepo
|
||||
|
||||
repo.Vcs()
|
||||
// Returns Git as this is a Git repo
|
||||
|
||||
err := repo.Get()
|
||||
// Pulls down a repo, or a checkout in the case of SVN, and returns an
|
||||
// error if that didn't happen successfully.
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
err = repo.UpdateVersion("master")
|
||||
// Checks out a specific version. In most cases this can be a commit id,
|
||||
// branch, or tag.
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTypeSwitch(t *testing.T) {
|
||||
|
||||
// To test repo type switching we checkout as SVN and then try to get it as
|
||||
// a git repo afterwards.
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+string(os.PathSeparator)+"VCSTestRepo")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unable to checkout SVN repo for repo switching tests. Err was %s", err)
|
||||
}
|
||||
|
||||
_, err = NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+string(os.PathSeparator)+"VCSTestRepo")
|
||||
if err != ErrWrongVCS {
|
||||
t.Errorf("Not detecting repo switch from SVN to Git")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDepInstalled(t *testing.T) {
|
||||
i := depInstalled("git")
|
||||
if !i {
|
||||
t.Error("depInstalled not finding installed dep.")
|
||||
}
|
||||
|
||||
i = depInstalled("thisreallyisntinstalled")
|
||||
if i {
|
||||
t.Error("depInstalled finding not installed dep.")
|
||||
}
|
||||
}
|
|
@@ -1,337 +0,0 @@
|
|||
package vcs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"time"
|
||||
//"log"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// To verify svn is working we perform integration testing
|
||||
// with a known svn service.
|
||||
|
||||
// Canary test to ensure SvnRepo implements the Repo interface.
|
||||
var _ Repo = &SvnRepo{}
|
||||
|
||||
func TestSvn(t *testing.T) {
|
||||
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+string(os.PathSeparator)+"VCSTestRepo")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if repo.Vcs() != Svn {
|
||||
t.Error("Svn is detecting the wrong type")
|
||||
}
|
||||
|
||||
// Check the basic getters.
|
||||
if repo.Remote() != "https://github.com/Masterminds/VCSTestRepo/trunk" {
|
||||
t.Error("Remote not set properly")
|
||||
}
|
||||
if repo.LocalPath() != tempDir+string(os.PathSeparator)+"VCSTestRepo" {
|
||||
t.Error("Local disk location not set properly")
|
||||
}
|
||||
|
||||
//Logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
// Do an initial checkout.
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unable to checkout SVN repo. Err was %s", err)
|
||||
}
|
||||
|
||||
// Verify SVN repo is a SVN repo
|
||||
if !repo.CheckLocal() {
|
||||
t.Error("Problem checking out repo or SVN CheckLocal is not working")
|
||||
}
|
||||
|
||||
// Verify an incorrect remote is caught when NewSvnRepo is used on an existing location
|
||||
_, nrerr := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/unknownbranch", tempDir+"/VCSTestRepo")
|
||||
if nrerr != ErrWrongRemote {
|
||||
t.Error("ErrWrongRemote was not triggered for SVN")
|
||||
}
|
||||
|
||||
// Test internal lookup mechanism used outside of Hg specific functionality.
|
||||
ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo")
|
||||
if err != nil {
|
||||
t.Error("detectVcsFromFS unable to Svn repo")
|
||||
}
|
||||
if ltype != Svn {
|
||||
t.Errorf("detectVcsFromFS detected %s instead of Svn type", ltype)
|
||||
}
|
||||
|
||||
// Commenting out auto-detection tests for SVN. NewRepo automatically detects
|
||||
// GitHub to be a Git repo and that's an issue for this test. Need an
|
||||
// SVN host that can autodetect from before using this test again.
|
||||
//
|
||||
// Test NewRepo on existing checkout. This should simply provide a working
|
||||
// instance without error based on looking at the local directory.
|
||||
// nrepo, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo")
|
||||
// if nrerr != nil {
|
||||
// t.Error(nrerr)
|
||||
// }
|
||||
// // Verify the right object is returned. It will check the local repo type.
|
||||
// if nrepo.CheckLocal() == false {
|
||||
// t.Error("Wrong version returned from NewRepo")
|
||||
// }
|
||||
|
||||
v, err := repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Svn Current: %s", err)
|
||||
}
|
||||
if v != "HEAD" {
|
||||
t.Errorf("Current failed to detect Svn on HEAD. Got version: %s", v)
|
||||
}
|
||||
|
||||
// Update the version to a previous version.
|
||||
err = repo.UpdateVersion("r2")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to update SVN repo version. Err was %s", err)
|
||||
}
|
||||
|
||||
// Use Version to verify we are on the right version.
|
||||
v, err = repo.Version()
|
||||
if v != "2" {
|
||||
t.Error("Error checking checked SVN out version")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err = repo.Current()
|
||||
if err != nil {
|
||||
t.Errorf("Error trying Svn Current for ref: %s", err)
|
||||
}
|
||||
if v != "2" {
|
||||
t.Errorf("Current failed to detect Svn on HEAD. Got version: %s", v)
|
||||
}
|
||||
|
||||
// Perform an update which should take us back to the latest version.
|
||||
err = repo.Update()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Make sure we are on a newer version because of the update.
|
||||
v, err = repo.Version()
|
||||
if v == "2" {
|
||||
t.Error("Error with version. Still on old version. Update failed")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Use Date to verify we are on the right commit.
|
||||
d, err := repo.Date()
|
||||
if d.Format(longForm) != "2015-07-29 13:47:03 +0000" {
|
||||
t.Error("Error checking checked out Svn commit date")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
tags, err := repo.Tags()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 0 {
|
||||
t.Error("Svn is incorrectly returning tags")
|
||||
}
|
||||
|
||||
tags, err = repo.TagsFromCommit("2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(tags) != 0 {
|
||||
t.Error("Svn is incorrectly returning tags for a commit")
|
||||
}
|
||||
|
||||
branches, err := repo.Branches()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(branches) != 0 {
|
||||
t.Error("Svn is incorrectly returning branches")
|
||||
}
|
||||
|
||||
if !repo.IsReference("r4") {
|
||||
t.Error("Svn is reporting a reference is not one")
|
||||
}
|
||||
|
||||
if repo.IsReference("55") {
|
||||
t.Error("Svn is reporting a non-existent reference is one")
|
||||
}
|
||||
|
||||
if repo.IsDirty() {
|
||||
t.Error("Svn incorrectly reporting dirty")
|
||||
}
|
||||
|
||||
ci, err := repo.CommitInfo("2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if ci.Commit != "2" {
|
||||
t.Error("Svn.CommitInfo wrong commit id")
|
||||
}
|
||||
if ci.Author != "matt.farina" {
|
||||
t.Error("Svn.CommitInfo wrong author")
|
||||
}
|
||||
if ci.Message != "Update README.md" {
|
||||
t.Error("Svn.CommitInfo wrong message")
|
||||
}
|
||||
ti, err := time.Parse(time.RFC3339Nano, "2015-07-29T13:46:20.000000Z")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !ti.Equal(ci.Date) {
|
||||
t.Error("Svn.CommitInfo wrong date")
|
||||
}
|
||||
|
||||
_, err = repo.CommitInfo("555555555")
|
||||
if err != ErrRevisionUnavailable {
|
||||
t.Error("Svn didn't return expected ErrRevisionUnavailable")
|
||||
}
|
||||
|
||||
tempDir2, err := ioutil.TempDir("", "go-vcs-svn-tests-export")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temp directory: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
exportDir := filepath.Join(tempDir2, "src")
|
||||
|
||||
err = repo.ExportDir(exportDir)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to export Svn repo. Err was %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, "README.md"))
|
||||
if err != nil {
|
||||
t.Errorf("Error checking exported file in Svn: %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs())))
|
||||
if err != nil {
|
||||
if found := os.IsNotExist(err); !found {
|
||||
t.Errorf("Error checking exported metadata in Svn: %s", err)
|
||||
}
|
||||
} else {
|
||||
t.Error("Error checking Svn metadata. It exists.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSvnCheckLocal(t *testing.T) {
|
||||
// Verify repo.CheckLocal fails for non-SVN directories.
|
||||
// TestSvn is already checking on a valid repo
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, _ := NewSvnRepo("", tempDir)
|
||||
if repo.CheckLocal() {
|
||||
t.Error("SVN CheckLocal does not identify non-SVN location")
|
||||
}
|
||||
|
||||
// Test NewRepo when there's no local. This should simply provide a working
|
||||
// instance without error based on looking at the remote location.
|
||||
_, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo")
|
||||
if nrerr != nil {
|
||||
t.Error(nrerr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSvnPing(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping := repo.Ping()
|
||||
if !ping {
|
||||
t.Error("Svn unable to ping working repo")
|
||||
}
|
||||
|
||||
repo, err = NewSvnRepo("https://github.com/Masterminds/ihopethisneverexistsbecauseitshouldnt", tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ping = repo.Ping()
|
||||
if ping {
|
||||
t.Error("Svn got a ping response from when it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSvnInit(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
|
||||
remoteDir := tempDir + string(os.PathSeparator) + "remoteDir"
|
||||
localDir := tempDir + string(os.PathSeparator) + "localDir"
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
repo, err := NewSvnRepo(remoteDir, localDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = repo.Init()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = repo.Get()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
v, err := repo.Version()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if v != "0" {
|
||||
t.Errorf("Svn Init returns wrong version: %s", v)
|
||||
}
|
||||
}
|
|
@@ -1,137 +0,0 @@
|
|||
package vcs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestVCSLookup(t *testing.T) {
|
||||
// TODO: Expand to make sure it detected the right vcs.
|
||||
urlList := map[string]struct {
|
||||
work bool
|
||||
t Type
|
||||
}{
|
||||
"https://github.com/masterminds": {work: false, t: Git},
|
||||
"https://github.com/Masterminds/VCSTestRepo": {work: true, t: Git},
|
||||
"https://bitbucket.org/mattfarina/testhgrepo": {work: true, t: Hg},
|
||||
"https://bitbucket.org/mattfarina/repo-does-not-exist": {work: false, t: Hg},
|
||||
"https://bitbucket.org/mattfarina/private-repo-for-vcs-testing": {work: false, t: Hg},
|
||||
"https://launchpad.net/govcstestbzrrepo/trunk": {work: true, t: Bzr},
|
||||
"https://launchpad.net/~mattfarina/+junk/mygovcstestbzrrepo": {work: true, t: Bzr},
|
||||
"https://launchpad.net/~mattfarina/+junk/mygovcstestbzrrepo/trunk": {work: true, t: Bzr},
|
||||
"https://git.launchpad.net/govcstestgitrepo": {work: true, t: Git},
|
||||
"https://git.launchpad.net/~mattfarina/+git/mygovcstestgitrepo": {work: true, t: Git},
|
||||
"https://hub.jazz.net/git/user1/pkgname": {work: true, t: Git},
|
||||
"https://hub.jazz.net/git/user1/pkgname/subpkg/subpkg/subpkg": {work: true, t: Git},
|
||||
"https://hubs.jazz.net/git/user1/pkgname": {work: false, t: Git},
|
||||
"https://example.com/foo/bar.git": {work: true, t: Git},
|
||||
"https://example.com/foo/bar.svn": {work: true, t: Svn},
|
||||
"https://example.com/foo/bar/baz.bzr": {work: true, t: Bzr},
|
||||
"https://example.com/foo/bar/baz.hg": {work: true, t: Hg},
|
||||
"https://gopkg.in/tomb.v1": {work: true, t: Git},
|
||||
"https://golang.org/x/net": {work: true, t: Git},
|
||||
"https://speter.net/go/exp/math/dec/inf": {work: true, t: Git},
|
||||
"https://git.openstack.org/foo/bar": {work: true, t: Git},
|
||||
"git@github.com:Masterminds/vcs.git": {work: true, t: Git},
|
||||
"git@example.com:foo.git": {work: true, t: Git},
|
||||
"ssh://hg@bitbucket.org/mattfarina/testhgrepo": {work: true, t: Hg},
|
||||
"git@bitbucket.org:mattfarina/glide-bitbucket-example.git": {work: true, t: Git},
|
||||
"git+ssh://example.com/foo/bar": {work: true, t: Git},
|
||||
"git://example.com/foo/bar": {work: true, t: Git},
|
||||
"bzr+ssh://example.com/foo/bar": {work: true, t: Bzr},
|
||||
"svn+ssh://example.com/foo/bar": {work: true, t: Svn},
|
||||
"git@example.com:foo/bar": {work: true, t: Git},
|
||||
"hg@example.com:foo/bar": {work: true, t: Hg},
|
||||
}
|
||||
|
||||
for u, c := range urlList {
|
||||
ty, _, err := detectVcsFromRemote(u)
|
||||
if err == nil && !c.work {
|
||||
t.Errorf("Error detecting VCS from URL(%s)", u)
|
||||
}
|
||||
|
||||
if err == ErrCannotDetectVCS && c.work {
|
||||
t.Errorf("Error detecting VCS from URL(%s)", u)
|
||||
}
|
||||
|
||||
if err != nil && c.work {
|
||||
t.Errorf("Error detecting VCS from URL(%s): %s", u, err)
|
||||
}
|
||||
|
||||
if err != nil &&
|
||||
err != ErrCannotDetectVCS &&
|
||||
!strings.HasSuffix(err.Error(), "Not Found") &&
|
||||
!strings.HasSuffix(err.Error(), "Access Denied") &&
|
||||
!c.work {
|
||||
t.Errorf("Unexpected error returned (%s): %s", u, err)
|
||||
}
|
||||
|
||||
if c.work && ty != c.t {
|
||||
t.Errorf("Incorrect VCS type returned(%s)", u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestVCSFileLookup(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "go-vcs-file-lookup-tests")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
err = os.RemoveAll(tempDir)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = exec.Command("git", "init", tempDir).CombinedOutput()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// On Windows it should be file:// followed by /C:\for\bar. That / before
|
||||
// the drive needs to be included in testing.
|
||||
var pth string
|
||||
if runtime.GOOS == "windows" {
|
||||
pth = "file:///" + tempDir
|
||||
} else {
|
||||
pth = "file://" + tempDir
|
||||
}
|
||||
ty, _, err := detectVcsFromRemote(pth)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Unable to detect file:// path: %s", err)
|
||||
}
|
||||
|
||||
if ty != Git {
|
||||
t.Errorf("Detected wrong type from file:// path. Found type %v", ty)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotFound(t *testing.T) {
|
||||
_, _, err := detectVcsFromRemote("https://mattfarina.com/notfound")
|
||||
if err == nil || !strings.HasSuffix(err.Error(), " Not Found") {
|
||||
t.Errorf("Failed to find not found repo")
|
||||
}
|
||||
|
||||
_, err = NewRepo("https://mattfarina.com/notfound", "")
|
||||
if err == nil || !strings.HasSuffix(err.Error(), " Not Found") {
|
||||
t.Errorf("Failed to find not found repo")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccessDenied(t *testing.T) {
|
||||
_, _, err := detectVcsFromRemote("https://bitbucket.org/mattfarina/private-repo-for-vcs-testing")
|
||||
if err == nil || err.Error() != "Access Denied" {
|
||||
t.Errorf("Failed to detect access denied")
|
||||
}
|
||||
|
||||
_, err = NewRepo("https://bitbucket.org/mattfarina/private-repo-for-vcs-testing", "")
|
||||
if err == nil || err.Error() != "Access Denied" {
|
||||
t.Errorf("Failed to detect access denied")
|
||||
}
|
||||
}
|
|
@@ -1,22 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
|
@@ -1,3 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- tip
|
|
@@ -1,38 +0,0 @@
|
|||
go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix)
|
||||
=========
|
||||
|
||||
Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
|
||||
The package only provides a single `Tree` implementation, optimized for sparse nodes.
|
||||
|
||||
As a radix tree, it provides the following:
|
||||
* O(k) operations. In many cases, this can be faster than a hash table since
|
||||
the hash function is an O(k) operation, and hash tables have very poor cache locality.
|
||||
* Minimum / Maximum value lookups
|
||||
* Ordered iteration
|
||||
|
||||
For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
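A hedged sketch of the minimum/maximum lookups and ordered iteration listed above; the keys and values are invented for illustration, and `NewFromMap`, `Minimum`, `Maximum`, and `Walk` are used with the same signatures as in the package's own tests:

```go
// Build a small tree from a map, then exercise the ordered operations.
r := radix.NewFromMap(map[string]interface{}{
	"alpha": 1,
	"beta":  2,
	"gamma": 3,
})

// Minimum and Maximum return the lexically smallest and largest keys.
minKey, _, _ := r.Minimum() // "alpha"
maxKey, _, _ := r.Maximum() // "gamma"
fmt.Println(minKey, maxKey)

// Walk visits keys in sorted order; returning true from the callback
// stops the walk early.
r.Walk(func(k string, v interface{}) bool {
	fmt.Println(k, v)
	return false
})
```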
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
||||
The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).
|
||||
|
||||
Example
|
||||
=======
|
||||
|
||||
Below is a simple example of usage
|
||||
|
||||
```go
|
||||
// Create a tree
|
||||
r := radix.New()
|
||||
r.Insert("foo", 1)
|
||||
r.Insert("bar", 2)
|
||||
r.Insert("foobar", 2)
|
||||
|
||||
// Find the longest prefix match
|
||||
m, _, _ := r.LongestPrefix("foozip")
|
||||
if m != "foo" {
|
||||
panic("should be foo")
|
||||
}
|
||||
```
|
||||
|
|
@@ -1,319 +0,0 @@
|
|||
package radix
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRadix(t *testing.T) {
|
||||
var min, max string
|
||||
inp := make(map[string]interface{})
|
||||
for i := 0; i < 1000; i++ {
|
||||
gen := generateUUID()
|
||||
inp[gen] = i
|
||||
if gen < min || i == 0 {
|
||||
min = gen
|
||||
}
|
||||
if gen > max || i == 0 {
|
||||
max = gen
|
||||
}
|
||||
}
|
||||
|
||||
r := NewFromMap(inp)
|
||||
if r.Len() != len(inp) {
|
||||
t.Fatalf("bad length: %v %v", r.Len(), len(inp))
|
||||
}
|
||||
|
||||
r.Walk(func(k string, v interface{}) bool {
|
||||
println(k)
|
||||
return false
|
||||
})
|
||||
|
||||
for k, v := range inp {
|
||||
out, ok := r.Get(k)
|
||||
if !ok {
|
||||
t.Fatalf("missing key: %v", k)
|
||||
}
|
||||
if out != v {
|
||||
t.Fatalf("value mis-match: %v %v", out, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Check min and max
|
||||
outMin, _, _ := r.Minimum()
|
||||
if outMin != min {
|
||||
t.Fatalf("bad minimum: %v %v", outMin, min)
|
||||
}
|
||||
outMax, _, _ := r.Maximum()
|
||||
if outMax != max {
|
||||
t.Fatalf("bad maximum: %v %v", outMax, max)
|
||||
}
|
||||
|
||||
for k, v := range inp {
|
||||
out, ok := r.Delete(k)
|
||||
if !ok {
|
||||
t.Fatalf("missing key: %v", k)
|
||||
}
|
||||
if out != v {
|
||||
t.Fatalf("value mis-match: %v %v", out, v)
|
||||
}
|
||||
}
|
||||
if r.Len() != 0 {
|
||||
t.Fatalf("bad length: %v", r.Len())
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoot(t *testing.T) {
|
||||
r := New()
|
||||
_, ok := r.Delete("")
|
||||
if ok {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
_, ok = r.Insert("", true)
|
||||
if ok {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
val, ok := r.Get("")
|
||||
if !ok || val != true {
|
||||
t.Fatalf("bad: %v", val)
|
||||
}
|
||||
val, ok = r.Delete("")
|
||||
if !ok || val != true {
|
||||
t.Fatalf("bad: %v", val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
|
||||
r := New()
|
||||
|
||||
s := []string{"", "A", "AB"}
|
||||
|
||||
for _, ss := range s {
|
||||
r.Insert(ss, true)
|
||||
}
|
||||
|
||||
for _, ss := range s {
|
||||
_, ok := r.Delete(ss)
|
||||
if !ok {
|
||||
t.Fatalf("bad %q", ss)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLongestPrefix(t *testing.T) {
|
||||
r := New()
|
||||
|
||||
keys := []string{
|
||||
"",
|
||||
"foo",
|
||||
"foobar",
|
||||
"foobarbaz",
|
||||
"foobarbazzip",
|
||||
"foozip",
|
||||
}
|
||||
for _, k := range keys {
|
||||
r.Insert(k, nil)
|
||||
}
|
||||
if r.Len() != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out string
|
||||
}
|
||||
cases := []exp{
|
||||
{"a", ""},
|
||||
{"abc", ""},
|
||||
{"fo", ""},
|
||||
{"foo", "foo"},
|
||||
{"foob", "foo"},
|
||||
{"foobar", "foobar"},
|
||||
{"foobarba", "foobar"},
|
||||
{"foobarbaz", "foobarbaz"},
|
||||
{"foobarbazzi", "foobarbaz"},
|
||||
{"foobarbazzip", "foobarbazzip"},
|
||||
{"foozi", "foo"},
|
||||
{"foozip", "foozip"},
|
||||
{"foozipzap", "foozip"},
|
||||
}
|
||||
for _, test := range cases {
|
||||
m, _, ok := r.LongestPrefix(test.inp)
|
||||
if !ok {
|
||||
t.Fatalf("no match: %v", test)
|
||||
}
|
||||
if m != test.out {
|
||||
t.Fatalf("mis-match: %v %v", m, test)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkPrefix(t *testing.T) {
|
||||
r := New()
|
||||
|
||||
keys := []string{
|
||||
"foobar",
|
||||
"foo/bar/baz",
|
||||
"foo/baz/bar",
|
||||
"foo/zip/zap",
|
||||
"zipzap",
|
||||
}
|
||||
for _, k := range keys {
|
||||
r.Insert(k, nil)
|
||||
}
|
||||
if r.Len() != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out []string
|
||||
}
|
||||
cases := []exp{
|
||||
{
|
||||
"f",
|
||||
[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
|
||||
},
|
||||
{
|
||||
"foo",
|
||||
[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
|
||||
},
|
||||
{
|
||||
"foob",
|
||||
[]string{"foobar"},
|
||||
},
|
||||
{
|
||||
"foo/",
|
||||
[]string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
|
||||
},
|
||||
{
|
||||
"foo/b",
|
||||
[]string{"foo/bar/baz", "foo/baz/bar"},
|
||||
},
|
||||
{
|
||||
"foo/ba",
|
||||
[]string{"foo/bar/baz", "foo/baz/bar"},
|
||||
},
|
||||
{
|
||||
"foo/bar",
|
||||
[]string{"foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"foo/bar/baz",
|
||||
[]string{"foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"foo/bar/bazoo",
|
||||
[]string{},
|
||||
},
|
||||
{
|
||||
"z",
|
||||
[]string{"zipzap"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
out := []string{}
|
||||
fn := func(s string, v interface{}) bool {
|
||||
out = append(out, s)
|
||||
return false
|
||||
}
|
||||
r.WalkPrefix(test.inp, fn)
|
||||
sort.Strings(out)
|
||||
sort.Strings(test.out)
|
||||
if !reflect.DeepEqual(out, test.out) {
|
||||
t.Fatalf("mis-match: %v %v", out, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkPath(t *testing.T) {
|
||||
r := New()
|
||||
|
||||
keys := []string{
|
||||
"foo",
|
||||
"foo/bar",
|
||||
"foo/bar/baz",
|
||||
"foo/baz/bar",
|
||||
"foo/zip/zap",
|
||||
"zipzap",
|
||||
}
|
||||
for _, k := range keys {
|
||||
r.Insert(k, nil)
|
||||
}
|
||||
if r.Len() != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out []string
|
||||
}
|
||||
cases := []exp{
|
||||
{
|
||||
"f",
|
||||
[]string{},
|
||||
},
|
||||
{
|
||||
"foo",
|
||||
[]string{"foo"},
|
||||
},
|
||||
{
|
||||
"foo/",
|
||||
[]string{"foo"},
|
||||
},
|
||||
{
|
||||
"foo/ba",
|
||||
[]string{"foo"},
|
||||
},
|
||||
{
|
||||
"foo/bar",
|
||||
[]string{"foo", "foo/bar"},
|
||||
},
|
||||
{
|
||||
"foo/bar/baz",
|
||||
[]string{"foo", "foo/bar", "foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"foo/bar/bazoo",
|
||||
[]string{"foo", "foo/bar", "foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"z",
|
||||
[]string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
out := []string{}
|
||||
fn := func(s string, v interface{}) bool {
|
||||
out = append(out, s)
|
||||
return false
|
||||
}
|
||||
r.WalkPath(test.inp, fn)
|
||||
sort.Strings(out)
|
||||
sort.Strings(test.out)
|
||||
if !reflect.DeepEqual(out, test.out) {
|
||||
t.Fatalf("mis-match: %v %v", out, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// generateUUID is used to generate a random UUID
|
||||
func generateUUID() string {
|
||||
buf := make([]byte, 16)
|
||||
if _, err := crand.Read(buf); err != nil {
|
||||
panic(fmt.Errorf("failed to read random bytes: %v", err))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
|
||||
buf[0:4],
|
||||
buf[4:6],
|
||||
buf[6:8],
|
||||
buf[8:10],
|
||||
buf[10:16])
|
||||
}
|
|
@@ -1,4 +0,0 @@
|
|||
*.prof
|
||||
*.test
|
||||
*.swp
|
||||
/bin/
|
|
@@ -1,18 +0,0 @@
|
|||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
||||
|
||||
default: build
|
||||
|
||||
race:
|
||||
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
|
||||
# go get github.com/kisielk/errcheck
|
||||
errcheck:
|
||||
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
|
||||
|
||||
test:
|
||||
@go test -v -cover .
|
||||
@go test -v ./cmd/bolt
|
||||
|
||||
.PHONY: fmt test
|
|
@@ -1,916 +0,0 @@
|
|||
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
|
||||
====
|
||||
|
||||
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
|
||||
[LMDB project][lmdb]. The goal of the project is to provide a simple,
|
||||
fast, and reliable database for projects that don't require a full database
|
||||
server such as Postgres or MySQL.
|
||||
|
||||
Since Bolt is meant to be used as such a low-level piece of functionality,
|
||||
simplicity is key. The API will be small and only focus on getting values
|
||||
and setting values. That's it.
|
||||
|
||||
[hyc_symas]: https://twitter.com/hyc_symas
|
||||
[lmdb]: http://symas.com/mdb/
|
||||
|
||||
## Project Status
|
||||
|
||||
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
|
||||
test coverage and randomized black box testing are used to ensure database
|
||||
consistency and thread safety. Bolt is currently used in high-load production
|
||||
environments serving databases as large as 1TB. Many companies such as
|
||||
Shopify and Heroku use Bolt-backed services every day.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Getting Started](#getting-started)
|
||||
- [Installing](#installing)
|
||||
- [Opening a database](#opening-a-database)
|
||||
- [Transactions](#transactions)
|
||||
- [Read-write transactions](#read-write-transactions)
|
||||
- [Read-only transactions](#read-only-transactions)
|
||||
- [Batch read-write transactions](#batch-read-write-transactions)
|
||||
- [Managing transactions manually](#managing-transactions-manually)
|
||||
- [Using buckets](#using-buckets)
|
||||
- [Using key/value pairs](#using-keyvalue-pairs)
|
||||
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
|
||||
- [Iterating over keys](#iterating-over-keys)
|
||||
- [Prefix scans](#prefix-scans)
|
||||
- [Range scans](#range-scans)
|
||||
- [ForEach()](#foreach)
|
||||
- [Nested buckets](#nested-buckets)
|
||||
- [Database backups](#database-backups)
|
||||
- [Statistics](#statistics)
|
||||
- [Read-Only Mode](#read-only-mode)
|
||||
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
|
||||
- [Resources](#resources)
|
||||
- [Comparison with other databases](#comparison-with-other-databases)
|
||||
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
|
||||
- [LevelDB, RocksDB](#leveldb-rocksdb)
|
||||
- [LMDB](#lmdb)
|
||||
- [Caveats & Limitations](#caveats--limitations)
|
||||
- [Reading the Source](#reading-the-source)
|
||||
- [Other Projects Using Bolt](#other-projects-using-bolt)
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Installing
|
||||
|
||||
To start using Bolt, install Go and run `go get`:
|
||||
|
||||
```sh
|
||||
$ go get github.com/boltdb/bolt/...
|
||||
```
|
||||
|
||||
This will retrieve the library and install the `bolt` command line utility into
|
||||
your `$GOBIN` path.
|
||||
|
||||
|
||||
### Opening a database
|
||||
|
||||
The top-level object in Bolt is a `DB`. It is represented as a single file on
|
||||
your disk and represents a consistent snapshot of your data.
|
||||
|
||||
To open your database, simply use the `bolt.Open()` function:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Open the my.db data file in your current directory.
|
||||
// It will be created if it doesn't exist.
|
||||
db, err := bolt.Open("my.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Please note that Bolt obtains a file lock on the data file so multiple processes
|
||||
cannot open the same database at the same time. Opening an already open Bolt
|
||||
database will cause it to hang until the other process closes it. To prevent
|
||||
an indefinite wait you can pass a timeout option to the `Open()` function:
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
|
||||
```
|
||||
|
||||
|
||||
### Transactions
|
||||
|
||||
Bolt allows only one read-write transaction at a time but allows as many
|
||||
read-only transactions as you want at a time. Each transaction has a consistent
|
||||
view of the data as it existed when the transaction started.
|
||||
|
||||
Individual transactions and all objects created from them (e.g. buckets, keys)
|
||||
are not thread safe. To work with data in multiple goroutines you must start
|
||||
a transaction for each one or use locking to ensure only one goroutine accesses
|
||||
a transaction at a time. Creating a transaction from the `DB` is thread safe.
|
||||
|
||||
Read-only transactions and read-write transactions should not depend on one
|
||||
another and generally shouldn't be opened simultaneously in the same goroutine.
|
||||
This can cause a deadlock as the read-write transaction needs to periodically
|
||||
re-map the data file but it cannot do so while a read-only transaction is open.
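A minimal sketch of a pattern that respects those rules — every goroutine opens its own transaction, and read-only work is never started from inside a read-write closure. The `db` handle is an open `*bolt.DB` as in the earlier example; the bucket name and the `sync`/`log` imports are assumptions for illustration:

```go
var wg sync.WaitGroup

// Readers: each goroutine opens its own read-only transaction.
for i := 0; i < 4; i++ {
	wg.Add(1)
	go func() {
		defer wg.Done()
		db.View(func(tx *bolt.Tx) error {
			// read-only work against tx goes here
			return nil
		})
	}()
}

// Writer: a single read-write transaction, opened on its own and never
// nested inside one of the View closures above.
err := db.Update(func(tx *bolt.Tx) error {
	_, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
	return err
})
if err != nil {
	log.Fatal(err)
}
wg.Wait()
```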
|
||||
|
||||
|
||||
#### Read-write transactions
|
||||
|
||||
To start a read-write transaction, you can use the `DB.Update()` function:
|
||||
|
||||
```go
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Inside the closure, you have a consistent view of the database. You commit the
|
||||
transaction by returning `nil` at the end. You can also rollback the transaction
|
||||
at any point by returning an error. All database operations are allowed inside
|
||||
a read-write transaction.
|
||||
|
||||
Always check the return error as it will report any disk failures that can cause
|
||||
your transaction to not complete. If you return an error within your closure
|
||||
it will be passed through.
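As a brief sketch of that behaviour (the sentinel error and bucket name here are invented for illustration, and the `errors` import is assumed), returning a non-nil error from the closure discards every write made inside it and hands the same error value back from `DB.Update()`:

```go
var errTooBig = errors.New("value too large")

err := db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
	if err != nil {
		return err
	}
	if err := b.Put([]byte("blob"), make([]byte, 1<<20)); err != nil {
		return err
	}
	// Abort: everything written above is rolled back.
	return errTooBig
})
if err == errTooBig {
	// The Put never became visible to readers.
}
```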
|
||||
|
||||
|
||||
#### Read-only transactions
|
||||
|
||||
To start a read-only transaction, you can use the `DB.View()` function:
|
||||
|
||||
```go
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You also get a consistent view of the database within this closure, however,
|
||||
no mutating operations are allowed within a read-only transaction. You can only
|
||||
retrieve buckets, retrieve values, and copy the database within a read-only
|
||||
transaction.
|
||||
|
||||
|
||||
#### Batch read-write transactions
|
||||
|
||||
Each `DB.Update()` waits for disk to commit the writes. This overhead
|
||||
can be minimized by combining multiple updates with the `DB.Batch()`
|
||||
function:
|
||||
|
||||
```go
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Concurrent Batch calls are opportunistically combined into larger
|
||||
transactions. Batch is only useful when there are multiple goroutines
|
||||
calling it.
|
||||
|
||||
The trade-off is that `Batch` can call the given
|
||||
function multiple times, if parts of the transaction fail. The
|
||||
function must be idempotent and side effects must take effect only
|
||||
after a successful return from `DB.Batch()`.
|
||||
|
||||
For example: don't display messages from inside the function, instead
|
||||
set variables in the enclosing scope:
|
||||
|
||||
```go
|
||||
var id uint64
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
// Find last key in bucket, decode as bigendian uint64, increment
|
||||
// by one, encode back to []byte, and add new key.
|
||||
...
|
||||
id = newValue
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return ...
|
||||
}
|
||||
fmt.Println("Allocated ID %d", id)
|
||||
```
|
||||
|
||||
|
||||
#### Managing transactions manually
|
||||
|
||||
The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
|
||||
function. These helper functions will start the transaction, execute a function,
|
||||
and then safely close your transaction if an error is returned. This is the
|
||||
recommended way to use Bolt transactions.
|
||||
|
||||
However, sometimes you may want to manually start and end your transactions.
|
||||
You can use the `DB.Begin()` function directly but **please** be sure to close
|
||||
the transaction.
|
||||
|
||||
```go
|
||||
// Start a writable transaction.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Use the transaction...
|
||||
_, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the transaction and check for error.
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
The first argument to `DB.Begin()` is a boolean stating if the transaction
|
||||
should be writable.
|
||||
|
||||
|
||||
### Using buckets
|
||||
|
||||
Buckets are collections of key/value pairs within the database. All keys in a
|
||||
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
|
||||
function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket: %s", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You can also create a bucket only if it doesn't exist by using the
|
||||
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
|
||||
function for all your top-level buckets after you open your database so you can
|
||||
guarantee that they exist for future transactions.
|
||||
|
||||
To delete a bucket, simply call the `Tx.DeleteBucket()` function.
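A short sketch of both calls together (the bucket name is an assumption for illustration):

```go
err := db.Update(func(tx *bolt.Tx) error {
	// Create the bucket on first use, or fetch it if it already exists.
	if _, err := tx.CreateBucketIfNotExists([]byte("MyBucket")); err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	// ... and later, remove the bucket and every key inside it.
	return tx.DeleteBucket([]byte("MyBucket"))
})
if err != nil {
	log.Fatal(err)
}
```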
|
||||
|
||||
|
||||
### Using key/value pairs
|
||||
|
||||
To save a key/value pair to a bucket, use the `Bucket.Put()` function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
err := b.Put([]byte("answer"), []byte("42"))
|
||||
return err
|
||||
})
|
||||
```
|
||||
|
||||
This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
|
||||
bucket. To retrieve this value, we can use the `Bucket.Get()` function:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
v := b.Get([]byte("answer"))
|
||||
fmt.Printf("The answer is: %s\n", v)
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The `Get()` function does not return an error because its operation is
|
||||
guaranteed to work (unless there is some kind of system failure). If the key
|
||||
exists then it will return its byte slice value. If it doesn't exist then it
|
||||
will return `nil`. It's important to note that you can have a zero-length value
|
||||
set to a key which is different than the key not existing.
|
||||
|
||||
Use the `Bucket.Delete()` function to delete a key from the bucket.
|
||||
|
||||
Please note that values returned from `Get()` are only valid while the
|
||||
transaction is open. If you need to use a value outside of the transaction
|
||||
then you must use `copy()` to copy it to another byte slice.
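A sketch tying the two notes above together — distinguishing a missing key from a zero-length value, and copying the value so it survives the end of the transaction (`db`, the bucket, and the key are assumptions carried over from the earlier examples):

```go
var out []byte
err := db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	v := b.Get([]byte("answer"))
	if v == nil {
		// nil means the key does not exist at all.
		return nil
	}
	// v may legitimately be zero-length; either way it is only valid
	// inside this closure, so copy it before the transaction closes.
	out = make([]byte, len(v))
	copy(out, v)
	return nil
})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("copied value: %q\n", out)
```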
|
||||
|
||||
|
||||
### Autoincrementing integer for the bucket
|
||||
By using the `NextSequence()` function, you can let Bolt determine a sequence
|
||||
which can be used as the unique identifier for your key/value pairs. See the
|
||||
example below.
|
||||
|
||||
```go
|
||||
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
|
||||
func (s *Store) CreateUser(u *User) error {
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the users bucket.
|
||||
// This should be created when the DB is first opened.
|
||||
b := tx.Bucket([]byte("users"))
|
||||
|
||||
// Generate ID for the user.
|
||||
// This returns an error only if the Tx is closed or not writeable.
|
||||
// That can't happen in an Update() call so I ignore the error check.
|
||||
id, _ := b.NextSequence()
|
||||
u.ID = int(id)
|
||||
|
||||
// Marshal user data into bytes.
|
||||
buf, err := json.Marshal(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Persist bytes to users bucket.
|
||||
return b.Put(itob(u.ID), buf)
|
||||
})
|
||||
}
|
||||
|
||||
// itob returns an 8-byte big endian representation of v.
|
||||
func itob(v int) []byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, uint64(v))
|
||||
return b
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID int
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
### Iterating over keys
|
||||
|
||||
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
|
||||
iteration over these keys extremely fast. To iterate over keys we'll use a
|
||||
`Cursor`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
|
||||
c := b.Cursor()
|
||||
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The cursor allows you to move to a specific point in the list of keys and move
|
||||
forward or backward through the keys one at a time.
|
||||
|
||||
The following functions are available on the cursor:
|
||||
|
||||
```
|
||||
First() Move to the first key.
|
||||
Last() Move to the last key.
|
||||
Seek() Move to a specific key.
|
||||
Next() Move to the next key.
|
||||
Prev() Move to the previous key.
|
||||
```
|
||||
|
||||
Each of those functions has a return signature of `(key []byte, value []byte)`.
|
||||
When you have iterated to the end of the cursor then `Next()` will return a
|
||||
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
|
||||
before calling `Next()` or `Prev()`. If you do not seek to a position then
|
||||
these functions will return a `nil` key.
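For example, walking the keys backwards follows the same rule — seek with `Last()` first, then step with `Prev()` (a sketch, reusing the bucket name from the examples above):

```go
db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	// Position the cursor before stepping; calling Prev() without a
	// prior seek would only return nil keys.
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```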
|
||||
|
||||
During iteration, if the key is non-`nil` but the value is `nil`, that means
|
||||
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
|
||||
access the sub-bucket.
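A small sketch of that check inside a cursor loop (bucket names are assumptions for illustration):

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))

	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if v == nil {
			// A nil value means k names a nested bucket, not a plain key;
			// descend into it with b.Bucket(k) and its own Cursor().
			fmt.Printf("nested bucket: %s\n", k)
			continue
		}
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```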
|
||||
|
||||
|
||||
#### Prefix scans
|
||||
|
||||
To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
c := tx.Bucket([]byte("MyBucket")).Cursor()
|
||||
|
||||
prefix := []byte("1234")
|
||||
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
#### Range scans
|
||||
|
||||
Another common use case is scanning over a range such as a time range. If you
|
||||
use a sortable time encoding such as RFC3339 then you can query a specific
|
||||
date range like this:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume our events bucket exists and has RFC3339 encoded time keys.
|
||||
c := tx.Bucket([]byte("Events")).Cursor()
|
||||
|
||||
// Our time range spans the 90's decade.
|
||||
min := []byte("1990-01-01T00:00:00Z")
|
||||
max := []byte("2000-01-01T00:00:00Z")
|
||||
|
||||
// Iterate over the 90's.
|
||||
for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
|
||||
fmt.Printf("%s: %s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Note that, while RFC3339 is sortable, the Go implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
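As a small illustration, a helper like the hypothetical `timeKey` below keeps keys byte-sortable by sticking to the fixed-width RFC3339 layout:

```go
// timeKey encodes a time as a lexicographically sortable key.
// time.RFC3339 keeps byte order aligned with time order, whereas
// time.RFC3339Nano would not because its fractional digits vary in length.
func timeKey(t time.Time) []byte {
	return []byte(t.UTC().Format(time.RFC3339))
}
```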
|
||||
|
||||
|
||||
#### ForEach()
|
||||
|
||||
You can also use the function `ForEach()` if you know you'll be iterating over
|
||||
all the keys in a bucket:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
|
||||
b.ForEach(func(k, v []byte) error {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Please note that keys and values in `ForEach()` are only valid while
|
||||
the transaction is open. If you need to use a key or value outside of
|
||||
the transaction, you must use `copy()` to copy it to another byte
|
||||
slice.
|
||||
|
||||
### Nested buckets
|
||||
|
||||
You can also store a bucket in a key to create nested buckets. The API is the
|
||||
same as the bucket management API on the `DB` object:
|
||||
|
||||
```go
|
||||
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
|
||||
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
|
||||
func (*Bucket) DeleteBucket(key []byte) error
|
||||
```
|
||||
|
||||
Say you had a multi-tenant application where the root-level bucket was the accounts bucket. Inside this bucket is one bucket per account, keyed by a sequence-generated account ID. And inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
|
||||
|
||||
```go
|
||||
|
||||
// createUser creates a new user in the given account.
|
||||
func createUser(accountID int, u *User) error {
|
||||
// Start the transaction.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Retrieve the root bucket for the account.
|
||||
// Assume this has already been created when the account was set up.
|
||||
root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))
|
||||
|
||||
// Setup the users bucket.
|
||||
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Generate an ID for the new user.
|
||||
userID, err := bkt.NextSequence()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u.ID = userID
|
||||
|
||||
// Marshal and save the encoded user.
|
||||
if buf, err := json.Marshal(u); err != nil {
|
||||
return err
|
||||
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the transaction.
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
### Database backups
|
||||
|
||||
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
|
||||
function to write a consistent view of the database to a writer. If you call
|
||||
this from a read-only transaction, it will perform a hot backup and not block
|
||||
your other database reads and writes.
|
||||
|
||||
By default, it will use a regular file handle which will utilize the operating
|
||||
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
|
||||
documentation for information about optimizing for larger-than-RAM datasets.
|
||||
|
||||
One common use case is to back up over HTTP so you can use tools like `cURL` to
do database backups:
|
||||
|
||||
```go
|
||||
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
|
||||
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then you can back up using this command:
|
||||
|
||||
```sh
|
||||
$ curl http://localhost/backup > my.db
|
||||
```
|
||||
|
||||
Or you can open your browser to `http://localhost/backup` and it will download
|
||||
automatically.
|
||||
|
||||
If you want to back up to another file you can use the `Tx.CopyFile()` helper
function.
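For example, a file-to-file backup might look like this (a sketch; the helper name and destination path are illustrative):

```go
// backupTo writes a consistent snapshot of the database to path.
func backupTo(db *bolt.DB, path string) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile(path, 0600)
	})
}
```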
|
||||
|
||||
|
||||
### Statistics
|
||||
|
||||
The database keeps a running count of many of the internal operations it
|
||||
performs so you can better understand what's going on. By grabbing a snapshot
|
||||
of these stats at two points in time we can see what operations were performed
|
||||
in that time range.
|
||||
|
||||
For example, we could start a goroutine to log stats every 10 seconds:
|
||||
|
||||
```go
|
||||
go func() {
|
||||
// Grab the initial stats.
|
||||
prev := db.Stats()
|
||||
|
||||
for {
|
||||
// Wait for 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
// Grab the current stats and diff them.
|
||||
stats := db.Stats()
|
||||
diff := stats.Sub(&prev)
|
||||
|
||||
// Encode stats to JSON and print to STDERR.
|
||||
json.NewEncoder(os.Stderr).Encode(diff)
|
||||
|
||||
// Save stats for the next loop.
|
||||
prev = stats
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
It's also useful to pipe these stats to a service such as statsd for monitoring
|
||||
or to provide an HTTP endpoint that will perform a fixed-length sample.
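A rough sketch of such an endpoint, assuming a package-level `db` as in the backup handler above (the ten-second sample window is an arbitrary choice):

```go
// statsHandler samples the database stats over a fixed window and
// returns the difference as JSON.
func statsHandler(w http.ResponseWriter, req *http.Request) {
	prev := db.Stats()
	time.Sleep(10 * time.Second) // fixed-length sample window
	diff := db.Stats().Sub(&prev)

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(diff); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```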
|
||||
|
||||
|
||||
### Read-Only Mode
|
||||
|
||||
Sometimes it is useful to create a shared, read-only Bolt database. To do this,
set the `Options.ReadOnly` flag when opening your database. Read-only mode
uses a shared lock to allow multiple processes to read from the database, but
it will block any process from opening the database in read-write mode.
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Mobile Use (iOS/Android)
|
||||
|
||||
Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB`, with an initializing
constructor that takes in a filepath where the database file will be stored.
Neither Android nor iOS requires extra permissions or cleanup to use this method.
|
||||
|
||||
```go
|
||||
func NewBoltDB(filepath string) *BoltDB {
|
||||
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return &BoltDB{db}
|
||||
}
|
||||
|
||||
type BoltDB struct {
|
||||
db *bolt.DB
|
||||
...
|
||||
}
|
||||
|
||||
func (b *BoltDB) Path() string {
|
||||
return b.db.Path()
|
||||
}
|
||||
|
||||
func (b *BoltDB) Close() {
|
||||
b.db.Close()
|
||||
}
|
||||
```
|
||||
|
||||
Database logic should be defined as methods on this wrapper struct.
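For instance, a simple getter/setter pair on the wrapper might look like this (the "demo" bucket and method names are illustrative):

```go
func (b *BoltDB) Set(key, value string) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("demo"))
		if err != nil {
			return err
		}
		return bkt.Put([]byte(key), []byte(value))
	})
}

func (b *BoltDB) Get(key string) (string, error) {
	var value string
	err := b.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket([]byte("demo"))
		if bkt == nil {
			return nil // bucket not created yet
		}
		// string() copies the bytes, so the value survives the transaction.
		value = string(bkt.Get([]byte(key)))
		return nil
	})
	return value, err
}
```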
|
||||
|
||||
To initialize this struct from the native language (both platforms now sync
their local storage to the cloud; these snippets disable that functionality for the
database file):
|
||||
|
||||
#### Android
|
||||
|
||||
```java
|
||||
String path;
|
||||
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
|
||||
path = getNoBackupFilesDir().getAbsolutePath();
|
||||
} else{
|
||||
path = getFilesDir().getAbsolutePath();
|
||||
}
|
||||
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
|
||||
```
|
||||
|
||||
#### iOS
|
||||
|
||||
```objc
|
||||
- (void)demo {
|
||||
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
|
||||
NSUserDomainMask,
|
||||
YES) objectAtIndex:0];
|
||||
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
|
||||
[self addSkipBackupAttributeToItemAtPath:demo.path];
|
||||
//Some DB Logic would go here
|
||||
[demo close];
|
||||
}
|
||||
|
||||
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
|
||||
{
|
||||
NSURL* URL= [NSURL fileURLWithPath: filePathString];
|
||||
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
|
||||
|
||||
NSError *error = nil;
|
||||
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
|
||||
forKey: NSURLIsExcludedFromBackupKey error: &error];
|
||||
if(!success){
|
||||
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
For more information on getting started with Bolt, check out the following articles:
|
||||
|
||||
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
|
||||
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
|
||||
|
||||
|
||||
## Comparison with other databases
|
||||
|
||||
### Postgres, MySQL, & other relational databases
|
||||
|
||||
Relational databases structure data into rows and are only accessible through
|
||||
the use of SQL. This approach provides flexibility in how you store and query
|
||||
your data but also incurs overhead in parsing and planning SQL statements. Bolt
|
||||
accesses all data by a byte slice key. This makes Bolt fast to read and write
|
||||
data by key but provides no built-in support for joining values together.
|
||||
|
||||
Most relational databases (with the exception of SQLite) are standalone servers
|
||||
that run separately from your application. This gives your systems
|
||||
flexibility to connect multiple application servers to a single database
|
||||
server but also adds overhead in serializing and transporting data over the
|
||||
network. Bolt runs as a library included in your application so all data access
|
||||
has to go through your application's process. This brings data closer to your
|
||||
application but limits multi-process access to the data.
|
||||
|
||||
|
||||
### LevelDB, RocksDB
|
||||
|
||||
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
|
||||
they are libraries bundled into the application, however, their underlying
|
||||
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
|
||||
random writes by using a write ahead log and multi-tiered, sorted files called
|
||||
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
|
||||
have trade-offs.
|
||||
|
||||
If you require a high random write throughput (>10,000 w/sec) or you need to use
|
||||
spinning disks then LevelDB could be a good choice. If your application is
|
||||
read-heavy or does a lot of range scans then Bolt could be a good choice.
|
||||
|
||||
One other important consideration is that LevelDB does not have transactions.
|
||||
It supports batch writing of key/value pairs and it supports read snapshots,
|
||||
but it will not give you the ability to do a compare-and-swap operation safely.
|
||||
Bolt supports fully serializable ACID transactions.
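For instance, a compare-and-swap is straightforward inside a single `Update()` call, since only one read-write transaction runs at a time (a sketch; the helper name and error value are illustrative, and it assumes the `bytes` and `errors` packages):

```go
// compareAndSwap sets key to newVal only if its current value equals oldVal.
func compareAndSwap(db *bolt.DB, bucket, key, oldVal, newVal []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		if !bytes.Equal(b.Get(key), oldVal) {
			return errors.New("value changed concurrently")
		}
		return b.Put(key, newVal)
	})
}
```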
|
||||
|
||||
|
||||
### LMDB
|
||||
|
||||
Bolt was originally a port of LMDB so it is architecturally similar. Both use
|
||||
a B+tree, have ACID semantics with fully serializable transactions, and support
|
||||
lock-free MVCC using a single writer and multiple readers.
|
||||
|
||||
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
|
||||
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
|
||||
several unsafe actions such as direct writes for the sake of performance. Bolt
|
||||
opts to disallow actions which can leave the database in a corrupted state. The
|
||||
only exception to this in Bolt is `DB.NoSync`.
|
||||
|
||||
There are also a few differences in API. LMDB requires a maximum mmap size when
|
||||
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
|
||||
automatically. LMDB overloads the getter and setter functions with multiple
|
||||
flags whereas Bolt splits these specialized cases into their own functions.
|
||||
|
||||
|
||||
## Caveats & Limitations
|
||||
|
||||
It's important to pick the right tool for the job and Bolt is no exception.
|
||||
Here are a few things to note when evaluating and using Bolt:
|
||||
|
||||
* Bolt is good for read-intensive workloads. Sequential write performance is
also fast but random writes can be slow. You can use `DB.Batch()` or add a
write-ahead log to help mitigate this issue (see the `DB.Batch()` sketch after this list).
|
||||
|
||||
* Bolt uses a B+tree internally so there can be a lot of random page access.
|
||||
SSDs provide a significant performance boost over spinning disks.
|
||||
|
||||
* Try to avoid long running read transactions. Bolt uses copy-on-write so
|
||||
old pages cannot be reclaimed while an old transaction is using them.
|
||||
|
||||
* Byte slices returned from Bolt are only valid during a transaction. Once the
|
||||
transaction has been committed or rolled back then the memory they point to
|
||||
can be reused by a new page or can be unmapped from virtual memory and you'll
|
||||
see an `unexpected fault address` panic when accessing it.
|
||||
|
||||
* Bolt uses an exclusive write lock on the database file so it cannot be
|
||||
shared by multiple processes.
|
||||
|
||||
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
|
||||
buckets that have random inserts will cause your database to have very poor
|
||||
page utilization.
|
||||
|
||||
* Use larger buckets in general. Smaller buckets cause poor page utilization
once they become larger than the page size (typically 4KB).
|
||||
|
||||
* Bulk loading a lot of random writes into a new bucket can be slow as the
|
||||
page will not split until the transaction is committed. Randomly inserting
|
||||
more than 100,000 key/value pairs into a single new bucket in a single
|
||||
transaction is not advised.
|
||||
|
||||
* Bolt uses a memory-mapped file so the underlying operating system handles the
|
||||
caching of the data. Typically, the OS will cache as much of the file as it
|
||||
can in memory and will release memory as needed to other processes. This means
|
||||
that Bolt can show very high memory usage when working with large databases.
|
||||
However, this is expected and the OS will release memory as needed. Bolt can
|
||||
handle databases much larger than the available physical RAM, provided its
|
||||
memory-map fits in the process virtual address space. This may be problematic
on 32-bit systems.
|
||||
|
||||
* The data structures in the Bolt database are memory mapped so the data file
|
||||
will be endian specific. This means that you cannot copy a Bolt file from a
|
||||
little endian machine to a big endian machine and have it work. For most
|
||||
users this is not a concern since most modern CPUs are little endian.
|
||||
|
||||
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
|
||||
and return free pages back to the disk. Instead, Bolt maintains a free list
|
||||
of unused pages within its data file. These free pages can be reused by later
|
||||
transactions. This works well for many use cases as databases generally tend
|
||||
to grow. However, it's important to note that deleting large chunks of data
|
||||
will not allow you to reclaim that space on disk.
|
||||
|
||||
For more information on page allocation, [see this comment][page-allocation].
|
||||
|
||||
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
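As mentioned in the first caveat above, `DB.Batch()` can coalesce many concurrent writers into fewer read-write transactions. A rough sketch (the bucket name and helper are illustrative):

```go
// recordEvent can be called from many goroutines; Batch groups the calls
// into fewer read-write transactions than calling Update for each one.
func recordEvent(db *bolt.DB, key, value []byte) error {
	return db.Batch(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}
		return b.Put(key, value)
	})
}
```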
|
||||
|
||||
|
||||
## Reading the Source
|
||||
|
||||
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
|
||||
transactional key/value database so it can be a good starting point for people
|
||||
interested in how databases work.
|
||||
|
||||
The best places to start are the main entry points into Bolt:
|
||||
|
||||
- `Open()` - Initializes the reference to the database. It's responsible for
|
||||
creating the database if it doesn't exist, obtaining an exclusive lock on the
|
||||
file, reading the meta pages, & memory-mapping the file.
|
||||
|
||||
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
|
||||
value of the `writable` argument. This requires briefly obtaining the "meta"
|
||||
lock to keep track of open transactions. Only one read-write transaction can
|
||||
exist at a time so the "rwlock" is acquired during the life of a read-write
|
||||
transaction.
|
||||
|
||||
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
|
||||
arguments, a cursor is used to traverse the B+tree to the page and position
|
||||
where they key & value will be written. Once the position is found, the bucket
|
||||
materializes the underlying page and the page's parent pages into memory as
|
||||
"nodes". These nodes are where mutations occur during read-write transactions.
|
||||
These changes get flushed to disk during commit.
|
||||
|
||||
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
|
||||
to move to the page & position of a key/value pair. During a read-only
|
||||
transaction, the key and value data is returned as a direct reference to the
|
||||
underlying mmap file so there's no allocation overhead. For read-write
|
||||
transactions, this data may reference the mmap file or one of the in-memory
|
||||
node values.
|
||||
|
||||
- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
|
||||
or in-memory nodes. It can seek to a specific key, move to the first or last
|
||||
value, or it can move forward or backward. The cursor handles the movement up
|
||||
and down the B+tree transparently to the end user.
|
||||
|
||||
- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
|
||||
into pages to be written to disk. Writing to disk then occurs in two phases.
|
||||
First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
|
||||
new meta page with an incremented transaction ID is written and another
|
||||
`fsync()` occurs. This two phase write ensures that partially written data
|
||||
pages are ignored in the event of a crash since the meta page pointing to them
|
||||
is never written. Partially written meta pages are invalidated because they
|
||||
are written with a checksum.
|
||||
|
||||
If you have additional notes that could be helpful for others, please submit
|
||||
them via pull request.
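As a supplement, here is a minimal sketch tying the entry points above together with manual transaction management (`Open()`, `DB.Begin()`, `Tx.Commit()`); the file path and bucket name are illustrative:

```go
func manualTransaction() error {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	// Start a read-write transaction; only one can be open at a time.
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	// Rolls back on early return; the error is discarded after a successful commit.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		return err
	}
	if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
		return err
	}

	// Write the dirty pages, then the new meta page (two fsyncs).
	return tx.Commit()
}
```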
|
||||
|
||||
|
||||
## Other Projects Using Bolt
|
||||
|
||||
Below is a list of public, open source projects that use Bolt:
|
||||
|
||||
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
|
||||
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
|
||||
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
|
||||
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
|
||||
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
|
||||
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
|
||||
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
|
||||
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
|
||||
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
|
||||
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
|
||||
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
|
||||
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
|
||||
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
|
||||
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
|
||||
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
|
||||
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
|
||||
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
|
||||
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
|
||||
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
|
||||
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
|
||||
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
|
||||
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
|
||||
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
|
||||
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
|
||||
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
|
||||
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
|
||||
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
|
||||
backed by boltdb.
|
||||
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
|
||||
simple tx and key scans.
|
||||
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
|
||||
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
|
||||
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
|
||||
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
|
||||
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
|
||||
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
|
||||
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
|
||||
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
|
||||
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
|
||||
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
|
||||
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go that uses BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
|
||||
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
|
||||
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
|
||||
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
|
||||
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
|
||||
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
|
||||
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
|
||||
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
|
||||
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
|
||||
|
||||
If you are using Bolt in a project please send a pull request to add it to the list.
|
|
@ -1,18 +0,0 @@
|
|||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\boltdb\bolt
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
- go version
|
||||
- go env
|
||||
- go get -v -t ./...
|
||||
|
||||
build_script:
|
||||
- go test -v ./...
|
File diff suppressed because it is too large
|
@ -1,817 +0,0 @@
|
|||
package bolt_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// Ensure that a cursor can return a reference to the bucket that created it.
|
||||
func TestCursor_Bucket(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) {
|
||||
t.Fatal("cursor bucket mismatch")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can seek to the appropriate keys.
|
||||
func TestCursor_Seek(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("0001")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("baz"), []byte("0003")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := b.CreateBucket([]byte("bkt")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
|
||||
// Exact match should go to the key.
|
||||
if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte("0002")) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
// Inexact match should go to the next key.
|
||||
if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte("0003")) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
// Low key should go to the first key.
|
||||
if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte("0002")) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
// High key should return no key.
|
||||
if k, v := c.Seek([]byte("zzz")); k != nil {
|
||||
t.Fatalf("expected nil key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("expected nil value: %v", v)
|
||||
}
|
||||
|
||||
// Buckets should return their key but no value.
|
||||
if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("expected nil value: %v", v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCursor_Delete(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
const count = 1000
|
||||
|
||||
// Insert every key between 0 and $count.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i := 0; i < count; i += 1 {
|
||||
k := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(k, uint64(i))
|
||||
if err := b.Put(k, make([]byte, 100)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if _, err := b.CreateBucket([]byte("sub")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
bound := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(bound, uint64(count/2))
|
||||
for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
|
||||
if err := c.Delete(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
c.Seek([]byte("sub"))
|
||||
if err := c.Delete(); err != bolt.ErrIncompatibleValue {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
stats := tx.Bucket([]byte("widgets")).Stats()
|
||||
if stats.KeyN != count/2+1 {
|
||||
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can seek to the appropriate keys when there are a
|
||||
// large number of keys. This test also checks that seek will always move
|
||||
// forward to the next key.
|
||||
//
|
||||
// Related: https://github.com/boltdb/bolt/pull/187
|
||||
func TestCursor_Seek_Large(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
var count = 10000
|
||||
|
||||
// Insert every other key between 0 and $count.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < count; i += 100 {
|
||||
for j := i; j < i+100; j += 2 {
|
||||
k := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(k, uint64(j))
|
||||
if err := b.Put(k, make([]byte, 100)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
for i := 0; i < count; i++ {
|
||||
seek := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(seek, uint64(i))
|
||||
|
||||
k, _ := c.Seek(seek)
|
||||
|
||||
// The last seek is beyond the end of the range so
|
||||
// it should return nil.
|
||||
if i == count-1 {
|
||||
if k != nil {
|
||||
t.Fatal("expected nil key")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Otherwise we should seek to the exact key or the next key.
|
||||
num := binary.BigEndian.Uint64(k)
|
||||
if i%2 == 0 {
|
||||
if num != uint64(i) {
|
||||
t.Fatalf("unexpected num: %d", num)
|
||||
}
|
||||
} else {
|
||||
if num != uint64(i+1) {
|
||||
t.Fatalf("unexpected num: %d", num)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a cursor can iterate over an empty bucket without error.
|
||||
func TestCursor_EmptyBucket(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
k, v := c.First()
|
||||
if k != nil {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
|
||||
func TestCursor_EmptyBucketReverse(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
k, v := c.Last()
|
||||
if k != nil {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can iterate over a single root with a couple elements.
|
||||
func TestCursor_Iterate_Leaf(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("baz"), []byte{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte{0}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("bar"), []byte{1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tx, err := db.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
|
||||
k, v := c.First()
|
||||
if !bytes.Equal(k, []byte("bar")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte{1}) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
k, v = c.Next()
|
||||
if !bytes.Equal(k, []byte("baz")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte{}) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
k, v = c.Next()
|
||||
if !bytes.Equal(k, []byte("foo")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte{0}) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
k, v = c.Next()
|
||||
if k != nil {
|
||||
t.Fatalf("expected nil key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("expected nil value: %v", v)
|
||||
}
|
||||
|
||||
k, v = c.Next()
|
||||
if k != nil {
|
||||
t.Fatalf("expected nil key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("expected nil value: %v", v)
|
||||
}
|
||||
|
||||
if err := tx.Rollback(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
|
||||
func TestCursor_LeafRootReverse(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("baz"), []byte{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte{0}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("bar"), []byte{1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tx, err := db.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
|
||||
if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte{0}) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte{}) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, []byte{1}) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
if k, v := c.Prev(); k != nil {
|
||||
t.Fatalf("expected nil key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("expected nil value: %v", v)
|
||||
}
|
||||
|
||||
if k, v := c.Prev(); k != nil {
|
||||
t.Fatalf("expected nil key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("expected nil value: %v", v)
|
||||
}
|
||||
|
||||
if err := tx.Rollback(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can restart from the beginning.
|
||||
func TestCursor_Restart(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("bar"), []byte{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tx, err := db.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
|
||||
if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
}
|
||||
if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
}
|
||||
|
||||
if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
}
|
||||
if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
}
|
||||
|
||||
if err := tx.Rollback(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a cursor can skip over empty pages that have been deleted.
|
||||
func TestCursor_First_EmptyPages(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
// Create 1000 keys in the "widgets" bucket.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Delete half the keys and then try to iterate.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
for i := 0; i < 600; i++ {
|
||||
if err := b.Delete(u64tob(uint64(i))); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
c := b.Cursor()
|
||||
var n int
|
||||
for k, _ := c.First(); k != nil; k, _ = c.Next() {
|
||||
n++
|
||||
}
|
||||
if n != 400 {
|
||||
t.Fatalf("unexpected key count: %d", n)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx can iterate over all elements in a bucket.
|
||||
func TestCursor_QuickCheck(t *testing.T) {
|
||||
f := func(items testdata) bool {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
// Bulk insert all values.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, item := range items {
|
||||
if err := b.Put(item.Key, item.Value); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Sort test data.
|
||||
sort.Sort(items)
|
||||
|
||||
// Iterate over all items and check consistency.
|
||||
var index = 0
|
||||
tx, err = db.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
|
||||
if !bytes.Equal(k, items[index].Key) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, items[index].Value) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
index++
|
||||
}
|
||||
if len(items) != index {
|
||||
t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
|
||||
}
|
||||
|
||||
if err := tx.Rollback(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
if err := quick.Check(f, qconfig()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a transaction can iterate over all elements in a bucket in reverse.
|
||||
func TestCursor_QuickCheck_Reverse(t *testing.T) {
|
||||
f := func(items testdata) bool {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
// Bulk insert all values.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, item := range items {
|
||||
if err := b.Put(item.Key, item.Value); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Sort test data.
|
||||
sort.Sort(revtestdata(items))
|
||||
|
||||
// Iterate over all items and check consistency.
|
||||
var index = 0
|
||||
tx, err = db.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
|
||||
if !bytes.Equal(k, items[index].Key) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if !bytes.Equal(v, items[index].Value) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
index++
|
||||
}
|
||||
if len(items) != index {
|
||||
t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
|
||||
}
|
||||
|
||||
if err := tx.Rollback(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
if err := quick.Check(f, qconfig()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can iterate over subbuckets.
|
||||
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := b.CreateBucket([]byte("foo")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := b.CreateBucket([]byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := b.CreateBucket([]byte("baz")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
var names []string
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
names = append(names, string(k))
|
||||
if v != nil {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) {
|
||||
t.Fatalf("unexpected names: %+v", names)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx cursor can reverse iterate over subbuckets.
|
||||
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := b.CreateBucket([]byte("foo")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := b.CreateBucket([]byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := b.CreateBucket([]byte("baz")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
var names []string
|
||||
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||
for k, v := c.Last(); k != nil; k, v = c.Prev() {
|
||||
names = append(names, string(k))
|
||||
if v != nil {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) {
|
||||
t.Fatalf("unexpected names: %+v", names)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleCursor() {
|
||||
// Open the database.
|
||||
db, err := bolt.Open(tempfile(), 0666, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.Remove(db.Path())
|
||||
|
||||
// Start a read-write transaction.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
// Create a new bucket.
|
||||
b, err := tx.CreateBucket([]byte("animals"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert data into a bucket.
|
||||
if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a cursor for iteration.
|
||||
c := b.Cursor()
|
||||
|
||||
// Iterate over items in sorted key order. This starts from the
|
||||
// first key/value pair and updates the k/v variables to the
|
||||
// next key/value on each iteration.
|
||||
//
|
||||
// The loop finishes at the end of the cursor when a nil key is returned.
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
fmt.Printf("A %s is %s.\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// A cat is lame.
|
||||
// A dog is fun.
|
||||
// A liger is awesome.
|
||||
}
|
||||
|
||||
func ExampleCursor_reverse() {
|
||||
// Open the database.
|
||||
db, err := bolt.Open(tempfile(), 0666, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.Remove(db.Path())
|
||||
|
||||
// Start a read-write transaction.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
// Create a new bucket.
|
||||
b, err := tx.CreateBucket([]byte("animals"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert data into a bucket.
|
||||
if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a cursor for iteration.
|
||||
c := b.Cursor()
|
||||
|
||||
// Iterate over items in reverse sorted key order. This starts
|
||||
// from the last key/value pair and updates the k/v variables to
|
||||
// the previous key/value on each iteration.
|
||||
//
|
||||
// The loop finishes at the beginning of the cursor when a nil key
|
||||
// is returned.
|
||||
for k, v := c.Last(); k != nil; k, v = c.Prev() {
|
||||
fmt.Printf("A %s is %s.\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Close the database to release the file lock.
|
||||
if err := db.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// A liger is awesome.
|
||||
// A dog is fun.
|
||||
// A cat is lame.
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,158 +0,0 @@
|
|||
package bolt
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Ensure that a page is added to a transaction's freelist.
|
||||
func TestFreelist_free(t *testing.T) {
|
||||
f := newFreelist()
|
||||
f.free(100, &page{id: 12})
|
||||
if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
|
||||
t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a page and its overflow is added to a transaction's freelist.
|
||||
func TestFreelist_free_overflow(t *testing.T) {
|
||||
f := newFreelist()
|
||||
f.free(100, &page{id: 12, overflow: 3})
|
||||
if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a transaction's free pages can be released.
|
||||
func TestFreelist_release(t *testing.T) {
|
||||
f := newFreelist()
|
||||
f.free(100, &page{id: 12, overflow: 1})
|
||||
f.free(100, &page{id: 9})
|
||||
f.free(102, &page{id: 39})
|
||||
f.release(100)
|
||||
f.release(101)
|
||||
if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
|
||||
f.release(102)
|
||||
if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a freelist can find contiguous blocks of pages.
|
||||
func TestFreelist_allocate(t *testing.T) {
|
||||
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
|
||||
if id := int(f.allocate(3)); id != 3 {
|
||||
t.Fatalf("exp=3; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 6 {
|
||||
t.Fatalf("exp=6; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(3)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(2)); id != 12 {
|
||||
t.Fatalf("exp=12; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 7 {
|
||||
t.Fatalf("exp=7; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(0)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(0)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
|
||||
if id := int(f.allocate(1)); id != 9 {
|
||||
t.Fatalf("exp=9; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 18 {
|
||||
t.Fatalf("exp=18; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a freelist can deserialize from a freelist page.
|
||||
func TestFreelist_read(t *testing.T) {
|
||||
// Create a page.
|
||||
var buf [4096]byte
|
||||
page := (*page)(unsafe.Pointer(&buf[0]))
|
||||
page.flags = freelistPageFlag
|
||||
page.count = 2
|
||||
|
||||
// Insert 2 page ids.
|
||||
ids := (*[3]pgid)(unsafe.Pointer(&page.ptr))
|
||||
ids[0] = 23
|
||||
ids[1] = 50
|
||||
|
||||
// Deserialize page into a freelist.
|
||||
f := newFreelist()
|
||||
f.read(page)
|
||||
|
||||
// Ensure that there are two page ids in the freelist.
|
||||
if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a freelist can serialize into a freelist page.
|
||||
func TestFreelist_write(t *testing.T) {
|
||||
// Create a freelist and write it to a page.
|
||||
var buf [4096]byte
|
||||
f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)}
|
||||
f.pending[100] = []pgid{28, 11}
|
||||
f.pending[101] = []pgid{3}
|
||||
p := (*page)(unsafe.Pointer(&buf[0]))
|
||||
if err := f.write(p); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Read the page back out.
|
||||
f2 := newFreelist()
|
||||
f2.read(p)
|
||||
|
||||
// Ensure that the freelist is correct.
|
||||
// All pages should be present and in reverse order.
|
||||
if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f2.ids)
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) }
|
||||
func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) }
|
||||
func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) }
|
||||
func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) }
|
||||
|
||||
func benchmark_FreelistRelease(b *testing.B, size int) {
|
||||
ids := randomPgids(size)
|
||||
pending := randomPgids(len(ids) / 400)
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}}
|
||||
f.release(1)
|
||||
}
|
||||
}
|
||||
|
||||
func randomPgids(n int) []pgid {
|
||||
rand.Seed(42)
|
||||
pgids := make(pgids, n)
|
||||
for i := range pgids {
|
||||
pgids[i] = pgid(rand.Int63())
|
||||
}
|
||||
sort.Sort(pgids)
|
||||
return pgids
|
||||
}
|
|
@ -1,156 +0,0 @@
|
|||
package bolt
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Ensure that a node can insert a key/value.
|
||||
func TestNode_put(t *testing.T) {
|
||||
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}}
|
||||
n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0)
|
||||
n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
|
||||
n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
|
||||
n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
|
||||
|
||||
if len(n.inodes) != 3 {
|
||||
t.Fatalf("exp=3; got=%d", len(n.inodes))
|
||||
}
|
||||
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
|
||||
t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
|
||||
t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
|
||||
t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if n.inodes[2].flags != uint32(leafPageFlag) {
|
||||
t.Fatalf("not a leaf: %d", n.inodes[2].flags)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node can deserialize from a leaf page.
|
||||
func TestNode_read_LeafPage(t *testing.T) {
|
||||
// Create a page.
|
||||
var buf [4096]byte
|
||||
page := (*page)(unsafe.Pointer(&buf[0]))
|
||||
page.flags = leafPageFlag
|
||||
page.count = 2
|
||||
|
||||
// Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
|
||||
nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr))
|
||||
nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2
|
||||
nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4
|
||||
|
||||
// Write data for the nodes at the end.
|
||||
data := (*[4096]byte)(unsafe.Pointer(&nodes[2]))
|
||||
copy(data[:], []byte("barfooz"))
|
||||
copy(data[7:], []byte("helloworldbye"))
|
||||
|
||||
// Deserialize page into a leaf.
|
||||
n := &node{}
|
||||
n.read(page)
|
||||
|
||||
// Check that there are two inodes with correct data.
|
||||
if !n.isLeaf {
|
||||
t.Fatal("expected leaf")
|
||||
}
|
||||
if len(n.inodes) != 2 {
|
||||
t.Fatalf("exp=2; got=%d", len(n.inodes))
|
||||
}
|
||||
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
|
||||
t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
|
||||
t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node can serialize into a leaf page.
|
||||
func TestNode_write_LeafPage(t *testing.T) {
|
||||
// Create a node.
|
||||
n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
|
||||
n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0)
|
||||
n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0)
|
||||
n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0)
|
||||
|
||||
// Write it to a page.
|
||||
var buf [4096]byte
|
||||
p := (*page)(unsafe.Pointer(&buf[0]))
|
||||
n.write(p)
|
||||
|
||||
// Read the page back in.
|
||||
n2 := &node{}
|
||||
n2.read(p)
|
||||
|
||||
// Check that the two pages are the same.
|
||||
if len(n2.inodes) != 3 {
|
||||
t.Fatalf("exp=3; got=%d", len(n2.inodes))
|
||||
}
|
||||
if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
|
||||
t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
|
||||
t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
|
||||
t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node can split into appropriate subgroups.
|
||||
func TestNode_split(t *testing.T) {
|
||||
// Create a node.
|
||||
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
|
||||
n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
|
||||
|
||||
// Split between 2 & 3.
|
||||
n.split(100)
|
||||
|
||||
var parent = n.parent
|
||||
if len(parent.children) != 2 {
|
||||
t.Fatalf("exp=2; got=%d", len(parent.children))
|
||||
}
|
||||
if len(parent.children[0].inodes) != 2 {
|
||||
t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
|
||||
}
|
||||
if len(parent.children[1].inodes) != 3 {
|
||||
t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a page with the minimum number of inodes just returns a single node.
|
||||
func TestNode_split_MinKeys(t *testing.T) {
|
||||
// Create a node.
|
||||
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
|
||||
n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
|
||||
|
||||
// Split.
|
||||
n.split(20)
|
||||
if n.parent != nil {
|
||||
t.Fatalf("expected nil parent")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node that has keys that all fit on a page just returns one leaf.
|
||||
func TestNode_split_SinglePage(t *testing.T) {
|
||||
// Create a node.
|
||||
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
|
||||
n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
|
||||
n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
|
||||
|
||||
// Split.
|
||||
n.split(4096)
|
||||
if n.parent != nil {
|
||||
t.Fatalf("expected nil parent")
|
||||
}
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
package bolt
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
// Ensure that the page type can be returned in human readable format.
|
||||
func TestPage_typ(t *testing.T) {
|
||||
if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
|
||||
t.Fatalf("exp=branch; got=%v", typ)
|
||||
}
|
||||
if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
|
||||
t.Fatalf("exp=leaf; got=%v", typ)
|
||||
}
|
||||
if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
|
||||
t.Fatalf("exp=meta; got=%v", typ)
|
||||
}
|
||||
if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
|
||||
t.Fatalf("exp=freelist; got=%v", typ)
|
||||
}
|
||||
if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
|
||||
t.Fatalf("exp=unknown<4e20>; got=%v", typ)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that the hexdump debugging function doesn't blow up.
|
||||
func TestPage_dump(t *testing.T) {
|
||||
(&page{id: 256}).hexdump(16)
|
||||
}
|
||||
|
||||
func TestPgids_merge(t *testing.T) {
|
||||
a := pgids{4, 5, 6, 10, 11, 12, 13, 27}
|
||||
b := pgids{1, 3, 8, 9, 25, 30}
|
||||
c := a.merge(b)
|
||||
if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) {
|
||||
t.Errorf("mismatch: %v", c)
|
||||
}
|
||||
|
||||
a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36}
|
||||
b = pgids{8, 9, 25, 30}
|
||||
c = a.merge(b)
|
||||
if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) {
|
||||
t.Errorf("mismatch: %v", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgids_merge_quick(t *testing.T) {
|
||||
if err := quick.Check(func(a, b pgids) bool {
|
||||
// Sort incoming lists.
|
||||
sort.Sort(a)
|
||||
sort.Sort(b)
|
||||
|
||||
// Merge the two lists together.
|
||||
got := a.merge(b)
|
||||
|
||||
// The expected value should be the two lists combined and sorted.
|
||||
exp := append(a, b...)
|
||||
sort.Sort(exp)
|
||||
|
||||
if !reflect.DeepEqual(exp, got) {
|
||||
t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
|
@ -1,87 +0,0 @@
|
|||
package bolt_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing/quick"
|
||||
"time"
|
||||
)
|
||||
|
||||
// testing/quick defaults to 5 iterations and a random seed.
|
||||
// You can override these settings from the command line:
|
||||
//
|
||||
// -quick.count The number of iterations to perform.
|
||||
// -quick.seed The seed to use for randomizing.
|
||||
// -quick.maxitems The maximum number of items to insert into a DB.
|
||||
// -quick.maxksize The maximum size of a key.
|
||||
// -quick.maxvsize The maximum size of a value.
|
||||
//
|
||||
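// A usage sketch (an assumption, not part of the original file): these flags are
// registered with the standard flag package in init() below, so they can be
// passed straight to `go test`, for example:
//
//	go test -quick.count=10 -quick.seed=42 -quick.maxitems=500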
|
||||
var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int
|
||||
|
||||
func init() {
|
||||
flag.IntVar(&qcount, "quick.count", 5, "")
|
||||
flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "")
|
||||
flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "")
|
||||
flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "")
|
||||
flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
|
||||
flag.Parse()
|
||||
fmt.Fprintln(os.Stderr, "seed:", qseed)
|
||||
fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
|
||||
}
|
||||
|
||||
func qconfig() *quick.Config {
|
||||
return &quick.Config{
|
||||
MaxCount: qcount,
|
||||
Rand: rand.New(rand.NewSource(int64(qseed))),
|
||||
}
|
||||
}
|
||||
|
||||
type testdata []testdataitem
|
||||
|
||||
func (t testdata) Len() int { return len(t) }
|
||||
func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
|
||||
func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 }
|
||||
|
||||
func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
n := rand.Intn(qmaxitems-1) + 1
|
||||
items := make(testdata, n)
|
||||
used := make(map[string]bool)
|
||||
for i := 0; i < n; i++ {
|
||||
item := &items[i]
|
||||
// Ensure that keys are unique by looping until we find one that we have not already used.
|
||||
for {
|
||||
item.Key = randByteSlice(rand, 1, qmaxksize)
|
||||
if !used[string(item.Key)] {
|
||||
used[string(item.Key)] = true
|
||||
break
|
||||
}
|
||||
}
|
||||
item.Value = randByteSlice(rand, 0, qmaxvsize)
|
||||
}
|
||||
return reflect.ValueOf(items)
|
||||
}
|
||||
|
||||
type revtestdata []testdataitem
|
||||
|
||||
func (t revtestdata) Len() int { return len(t) }
|
||||
func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
|
||||
func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 }
|
||||
|
||||
type testdataitem struct {
|
||||
Key []byte
|
||||
Value []byte
|
||||
}
|
||||
|
||||
func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte {
|
||||
n := rand.Intn(maxSize-minSize) + minSize
|
||||
b := make([]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
b[i] = byte(rand.Intn(255))
|
||||
}
|
||||
return b
|
||||
}
|
|
@ -1,329 +0,0 @@
|
|||
package bolt_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) }
|
||||
func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) }
|
||||
func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) }
|
||||
func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) }
|
||||
func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) }
|
||||
|
||||
func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) }
|
||||
func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) }
|
||||
func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) }
|
||||
func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) }
|
||||
|
||||
func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) }
|
||||
func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) }
|
||||
func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) }
|
||||
|
||||
func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) }
|
||||
|
||||
// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
|
||||
func testSimulate(t *testing.T, threadCount, parallelism int) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
rand.Seed(int64(qseed))
|
||||
|
||||
// A list of operations that readers and writers can perform.
|
||||
var readerHandlers = []simulateHandler{simulateGetHandler}
|
||||
var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
|
||||
|
||||
var versions = make(map[int]*QuickDB)
|
||||
versions[1] = NewQuickDB()
|
||||
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
var mutex sync.Mutex
|
||||
|
||||
// Run n threads in parallel, each with their own operation.
|
||||
var wg sync.WaitGroup
|
||||
var threads = make(chan bool, parallelism)
|
||||
var i int
|
||||
for {
|
||||
threads <- true
|
||||
wg.Add(1)
|
||||
writable := ((rand.Int() % 100) < 20) // 20% writers
|
||||
|
||||
// Choose an operation to execute.
|
||||
var handler simulateHandler
|
||||
if writable {
|
||||
handler = writerHandlers[rand.Intn(len(writerHandlers))]
|
||||
} else {
|
||||
handler = readerHandlers[rand.Intn(len(readerHandlers))]
|
||||
}
|
||||
|
||||
// Execute a thread for the given operation.
|
||||
go func(writable bool, handler simulateHandler) {
|
||||
defer wg.Done()
|
||||
|
||||
// Start transaction.
|
||||
tx, err := db.Begin(writable)
|
||||
if err != nil {
|
||||
t.Fatal("tx begin: ", err)
|
||||
}
|
||||
|
||||
// Obtain current state of the dataset.
|
||||
mutex.Lock()
|
||||
var qdb = versions[tx.ID()]
|
||||
if writable {
|
||||
qdb = versions[tx.ID()-1].Copy()
|
||||
}
|
||||
mutex.Unlock()
|
||||
|
||||
// Make sure we commit/rollback the tx at the end and update the state.
|
||||
if writable {
|
||||
defer func() {
|
||||
mutex.Lock()
|
||||
versions[tx.ID()] = qdb
|
||||
mutex.Unlock()
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
}
|
||||
|
||||
// Ignore operation if we don't have data yet.
|
||||
if qdb == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Execute handler.
|
||||
handler(tx, qdb)
|
||||
|
||||
// Release a thread back to the scheduling loop.
|
||||
<-threads
|
||||
}(writable, handler)
|
||||
|
||||
i++
|
||||
if i > threadCount {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Wait until all threads are done.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
type simulateHandler func(tx *bolt.Tx, qdb *QuickDB)
|
||||
|
||||
// Retrieves a key from the database and verifies that it is what is expected.
|
||||
func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) {
|
||||
// Randomly retrieve an existing key path.
|
||||
keys := qdb.Rand()
|
||||
if len(keys) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Retrieve root bucket.
|
||||
b := tx.Bucket(keys[0])
|
||||
if b == nil {
|
||||
panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4)))
|
||||
}
|
||||
|
||||
// Drill into nested buckets.
|
||||
for _, key := range keys[1 : len(keys)-1] {
|
||||
b = b.Bucket(key)
|
||||
if b == nil {
|
||||
panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify key/value on the final bucket.
|
||||
expected := qdb.Get(keys)
|
||||
actual := b.Get(keys[len(keys)-1])
|
||||
if !bytes.Equal(actual, expected) {
|
||||
fmt.Println("=== EXPECTED ===")
|
||||
fmt.Println(expected)
|
||||
fmt.Println("=== ACTUAL ===")
|
||||
fmt.Println(actual)
|
||||
fmt.Println("=== END ===")
|
||||
panic("value mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
// Inserts a key into the database.
|
||||
func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) {
|
||||
var err error
|
||||
keys, value := randKeys(), randValue()
|
||||
|
||||
// Retrieve root bucket.
|
||||
b := tx.Bucket(keys[0])
|
||||
if b == nil {
|
||||
b, err = tx.CreateBucket(keys[0])
|
||||
if err != nil {
|
||||
panic("create bucket: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Create nested buckets, if necessary.
|
||||
for _, key := range keys[1 : len(keys)-1] {
|
||||
child := b.Bucket(key)
|
||||
if child != nil {
|
||||
b = child
|
||||
} else {
|
||||
b, err = b.CreateBucket(key)
|
||||
if err != nil {
|
||||
panic("create bucket: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Insert into database.
|
||||
if err := b.Put(keys[len(keys)-1], value); err != nil {
|
||||
panic("put: " + err.Error())
|
||||
}
|
||||
|
||||
// Insert into in-memory database.
|
||||
qdb.Put(keys, value)
|
||||
}
|
||||
|
||||
// QuickDB is an in-memory database that replicates the functionality of the
|
||||
// Bolt DB type, but keeps everything in memory. It is meant for testing
|
||||
// that the Bolt database is consistent.
|
||||
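// A usage sketch (illustrative assumption, not part of the original file): keys
// form a path of nested buckets, with the value stored under the final key, e.g.
//
//	qdb := NewQuickDB()
//	qdb.Put([][]byte{[]byte("bucket"), []byte("nested"), []byte("k")}, []byte("v"))
//	v := qdb.Get([][]byte{[]byte("bucket"), []byte("nested"), []byte("k")}) // v == []byte("v")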
type QuickDB struct {
|
||||
sync.RWMutex
|
||||
m map[string]interface{}
|
||||
}
|
||||
|
||||
// NewQuickDB returns an instance of QuickDB.
|
||||
func NewQuickDB() *QuickDB {
|
||||
return &QuickDB{m: make(map[string]interface{})}
|
||||
}
|
||||
|
||||
// Get retrieves the value at a key path.
|
||||
func (db *QuickDB) Get(keys [][]byte) []byte {
|
||||
db.RLock()
|
||||
defer db.RUnlock()
|
||||
|
||||
m := db.m
|
||||
for _, key := range keys[:len(keys)-1] {
|
||||
value := m[string(key)]
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
switch value := value.(type) {
|
||||
case map[string]interface{}:
|
||||
m = value
|
||||
case []byte:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Only return if it's a simple value.
|
||||
if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
|
||||
return value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put inserts a value into a key path.
|
||||
func (db *QuickDB) Put(keys [][]byte, value []byte) {
|
||||
db.Lock()
|
||||
defer db.Unlock()
|
||||
|
||||
// Build buckets all the way down the key path.
|
||||
m := db.m
|
||||
for _, key := range keys[:len(keys)-1] {
|
||||
if _, ok := m[string(key)].([]byte); ok {
|
||||
return // Keypath intersects with a simple value. Do nothing.
|
||||
}
|
||||
|
||||
if m[string(key)] == nil {
|
||||
m[string(key)] = make(map[string]interface{})
|
||||
}
|
||||
m = m[string(key)].(map[string]interface{})
|
||||
}
|
||||
|
||||
// Insert value into the last key.
|
||||
m[string(keys[len(keys)-1])] = value
|
||||
}
|
||||
|
||||
// Rand returns a random key path that points to a simple value.
|
||||
func (db *QuickDB) Rand() [][]byte {
|
||||
db.RLock()
|
||||
defer db.RUnlock()
|
||||
if len(db.m) == 0 {
|
||||
return nil
|
||||
}
|
||||
var keys [][]byte
|
||||
db.rand(db.m, &keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) {
|
||||
i, index := 0, rand.Intn(len(m))
|
||||
for k, v := range m {
|
||||
if i == index {
|
||||
*keys = append(*keys, []byte(k))
|
||||
if v, ok := v.(map[string]interface{}); ok {
|
||||
db.rand(v, keys)
|
||||
}
|
||||
return
|
||||
}
|
||||
i++
|
||||
}
|
||||
panic("quickdb rand: out-of-range")
|
||||
}
|
||||
|
||||
// Copy copies the entire database.
|
||||
func (db *QuickDB) Copy() *QuickDB {
|
||||
db.RLock()
|
||||
defer db.RUnlock()
|
||||
return &QuickDB{m: db.copy(db.m)}
|
||||
}
|
||||
|
||||
func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} {
|
||||
clone := make(map[string]interface{}, len(m))
|
||||
for k, v := range m {
|
||||
switch v := v.(type) {
|
||||
case map[string]interface{}:
|
||||
clone[k] = db.copy(v)
|
||||
default:
|
||||
clone[k] = v
|
||||
}
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
||||
func randKey() []byte {
|
||||
var min, max = 1, 1024
|
||||
n := rand.Intn(max-min) + min
|
||||
b := make([]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
b[i] = byte(rand.Intn(255))
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func randKeys() [][]byte {
|
||||
var keys [][]byte
|
||||
var count = rand.Intn(2) + 2
|
||||
for i := 0; i < count; i++ {
|
||||
keys = append(keys, randKey())
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
func randValue() []byte {
|
||||
n := rand.Intn(8192)
|
||||
b := make([]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
b[i] = byte(rand.Intn(255))
|
||||
}
|
||||
return b
|
||||
}
|
|
@ -1,716 +0,0 @@
|
|||
package bolt_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// Ensure that committing a closed transaction returns an error.
|
||||
func TestTx_Commit_ErrTxClosed(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := tx.CreateBucket([]byte("foo")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != bolt.ErrTxClosed {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that rolling back a closed transaction returns an error.
|
||||
func TestTx_Rollback_ErrTxClosed(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tx.Rollback(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tx.Rollback(); err != bolt.ErrTxClosed {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that committing a read-only transaction returns an error.
|
||||
func TestTx_Commit_ErrTxNotWritable(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
tx, err := db.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tx.Commit(); err != bolt.ErrTxNotWritable {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a transaction can retrieve a cursor on the root bucket.
|
||||
func TestTx_Cursor(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := tx.CreateBucket([]byte("woojits")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c := tx.Cursor()
|
||||
if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
|
||||
if k, v := c.Next(); k != nil {
|
||||
t.Fatalf("unexpected key: %v", k)
|
||||
} else if v != nil {
|
||||
t.Fatalf("unexpected value: %v", k)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating a bucket with a read-only transaction returns an error.
|
||||
func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("foo"))
|
||||
if err != bolt.ErrTxNotWritable {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating a bucket on a closed transaction returns an error.
|
||||
func TestTx_CreateBucket_ErrTxClosed(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := tx.CreateBucket([]byte("foo")); err != bolt.ErrTxClosed {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx can retrieve a bucket.
|
||||
func TestTx_Bucket(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if tx.Bucket([]byte("widgets")) == nil {
|
||||
t.Fatal("expected bucket")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a Tx retrieving a non-existent key returns nil.
|
||||
func TestTx_Get_NotFound(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if b.Get([]byte("no_such_key")) != nil {
|
||||
t.Fatal("expected nil value")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a bucket can be created and retrieved.
|
||||
func TestTx_CreateBucket(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
// Create a bucket.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if b == nil {
|
||||
t.Fatal("expected bucket")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Read the bucket through a separate transaction.
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
if tx.Bucket([]byte("widgets")) == nil {
|
||||
t.Fatal("expected bucket")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a bucket can be created if it doesn't already exist.
|
||||
func TestTx_CreateBucketIfNotExists(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
// Create bucket.
|
||||
if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if b == nil {
|
||||
t.Fatal("expected bucket")
|
||||
}
|
||||
|
||||
// Create bucket again.
|
||||
if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if b == nil {
|
||||
t.Fatal("expected bucket")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Read the bucket through a separate transaction.
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
if tx.Bucket([]byte("widgets")) == nil {
|
||||
t.Fatal("expected bucket")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure transaction returns an error if creating an unnamed bucket.
|
||||
func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if _, err := tx.CreateBucketIfNotExists(nil); err != bolt.ErrBucketNameRequired {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a bucket cannot be created twice.
|
||||
func TestTx_CreateBucket_ErrBucketExists(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
// Create a bucket.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create the same bucket again.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if _, err := tx.CreateBucket([]byte("widgets")); err != bolt.ErrBucketExists {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a bucket is created with a non-blank name.
|
||||
func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a bucket can be deleted.
|
||||
func TestTx_DeleteBucket(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
// Create a bucket and add a value.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Delete the bucket and make sure we can't get the value.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if err := tx.DeleteBucket([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if tx.Bucket([]byte("widgets")) != nil {
|
||||
t.Fatal("unexpected bucket")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
// Create the bucket again and make sure there's not a phantom value.
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v := b.Get([]byte("foo")); v != nil {
|
||||
t.Fatalf("unexpected phantom value: %v", v)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that deleting a bucket on a closed transaction returns an error.
|
||||
func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxClosed {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that deleting a bucket with a read-only transaction returns an error.
|
||||
func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that nothing happens when deleting a bucket that doesn't exist.
|
||||
func TestTx_DeleteBucket_NotFound(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that no error is returned when a tx.ForEach function does not return
|
||||
// an error.
|
||||
func TestTx_ForEach_NoError(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that an error is returned when a tx.ForEach function returns an error.
|
||||
func TestTx_ForEach_WithError(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
marker := errors.New("marker")
|
||||
if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
|
||||
return marker
|
||||
}); err != marker {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that Tx commit handlers are called after a transaction successfully commits.
|
||||
func TestTx_OnCommit(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
var x int
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
tx.OnCommit(func() { x += 1 })
|
||||
tx.OnCommit(func() { x += 2 })
|
||||
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if x != 3 {
|
||||
t.Fatalf("unexpected x: %d", x)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
|
||||
func TestTx_OnCommit_Rollback(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
var x int
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
tx.OnCommit(func() { x += 1 })
|
||||
tx.OnCommit(func() { x += 2 })
|
||||
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return errors.New("rollback this commit")
|
||||
}); err == nil || err.Error() != "rollback this commit" {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
} else if x != 0 {
|
||||
t.Fatalf("unexpected x: %d", x)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that the database can be copied to a file path.
|
||||
func TestTx_CopyFile(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
|
||||
path := tempfile()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
return tx.CopyFile(path, 0600)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
db2, err := bolt.Open(path, 0600, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db2.View(func(tx *bolt.Tx) error {
|
||||
if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
|
||||
t.Fatalf("unexpected value: %v", v)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db2.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
type failWriterError struct{}
|
||||
|
||||
func (failWriterError) Error() string {
|
||||
return "error injected for tests"
|
||||
}
|
||||
|
||||
type failWriter struct {
|
||||
// fail after this many bytes
|
||||
After int
|
||||
}
|
||||
|
||||
func (f *failWriter) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
if n > f.After {
|
||||
n = f.After
|
||||
err = failWriterError{}
|
||||
}
|
||||
f.After -= n
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Ensure that Copy handles write errors correctly.
|
||||
func TestTx_CopyFile_Error_Meta(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
return tx.Copy(&failWriter{})
|
||||
}); err == nil || err.Error() != "meta 0 copy: error injected for tests" {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that Copy handles write errors correctly.
|
||||
func TestTx_CopyFile_Error_Normal(t *testing.T) {
|
||||
db := MustOpenDB()
|
||||
defer db.MustClose()
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
return tx.Copy(&failWriter{3 * db.Info().PageSize})
|
||||
}); err == nil || err.Error() != "error injected for tests" {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleTx_Rollback() {
|
||||
// Open the database.
|
||||
db, err := bolt.Open(tempfile(), 0666, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.Remove(db.Path())
|
||||
|
||||
// Create a bucket.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Set a value for a key.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Update the key but roll back the transaction so it never saves.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Ensure that our original value is still set.
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||
fmt.Printf("The value for 'foo' is still: %s\n", value)
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Close database to release file lock.
|
||||
if err := db.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// The value for 'foo' is still: bar
|
||||
}
|
||||
|
||||
func ExampleTx_CopyFile() {
|
||||
// Open the database.
|
||||
db, err := bolt.Open(tempfile(), 0666, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.Remove(db.Path())
|
||||
|
||||
// Create a bucket and a key.
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Copy the database to another file.
|
||||
toFile := tempfile()
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
return tx.CopyFile(toFile, 0666)
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.Remove(toFile)
|
||||
|
||||
// Open the cloned database.
|
||||
db2, err := bolt.Open(toFile, 0666, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Ensure that the key exists in the copy.
|
||||
if err := db2.View(func(tx *bolt.Tx) error {
|
||||
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||
fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Close database to release file lock.
|
||||
if err := db.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db2.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// The value for 'foo' in the clone is: bar
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
||||
|
||||
go_import_path: gopkg.in/yaml.v2
|
|
@ -1,131 +0,0 @@
|
|||
# YAML support for the Go language
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
The yaml package enables Go programs to comfortably encode and decode YAML
|
||||
values. It was developed within [Canonical](https://www.canonical.com) as
|
||||
part of the [juju](https://juju.ubuntu.com) project, and is based on a
|
||||
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
|
||||
C library to parse and generate YAML data quickly and reliably.
|
||||
|
||||
Compatibility
|
||||
-------------
|
||||
|
||||
The yaml package supports most of YAML 1.1 and 1.2, including support for
|
||||
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
|
||||
implemented, and base-60 floats from YAML 1.1 are purposefully not
|
||||
supported since they're a poor design and are gone in YAML 1.2.
|
||||
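
As a quick illustration of the anchor and alias support mentioned above, here is a minimal sketch (not part of the upstream README, shown only as an example) that decodes an aliased scalar with this package:

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// A document using an anchor (&base) and an alias (*base).
var doc = `
base: &base shared value
copy: *base
`

func main() {
	var out map[string]string
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		log.Fatalf("error: %v", err)
	}
	// The alias resolves to the anchored scalar, so both keys decode
	// to the same string.
	fmt.Println(out["base"]) // shared value
	fmt.Println(out["copy"]) // shared value
}
```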
|
||||
Installation and usage
|
||||
----------------------
|
||||
|
||||
The import path for the package is *gopkg.in/yaml.v2*.
|
||||
|
||||
To install it, run:
|
||||
|
||||
go get gopkg.in/yaml.v2
|
||||
|
||||
API documentation
|
||||
-----------------
|
||||
|
||||
If opened in a browser, the import path itself leads to the API documentation:
|
||||
|
||||
* [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
|
||||
|
||||
API stability
|
||||
-------------
|
||||
|
||||
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
|
||||
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
|
||||
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
```Go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var data = `
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d: [3, 4]
|
||||
`
|
||||
|
||||
type T struct {
|
||||
A string
|
||||
B struct {
|
||||
RenamedC int `yaml:"c"`
|
||||
D []int `yaml:",flow"`
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
t := T{}
|
||||
|
||||
err := yaml.Unmarshal([]byte(data), &t)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- t:\n%v\n\n", t)
|
||||
|
||||
d, err := yaml.Marshal(&t)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- t dump:\n%s\n\n", string(d))
|
||||
|
||||
m := make(map[interface{}]interface{})
|
||||
|
||||
err = yaml.Unmarshal([]byte(data), &m)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- m:\n%v\n\n", m)
|
||||
|
||||
d, err = yaml.Marshal(&m)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- m dump:\n%s\n\n", string(d))
|
||||
}
|
||||
```
|
||||
|
||||
This example will generate the following output:
|
||||
|
||||
```
|
||||
--- t:
|
||||
{Easy! {2 [3 4]}}
|
||||
|
||||
--- t dump:
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d: [3, 4]
|
||||
|
||||
|
||||
--- m:
|
||||
map[a:Easy! b:map[c:2 d:[3 4]]]
|
||||
|
||||
--- m dump:
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d:
|
||||
- 3
|
||||
- 4
|
||||
```
|
||||
|
|
@ -1,998 +0,0 @@
|
|||
package yaml_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
. "gopkg.in/check.v1"
|
||||
"gopkg.in/yaml.v2"
|
||||
"math"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var unmarshalIntTest = 123
|
||||
|
||||
var unmarshalTests = []struct {
|
||||
data string
|
||||
value interface{}
|
||||
}{
|
||||
{
|
||||
"",
|
||||
&struct{}{},
|
||||
}, {
|
||||
"{}", &struct{}{},
|
||||
}, {
|
||||
"v: hi",
|
||||
map[string]string{"v": "hi"},
|
||||
}, {
|
||||
"v: hi", map[string]interface{}{"v": "hi"},
|
||||
}, {
|
||||
"v: true",
|
||||
map[string]string{"v": "true"},
|
||||
}, {
|
||||
"v: true",
|
||||
map[string]interface{}{"v": true},
|
||||
}, {
|
||||
"v: 10",
|
||||
map[string]interface{}{"v": 10},
|
||||
}, {
|
||||
"v: 0b10",
|
||||
map[string]interface{}{"v": 2},
|
||||
}, {
|
||||
"v: 0xA",
|
||||
map[string]interface{}{"v": 10},
|
||||
}, {
|
||||
"v: 4294967296",
|
||||
map[string]int64{"v": 4294967296},
|
||||
}, {
|
||||
"v: 0.1",
|
||||
map[string]interface{}{"v": 0.1},
|
||||
}, {
|
||||
"v: .1",
|
||||
map[string]interface{}{"v": 0.1},
|
||||
}, {
|
||||
"v: .Inf",
|
||||
map[string]interface{}{"v": math.Inf(+1)},
|
||||
}, {
|
||||
"v: -.Inf",
|
||||
map[string]interface{}{"v": math.Inf(-1)},
|
||||
}, {
|
||||
"v: -10",
|
||||
map[string]interface{}{"v": -10},
|
||||
}, {
|
||||
"v: -.1",
|
||||
map[string]interface{}{"v": -0.1},
|
||||
},
|
||||
|
||||
// Simple values.
|
||||
{
|
||||
"123",
|
||||
&unmarshalIntTest,
|
||||
},
|
||||
|
||||
// Floats from spec
|
||||
{
|
||||
"canonical: 6.8523e+5",
|
||||
map[string]interface{}{"canonical": 6.8523e+5},
|
||||
}, {
|
||||
"expo: 685.230_15e+03",
|
||||
map[string]interface{}{"expo": 685.23015e+03},
|
||||
}, {
|
||||
"fixed: 685_230.15",
|
||||
map[string]interface{}{"fixed": 685230.15},
|
||||
}, {
|
||||
"neginf: -.inf",
|
||||
map[string]interface{}{"neginf": math.Inf(-1)},
|
||||
}, {
|
||||
"fixed: 685_230.15",
|
||||
map[string]float64{"fixed": 685230.15},
|
||||
},
|
||||
//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
|
||||
//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
|
||||
|
||||
// Bools from spec
|
||||
{
|
||||
"canonical: y",
|
||||
map[string]interface{}{"canonical": true},
|
||||
}, {
|
||||
"answer: NO",
|
||||
map[string]interface{}{"answer": false},
|
||||
}, {
|
||||
"logical: True",
|
||||
map[string]interface{}{"logical": true},
|
||||
}, {
|
||||
"option: on",
|
||||
map[string]interface{}{"option": true},
|
||||
}, {
|
||||
"option: on",
|
||||
map[string]bool{"option": true},
|
||||
},
|
||||
// Ints from spec
|
||||
{
|
||||
"canonical: 685230",
|
||||
map[string]interface{}{"canonical": 685230},
|
||||
}, {
|
||||
"decimal: +685_230",
|
||||
map[string]interface{}{"decimal": 685230},
|
||||
}, {
|
||||
"octal: 02472256",
|
||||
map[string]interface{}{"octal": 685230},
|
||||
}, {
|
||||
"hexa: 0x_0A_74_AE",
|
||||
map[string]interface{}{"hexa": 685230},
|
||||
}, {
|
||||
"bin: 0b1010_0111_0100_1010_1110",
|
||||
map[string]interface{}{"bin": 685230},
|
||||
}, {
|
||||
"bin: -0b101010",
|
||||
map[string]interface{}{"bin": -42},
|
||||
}, {
|
||||
"decimal: +685_230",
|
||||
map[string]int{"decimal": 685230},
|
||||
},
|
||||
|
||||
//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
|
||||
|
||||
// Nulls from spec
|
||||
{
|
||||
"empty:",
|
||||
map[string]interface{}{"empty": nil},
|
||||
}, {
|
||||
"canonical: ~",
|
||||
map[string]interface{}{"canonical": nil},
|
||||
}, {
|
||||
"english: null",
|
||||
map[string]interface{}{"english": nil},
|
||||
}, {
|
||||
"~: null key",
|
||||
map[interface{}]string{nil: "null key"},
|
||||
}, {
|
||||
"empty:",
|
||||
map[string]*bool{"empty": nil},
|
||||
},
|
||||
|
||||
// Flow sequence
|
||||
{
|
||||
"seq: [A,B]",
|
||||
map[string]interface{}{"seq": []interface{}{"A", "B"}},
|
||||
}, {
|
||||
"seq: [A,B,C,]",
|
||||
map[string][]string{"seq": []string{"A", "B", "C"}},
|
||||
}, {
|
||||
"seq: [A,1,C]",
|
||||
map[string][]string{"seq": []string{"A", "1", "C"}},
|
||||
}, {
|
||||
"seq: [A,1,C]",
|
||||
map[string][]int{"seq": []int{1}},
|
||||
}, {
|
||||
"seq: [A,1,C]",
|
||||
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
|
||||
},
|
||||
// Block sequence
|
||||
{
|
||||
"seq:\n - A\n - B",
|
||||
map[string]interface{}{"seq": []interface{}{"A", "B"}},
|
||||
}, {
|
||||
"seq:\n - A\n - B\n - C",
|
||||
map[string][]string{"seq": []string{"A", "B", "C"}},
|
||||
}, {
|
||||
"seq:\n - A\n - 1\n - C",
|
||||
map[string][]string{"seq": []string{"A", "1", "C"}},
|
||||
}, {
|
||||
"seq:\n - A\n - 1\n - C",
|
||||
map[string][]int{"seq": []int{1}},
|
||||
}, {
|
||||
"seq:\n - A\n - 1\n - C",
|
||||
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
|
||||
},
|
||||
|
||||
// Literal block scalar
|
||||
{
|
||||
"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
|
||||
map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
|
||||
},
|
||||
|
||||
// Folded block scalar
|
||||
{
|
||||
"scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
|
||||
map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
|
||||
},
|
||||
|
||||
// Map inside interface with no type hints.
|
||||
{
|
||||
"a: {b: c}",
|
||||
map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
|
||||
},
|
||||
|
||||
// Structs and type conversions.
|
||||
{
|
||||
"hello: world",
|
||||
&struct{ Hello string }{"world"},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A struct{ B string } }{struct{ B string }{"c"}},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A map[string]string }{map[string]string{"b": "c"}},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
|
||||
}, {
|
||||
"a:",
|
||||
&struct{ A map[string]string }{},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct{ A int }{1},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct{ A float64 }{1},
|
||||
}, {
|
||||
"a: 1.0",
|
||||
&struct{ A int }{1},
|
||||
}, {
|
||||
"a: 1.0",
|
||||
&struct{ A uint }{1},
|
||||
}, {
|
||||
"a: [1, 2]",
|
||||
&struct{ A []int }{[]int{1, 2}},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct{ B int }{0},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct {
|
||||
B int "a"
|
||||
}{1},
|
||||
}, {
|
||||
"a: y",
|
||||
&struct{ A bool }{true},
|
||||
},
|
||||
|
||||
// Some cross type conversions
|
||||
{
|
||||
"v: 42",
|
||||
map[string]uint{"v": 42},
|
||||
}, {
|
||||
"v: -42",
|
||||
map[string]uint{},
|
||||
}, {
|
||||
"v: 4294967296",
|
||||
map[string]uint64{"v": 4294967296},
|
||||
}, {
|
||||
"v: -4294967296",
|
||||
map[string]uint64{},
|
||||
},
|
||||
|
||||
// int
|
||||
{
|
||||
"int_max: 2147483647",
|
||||
map[string]int{"int_max": math.MaxInt32},
|
||||
},
|
||||
{
|
||||
"int_min: -2147483648",
|
||||
map[string]int{"int_min": math.MinInt32},
|
||||
},
|
||||
{
|
||||
"int_overflow: 9223372036854775808", // math.MaxInt64 + 1
|
||||
map[string]int{},
|
||||
},
|
||||
|
||||
// int64
|
||||
{
|
||||
"int64_max: 9223372036854775807",
|
||||
map[string]int64{"int64_max": math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
|
||||
map[string]int64{"int64_max_base2": math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"int64_min: -9223372036854775808",
|
||||
map[string]int64{"int64_min": math.MinInt64},
|
||||
},
|
||||
{
|
||||
"int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
|
||||
map[string]int64{"int64_neg_base2": -math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
|
||||
map[string]int64{},
|
||||
},
|
||||
|
||||
// uint
|
||||
{
|
||||
"uint_min: 0",
|
||||
map[string]uint{"uint_min": 0},
|
||||
},
|
||||
{
|
||||
"uint_max: 4294967295",
|
||||
map[string]uint{"uint_max": math.MaxUint32},
|
||||
},
|
||||
{
|
||||
"uint_underflow: -1",
|
||||
map[string]uint{},
|
||||
},
|
||||
|
||||
// uint64
|
||||
{
|
||||
"uint64_min: 0",
|
||||
map[string]uint{"uint64_min": 0},
|
||||
},
|
||||
{
|
||||
"uint64_max: 18446744073709551615",
|
||||
map[string]uint64{"uint64_max": math.MaxUint64},
|
||||
},
|
||||
{
|
||||
"uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
map[string]uint64{"uint64_max_base2": math.MaxUint64},
|
||||
},
|
||||
{
|
||||
"uint64_maxint64: 9223372036854775807",
|
||||
map[string]uint64{"uint64_maxint64": math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"uint64_underflow: -1",
|
||||
map[string]uint64{},
|
||||
},
|
||||
|
||||
// float32
|
||||
{
|
||||
"float32_max: 3.40282346638528859811704183484516925440e+38",
|
||||
map[string]float32{"float32_max": math.MaxFloat32},
|
||||
},
|
||||
{
|
||||
"float32_nonzero: 1.401298464324817070923729583289916131280e-45",
|
||||
map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
|
||||
},
|
||||
{
|
||||
"float32_maxuint64: 18446744073709551615",
|
||||
map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
|
||||
},
|
||||
{
|
||||
"float32_maxuint64+1: 18446744073709551616",
|
||||
map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
|
||||
},
|
||||
|
||||
// float64
|
||||
{
|
||||
"float64_max: 1.797693134862315708145274237317043567981e+308",
|
||||
map[string]float64{"float64_max": math.MaxFloat64},
|
||||
},
|
||||
{
|
||||
"float64_nonzero: 4.940656458412465441765687928682213723651e-324",
|
||||
map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
|
||||
},
|
||||
{
|
||||
"float64_maxuint64: 18446744073709551615",
|
||||
map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
|
||||
},
|
||||
{
|
||||
"float64_maxuint64+1: 18446744073709551616",
|
||||
map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
|
||||
},
|
||||
|
||||
// Overflow cases.
|
||||
{
|
||||
"v: 4294967297",
|
||||
map[string]int32{},
|
||||
}, {
|
||||
"v: 128",
|
||||
map[string]int8{},
|
||||
},
|
||||
|
||||
// Quoted values.
|
||||
{
|
||||
"'1': '\"2\"'",
|
||||
map[interface{}]interface{}{"1": "\"2\""},
|
||||
}, {
|
||||
"v:\n- A\n- 'B\n\n C'\n",
|
||||
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||
},
|
||||
|
||||
// Explicit tags.
|
||||
{
|
||||
"v: !!float '1.1'",
|
||||
map[string]interface{}{"v": 1.1},
|
||||
}, {
|
||||
"v: !!null ''",
|
||||
map[string]interface{}{"v": nil},
|
||||
}, {
|
||||
"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
|
||||
map[string]interface{}{"v": 1},
|
||||
},
|
||||
|
||||
// Anchors and aliases.
|
||||
{
|
||||
"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
|
||||
&struct{ A, B, C, D int }{1, 2, 1, 2},
|
||||
}, {
|
||||
"a: &a {c: 1}\nb: *a",
|
||||
&struct {
|
||||
A, B struct {
|
||||
C int
|
||||
}
|
||||
}{struct{ C int }{1}, struct{ C int }{1}},
|
||||
}, {
|
||||
"a: &a [1, 2]\nb: *a",
|
||||
&struct{ B []int }{[]int{1, 2}},
|
||||
}, {
|
||||
"b: *a\na: &a {c: 1}",
|
||||
&struct {
|
||||
A, B struct {
|
||||
C int
|
||||
}
|
||||
}{struct{ C int }{1}, struct{ C int }{1}},
|
||||
},
|
||||
|
||||
// Bug #1133337
|
||||
{
|
||||
"foo: ''",
|
||||
map[string]*string{"foo": new(string)},
|
||||
}, {
|
||||
"foo: null",
|
||||
map[string]string{"foo": ""},
|
||||
}, {
|
||||
"foo: null",
|
||||
map[string]interface{}{"foo": nil},
|
||||
},
|
||||
|
||||
// Ignored field
|
||||
{
|
||||
"a: 1\nb: 2\n",
|
||||
&struct {
|
||||
A int
|
||||
B int "-"
|
||||
}{1, 0},
|
||||
},
|
||||
|
||||
// Bug #1191981
|
||||
{
|
||||
"" +
|
||||
"%YAML 1.1\n" +
|
||||
"--- !!str\n" +
|
||||
`"Generic line break (no glyph)\n\` + "\n" +
|
||||
` Generic line break (glyphed)\n\` + "\n" +
|
||||
` Line separator\u2028\` + "\n" +
|
||||
` Paragraph separator\u2029"` + "\n",
|
||||
"" +
|
||||
"Generic line break (no glyph)\n" +
|
||||
"Generic line break (glyphed)\n" +
|
||||
"Line separator\u2028Paragraph separator\u2029",
|
||||
},
|
||||
|
||||
// Struct inlining
|
||||
{
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
&struct {
|
||||
A int
|
||||
C inlineB `yaml:",inline"`
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
},
|
||||
|
||||
// Map inlining
|
||||
{
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
&struct {
|
||||
A int
|
||||
C map[string]int `yaml:",inline"`
|
||||
}{1, map[string]int{"b": 2, "c": 3}},
|
||||
},
|
||||
|
||||
// bug 1243827
|
||||
{
|
||||
"a: -b_c",
|
||||
map[string]interface{}{"a": "-b_c"},
|
||||
},
|
||||
{
|
||||
"a: +b_c",
|
||||
map[string]interface{}{"a": "+b_c"},
|
||||
},
|
||||
{
|
||||
"a: 50cent_of_dollar",
|
||||
map[string]interface{}{"a": "50cent_of_dollar"},
|
||||
},
|
||||
|
||||
// Duration
|
||||
{
|
||||
"a: 3s",
|
||||
map[string]time.Duration{"a": 3 * time.Second},
|
||||
},
|
||||
|
||||
// Issue #24.
|
||||
{
|
||||
"a: <foo>",
|
||||
map[string]string{"a": "<foo>"},
|
||||
},
|
||||
|
||||
// Base 60 floats are obsolete and unsupported.
|
||||
{
|
||||
"a: 1:1\n",
|
||||
map[string]string{"a": "1:1"},
|
||||
},
|
||||
|
||||
// Binary data.
|
||||
{
|
||||
"a: !!binary gIGC\n",
|
||||
map[string]string{"a": "\x80\x81\x82"},
|
||||
}, {
|
||||
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||
}, {
|
||||
"a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
|
||||
map[string]string{"a": strings.Repeat("\x00", 52)},
|
||||
},
|
||||
|
||||
// Ordered maps.
|
||||
{
|
||||
"{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
|
||||
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
|
||||
},
|
||||
|
||||
// Issue #39.
|
||||
{
|
||||
"a:\n b:\n c: d\n",
|
||||
map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
|
||||
},
|
||||
|
||||
// Custom map type.
|
||||
{
|
||||
"a: {b: c}",
|
||||
M{"a": M{"b": "c"}},
|
||||
},
|
||||
|
||||
// Support encoding.TextUnmarshaler.
|
||||
{
|
||||
"a: 1.2.3.4\n",
|
||||
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
|
||||
},
|
||||
{
|
||||
"a: 2015-02-24T18:19:39Z\n",
|
||||
map[string]time.Time{"a": time.Unix(1424801979, 0).In(time.UTC)},
|
||||
},
|
||||
|
||||
// Encode empty lists as zero-length slices.
|
||||
{
|
||||
"a: []",
|
||||
&struct{ A []int }{[]int{}},
|
||||
},
|
||||
|
||||
// UTF-16-LE
|
||||
{
|
||||
"\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00",
|
||||
M{"ñoño": "very yes"},
|
||||
},
|
||||
// UTF-16-LE with surrogate.
|
||||
{
|
||||
"\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00",
|
||||
M{"ñoño": "very yes 🟔"},
|
||||
},
|
||||
|
||||
// UTF-16-BE
|
||||
{
|
||||
"\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n",
|
||||
M{"ñoño": "very yes"},
|
||||
},
|
||||
// UTF-16-BE with surrogate.
|
||||
{
|
||||
"\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n",
|
||||
M{"ñoño": "very yes 🟔"},
|
||||
},
|
||||
|
||||
// YAML Float regex shouldn't match this
|
||||
{
|
||||
"a: 123456e1\n",
|
||||
M{"a": "123456e1"},
|
||||
}, {
|
||||
"a: 123456E1\n",
|
||||
M{"a": "123456E1"},
|
||||
},
|
||||
}
|
||||
|
||||
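// M is the generic map type used by several of the fixtures above.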
type M map[interface{}]interface{}
|
||||
|
||||
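// inlineB and inlineC back the ",inline" fixtures above: their fields are
// promoted into the enclosing mapping when decoding.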
type inlineB struct {
|
||||
B int
|
||||
inlineC `yaml:",inline"`
|
||||
}
|
||||
|
||||
type inlineC struct {
|
||||
C int
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshal(c *C) {
|
||||
for _, item := range unmarshalTests {
|
||||
t := reflect.ValueOf(item.value).Type()
|
||||
var value interface{}
|
||||
switch t.Kind() {
|
||||
case reflect.Map:
|
||||
value = reflect.MakeMap(t).Interface()
|
||||
case reflect.String:
|
||||
value = reflect.New(t).Interface()
|
||||
case reflect.Ptr:
|
||||
value = reflect.New(t.Elem()).Interface()
|
||||
default:
|
||||
c.Fatalf("missing case for %s", t)
|
||||
}
|
||||
err := yaml.Unmarshal([]byte(item.data), value)
|
||||
if _, ok := err.(*yaml.TypeError); !ok {
|
||||
c.Assert(err, IsNil)
|
||||
}
|
||||
if t.Kind() == reflect.String {
|
||||
c.Assert(*value.(*string), Equals, item.value)
|
||||
} else {
|
||||
c.Assert(value, DeepEquals, item.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalNaN(c *C) {
|
||||
value := map[string]interface{}{}
|
||||
err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
||||
}
|
||||
|
||||
var unmarshalErrorTests = []struct {
|
||||
data, error string
|
||||
}{
|
||||
{"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
|
||||
{"v: [A,", "yaml: line 1: did not find expected node content"},
|
||||
{"v:\n- [A,", "yaml: line 2: did not find expected node content"},
|
||||
{"a: *b\n", "yaml: unknown anchor 'b' referenced"},
|
||||
{"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
|
||||
{"value: -", "yaml: block sequence entries are not allowed in this context"},
|
||||
{"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
|
||||
{"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
|
||||
{"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalErrors(c *C) {
|
||||
for _, item := range unmarshalErrorTests {
|
||||
var value interface{}
|
||||
err := yaml.Unmarshal([]byte(item.data), &value)
|
||||
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
||||
}
|
||||
}
|
||||
|
||||
var unmarshalerTests = []struct {
|
||||
data, tag string
|
||||
value interface{}
|
||||
}{
|
||||
{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
|
||||
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
||||
{"_: 10", "!!int", 10},
|
||||
{"_: null", "!!null", nil},
|
||||
{`_: BAR!`, "!!str", "BAR!"},
|
||||
{`_: "BAR!"`, "!!str", "BAR!"},
|
||||
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
||||
{`_: ""`, "!!str", ""},
|
||||
}
|
||||
|
||||
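// unmarshalerResult maps a decoded integer value to an error that
// unmarshalerType.UnmarshalYAML returns for it, letting tests inject failures.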
var unmarshalerResult = map[int]error{}
|
||||
|
||||
type unmarshalerType struct {
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
|
||||
if err := unmarshal(&o.value); err != nil {
|
||||
return err
|
||||
}
|
||||
if i, ok := o.value.(int); ok {
|
||||
if result, ok := unmarshalerResult[i]; ok {
|
||||
return result
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
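// Illustrative sketch (not part of the original vendored tests): the
// yaml.Unmarshaler contract exercised above passes a closure that decodes the
// current node into any Go value, and the closure may be retried with a
// different target type. portSpec is a hypothetical user-defined type that
// accepts either `port: 8080` or `port: "8080"`.
type portSpec struct {
	Port int
}

func (p *portSpec) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var n int
	if err := unmarshal(&n); err == nil { // plain integer form
		p.Port = n
		return nil
	}
	var s string
	if err := unmarshal(&s); err != nil { // quoted string form
		return err
	}
	n, err := strconv.Atoi(s) // would need "strconv" added to this file's imports
	if err != nil {
		return err
	}
	p.Port = n
	return nil
}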
|
||||
|
||||
type unmarshalerPointer struct {
|
||||
Field *unmarshalerType "_"
|
||||
}
|
||||
|
||||
type unmarshalerValue struct {
|
||||
Field unmarshalerType "_"
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerPointerField(c *C) {
|
||||
for _, item := range unmarshalerTests {
|
||||
obj := &unmarshalerPointer{}
|
||||
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
if item.value == nil {
|
||||
c.Assert(obj.Field, IsNil)
|
||||
} else {
|
||||
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerValueField(c *C) {
|
||||
for _, item := range unmarshalerTests {
|
||||
obj := &unmarshalerValue{}
|
||||
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerWholeDocument(c *C) {
|
||||
obj := &unmarshalerType{}
|
||||
err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
value, ok := obj.value.(map[interface{}]interface{})
|
||||
c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
|
||||
c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerTypeError(c *C) {
|
||||
unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
|
||||
unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
|
||||
defer func() {
|
||||
delete(unmarshalerResult, 2)
|
||||
delete(unmarshalerResult, 4)
|
||||
}()
|
||||
|
||||
type T struct {
|
||||
Before int
|
||||
After int
|
||||
M map[string]*unmarshalerType
|
||||
}
|
||||
var v T
|
||||
data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
|
||||
err := yaml.Unmarshal([]byte(data), &v)
|
||||
c.Assert(err, ErrorMatches, ""+
|
||||
"yaml: unmarshal errors:\n"+
|
||||
" line 1: cannot unmarshal !!str `A` into int\n"+
|
||||
" foo\n"+
|
||||
" bar\n"+
|
||||
" line 1: cannot unmarshal !!str `B` into int")
|
||||
c.Assert(v.M["abc"], NotNil)
|
||||
c.Assert(v.M["def"], IsNil)
|
||||
c.Assert(v.M["ghi"], NotNil)
|
||||
c.Assert(v.M["jkl"], IsNil)
|
||||
|
||||
c.Assert(v.M["abc"].value, Equals, 1)
|
||||
c.Assert(v.M["ghi"].value, Equals, 3)
|
||||
}
|
||||
|
||||
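// proxyTypeError decodes into deliberately mismatched numeric types so the
// resulting type errors propagate out of UnmarshalYAML; see
// TestUnmarshalerTypeErrorProxying below.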
type proxyTypeError struct{}
|
||||
|
||||
func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
var a int32
|
||||
var b int64
|
||||
if err := unmarshal(&s); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if s == "a" {
|
||||
if err := unmarshal(&b); err == nil {
|
||||
panic("should have failed")
|
||||
}
|
||||
return unmarshal(&a)
|
||||
}
|
||||
if err := unmarshal(&a); err == nil {
|
||||
panic("should have failed")
|
||||
}
|
||||
return unmarshal(&b)
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
|
||||
type T struct {
|
||||
Before int
|
||||
After int
|
||||
M map[string]*proxyTypeError
|
||||
}
|
||||
var v T
|
||||
data := `{before: A, m: {abc: a, def: b}, after: B}`
|
||||
err := yaml.Unmarshal([]byte(data), &v)
|
||||
c.Assert(err, ErrorMatches, ""+
|
||||
"yaml: unmarshal errors:\n"+
|
||||
" line 1: cannot unmarshal !!str `A` into int\n"+
|
||||
" line 1: cannot unmarshal !!str `a` into int32\n"+
|
||||
" line 1: cannot unmarshal !!str `b` into int64\n"+
|
||||
" line 1: cannot unmarshal !!str `B` into int")
|
||||
}
|
||||
|
||||
type failingUnmarshaler struct{}
|
||||
|
||||
var failingErr = errors.New("failingErr")
|
||||
|
||||
func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
return failingErr
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerError(c *C) {
|
||||
err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
|
||||
c.Assert(err, Equals, failingErr)
|
||||
}
|
||||
|
||||
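// sliceUnmarshaler accepts either a sequence or a single scalar by retrying
// the unmarshal callback with a different target type.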
type sliceUnmarshaler []int
|
||||
|
||||
func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var slice []int
|
||||
err := unmarshal(&slice)
|
||||
if err == nil {
|
||||
*su = slice
|
||||
return nil
|
||||
}
|
||||
|
||||
var intVal int
|
||||
err = unmarshal(&intVal)
|
||||
if err == nil {
|
||||
*su = []int{intVal}
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerRetry(c *C) {
|
||||
var su sliceUnmarshaler
|
||||
err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
|
||||
|
||||
err = yaml.Unmarshal([]byte("1"), &su)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
|
||||
}
|
||||
|
||||
// From http://yaml.org/type/merge.html
|
||||
var mergeTests = `
|
||||
anchors:
|
||||
list:
|
||||
- &CENTER { "x": 1, "y": 2 }
|
||||
- &LEFT { "x": 0, "y": 2 }
|
||||
- &BIG { "r": 10 }
|
||||
- &SMALL { "r": 1 }
|
||||
|
||||
# All the following maps are equal:
|
||||
|
||||
plain:
|
||||
# Explicit keys
|
||||
"x": 1
|
||||
"y": 2
|
||||
"r": 10
|
||||
label: center/big
|
||||
|
||||
mergeOne:
|
||||
# Merge one map
|
||||
<< : *CENTER
|
||||
"r": 10
|
||||
label: center/big
|
||||
|
||||
mergeMultiple:
|
||||
# Merge multiple maps
|
||||
<< : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
override:
|
||||
# Override
|
||||
<< : [ *BIG, *LEFT, *SMALL ]
|
||||
"x": 1
|
||||
label: center/big
|
||||
|
||||
shortTag:
|
||||
# Explicit short merge tag
|
||||
!!merge "<<" : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
longTag:
|
||||
# Explicit merge long tag
|
||||
!<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
inlineMap:
|
||||
# Inlined map
|
||||
<< : {"x": 1, "y": 2, "r": 10}
|
||||
label: center/big
|
||||
|
||||
inlineSequenceMap:
|
||||
# Inlined map in sequence
|
||||
<< : [ *CENTER, {"r": 10} ]
|
||||
label: center/big
|
||||
`
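// Illustrative sketch (assumed example, separate from the fixture above): keys
// written directly on a mapping win over keys pulled in through the "<<"
// merge key, so decoding
//
//	doc := []byte("base: &base {x: 1, y: 2}\npoint:\n  <<: *base\n  y: 5\n")
//	var out struct{ Point struct{ X, Y int } }
//	err := yaml.Unmarshal(doc, &out) // err == nil
//
// leaves out.Point.X == 1 (merged from *base) and out.Point.Y == 5 (local
// override), mirroring what TestMerge and TestMergeStruct assert below.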
|
||||
|
||||
func (s *S) TestMerge(c *C) {
|
||||
var want = map[interface{}]interface{}{
|
||||
"x": 1,
|
||||
"y": 2,
|
||||
"r": 10,
|
||||
"label": "center/big",
|
||||
}
|
||||
|
||||
var m map[interface{}]interface{}
|
||||
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||
c.Assert(err, IsNil)
|
||||
for name, test := range m {
|
||||
if name == "anchors" {
|
||||
continue
|
||||
}
|
||||
c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMergeStruct(c *C) {
|
||||
type Data struct {
|
||||
X, Y, R int
|
||||
Label string
|
||||
}
|
||||
want := Data{1, 2, 10, "center/big"}
|
||||
|
||||
var m map[string]Data
|
||||
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||
c.Assert(err, IsNil)
|
||||
for name, test := range m {
|
||||
if name == "anchors" {
|
||||
continue
|
||||
}
|
||||
c.Assert(test, Equals, want, Commentf("test %q failed", name))
|
||||
}
|
||||
}
|
||||
|
||||
var unmarshalNullTests = []func() interface{}{
|
||||
func() interface{} { var v interface{}; v = "v"; return &v },
|
||||
func() interface{} { var s = "s"; return &s },
|
||||
func() interface{} { var s = "s"; sptr := &s; return &sptr },
|
||||
func() interface{} { var i = 1; return &i },
|
||||
func() interface{} { var i = 1; iptr := &i; return &iptr },
|
||||
func() interface{} { m := map[string]int{"s": 1}; return &m },
|
||||
func() interface{} { m := map[string]int{"s": 1}; return m },
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalNull(c *C) {
|
||||
for _, test := range unmarshalNullTests {
|
||||
item := test()
|
||||
zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
|
||||
err := yaml.Unmarshal([]byte("null"), item)
|
||||
c.Assert(err, IsNil)
|
||||
if reflect.TypeOf(item).Kind() == reflect.Map {
|
||||
c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
|
||||
} else {
|
||||
c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalSliceOnPreset(c *C) {
|
||||
// Issue #48.
|
||||
v := struct{ A []int }{[]int{1}}
|
||||
yaml.Unmarshal([]byte("a: [2]"), &v)
|
||||
c.Assert(v.A, DeepEquals, []int{2})
|
||||
}
|
||||
|
||||
//var data []byte
|
||||
//func init() {
|
||||
// var err error
|
||||
// data, err = ioutil.ReadFile("/tmp/file.yaml")
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//func (s *S) BenchmarkUnmarshal(c *C) {
|
||||
// var err error
|
||||
// for i := 0; i < c.N; i++ {
|
||||
// var v map[string]interface{}
|
||||
// err = yaml.Unmarshal(data, &v)
|
||||
// }
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//func (s *S) BenchmarkMarshal(c *C) {
|
||||
// var v map[string]interface{}
|
||||
// yaml.Unmarshal(data, &v)
|
||||
// c.ResetTimer()
|
||||
// for i := 0; i < c.N; i++ {
|
||||
// yaml.Marshal(&v)
|
||||
// }
|
||||
//}
|
|
@@ -1,501 +0,0 @@
|
|||
package yaml_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
"gopkg.in/yaml.v2"
|
||||
"net"
|
||||
"os"
|
||||
)
|
||||
|
||||
var marshalIntTest = 123
|
||||
|
||||
var marshalTests = []struct {
|
||||
value interface{}
|
||||
data string
|
||||
}{
|
||||
{
|
||||
nil,
|
||||
"null\n",
|
||||
}, {
|
||||
&struct{}{},
|
||||
"{}\n",
|
||||
}, {
|
||||
map[string]string{"v": "hi"},
|
||||
"v: hi\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": "hi"},
|
||||
"v: hi\n",
|
||||
}, {
|
||||
map[string]string{"v": "true"},
|
||||
"v: \"true\"\n",
|
||||
}, {
|
||||
map[string]string{"v": "false"},
|
||||
"v: \"false\"\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": true},
|
||||
"v: true\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": false},
|
||||
"v: false\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": 10},
|
||||
"v: 10\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": -10},
|
||||
"v: -10\n",
|
||||
}, {
|
||||
map[string]uint{"v": 42},
|
||||
"v: 42\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": int64(4294967296)},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]int64{"v": int64(4294967296)},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]uint64{"v": 4294967296},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": "10"},
|
||||
"v: \"10\"\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": 0.1},
|
||||
"v: 0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": float64(0.1)},
|
||||
"v: 0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": -0.1},
|
||||
"v: -0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.Inf(+1)},
|
||||
"v: .inf\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.Inf(-1)},
|
||||
"v: -.inf\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.NaN()},
|
||||
"v: .nan\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": nil},
|
||||
"v: null\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": ""},
|
||||
"v: \"\"\n",
|
||||
}, {
|
||||
map[string][]string{"v": []string{"A", "B"}},
|
||||
"v:\n- A\n- B\n",
|
||||
}, {
|
||||
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||
"v:\n- A\n- |-\n B\n C\n",
|
||||
}, {
|
||||
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
|
||||
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
|
||||
}, {
|
||||
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
map[string]interface{}{"a": "-"},
|
||||
"a: '-'\n",
|
||||
},
|
||||
|
||||
// Simple values.
|
||||
{
|
||||
&marshalIntTest,
|
||||
"123\n",
|
||||
},
|
||||
|
||||
// Structures
|
||||
{
|
||||
&struct{ Hello string }{"world"},
|
||||
"hello: world\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct {
|
||||
B string
|
||||
}
|
||||
}{struct{ B string }{"c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct {
|
||||
B string
|
||||
}
|
||||
}{&struct{ B string }{"c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct {
|
||||
B string
|
||||
}
|
||||
}{},
|
||||
"a: null\n",
|
||||
}, {
|
||||
&struct{ A int }{1},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct{ A []int }{[]int{1, 2}},
|
||||
"a:\n- 1\n- 2\n",
|
||||
}, {
|
||||
&struct {
|
||||
B int "a"
|
||||
}{1},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct{ A bool }{true},
|
||||
"a: true\n",
|
||||
},
|
||||
|
||||
// Conditional flag
|
||||
{
|
||||
&struct {
|
||||
A int "a,omitempty"
|
||||
B int "b,omitempty"
|
||||
}{1, 0},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct {
|
||||
A int "a,omitempty"
|
||||
B int "b,omitempty"
|
||||
}{0, 0},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{&struct{ X, y int }{1, 2}},
|
||||
"a: {x: 1}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{nil},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{&struct{ X, y int }{}},
|
||||
"a: {x: 0}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct{ X, y int } "a,omitempty,flow"
|
||||
}{struct{ X, y int }{1, 2}},
|
||||
"a: {x: 1}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct{ X, y int } "a,omitempty,flow"
|
||||
}{struct{ X, y int }{0, 1}},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A float64 "a,omitempty"
|
||||
B float64 "b,omitempty"
|
||||
}{1, 0},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Flow flag
|
||||
{
|
||||
&struct {
|
||||
A []int "a,flow"
|
||||
}{[]int{1, 2}},
|
||||
"a: [1, 2]\n",
|
||||
}, {
|
||||
&struct {
|
||||
A map[string]string "a,flow"
|
||||
}{map[string]string{"b": "c", "d": "e"}},
|
||||
"a: {b: c, d: e}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct {
|
||||
B, D string
|
||||
} "a,flow"
|
||||
}{struct{ B, D string }{"c", "e"}},
|
||||
"a: {b: c, d: e}\n",
|
||||
},
|
||||
|
||||
// Unexported field
|
||||
{
|
||||
&struct {
|
||||
u int
|
||||
A int
|
||||
}{0, 1},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Ignored field
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
B int "-"
|
||||
}{1, 2},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Struct inlining
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
C inlineB `yaml:",inline"`
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
},
|
||||
|
||||
// Map inlining
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
C map[string]int `yaml:",inline"`
|
||||
}{1, map[string]int{"b": 2, "c": 3}},
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
},
|
||||
|
||||
// Duration
|
||||
{
|
||||
map[string]time.Duration{"a": 3 * time.Second},
|
||||
"a: 3s\n",
|
||||
},
|
||||
|
||||
// Issue #24: bug in map merging logic.
|
||||
{
|
||||
map[string]string{"a": "<foo>"},
|
||||
"a: <foo>\n",
|
||||
},
|
||||
|
||||
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
|
||||
// with old YAML 1.1 parsers.
|
||||
{
|
||||
map[string]string{"a": "1:1"},
|
||||
"a: \"1:1\"\n",
|
||||
},
|
||||
|
||||
// Binary data.
|
||||
{
|
||||
map[string]string{"a": "\x00"},
|
||||
"a: \"\\0\"\n",
|
||||
}, {
|
||||
map[string]string{"a": "\x80\x81\x82"},
|
||||
"a: !!binary gIGC\n",
|
||||
}, {
|
||||
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||
},
|
||||
|
||||
// Ordered maps.
|
||||
{
|
||||
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
|
||||
"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
|
||||
},
|
||||
|
||||
// Encode unicode as utf-8 rather than in escaped form.
|
||||
{
|
||||
map[string]string{"a": "你好"},
|
||||
"a: 你好\n",
|
||||
},
|
||||
|
||||
// Support encoding.TextMarshaler.
|
||||
{
|
||||
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
|
||||
"a: 1.2.3.4\n",
|
||||
},
|
||||
{
|
||||
map[string]time.Time{"a": time.Unix(1424801979, 0)},
|
||||
"a: 2015-02-24T18:19:39Z\n",
|
||||
},
|
||||
|
||||
// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
|
||||
{
|
||||
map[string]string{"a": "b: c"},
|
||||
"a: 'b: c'\n",
|
||||
},
|
||||
|
||||
// Strings containing a hash mark ('#') should be quoted
|
||||
{
|
||||
map[string]string{"a": "Hello #comment"},
|
||||
"a: 'Hello #comment'\n",
|
||||
},
|
||||
{
|
||||
map[string]string{"a": "你好 #comment"},
|
||||
"a: '你好 #comment'\n",
|
||||
},
|
||||
}
|
||||
|
||||
func (s *S) TestMarshal(c *C) {
|
||||
defer os.Setenv("TZ", os.Getenv("TZ"))
|
||||
os.Setenv("TZ", "UTC")
|
||||
for _, item := range marshalTests {
|
||||
data, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, item.data)
|
||||
}
|
||||
}
|
||||
|
||||
var marshalErrorTests = []struct {
|
||||
value interface{}
|
||||
error string
|
||||
panic string
|
||||
}{{
|
||||
value: &struct {
|
||||
B int
|
||||
inlineB ",inline"
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
|
||||
}, {
|
||||
value: &struct {
|
||||
A int
|
||||
B map[string]int ",inline"
|
||||
}{1, map[string]int{"a": 2}},
|
||||
panic: `Can't have key "a" in inlined map; conflicts with struct field`,
|
||||
}}
|
||||
|
||||
func (s *S) TestMarshalErrors(c *C) {
|
||||
for _, item := range marshalErrorTests {
|
||||
if item.panic != "" {
|
||||
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
|
||||
} else {
|
||||
_, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, ErrorMatches, item.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalTypeCache(c *C) {
|
||||
var data []byte
|
||||
var err error
|
||||
func() {
|
||||
type T struct{ A int }
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
func() {
|
||||
type T struct{ B int }
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
c.Assert(string(data), Equals, "b: 0\n")
|
||||
}
|
||||
|
||||
var marshalerTests = []struct {
|
||||
data string
|
||||
value interface{}
|
||||
}{
|
||||
{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
|
||||
{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
|
||||
{"_: 10\n", 10},
|
||||
{"_: null\n", nil},
|
||||
{"_: BAR!\n", "BAR!"},
|
||||
}
|
||||
|
||||
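// marshalerType implements both encoding.TextMarshaler and yaml.Marshaler;
// MarshalText panics so the tests prove that MarshalYAML takes precedence.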
type marshalerType struct {
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (o marshalerType) MarshalText() ([]byte, error) {
|
||||
panic("MarshalText called on type with MarshalYAML")
|
||||
}
|
||||
|
||||
func (o marshalerType) MarshalYAML() (interface{}, error) {
|
||||
return o.value, nil
|
||||
}
|
||||
|
||||
type marshalerValue struct {
|
||||
Field marshalerType "_"
|
||||
}
|
||||
|
||||
func (s *S) TestMarshaler(c *C) {
|
||||
for _, item := range marshalerTests {
|
||||
obj := &marshalerValue{}
|
||||
obj.Field.value = item.value
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, string(item.data))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalerWholeDocument(c *C) {
|
||||
obj := &marshalerType{}
|
||||
obj.value = map[string]string{"hello": "world!"}
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, "hello: world!\n")
|
||||
}
|
||||
|
||||
type failingMarshaler struct{}
|
||||
|
||||
func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
|
||||
return nil, failingErr
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalerError(c *C) {
|
||||
_, err := yaml.Marshal(&failingMarshaler{})
|
||||
c.Assert(err, Equals, failingErr)
|
||||
}
|
||||
|
||||
func (s *S) TestSortedOutput(c *C) {
|
||||
order := []interface{}{
|
||||
false,
|
||||
true,
|
||||
1,
|
||||
uint(1),
|
||||
1.0,
|
||||
1.1,
|
||||
1.2,
|
||||
2,
|
||||
uint(2),
|
||||
2.0,
|
||||
2.1,
|
||||
"",
|
||||
".1",
|
||||
".2",
|
||||
".a",
|
||||
"1",
|
||||
"2",
|
||||
"a!10",
|
||||
"a/2",
|
||||
"a/10",
|
||||
"a~10",
|
||||
"ab/1",
|
||||
"b/1",
|
||||
"b/01",
|
||||
"b/2",
|
||||
"b/02",
|
||||
"b/3",
|
||||
"b/03",
|
||||
"b1",
|
||||
"b01",
|
||||
"b3",
|
||||
"c2.10",
|
||||
"c10.2",
|
||||
"d1",
|
||||
"d12",
|
||||
"d12a",
|
||||
}
|
||||
m := make(map[interface{}]int)
|
||||
for _, k := range order {
|
||||
m[k] = 1
|
||||
}
|
||||
data, err := yaml.Marshal(m)
|
||||
c.Assert(err, IsNil)
|
||||
out := "\n" + string(data)
|
||||
last := 0
|
||||
for i, k := range order {
|
||||
repr := fmt.Sprint(k)
|
||||
if s, ok := k.(string); ok {
|
||||
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
|
||||
repr = `"` + repr + `"`
|
||||
}
|
||||
}
|
||||
index := strings.Index(out, "\n"+repr+":")
|
||||
if index == -1 {
|
||||
c.Fatalf("%#v is not in the output: %#v", k, out)
|
||||
}
|
||||
if index < last {
|
||||
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
|
||||
}
|
||||
last = index
|
||||
}
|
||||
}
|
|
@@ -1,12 +0,0 @@
|
|||
package yaml_test
|
||||
|
||||
import (
|
||||
. "gopkg.in/check.v1"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type S struct{}
|
||||
|
||||
var _ = Suite(&S{})
|
|
@@ -1,16 +0,0 @@
|
|||
.DS_Store
|
||||
*.[568ao]
|
||||
*.ao
|
||||
*.so
|
||||
*.pyc
|
||||
._*
|
||||
.nfs.*
|
||||
[568a].out
|
||||
*~
|
||||
*.orig
|
||||
core
|
||||
_obj
|
||||
_test
|
||||
_testmain.go
|
||||
protoc-gen-go/testdata/multi/*.pb.go
|
||||
_conformance/_conformance
|
|
@@ -1,18 +0,0 @@
|
|||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
|
||||
install:
|
||||
- go get -v -d -t github.com/golang/protobuf/...
|
||||
- curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip
|
||||
- unzip /tmp/protoc.zip -d $HOME/protoc
|
||||
|
||||
env:
|
||||
- PATH=$HOME/protoc/bin:$PATH
|
||||
|
||||
script:
|
||||
- make all test
|
|
@@ -1,40 +0,0 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
# Includable Makefile to add a rule for generating .pb.go files from .proto files
|
||||
# (Google protocol buffer descriptions).
|
||||
# Typical use if myproto.proto is a file in package mypackage in this directory:
|
||||
#
|
||||
# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf
|
||||
|
||||
%.pb.go: %.proto
|
||||
protoc --go_out=. $<
|
||||
|
|
@@ -1,55 +0,0 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
all: install
|
||||
|
||||
install:
|
||||
go install ./proto ./jsonpb ./ptypes
|
||||
go install ./protoc-gen-go
|
||||
|
||||
test:
|
||||
go test ./proto ./jsonpb ./ptypes
|
||||
make -C protoc-gen-go/testdata test
|
||||
|
||||
clean:
|
||||
go clean ./...
|
||||
|
||||
nuke:
|
||||
go clean -i ./...
|
||||
|
||||
regenerate:
|
||||
make -C protoc-gen-go/descriptor regenerate
|
||||
make -C protoc-gen-go/plugin regenerate
|
||||
make -C protoc-gen-go/testdata regenerate
|
||||
make -C proto/testdata regenerate
|
||||
make -C jsonpb/jsonpb_test_proto regenerate
|
||||
make -C _conformance regenerate
|
|
@@ -1,242 +0,0 @@
|
|||
# Go support for Protocol Buffers
|
||||
|
||||
[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
|
||||
|
||||
Google's data interchange format.
|
||||
Copyright 2010 The Go Authors.
|
||||
https://github.com/golang/protobuf
|
||||
|
||||
This package and the code it generates require at least Go 1.4.
|
||||
|
||||
This software implements Go bindings for protocol buffers. For
|
||||
information about protocol buffers themselves, see
|
||||
https://developers.google.com/protocol-buffers/
|
||||
|
||||
## Installation ##
|
||||
|
||||
To use this software, you must:
|
||||
- Install the standard C++ implementation of protocol buffers from
|
||||
https://developers.google.com/protocol-buffers/
|
||||
- Of course, install the Go compiler and tools from
|
||||
https://golang.org/
|
||||
See
|
||||
https://golang.org/doc/install
|
||||
for details or, if you are using gccgo, follow the instructions at
|
||||
https://golang.org/doc/install/gccgo
|
||||
- Grab the code from the repository and install the proto package.
|
||||
The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
|
||||
The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
|
||||
defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
|
||||
compiler, protoc, to find it.
|
||||
|
||||
This software has two parts: a 'protocol compiler plugin' that
|
||||
generates Go source files that, once compiled, can access and manage
|
||||
protocol buffers; and a library that implements run-time support for
|
||||
encoding (marshaling), decoding (unmarshaling), and accessing protocol
|
||||
buffers.
|
||||
|
||||
There is support for gRPC in Go using protocol buffers.
|
||||
See the note at the bottom of this file for details.
|
||||
|
||||
There are no insertion points in the plugin.
|
||||
|
||||
|
||||
## Using protocol buffers with Go ##
|
||||
|
||||
Once the software is installed, there are two steps to using it.
|
||||
First you must compile the protocol buffer definitions and then import
|
||||
them, with the support library, into your program.
|
||||
|
||||
To compile the protocol buffer definition, run protoc with the --go_out
|
||||
parameter set to the directory you want to output the Go code to.
|
||||
|
||||
protoc --go_out=. *.proto
|
||||
|
||||
The generated files will be suffixed .pb.go. See the Test code below
|
||||
for an example using such a file.
|
||||
|
||||
|
||||
The package comment for the proto library contains text describing
|
||||
the interface provided in Go for protocol buffers. Here is an edited
|
||||
version.
|
||||
|
||||
==========
|
||||
|
||||
The proto package converts data structures to and from the
|
||||
wire format of protocol buffers. It works in concert with the
|
||||
Go source code generated for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
Helpers for getting values are superseded by the
|
||||
GetFoo methods and their use is deprecated.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed with the enum's type name. Enum types have
|
||||
a String method, and an Enum method to assist in message construction.
|
||||
- Nested groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
Consider file test.proto, containing
|
||||
|
||||
```proto
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; };
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To create and play with a Test object from the example package,
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"path/to/example"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &example.Test {
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &example.Test_OptionalGroup {
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &example.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
```
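
As a further, hedged illustration of the getter and default-value behaviour listed above: the snippet below builds on the same generated `example` package, and the constant name `Default_Test_Type` follows the stated `Default_StructName_FieldName` convention rather than being copied from generated code.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"path/to/example"
)

func main() {
	// Getters are safe on a nil message and fall back to the declared default.
	var unset *example.Test
	fmt.Println(unset.GetType()) // 77, from [default=77] in test.proto

	// The same default is also exposed as a package-level constant.
	fmt.Println(example.Default_Test_Type) // 77

	// Optional scalar fields are pointers; helpers such as proto.Int32 set them.
	set := &example.Test{Label: proto.String("hi"), Type: proto.Int32(17)}
	fmt.Println(set.GetType()) // 17
}
```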
|
||||
|
||||
## Parameters ##
|
||||
|
||||
To pass extra parameters to the plugin, use a comma-separated
|
||||
parameter list separated from the output directory by a colon:
|
||||
|
||||
|
||||
protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
|
||||
|
||||
|
||||
- `import_prefix=xxx` - a prefix that is added onto the beginning of
|
||||
all imports. Useful for things like generating protos in a
|
||||
subdirectory, or regenerating vendored protobufs in-place.
|
||||
- `import_path=foo/bar` - used as the package if no input files
|
||||
declare `go_package`. If it contains slashes, everything up to the
|
||||
rightmost slash is ignored.
|
||||
- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
|
||||
load. The only plugin in this repo is `grpc`.
|
||||
- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
|
||||
associated with Go package quux/shme. This is subject to the
|
||||
import_prefix parameter.
|
||||
|
||||
## gRPC Support ##
|
||||
|
||||
If a proto file specifies RPC services, protoc-gen-go can be instructed to
|
||||
generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
|
||||
the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
|
||||
the --go_out argument to protoc:
|
||||
|
||||
protoc --go_out=plugins=grpc:. *.proto
|
||||
|
||||
## Compatibility ##
|
||||
|
||||
The library and the generated code are expected to be stable over time.
|
||||
However, we reserve the right to make breaking changes without notice for the
|
||||
following reasons:
|
||||
|
||||
- Security. A security issue in the specification or implementation may come to
|
||||
light whose resolution requires breaking compatibility. We reserve the right
|
||||
to address such security issues.
|
||||
- Unspecified behavior. There are some aspects of the Protocol Buffers
|
||||
specification that are undefined. Programs that depend on such unspecified
|
||||
behavior may break in future releases.
|
||||
- Specification errors or changes. If it becomes necessary to address an
|
||||
inconsistency, incompleteness, or change in the Protocol Buffers
|
||||
specification, resolving the issue could affect the meaning or legality of
|
||||
existing programs. We reserve the right to address such issues, including
|
||||
updating the implementations.
|
||||
- Bugs. If the library has a bug that violates the specification, a program
|
||||
that depends on the buggy behavior may break if the bug is fixed. We reserve
|
||||
the right to fix such bugs.
|
||||
- Adding methods or fields to generated structs. These may conflict with field
|
||||
names that already exist in a schema, causing applications to break. When the
|
||||
code generator encounters a field in the schema that would collide with a
|
||||
generated field or method name, the code generator will append an underscore
|
||||
to the generated field or method name.
|
||||
- Adding, removing, or changing methods or fields in generated structs that
|
||||
start with `XXX`. These parts of the generated code are exported out of
|
||||
necessity, but should not be considered part of the public API.
|
||||
- Adding, removing, or changing unexported symbols in generated code.
|
||||
|
||||
Any breaking changes outside of these will be announced 6 months in advance to
|
||||
protobuf@googlegroups.com.
|
||||
|
||||
You should, whenever possible, use generated code created by the `protoc-gen-go`
|
||||
tool built at the same commit as the `proto` package. The `proto` package
|
||||
declares package-level constants in the form `ProtoPackageIsVersionX`.
|
||||
Application code and generated code may depend on one of these constants to
|
||||
ensure that compilation will fail if the available version of the proto library
|
||||
is too old. Whenever we make a change to the generated code that requires newer
|
||||
library support, in the same commit we will increment the version number of the
|
||||
generated code and declare a new package-level constant whose name incorporates
|
||||
the latest version number. Removing a compatibility constant is considered a
|
||||
breaking change and would be subject to the announcement policy stated above.
|
||||
|
||||
The `protoc-gen-go/generator` package exposes a plugin interface,
|
||||
which is used by the gRPC code generation. This interface is not
|
||||
supported and is subject to incompatible changes without notice.
|
|
@@ -1,43 +0,0 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
install:
|
||||
go install
|
||||
|
||||
test: install generate-test-pbs
|
||||
go test
|
||||
|
||||
|
||||
generate-test-pbs:
|
||||
make install
|
||||
make -C testdata
|
||||
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
|
||||
make
|
File diff suppressed because it is too large
|
@@ -1,300 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||
testpb "github.com/golang/protobuf/proto/testdata"
|
||||
anypb "github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
var (
|
||||
expandedMarshaler = proto.TextMarshaler{ExpandAny: true}
|
||||
expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}
|
||||
)
|
||||
|
||||
// anyEqual reports whether two messages which may be google.protobuf.Any or may
|
||||
// contain google.protobuf.Any fields are equal. We can't use proto.Equal for
|
||||
// comparison, because semantically equivalent messages may be marshaled to
|
||||
// binary in different tag order. Instead, trust that TextMarshaler with
|
||||
// ExpandAny option works and compare the text marshaling results.
|
||||
func anyEqual(got, want proto.Message) bool {
|
||||
// if messages are proto.Equal, no need to marshal.
|
||||
if proto.Equal(got, want) {
|
||||
return true
|
||||
}
|
||||
g := expandedMarshaler.Text(got)
|
||||
w := expandedMarshaler.Text(want)
|
||||
return g == w
|
||||
}
|
||||
|
||||
type golden struct {
|
||||
m proto.Message
|
||||
t, c string
|
||||
}
|
||||
|
||||
var goldenMessages = makeGolden()
|
||||
|
||||
func makeGolden() []golden {
|
||||
nested := &pb.Nested{Bunny: "Monty"}
|
||||
nb, err := proto.Marshal(nested)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
m1 := &pb.Message{
|
||||
Name: "David",
|
||||
ResultCount: 47,
|
||||
Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb},
|
||||
}
|
||||
m2 := &pb.Message{
|
||||
Name: "David",
|
||||
ResultCount: 47,
|
||||
Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb},
|
||||
}
|
||||
m3 := &pb.Message{
|
||||
Name: "David",
|
||||
ResultCount: 47,
|
||||
Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb},
|
||||
}
|
||||
m4 := &pb.Message{
|
||||
Name: "David",
|
||||
ResultCount: 47,
|
||||
Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb},
|
||||
}
|
||||
m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}
|
||||
|
||||
any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
|
||||
proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")})
|
||||
proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar"))
|
||||
any1b, err := proto.Marshal(any1)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}}
|
||||
proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")})
|
||||
any2b, err := proto.Marshal(any2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
m6 := &pb.Message{
|
||||
Name: "David",
|
||||
ResultCount: 47,
|
||||
Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
|
||||
ManyThings: []*anypb.Any{
|
||||
&anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b},
|
||||
&anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
m1Golden = `
|
||||
name: "David"
|
||||
result_count: 47
|
||||
anything: <
|
||||
[type.googleapis.com/proto3_proto.Nested]: <
|
||||
bunny: "Monty"
|
||||
>
|
||||
>
|
||||
`
|
||||
m2Golden = `
|
||||
name: "David"
|
||||
result_count: 47
|
||||
anything: <
|
||||
["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: <
|
||||
bunny: "Monty"
|
||||
>
|
||||
>
|
||||
`
|
||||
m3Golden = `
|
||||
name: "David"
|
||||
result_count: 47
|
||||
anything: <
|
||||
["type.googleapis.com/\"/proto3_proto.Nested"]: <
|
||||
bunny: "Monty"
|
||||
>
|
||||
>
|
||||
`
|
||||
m4Golden = `
|
||||
name: "David"
|
||||
result_count: 47
|
||||
anything: <
|
||||
[type.googleapis.com/a/path/proto3_proto.Nested]: <
|
||||
bunny: "Monty"
|
||||
>
|
||||
>
|
||||
`
|
||||
m5Golden = `
|
||||
[type.googleapis.com/proto3_proto.Nested]: <
|
||||
bunny: "Monty"
|
||||
>
|
||||
`
|
||||
m6Golden = `
|
||||
name: "David"
|
||||
result_count: 47
|
||||
anything: <
|
||||
[type.googleapis.com/testdata.MyMessage]: <
|
||||
count: 47
|
||||
name: "David"
|
||||
[testdata.Ext.more]: <
|
||||
data: "foo"
|
||||
>
|
||||
[testdata.Ext.text]: "bar"
|
||||
>
|
||||
>
|
||||
many_things: <
|
||||
[type.googleapis.com/testdata.MyMessage]: <
|
||||
count: 42
|
||||
bikeshed: GREEN
|
||||
rep_bytes: "roboto"
|
||||
[testdata.Ext.more]: <
|
||||
data: "baz"
|
||||
>
|
||||
>
|
||||
>
|
||||
many_things: <
|
||||
[type.googleapis.com/testdata.MyMessage]: <
|
||||
count: 47
|
||||
name: "David"
|
||||
[testdata.Ext.more]: <
|
||||
data: "foo"
|
||||
>
|
||||
[testdata.Ext.text]: "bar"
|
||||
>
|
||||
>
|
||||
`
|
||||
)
|
||||
return []golden{
|
||||
{m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "},
|
||||
{m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "},
|
||||
{m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "},
|
||||
{m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "},
|
||||
{m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "},
|
||||
{m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "},
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalGolden(t *testing.T) {
|
||||
for _, tt := range goldenMessages {
|
||||
if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want {
|
||||
t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want)
|
||||
}
|
||||
if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want {
|
||||
t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalGolden(t *testing.T) {
|
||||
for _, tt := range goldenMessages {
|
||||
want := tt.m
|
||||
got := proto.Clone(tt.m)
|
||||
got.Reset()
|
||||
if err := proto.UnmarshalText(tt.t, got); err != nil {
|
||||
t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err)
|
||||
}
|
||||
if !anyEqual(got, want) {
|
||||
t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want)
|
||||
}
|
||||
got.Reset()
|
||||
if err := proto.UnmarshalText(tt.c, got); err != nil {
|
||||
t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err)
|
||||
}
|
||||
if !anyEqual(got, want) {
|
||||
t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnknownAny(t *testing.T) {
|
||||
m := &pb.Message{
|
||||
Anything: &anypb.Any{
|
||||
TypeUrl: "foo",
|
||||
Value: []byte("bar"),
|
||||
},
|
||||
}
|
||||
want := `anything: <
|
||||
type_url: "foo"
|
||||
value: "bar"
|
||||
>
|
||||
`
|
||||
got := expandedMarshaler.Text(m)
|
||||
if got != want {
|
||||
t.Errorf("got\n`%s`\nwant\n`%s`", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmbiguousAny(t *testing.T) {
|
||||
pb := &anypb.Any{}
|
||||
err := proto.UnmarshalText(`
|
||||
type_url: "ttt/proto3_proto.Nested"
|
||||
value: "\n\x05Monty"
|
||||
`, pb)
|
||||
t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
|
||||
if err != nil {
|
||||
t.Errorf("failed to parse ambiguous Any message: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalOverwriteAny(t *testing.T) {
|
||||
pb := &anypb.Any{}
|
||||
err := proto.UnmarshalText(`
|
||||
[type.googleapis.com/a/path/proto3_proto.Nested]: <
|
||||
bunny: "Monty"
|
||||
>
|
||||
[type.googleapis.com/a/path/proto3_proto.Nested]: <
|
||||
bunny: "Rabbit of Caerbannog"
|
||||
>
|
||||
`, pb)
|
||||
want := `line 7: Any message unpacked multiple times, or "type_url" already set`
|
||||
if err.Error() != want {
|
||||
t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalAnyMixAndMatch(t *testing.T) {
|
||||
pb := &anypb.Any{}
|
||||
err := proto.UnmarshalText(`
|
||||
value: "\n\x05Monty"
|
||||
[type.googleapis.com/a/path/proto3_proto.Nested]: <
|
||||
bunny: "Rabbit of Caerbannog"
|
||||
>
|
||||
`, pb)
|
||||
want := `line 5: Any message unpacked multiple times, or "value" already set`
|
||||
if err.Error() != want {
|
||||
t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
|
||||
}
|
||||
}
|
|
@@ -1,300 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||
pb "github.com/golang/protobuf/proto/testdata"
|
||||
)
|
||||
|
||||
var cloneTestMessage = &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
}
|
||||
|
||||
func init() {
|
||||
ext := &pb.Ext{
|
||||
Data: proto.String("extension"),
|
||||
}
|
||||
if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
|
||||
panic("SetExtension: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestClone(t *testing.T) {
|
||||
m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
|
||||
if !proto.Equal(m, cloneTestMessage) {
|
||||
t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
|
||||
}
|
||||
|
||||
// Verify it was a deep copy.
|
||||
*m.Inner.Port++
|
||||
if proto.Equal(m, cloneTestMessage) {
|
||||
t.Error("Mutating clone changed the original")
|
||||
}
|
||||
// Byte fields and repeated fields should be copied.
|
||||
if &m.Pet[0] == &cloneTestMessage.Pet[0] {
|
||||
t.Error("Pet: repeated field not copied")
|
||||
}
|
||||
if &m.Others[0] == &cloneTestMessage.Others[0] {
|
||||
t.Error("Others: repeated field not copied")
|
||||
}
|
||||
if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
|
||||
t.Error("Others[0].Value: bytes field not copied")
|
||||
}
|
||||
if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
|
||||
t.Error("RepBytes: repeated field not copied")
|
||||
}
|
||||
if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
|
||||
t.Error("RepBytes[0]: bytes field not copied")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloneNil(t *testing.T) {
|
||||
var m *pb.MyMessage
|
||||
if c := proto.Clone(m); !proto.Equal(m, c) {
|
||||
t.Errorf("Clone(%v) = %v", m, c)
|
||||
}
|
||||
}
|
||||
|
||||
var mergeTests = []struct {
|
||||
src, dst, want proto.Message
|
||||
}{
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Pet: []string{"horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
},
|
||||
{
|
||||
// Explicitly test a src=nil field
|
||||
Inner: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
},
|
||||
{},
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
RepBytes: [][]byte{[]byte("wow")},
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham")},
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
},
|
||||
},
|
||||
// Check that a scalar bytes field replaces rather than appends.
|
||||
{
|
||||
src: &pb.OtherMessage{Value: []byte("foo")},
|
||||
dst: &pb.OtherMessage{Value: []byte("bar")},
|
||||
want: &pb.OtherMessage{Value: []byte("foo")},
|
||||
},
|
||||
{
|
||||
src: &pb.MessageWithMap{
|
||||
NameMapping: map[int32]string{6: "Nigel"},
|
||||
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
|
||||
0x4002: &pb.FloatingPoint{
|
||||
F: proto.Float64(2.0),
|
||||
},
|
||||
},
|
||||
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
|
||||
},
|
||||
dst: &pb.MessageWithMap{
|
||||
NameMapping: map[int32]string{
|
||||
6: "Bruce", // should be overwritten
|
||||
7: "Andrew",
|
||||
},
|
||||
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||
0x4002: &pb.FloatingPoint{
|
||||
F: proto.Float64(3.0),
|
||||
Exact: proto.Bool(true),
|
||||
}, // the entire message should be overwritten
|
||||
},
|
||||
},
|
||||
want: &pb.MessageWithMap{
|
||||
NameMapping: map[int32]string{
|
||||
6: "Nigel",
|
||||
7: "Andrew",
|
||||
},
|
||||
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
|
||||
0x4002: &pb.FloatingPoint{
|
||||
F: proto.Float64(2.0),
|
||||
},
|
||||
},
|
||||
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
|
||||
},
|
||||
},
|
||||
// proto3 shouldn't merge zero values,
|
||||
// in the same way that proto2 shouldn't merge nils.
|
||||
{
|
||||
src: &proto3pb.Message{
|
||||
Name: "Aaron",
|
||||
Data: []byte(""), // zero value, but not nil
|
||||
},
|
||||
dst: &proto3pb.Message{
|
||||
HeightInCm: 176,
|
||||
Data: []byte("texas!"),
|
||||
},
|
||||
want: &proto3pb.Message{
|
||||
Name: "Aaron",
|
||||
HeightInCm: 176,
|
||||
Data: []byte("texas!"),
|
||||
},
|
||||
},
|
||||
// Oneof fields should merge by assignment.
|
||||
{
|
||||
src: &pb.Communique{
|
||||
Union: &pb.Communique_Number{41},
|
||||
},
|
||||
dst: &pb.Communique{
|
||||
Union: &pb.Communique_Name{"Bobby Tables"},
|
||||
},
|
||||
want: &pb.Communique{
|
||||
Union: &pb.Communique_Number{41},
|
||||
},
|
||||
},
|
||||
// Oneof nil is the same as not set.
|
||||
{
|
||||
src: &pb.Communique{},
|
||||
dst: &pb.Communique{
|
||||
Union: &pb.Communique_Name{"Bobby Tables"},
|
||||
},
|
||||
want: &pb.Communique{
|
||||
Union: &pb.Communique_Name{"Bobby Tables"},
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &proto3pb.Message{
|
||||
Terrain: map[string]*proto3pb.Nested{
|
||||
"kay_a": &proto3pb.Nested{Cute: true}, // replace
|
||||
"kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert
|
||||
},
|
||||
},
|
||||
dst: &proto3pb.Message{
|
||||
Terrain: map[string]*proto3pb.Nested{
|
||||
"kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced
|
||||
"kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep
|
||||
},
|
||||
},
|
||||
want: &proto3pb.Message{
|
||||
Terrain: map[string]*proto3pb.Nested{
|
||||
"kay_a": &proto3pb.Nested{Cute: true},
|
||||
"kay_b": &proto3pb.Nested{Bunny: "rabbit"},
|
||||
"kay_c": &proto3pb.Nested{Bunny: "bunny"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
for _, m := range mergeTests {
|
||||
got := proto.Clone(m.dst)
|
||||
proto.Merge(got, m.src)
|
||||
if !proto.Equal(got, m.want) {
|
||||
t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,258 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
tpb "github.com/golang/protobuf/proto/proto3_proto"
|
||||
)
|
||||
|
||||
var (
|
||||
bytesBlackhole []byte
|
||||
msgBlackhole = new(tpb.Message)
|
||||
)
|
||||
|
||||
// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
|
||||
// 2 bytes long).
|
||||
func BenchmarkVarint32ArraySmall(b *testing.B) {
|
||||
for i := uint(1); i <= 10; i++ {
|
||||
dist := genInt32Dist([7]int{0, 3, 1}, 1<<i)
|
||||
raw, err := proto.Marshal(&tpb.Message{
|
||||
ShortKey: dist,
|
||||
})
|
||||
if err != nil {
|
||||
b.Error("wrong encode", err)
|
||||
}
|
||||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
|
||||
scratchBuf := proto.NewBuffer(nil)
|
||||
b.ResetTimer()
|
||||
for k := 0; k < b.N; k++ {
|
||||
scratchBuf.SetBuf(raw)
|
||||
msgBlackhole.Reset()
|
||||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
|
||||
b.Error("wrong decode", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkVarint32ArrayLarge shows the performance on an array of large int32 fields (3 and
|
||||
// 4 bytes long, with a small number of 1, 2, 5 and 10 byte long versions).
|
||||
func BenchmarkVarint32ArrayLarge(b *testing.B) {
|
||||
for i := uint(1); i <= 10; i++ {
|
||||
dist := genInt32Dist([7]int{0, 1, 2, 4, 8, 1, 1}, 1<<i)
|
||||
raw, err := proto.Marshal(&tpb.Message{
|
||||
ShortKey: dist,
|
||||
})
|
||||
if err != nil {
|
||||
b.Error("wrong encode", err)
|
||||
}
|
||||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
|
||||
scratchBuf := proto.NewBuffer(nil)
|
||||
b.ResetTimer()
|
||||
for k := 0; k < b.N; k++ {
|
||||
scratchBuf.SetBuf(raw)
|
||||
msgBlackhole.Reset()
|
||||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
|
||||
b.Error("wrong decode", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkVarint64ArraySmall shows the performance on an array of small int64 fields (1 and
|
||||
// 2 bytes long).
|
||||
func BenchmarkVarint64ArraySmall(b *testing.B) {
|
||||
for i := uint(1); i <= 10; i++ {
|
||||
dist := genUint64Dist([11]int{0, 3, 1}, 1<<i)
|
||||
raw, err := proto.Marshal(&tpb.Message{
|
||||
Key: dist,
|
||||
})
|
||||
if err != nil {
|
||||
b.Error("wrong encode", err)
|
||||
}
|
||||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
|
||||
scratchBuf := proto.NewBuffer(nil)
|
||||
b.ResetTimer()
|
||||
for k := 0; k < b.N; k++ {
|
||||
scratchBuf.SetBuf(raw)
|
||||
msgBlackhole.Reset()
|
||||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
|
||||
b.Error("wrong decode", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkVarint64ArrayLarge shows the performance on an array of large int64 fields (6, 7,
|
||||
// and 8 bytes long with a small number of the other sizes).
|
||||
func BenchmarkVarint64ArrayLarge(b *testing.B) {
|
||||
for i := uint(1); i <= 10; i++ {
|
||||
dist := genUint64Dist([11]int{0, 1, 1, 2, 4, 8, 16, 32, 16, 1, 1}, 1<<i)
|
||||
raw, err := proto.Marshal(&tpb.Message{
|
||||
Key: dist,
|
||||
})
|
||||
if err != nil {
|
||||
b.Error("wrong encode", err)
|
||||
}
|
||||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
|
||||
scratchBuf := proto.NewBuffer(nil)
|
||||
b.ResetTimer()
|
||||
for k := 0; k < b.N; k++ {
|
||||
scratchBuf.SetBuf(raw)
|
||||
msgBlackhole.Reset()
|
||||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
|
||||
b.Error("wrong decode", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkVarint64ArrayMixed shows the performance of lots of small messages, each
|
||||
// containing a small number of large (3, 4, and 5 byte) repeated int64s.
|
||||
func BenchmarkVarint64ArrayMixed(b *testing.B) {
|
||||
for i := uint(1); i <= 1<<5; i <<= 1 {
|
||||
dist := genUint64Dist([11]int{0, 0, 0, 4, 6, 4, 0, 0, 0, 0, 0}, int(i))
|
||||
// number of sub fields
|
||||
for k := uint(1); k <= 1<<10; k <<= 2 {
|
||||
msg := &tpb.Message{}
|
||||
for m := uint(0); m < k; m++ {
|
||||
msg.Children = append(msg.Children, &tpb.Message{
|
||||
Key: dist,
|
||||
})
|
||||
}
|
||||
raw, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
b.Error("wrong encode", err)
|
||||
}
|
||||
b.Run(fmt.Sprintf("Fields%vLen%v", k, i), func(b *testing.B) {
|
||||
scratchBuf := proto.NewBuffer(nil)
|
||||
b.ResetTimer()
|
||||
for k := 0; k < b.N; k++ {
|
||||
scratchBuf.SetBuf(raw)
|
||||
msgBlackhole.Reset()
|
||||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
|
||||
b.Error("wrong decode", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// genInt32Dist generates a slice of ints that will match the size distribution of dist.
|
||||
// A size of 6 corresponds to a max length varint32, which is 10 bytes. The distribution
|
||||
// is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
|
||||
func genInt32Dist(dist [7]int, count int) (dest []int32) {
|
||||
for i := 0; i < count; i++ {
|
||||
for k := 0; k < len(dist); k++ {
|
||||
var num int32
|
||||
switch k {
|
||||
case 1:
|
||||
num = 1<<7 - 1
|
||||
case 2:
|
||||
num = 1<<14 - 1
|
||||
case 3:
|
||||
num = 1<<21 - 1
|
||||
case 4:
|
||||
num = 1<<28 - 1
|
||||
case 5:
|
||||
num = 1<<29 - 1
|
||||
case 6:
|
||||
num = -1
|
||||
}
|
||||
for m := 0; m < dist[k]; m++ {
|
||||
dest = append(dest, num)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// genUint64Dist generates a slice of ints that will match the size distribution of dist.
|
||||
// The distribution is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
|
||||
func genUint64Dist(dist [11]int, count int) (dest []uint64) {
|
||||
for i := 0; i < count; i++ {
|
||||
for k := 0; k < len(dist); k++ {
|
||||
var num uint64
|
||||
switch k {
|
||||
case 1:
|
||||
num = 1<<7 - 1
|
||||
case 2:
|
||||
num = 1<<14 - 1
|
||||
case 3:
|
||||
num = 1<<21 - 1
|
||||
case 4:
|
||||
num = 1<<28 - 1
|
||||
case 5:
|
||||
num = 1<<35 - 1
|
||||
case 6:
|
||||
num = 1<<42 - 1
|
||||
case 7:
|
||||
num = 1<<49 - 1
|
||||
case 8:
|
||||
num = 1<<56 - 1
|
||||
case 9:
|
||||
num = 1<<63 - 1
|
||||
case 10:
|
||||
num = 1<<64 - 1
|
||||
}
|
||||
for m := 0; m < dist[k]; m++ {
|
||||
dest = append(dest, num)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// BenchmarkDecodeEmpty measures the overhead of doing the minimal possible decode.
|
||||
func BenchmarkDecodeEmpty(b *testing.B) {
|
||||
raw, err := proto.Marshal(&tpb.Message{})
|
||||
if err != nil {
|
||||
b.Error("wrong encode", err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := proto.Unmarshal(raw, msgBlackhole); err != nil {
|
||||
b.Error("wrong decode", err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,85 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build go1.7

package proto_test

import (
	"strconv"
	"testing"

	"github.com/golang/protobuf/proto"
	tpb "github.com/golang/protobuf/proto/proto3_proto"
	"github.com/golang/protobuf/ptypes"
)

var (
	blackhole []byte
)

// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the
// same.
func BenchmarkAny(b *testing.B) {
	data := make([]byte, 1<<20)
	quantum := 1 << 10
	for i := uint(0); i <= 10; i++ {
		b.Run(strconv.Itoa(quantum<<i), func(b *testing.B) {
			for k := 0; k < b.N; k++ {
				inner := &tpb.Message{
					Data: data[:quantum<<i],
				}
				outer, err := ptypes.MarshalAny(inner)
				if err != nil {
					b.Error("wrong encode", err)
				}
				raw, err := proto.Marshal(&tpb.Message{
					Anything: outer,
				})
				if err != nil {
					b.Error("wrong encode", err)
				}
				blackhole = raw
			}
		})
	}
}

// BenchmarkEmpy measures the overhead of doing the minimal possible encode.
func BenchmarkEmpy(b *testing.B) {
	for i := 0; i < b.N; i++ {
		raw, err := proto.Marshal(&tpb.Message{})
		if err != nil {
			b.Error("wrong encode", err)
		}
		blackhole = raw
	}
}
@ -1,224 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/golang/protobuf/proto"
|
||||
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||
pb "github.com/golang/protobuf/proto/testdata"
|
||||
)
|
||||
|
||||
// Four identical base messages.
|
||||
// The init function adds extensions to some of them.
|
||||
var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
|
||||
|
||||
// Two messages with non-message extensions.
|
||||
var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
|
||||
var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
|
||||
|
||||
func init() {
|
||||
ext1 := &pb.Ext{Data: String("Kirk")}
|
||||
ext2 := &pb.Ext{Data: String("Picard")}
|
||||
|
||||
// messageWithExtension1a has ext1, but never marshals it.
|
||||
if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
|
||||
panic("SetExtension on 1a failed: " + err.Error())
|
||||
}
|
||||
|
||||
// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
|
||||
if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
|
||||
panic("SetExtension on 1b failed: " + err.Error())
|
||||
}
|
||||
buf, err := Marshal(messageWithExtension1b)
|
||||
if err != nil {
|
||||
panic("Marshal of 1b failed: " + err.Error())
|
||||
}
|
||||
messageWithExtension1b.Reset()
|
||||
if err := Unmarshal(buf, messageWithExtension1b); err != nil {
|
||||
panic("Unmarshal of 1b failed: " + err.Error())
|
||||
}
|
||||
|
||||
// messageWithExtension2 has ext2.
|
||||
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
|
||||
panic("SetExtension on 2 failed: " + err.Error())
|
||||
}
|
||||
|
||||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
|
||||
panic("SetExtension on Int32-1 failed: " + err.Error())
|
||||
}
|
||||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {
|
||||
panic("SetExtension on Int32-2 failed: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
var EqualTests = []struct {
|
||||
desc string
|
||||
a, b Message
|
||||
exp bool
|
||||
}{
|
||||
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
|
||||
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
|
||||
{"nil vs nil", nil, nil, true},
|
||||
{"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
|
||||
{"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
|
||||
{"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
|
||||
|
||||
{"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
|
||||
{"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
|
||||
{"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
|
||||
{"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
|
||||
|
||||
{"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
|
||||
{"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
|
||||
{"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
|
||||
{"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
|
||||
{"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||
{"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
|
||||
{"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||
|
||||
{
|
||||
"nested, different",
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"nested, equal",
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||
true,
|
||||
},
|
||||
|
||||
{"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
|
||||
{"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
|
||||
{"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
|
||||
{
|
||||
"repeated bytes",
|
||||
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||
true,
|
||||
},
|
||||
// In proto3, []byte{} and []byte(nil) are equal.
|
||||
{"proto3 bytes, empty vs nil", &proto3pb.Message{Data: []byte{}}, &proto3pb.Message{Data: nil}, true},
|
||||
|
||||
{"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
|
||||
{"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
|
||||
{"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
|
||||
|
||||
{"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
|
||||
{"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
|
||||
|
||||
{
|
||||
"message with group",
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
},
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
"map same",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"map different entry",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"map different key only",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"map different value only",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"zero-length maps same",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{}},
|
||||
&pb.MessageWithMap{NameMapping: nil},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"orders in map don't matter",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken", 2: "Rob"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob", 1: "Ken"}},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"oneof same",
|
||||
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"oneof one nil",
|
||||
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||
&pb.Communique{},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"oneof different",
|
||||
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||
&pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
for _, tc := range EqualTests {
|
||||
if res := Equal(tc.a, tc.b); res != tc.exp {
|
||||
t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,536 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "github.com/golang/protobuf/proto/testdata"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
|
||||
msg := &pb.MyMessage{}
|
||||
ext1 := &pb.Ext{}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
||||
t.Fatalf("Could not set ext1: %s", err)
|
||||
}
|
||||
exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
|
||||
pb.E_Ext_More,
|
||||
pb.E_Ext_Text,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtensions() failed: %s", err)
|
||||
}
|
||||
if exts[0] != ext1 {
|
||||
t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
|
||||
}
|
||||
if exts[1] != nil {
|
||||
t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionDescsWithMissingExtensions(t *testing.T) {
|
||||
msg := &pb.MyMessage{Count: proto.Int32(0)}
|
||||
extdesc1 := pb.E_Ext_More
|
||||
if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil {
|
||||
t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err)
|
||||
}
|
||||
|
||||
ext1 := &pb.Ext{}
|
||||
if err := proto.SetExtension(msg, extdesc1, ext1); err != nil {
|
||||
t.Fatalf("Could not set ext1: %s", err)
|
||||
}
|
||||
extdesc2 := &proto.ExtensionDesc{
|
||||
ExtendedType: (*pb.MyMessage)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 123456789,
|
||||
Name: "a.b",
|
||||
Tag: "varint,123456789,opt",
|
||||
}
|
||||
ext2 := proto.Bool(false)
|
||||
if err := proto.SetExtension(msg, extdesc2, ext2); err != nil {
|
||||
t.Fatalf("Could not set ext2: %s", err)
|
||||
}
|
||||
|
||||
b, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not marshal msg: %v", err)
|
||||
}
|
||||
if err := proto.Unmarshal(b, msg); err != nil {
|
||||
t.Fatalf("Could not unmarshal into msg: %v", err)
|
||||
}
|
||||
|
||||
descs, err := proto.ExtensionDescs(msg)
|
||||
if err != nil {
|
||||
t.Fatalf("proto.ExtensionDescs: got error %v", err)
|
||||
}
|
||||
sortExtDescs(descs)
|
||||
wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}}
|
||||
if !reflect.DeepEqual(descs, wantDescs) {
|
||||
t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs)
|
||||
}
|
||||
}
|
||||
|
||||
type ExtensionDescSlice []*proto.ExtensionDesc
|
||||
|
||||
func (s ExtensionDescSlice) Len() int { return len(s) }
|
||||
func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field }
|
||||
func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
func sortExtDescs(s []*proto.ExtensionDesc) {
|
||||
sort.Sort(ExtensionDescSlice(s))
|
||||
}
|
||||
|
||||
func TestGetExtensionStability(t *testing.T) {
|
||||
check := func(m *pb.MyMessage) bool {
|
||||
ext1, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtension() failed: %s", err)
|
||||
}
|
||||
ext2, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtension() failed: %s", err)
|
||||
}
|
||||
return ext1 == ext2
|
||||
}
|
||||
msg := &pb.MyMessage{Count: proto.Int32(4)}
|
||||
ext0 := &pb.Ext{}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
|
||||
t.Fatalf("Could not set ext1: %s", ext0)
|
||||
}
|
||||
if !check(msg) {
|
||||
t.Errorf("GetExtension() not stable before marshaling")
|
||||
}
|
||||
bb, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal() failed: %s", err)
|
||||
}
|
||||
msg1 := &pb.MyMessage{}
|
||||
err = proto.Unmarshal(bb, msg1)
|
||||
if err != nil {
|
||||
t.Fatalf("Unmarshal() failed: %s", err)
|
||||
}
|
||||
if !check(msg1) {
|
||||
t.Errorf("GetExtension() not stable after unmarshaling")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExtensionDefaults(t *testing.T) {
|
||||
var setFloat64 float64 = 1
|
||||
var setFloat32 float32 = 2
|
||||
var setInt32 int32 = 3
|
||||
var setInt64 int64 = 4
|
||||
var setUint32 uint32 = 5
|
||||
var setUint64 uint64 = 6
|
||||
var setBool = true
|
||||
var setBool2 = false
|
||||
var setString = "Goodnight string"
|
||||
var setBytes = []byte("Goodnight bytes")
|
||||
var setEnum = pb.DefaultsMessage_TWO
|
||||
|
||||
type testcase struct {
|
||||
ext *proto.ExtensionDesc // Extension we are testing.
|
||||
want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail).
|
||||
def interface{} // Expected value of extension after ClearExtension().
|
||||
}
|
||||
tests := []testcase{
|
||||
{pb.E_NoDefaultDouble, setFloat64, nil},
|
||||
{pb.E_NoDefaultFloat, setFloat32, nil},
|
||||
{pb.E_NoDefaultInt32, setInt32, nil},
|
||||
{pb.E_NoDefaultInt64, setInt64, nil},
|
||||
{pb.E_NoDefaultUint32, setUint32, nil},
|
||||
{pb.E_NoDefaultUint64, setUint64, nil},
|
||||
{pb.E_NoDefaultSint32, setInt32, nil},
|
||||
{pb.E_NoDefaultSint64, setInt64, nil},
|
||||
{pb.E_NoDefaultFixed32, setUint32, nil},
|
||||
{pb.E_NoDefaultFixed64, setUint64, nil},
|
||||
{pb.E_NoDefaultSfixed32, setInt32, nil},
|
||||
{pb.E_NoDefaultSfixed64, setInt64, nil},
|
||||
{pb.E_NoDefaultBool, setBool, nil},
|
||||
{pb.E_NoDefaultBool, setBool2, nil},
|
||||
{pb.E_NoDefaultString, setString, nil},
|
||||
{pb.E_NoDefaultBytes, setBytes, nil},
|
||||
{pb.E_NoDefaultEnum, setEnum, nil},
|
||||
{pb.E_DefaultDouble, setFloat64, float64(3.1415)},
|
||||
{pb.E_DefaultFloat, setFloat32, float32(3.14)},
|
||||
{pb.E_DefaultInt32, setInt32, int32(42)},
|
||||
{pb.E_DefaultInt64, setInt64, int64(43)},
|
||||
{pb.E_DefaultUint32, setUint32, uint32(44)},
|
||||
{pb.E_DefaultUint64, setUint64, uint64(45)},
|
||||
{pb.E_DefaultSint32, setInt32, int32(46)},
|
||||
{pb.E_DefaultSint64, setInt64, int64(47)},
|
||||
{pb.E_DefaultFixed32, setUint32, uint32(48)},
|
||||
{pb.E_DefaultFixed64, setUint64, uint64(49)},
|
||||
{pb.E_DefaultSfixed32, setInt32, int32(50)},
|
||||
{pb.E_DefaultSfixed64, setInt64, int64(51)},
|
||||
{pb.E_DefaultBool, setBool, true},
|
||||
{pb.E_DefaultBool, setBool2, true},
|
||||
{pb.E_DefaultString, setString, "Hello, string"},
|
||||
{pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
|
||||
{pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
|
||||
}
|
||||
|
||||
checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
|
||||
val, err := proto.GetExtension(msg, test.ext)
|
||||
if err != nil {
|
||||
if valWant != nil {
|
||||
return fmt.Errorf("GetExtension(): %s", err)
|
||||
}
|
||||
if want := proto.ErrMissingExtension; err != want {
|
||||
return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// All proto2 extension values are either a pointer to a value or a slice of values.
|
||||
ty := reflect.TypeOf(val)
|
||||
tyWant := reflect.TypeOf(test.ext.ExtensionType)
|
||||
if got, want := ty, tyWant; got != want {
|
||||
return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
|
||||
}
|
||||
tye := ty.Elem()
|
||||
tyeWant := tyWant.Elem()
|
||||
if got, want := tye, tyeWant; got != want {
|
||||
return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
|
||||
}
|
||||
|
||||
// Check the name of the type of the value.
|
||||
// If it is an enum it will be type int32 with the name of the enum.
|
||||
if got, want := tye.Name(), tye.Name(); got != want {
|
||||
return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
|
||||
}
|
||||
|
||||
// Check that value is what we expect.
|
||||
// If we have a pointer in val, get the value it points to.
|
||||
valExp := val
|
||||
if ty.Kind() == reflect.Ptr {
|
||||
valExp = reflect.ValueOf(val).Elem().Interface()
|
||||
}
|
||||
if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
|
||||
return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
setTo := func(test testcase) interface{} {
|
||||
setTo := reflect.ValueOf(test.want)
|
||||
if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
|
||||
setTo = reflect.New(typ).Elem()
|
||||
setTo.Set(reflect.New(setTo.Type().Elem()))
|
||||
setTo.Elem().Set(reflect.ValueOf(test.want))
|
||||
}
|
||||
return setTo.Interface()
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
msg := &pb.DefaultsMessage{}
|
||||
name := test.ext.Name
|
||||
|
||||
// Check the initial value.
|
||||
if err := checkVal(test, msg, test.def); err != nil {
|
||||
t.Errorf("%s: %v", name, err)
|
||||
}
|
||||
|
||||
// Set the per-type value and check value.
|
||||
name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want)
|
||||
if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {
|
||||
t.Errorf("%s: SetExtension(): %v", name, err)
|
||||
continue
|
||||
}
|
||||
if err := checkVal(test, msg, test.want); err != nil {
|
||||
t.Errorf("%s: %v", name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Set and check the value.
|
||||
name += " (cleared)"
|
||||
proto.ClearExtension(msg, test.ext)
|
||||
if err := checkVal(test, msg, test.def); err != nil {
|
||||
t.Errorf("%s: %v", name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionsRoundTrip(t *testing.T) {
|
||||
msg := &pb.MyMessage{}
|
||||
ext1 := &pb.Ext{
|
||||
Data: proto.String("hi"),
|
||||
}
|
||||
ext2 := &pb.Ext{
|
||||
Data: proto.String("there"),
|
||||
}
|
||||
exists := proto.HasExtension(msg, pb.E_Ext_More)
|
||||
if exists {
|
||||
t.Error("Extension More present unexpectedly")
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
e, err := proto.GetExtension(msg, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
x, ok := e.(*pb.Ext)
|
||||
if !ok {
|
||||
t.Errorf("e has type %T, expected testdata.Ext", e)
|
||||
} else if *x.Data != "there" {
|
||||
t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
|
||||
}
|
||||
proto.ClearExtension(msg, pb.E_Ext_More)
|
||||
if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
|
||||
t.Errorf("got %v, expected ErrMissingExtension", e)
|
||||
}
|
||||
if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
|
||||
t.Error("expected bad extension error, got nil")
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
|
||||
t.Error("expected extension err")
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
|
||||
t.Error("expected some sort of type mismatch error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilExtension(t *testing.T) {
|
||||
msg := &pb.MyMessage{
|
||||
Count: proto.Int32(1),
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
|
||||
t.Error("expected SetExtension to fail due to a nil extension")
|
||||
} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
|
||||
t.Errorf("expected error %v, got %v", want, err)
|
||||
}
|
||||
// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
|
||||
// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalRepeatedExtension(t *testing.T) {
|
||||
// Add a repeated extension to the result.
|
||||
tests := []struct {
|
||||
name string
|
||||
ext []*pb.ComplexExtension
|
||||
}{
|
||||
{
|
||||
"two fields",
|
||||
[]*pb.ComplexExtension{
|
||||
{First: proto.Int32(7)},
|
||||
{Second: proto.Int32(11)},
|
||||
},
|
||||
},
|
||||
{
|
||||
"repeated field",
|
||||
[]*pb.ComplexExtension{
|
||||
{Third: []int32{1000}},
|
||||
{Third: []int32{2000}},
|
||||
},
|
||||
},
|
||||
{
|
||||
"two fields and repeated field",
|
||||
[]*pb.ComplexExtension{
|
||||
{Third: []int32{1000}},
|
||||
{First: proto.Int32(9)},
|
||||
{Second: proto.Int32(21)},
|
||||
{Third: []int32{2000}},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
// Marshal message with a repeated extension.
|
||||
msg1 := new(pb.OtherMessage)
|
||||
err := proto.SetExtension(msg1, pb.E_RComplex, test.ext)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error setting extension: %v", test.name, err)
|
||||
}
|
||||
b, err := proto.Marshal(msg1)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error marshaling message: %v", test.name, err)
|
||||
}
|
||||
|
||||
// Unmarshal and read the merged proto.
|
||||
msg2 := new(pb.OtherMessage)
|
||||
err = proto.Unmarshal(b, msg2)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
|
||||
}
|
||||
e, err := proto.GetExtension(msg2, pb.E_RComplex)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error getting extension: %v", test.name, err)
|
||||
}
|
||||
ext := e.([]*pb.ComplexExtension)
|
||||
if ext == nil {
|
||||
t.Fatalf("[%s] Invalid extension", test.name)
|
||||
}
|
||||
if !reflect.DeepEqual(ext, test.ext) {
|
||||
t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
|
||||
// We may see multiple instances of the same extension in the wire
|
||||
// format. For example, the proto compiler may encode custom options in
|
||||
// this way. Here, we verify that we merge the extensions together.
|
||||
tests := []struct {
|
||||
name string
|
||||
ext []*pb.ComplexExtension
|
||||
}{
|
||||
{
|
||||
"two fields",
|
||||
[]*pb.ComplexExtension{
|
||||
{First: proto.Int32(7)},
|
||||
{Second: proto.Int32(11)},
|
||||
},
|
||||
},
|
||||
{
|
||||
"repeated field",
|
||||
[]*pb.ComplexExtension{
|
||||
{Third: []int32{1000}},
|
||||
{Third: []int32{2000}},
|
||||
},
|
||||
},
|
||||
{
|
||||
"two fields and repeated field",
|
||||
[]*pb.ComplexExtension{
|
||||
{Third: []int32{1000}},
|
||||
{First: proto.Int32(9)},
|
||||
{Second: proto.Int32(21)},
|
||||
{Third: []int32{2000}},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
var buf bytes.Buffer
|
||||
var want pb.ComplexExtension
|
||||
|
||||
// Generate a serialized representation of a repeated extension
|
||||
// by catenating bytes together.
|
||||
for i, e := range test.ext {
|
||||
// Merge to create the wanted proto.
|
||||
proto.Merge(&want, e)
|
||||
|
||||
// serialize the message
|
||||
msg := new(pb.OtherMessage)
|
||||
err := proto.SetExtension(msg, pb.E_Complex, e)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
|
||||
}
|
||||
b, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
|
||||
}
|
||||
buf.Write(b)
|
||||
}
|
||||
|
||||
// Unmarshal and read the merged proto.
|
||||
msg2 := new(pb.OtherMessage)
|
||||
err := proto.Unmarshal(buf.Bytes(), msg2)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
|
||||
}
|
||||
e, err := proto.GetExtension(msg2, pb.E_Complex)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] Error getting extension: %v", test.name, err)
|
||||
}
|
||||
ext := e.(*pb.ComplexExtension)
|
||||
if ext == nil {
|
||||
t.Fatalf("[%s] Invalid extension", test.name)
|
||||
}
|
||||
if !reflect.DeepEqual(*ext, want) {
|
||||
t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearAllExtensions(t *testing.T) {
|
||||
// unregistered extension
|
||||
desc := &proto.ExtensionDesc{
|
||||
ExtendedType: (*pb.MyMessage)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 101010100,
|
||||
Name: "emptyextension",
|
||||
Tag: "varint,0,opt",
|
||||
}
|
||||
m := &pb.MyMessage{}
|
||||
if proto.HasExtension(m, desc) {
|
||||
t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
|
||||
}
|
||||
if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
|
||||
t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
|
||||
}
|
||||
if !proto.HasExtension(m, desc) {
|
||||
t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m))
|
||||
}
|
||||
proto.ClearAllExtensions(m)
|
||||
if proto.HasExtension(m, desc) {
|
||||
t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalRace(t *testing.T) {
|
||||
// unregistered extension
|
||||
desc := &proto.ExtensionDesc{
|
||||
ExtendedType: (*pb.MyMessage)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 101010100,
|
||||
Name: "emptyextension",
|
||||
Tag: "varint,0,opt",
|
||||
}
|
||||
|
||||
m := &pb.MyMessage{Count: proto.Int32(4)}
|
||||
if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
|
||||
t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
|
||||
}
|
||||
|
||||
var g errgroup.Group
|
||||
for n := 3; n > 0; n-- {
|
||||
g.Go(func() error {
|
||||
_, err := proto.Marshal(m)
|
||||
return err
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
|
@ -1,46 +0,0 @@
package proto_test

import (
	"fmt"
	"testing"

	"github.com/golang/protobuf/proto"
	ppb "github.com/golang/protobuf/proto/proto3_proto"
)

func marshalled() []byte {
	m := &ppb.IntMaps{}
	for i := 0; i < 1000; i++ {
		m.Maps = append(m.Maps, &ppb.IntMap{
			Rtt: map[int32]int32{1: 2},
		})
	}
	b, err := proto.Marshal(m)
	if err != nil {
		panic(fmt.Sprintf("Can't marshal %+v: %v", m, err))
	}
	return b
}

func BenchmarkConcurrentMapUnmarshal(b *testing.B) {
	in := marshalled()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var out ppb.IntMaps
			if err := proto.Unmarshal(in, &out); err != nil {
				b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
			}
		}
	})
}

func BenchmarkSequentialMapUnmarshal(b *testing.B) {
	in := marshalled()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var out ppb.IntMaps
		if err := proto.Unmarshal(in, &out); err != nil {
			b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
		}
	}
}
@ -1,66 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
	"bytes"
	"testing"
)

func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
	// Check that a repeated message set entry will be concatenated.
	in := &messageSet{
		Item: []*_MessageSet_Item{
			{TypeId: Int32(12345), Message: []byte("hoo")},
			{TypeId: Int32(12345), Message: []byte("hah")},
		},
	}
	b, err := Marshal(in)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}
	t.Logf("Marshaled bytes: %q", b)

	var extensions XXX_InternalExtensions
	if err := UnmarshalMessageSet(b, &extensions); err != nil {
		t.Fatalf("UnmarshalMessageSet: %v", err)
	}
	ext, ok := extensions.p.extensionMap[12345]
	if !ok {
		t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap)
	}
	// Skip wire type/field number and length varints.
	got := skipVarint(skipVarint(ext.enc))
	if want := []byte("hoohah"); !bytes.Equal(got, want) {
		t.Errorf("Combined extension is %q, want %q", got, want)
	}
}
@ -1,135 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto_test

import (
	"testing"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto"
	tpb "github.com/golang/protobuf/proto/testdata"
)

func TestProto3ZeroValues(t *testing.T) {
	tests := []struct {
		desc string
		m    proto.Message
	}{
		{"zero message", &pb.Message{}},
		{"empty bytes field", &pb.Message{Data: []byte{}}},
	}
	for _, test := range tests {
		b, err := proto.Marshal(test.m)
		if err != nil {
			t.Errorf("%s: proto.Marshal: %v", test.desc, err)
			continue
		}
		if len(b) > 0 {
			t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
		}
	}
}

func TestRoundTripProto3(t *testing.T) {
	m := &pb.Message{
		Name:         "David",          // (2 | 1<<3): 0x0a 0x05 "David"
		Hilarity:     pb.Message_PUNS,  // (0 | 2<<3): 0x10 0x01
		HeightInCm:   178,              // (0 | 3<<3): 0x18 0xb2 0x01
		Data:         []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
		ResultCount:  47,               // (0 | 7<<3): 0x38 0x2f
		TrueScotsman: true,             // (0 | 8<<3): 0x40 0x01
		Score:        8.1,              // (5 | 9<<3): 0x4d <8.1>

		Key: []uint64{1, 0xdeadbeef},
		Nested: &pb.Nested{
			Bunny: "Monty",
		},
	}
	t.Logf(" m: %v", m)

	b, err := proto.Marshal(m)
	if err != nil {
		t.Fatalf("proto.Marshal: %v", err)
	}
	t.Logf(" b: %q", b)

	m2 := new(pb.Message)
	if err := proto.Unmarshal(b, m2); err != nil {
		t.Fatalf("proto.Unmarshal: %v", err)
	}
	t.Logf("m2: %v", m2)

	if !proto.Equal(m, m2) {
		t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
	}
}

func TestGettersForBasicTypesExist(t *testing.T) {
	var m pb.Message
	if got := m.GetNested().GetBunny(); got != "" {
		t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got)
	}
	if got := m.GetNested().GetCute(); got {
		t.Errorf("m.GetNested().GetCute() = %t, want false", got)
	}
}

func TestProto3SetDefaults(t *testing.T) {
	in := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: new(tpb.SubDefaults),
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": new(tpb.SubDefaults),
		},
	}

	got := proto.Clone(in).(*pb.Message)
	proto.SetDefaults(got)

	// There are no defaults in proto3. Everything should be the zero value, but
	// we need to remember to set defaults for nested proto2 messages.
	want := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
		},
	}

	if !proto.Equal(got, want) {
		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
	}
}
@ -1,63 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// This is a separate file and package from size_test.go because that one uses
|
||||
// generated messages and thus may not be in package proto without having a circular
|
||||
// dependency, whereas this file tests unexported details of size.go.
|
||||
|
||||
func TestVarintSize(t *testing.T) {
|
||||
// Check the edge cases carefully.
|
||||
testCases := []struct {
|
||||
n uint64
|
||||
size int
|
||||
}{
|
||||
{0, 1},
|
||||
{1, 1},
|
||||
{127, 1},
|
||||
{128, 2},
|
||||
{16383, 2},
|
||||
{16384, 3},
|
||||
{1<<63 - 1, 9},
|
||||
{1 << 63, 10},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
size := sizeVarint(tc.n)
|
||||
if size != tc.size {
|
||||
t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,164 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
. "github.com/golang/protobuf/proto"
|
||||
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||
pb "github.com/golang/protobuf/proto/testdata"
|
||||
)
|
||||
|
||||
var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
|
||||
|
||||
// messageWithExtension2 is in equal_test.go.
|
||||
var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
|
||||
|
||||
func init() {
|
||||
if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
|
||||
log.Panicf("SetExtension: %v", err)
|
||||
}
|
||||
if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
|
||||
log.Panicf("SetExtension: %v", err)
|
||||
}
|
||||
|
||||
// Force messageWithExtension3 to have the extension encoded.
|
||||
Marshal(messageWithExtension3)
|
||||
|
||||
}
|
||||
|
||||
var SizeTests = []struct {
|
||||
desc string
|
||||
pb Message
|
||||
}{
|
||||
{"empty", &pb.OtherMessage{}},
|
||||
// Basic types.
|
||||
{"bool", &pb.Defaults{F_Bool: Bool(true)}},
|
||||
{"int32", &pb.Defaults{F_Int32: Int32(12)}},
|
||||
{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
|
||||
{"small int64", &pb.Defaults{F_Int64: Int64(1)}},
|
||||
{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
|
||||
{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
|
||||
{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
|
||||
{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
|
||||
{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
|
||||
{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
|
||||
{"float", &pb.Defaults{F_Float: Float32(12.6)}},
|
||||
{"double", &pb.Defaults{F_Double: Float64(13.9)}},
|
||||
{"string", &pb.Defaults{F_String: String("niles")}},
|
||||
{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
|
||||
{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
|
||||
{"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
|
||||
{"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
|
||||
{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
|
||||
// Repeated.
|
||||
{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
|
||||
{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
|
||||
{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
|
||||
{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
|
||||
{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
|
||||
{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
|
||||
// Need enough large numbers to verify that the header is counting the number of bytes
|
||||
// for the field, not the number of elements.
|
||||
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
|
||||
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
|
||||
}}},
|
||||
{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
|
||||
{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
|
||||
// Nested.
|
||||
{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
|
||||
{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
|
||||
// Other things.
|
||||
{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
|
||||
{"extension (unencoded)", messageWithExtension1},
|
||||
{"extension (encoded)", messageWithExtension3},
|
||||
// proto3 message
|
||||
{"proto3 empty", &proto3pb.Message{}},
|
||||
{"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
|
||||
{"proto3 int64", &proto3pb.Message{ResultCount: 1}},
|
||||
{"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
|
||||
{"proto3 float", &proto3pb.Message{Score: 12.6}},
|
||||
{"proto3 string", &proto3pb.Message{Name: "Snezana"}},
|
||||
{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
|
||||
{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
|
||||
{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
|
||||
{"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
|
||||
|
||||
{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
|
||||
{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
|
||||
{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
|
||||
{"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
|
||||
|
||||
{"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
|
||||
{"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
|
||||
{"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
|
||||
|
||||
{"oneof not set", &pb.Oneof{}},
|
||||
{"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}},
|
||||
{"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}},
|
||||
{"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}},
|
||||
{"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}},
|
||||
{"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}},
|
||||
{"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}},
|
||||
{"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}},
|
||||
{"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}},
|
||||
{"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}},
|
||||
{"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}},
|
||||
{"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}},
|
||||
{"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}},
|
||||
{"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}},
|
||||
{"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}},
|
||||
{"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}},
|
||||
{"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}},
|
||||
{"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}},
|
||||
{"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}},
|
||||
{"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}},
|
||||
{"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}},
|
||||
}
|
||||
|
||||
func TestSize(t *testing.T) {
|
||||
for _, tc := range SizeTests {
|
||||
size := Size(tc.pb)
|
||||
b, err := Marshal(tc.pb)
|
||||
if err != nil {
|
||||
t.Errorf("%v: Marshal failed: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
if size != len(b) {
|
||||
t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
|
||||
t.Logf("%v: bytes: %#v", tc.desc, b)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,673 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
. "github.com/golang/protobuf/proto"
|
||||
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||
. "github.com/golang/protobuf/proto/testdata"
|
||||
)
|
||||
|
||||
type UnmarshalTextTest struct {
|
||||
in string
|
||||
err string // if "", no error expected
|
||||
out *MyMessage
|
||||
}
|
||||
|
||||
func buildExtStructTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
SetExtension(msg, E_Ext_More, &Ext{
|
||||
Data: String("Hello, world!"),
|
||||
})
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
func buildExtDataTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
SetExtension(msg, E_Ext_Text, String("Hello, world!"))
|
||||
SetExtension(msg, E_Ext_Number, Int32(1729))
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
func buildExtRepStringTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
var unMarshalTextTests = []UnmarshalTextTest{
|
||||
// Basic
|
||||
{
|
||||
in: " count:42\n name:\"Dave\" ",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
},
|
||||
},
|
||||
|
||||
// Empty quoted string
|
||||
{
|
||||
in: `count:42 name:""`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(""),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string concatenation with double quotes
|
||||
{
|
||||
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string concatenation with single quotes
|
||||
{
|
||||
in: "count:42 name: 'My name is '\n'elsewhere'",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string concatenations with mixed quotes
|
||||
{
|
||||
in: "count:42 name: 'My name is '\n\"elsewhere\"",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "count:42 name: \"My name is \"\n'elsewhere'",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with escaped apostrophe
|
||||
{
|
||||
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("HOLIDAY - New Year's Day"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with single quote
|
||||
{
|
||||
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`Roger "The Ramster" Ramjet`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with all the accepted special characters from the C++ test
|
||||
{
|
||||
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with quoted backslash
|
||||
{
|
||||
in: `count:42 name: "\\'xyz"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`\'xyz`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with UTF-8 bytes.
|
||||
{
|
||||
in: "count:42 name: '\303\277\302\201\xAB'",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\303\277\302\201\xAB"),
|
||||
},
|
||||
},
|
||||
|
||||
// Bad quoted string
|
||||
{
|
||||
in: `inner: < host: "\0" >` + "\n",
|
||||
err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
|
||||
},
|
||||
|
||||
// Number too large for int64
|
||||
{
|
||||
in: "count: 1 others { key: 123456789012345678901 }",
|
||||
err: "line 1.23: invalid int64: 123456789012345678901",
|
||||
},
|
||||
|
||||
// Number too large for int32
|
||||
{
|
||||
in: "count: 1234567890123",
|
||||
err: "line 1.7: invalid int32: 1234567890123",
|
||||
},
|
||||
|
||||
// Number in hexadecimal
|
||||
{
|
||||
in: "count: 0x2beef",
|
||||
out: &MyMessage{
|
||||
Count: Int32(0x2beef),
|
||||
},
|
||||
},
|
||||
|
||||
// Number in octal
|
||||
{
|
||||
in: "count: 024601",
|
||||
out: &MyMessage{
|
||||
Count: Int32(024601),
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point number with "f" suffix
|
||||
{
|
||||
in: "count: 4 others:< weight: 17.0f >",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Weight: Float32(17),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point positive infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point negative infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: -inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Number too large for float32
|
||||
{
|
||||
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
|
||||
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
|
||||
},
|
||||
|
||||
// Number posing as a quoted string
|
||||
{
|
||||
in: `inner: < host: 12 >` + "\n",
|
||||
err: `line 1.15: invalid string: 12`,
|
||||
},
|
||||
|
||||
// Quoted string posing as int32
|
||||
{
|
||||
in: `count: "12"`,
|
||||
err: `line 1.7: invalid int32: "12"`,
|
||||
},
|
||||
|
||||
// Quoted string posing as a float32
|
||||
{
|
||||
in: `others:< weight: "17.4" >`,
|
||||
err: `line 1.17: invalid float32: "17.4"`,
|
||||
},
|
||||
|
||||
// Enum
|
||||
{
|
||||
in: `count:42 bikeshed: BLUE`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Bikeshed: MyMessage_BLUE.Enum(),
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated field
|
||||
{
|
||||
in: `count:42 pet: "horsey" pet:"bunny"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Pet: []string{"horsey", "bunny"},
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated field with list notation
|
||||
{
|
||||
in: `count:42 pet: ["horsey", "bunny"]`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Pet: []string{"horsey", "bunny"},
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated message with/without colon and <>/{}
|
||||
{
|
||||
in: `count:42 others:{} others{} others:<> others:{}`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Others: []*OtherMessage{
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Missing colon for inner message
|
||||
{
|
||||
in: `count:42 inner < host: "cauchy.syd" >`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("cauchy.syd"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Missing colon for string field
|
||||
{
|
||||
in: `name "Dave"`,
|
||||
err: `line 1.5: expected ':', found "\"Dave\""`,
|
||||
},
|
||||
|
||||
// Missing colon for int32 field
|
||||
{
|
||||
in: `count 42`,
|
||||
err: `line 1.6: expected ':', found "42"`,
|
||||
},
|
||||
|
||||
// Missing required field
|
||||
{
|
||||
in: `name: "Pawel"`,
|
||||
err: `proto: required field "testdata.MyMessage.count" not set`,
|
||||
out: &MyMessage{
|
||||
Name: String("Pawel"),
|
||||
},
|
||||
},
|
||||
|
||||
// Missing required field in a required submessage
|
||||
{
|
||||
in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
|
||||
err: `proto: required field "testdata.InnerMessage.host" not set`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated non-repeated field
|
||||
{
|
||||
in: `name: "Rob" name: "Russ"`,
|
||||
err: `line 1.12: non-repeated field "name" was repeated`,
|
||||
},
|
||||
|
||||
// Group
|
||||
{
|
||||
in: `count: 17 SomeGroup { group_field: 12 }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(17),
|
||||
Somegroup: &MyMessage_SomeGroup{
|
||||
GroupField: Int32(12),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Semicolon between fields
|
||||
{
|
||||
in: `count:3;name:"Calvin"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(3),
|
||||
Name: String("Calvin"),
|
||||
},
|
||||
},
|
||||
// Comma between fields
|
||||
{
|
||||
in: `count:4,name:"Ezekiel"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Name: String("Ezekiel"),
|
||||
},
|
||||
},
|
||||
|
||||
// Boolean false
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: false }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Boolean true
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: true }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Boolean 0
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: 0 }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Boolean 1
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: 1 }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Boolean f
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: f }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Boolean t
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: t }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Boolean False
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: False }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Boolean True
|
||||
{
|
||||
in: `count:42 inner { host: "example.com" connected: True }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("example.com"),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Extension
|
||||
buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
|
||||
buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
|
||||
buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
|
||||
buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
|
||||
|
||||
// Big all-in-one
|
||||
{
|
||||
in: "count:42 # Meaning\n" +
|
||||
`name:"Dave" ` +
|
||||
`quote:"\"I didn't want to go.\"" ` +
|
||||
`pet:"bunny" ` +
|
||||
`pet:"kitty" ` +
|
||||
`pet:"horsey" ` +
|
||||
`inner:<` +
|
||||
` host:"footrest.syd" ` +
|
||||
` port:7001 ` +
|
||||
` connected:true ` +
|
||||
`> ` +
|
||||
`others:<` +
|
||||
` key:3735928559 ` +
|
||||
` value:"\x01A\a\f" ` +
|
||||
`> ` +
|
||||
`others:<` +
|
||||
" weight:58.9 # Atomic weight of Co\n" +
|
||||
` inner:<` +
|
||||
` host:"lesha.mtv" ` +
|
||||
` port:8002 ` +
|
||||
` >` +
|
||||
`>`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
Quote: String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &InnerMessage{
|
||||
Host: String("footrest.syd"),
|
||||
Port: Int32(7001),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Key: Int64(3735928559),
|
||||
Value: []byte{0x1, 'A', '\a', '\f'},
|
||||
},
|
||||
{
|
||||
Weight: Float32(58.9),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("lesha.mtv"),
|
||||
Port: Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestUnmarshalText(t *testing.T) {
|
||||
for i, test := range unMarshalTextTests {
|
||||
pb := new(MyMessage)
|
||||
err := UnmarshalText(test.in, pb)
|
||||
if test.err == "" {
|
||||
// We don't expect failure.
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: Unexpected error: %v", i, err)
|
||||
} else if !reflect.DeepEqual(pb, test.out) {
|
||||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
|
||||
i, pb, test.out)
|
||||
}
|
||||
} else {
|
||||
// We do expect failure.
|
||||
if err == nil {
|
||||
t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
|
||||
} else if err.Error() != test.err {
|
||||
t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
|
||||
i, err.Error(), test.err)
|
||||
} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
|
||||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
|
||||
i, pb, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalTextCustomMessage(t *testing.T) {
|
||||
msg := &textMessage{}
|
||||
if err := UnmarshalText("custom", msg); err != nil {
|
||||
t.Errorf("Unexpected error from custom unmarshal: %v", err)
|
||||
}
|
||||
if UnmarshalText("not custom", msg) == nil {
|
||||
t.Errorf("Didn't get expected error from custom unmarshal")
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test; this caused a panic.
|
||||
func TestRepeatedEnum(t *testing.T) {
|
||||
pb := new(RepeatedEnum)
|
||||
if err := UnmarshalText("color: RED", pb); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
exp := &RepeatedEnum{
|
||||
Color: []RepeatedEnum_Color{RepeatedEnum_RED},
|
||||
}
|
||||
if !Equal(pb, exp) {
|
||||
t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProto3TextParsing(t *testing.T) {
|
||||
m := new(proto3pb.Message)
|
||||
const in = `name: "Wallace" true_scotsman: true`
|
||||
want := &proto3pb.Message{
|
||||
Name: "Wallace",
|
||||
TrueScotsman: true,
|
||||
}
|
||||
if err := UnmarshalText(in, m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !Equal(m, want) {
|
||||
t.Errorf("\n got %v\nwant %v", m, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapParsing(t *testing.T) {
|
||||
m := new(MessageWithMap)
|
||||
const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
|
||||
`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
|
||||
`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
|
||||
`msg_mapping:<value:<f: 5.0>>` + // omitted key
|
||||
`msg_mapping:<key:1>` + // omitted value
|
||||
`byte_mapping:<key:true value:"so be it">` +
|
||||
`byte_mapping:<>` // omitted key and value
|
||||
want := &MessageWithMap{
|
||||
NameMapping: map[int32]string{
|
||||
1: "Beatles",
|
||||
1234: "Feist",
|
||||
},
|
||||
MsgMapping: map[int64]*FloatingPoint{
|
||||
-4: {F: Float64(2.0)},
|
||||
-2: {F: Float64(4.0)},
|
||||
0: {F: Float64(5.0)},
|
||||
1: nil,
|
||||
},
|
||||
ByteMapping: map[bool][]byte{
|
||||
false: nil,
|
||||
true: []byte("so be it"),
|
||||
},
|
||||
}
|
||||
if err := UnmarshalText(in, m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !Equal(m, want) {
|
||||
t.Errorf("\n got %v\nwant %v", m, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOneofParsing(t *testing.T) {
|
||||
const in = `name:"Shrek"`
|
||||
m := new(Communique)
|
||||
want := &Communique{Union: &Communique_Name{"Shrek"}}
|
||||
if err := UnmarshalText(in, m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !Equal(m, want) {
|
||||
t.Errorf("\n got %v\nwant %v", m, want)
|
||||
}
|
||||
|
||||
const inOverwrite = `name:"Shrek" number:42`
|
||||
m = new(Communique)
|
||||
testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
|
||||
if err := UnmarshalText(inOverwrite, m); err == nil {
|
||||
t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
|
||||
} else if err.Error() != testErr {
|
||||
t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
|
||||
err.Error(), testErr)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var benchInput string
|
||||
|
||||
func init() {
|
||||
benchInput = "count: 4\n"
|
||||
for i := 0; i < 1000; i++ {
|
||||
benchInput += "pet: \"fido\"\n"
|
||||
}
|
||||
|
||||
// Check it is valid input.
|
||||
pb := new(MyMessage)
|
||||
err := UnmarshalText(benchInput, pb)
|
||||
if err != nil {
|
||||
panic("Bad benchmark input: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalText(b *testing.B) {
|
||||
pb := new(MyMessage)
|
||||
for i := 0; i < b.N; i++ {
|
||||
UnmarshalText(benchInput, pb)
|
||||
}
|
||||
b.SetBytes(int64(len(benchInput)))
|
||||
}
|
|
@ -1,474 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||
pb "github.com/golang/protobuf/proto/testdata"
|
||||
)
|
||||
|
||||
// textMessage implements the methods that allow it to marshal and unmarshal
|
||||
// itself as text.
|
||||
type textMessage struct {
|
||||
}
|
||||
|
||||
func (*textMessage) MarshalText() ([]byte, error) {
|
||||
return []byte("custom"), nil
|
||||
}
|
||||
|
||||
func (*textMessage) UnmarshalText(bytes []byte) error {
|
||||
if string(bytes) != "custom" {
|
||||
return errors.New("expected 'custom'")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*textMessage) Reset() {}
|
||||
func (*textMessage) String() string { return "" }
|
||||
func (*textMessage) ProtoMessage() {}
|
||||
|
||||
func newTestMessage() *pb.MyMessage {
|
||||
msg := &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Quote: proto.String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("footrest.syd"),
|
||||
Port: proto.Int32(7001),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(0xdeadbeef),
|
||||
Value: []byte{1, 65, 7, 12},
|
||||
},
|
||||
{
|
||||
Weight: proto.Float32(6.022),
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("lesha.mtv"),
|
||||
Port: proto.Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
Bikeshed: pb.MyMessage_BLUE.Enum(),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(8),
|
||||
},
|
||||
// One normally wouldn't do this.
|
||||
// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
|
||||
XXX_unrecognized: []byte{13<<3 | 0, 4},
|
||||
}
|
||||
ext := &pb.Ext{
|
||||
Data: proto.String("Big gobs for big rats"),
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
greetings := []string{"adg", "easy", "cow"}
|
||||
if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
|
||||
b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
|
||||
proto.SetRawExtension(msg, 201, b)
|
||||
|
||||
// Extensions can be plain fields, too, so let's test that.
|
||||
b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
|
||||
proto.SetRawExtension(msg, 202, b)
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
const text = `count: 42
|
||||
name: "Dave"
|
||||
quote: "\"I didn't want to go.\""
|
||||
pet: "bunny"
|
||||
pet: "kitty"
|
||||
pet: "horsey"
|
||||
inner: <
|
||||
host: "footrest.syd"
|
||||
port: 7001
|
||||
connected: true
|
||||
>
|
||||
others: <
|
||||
key: 3735928559
|
||||
value: "\001A\007\014"
|
||||
>
|
||||
others: <
|
||||
weight: 6.022
|
||||
inner: <
|
||||
host: "lesha.mtv"
|
||||
port: 8002
|
||||
>
|
||||
>
|
||||
bikeshed: BLUE
|
||||
SomeGroup {
|
||||
group_field: 8
|
||||
}
|
||||
/* 2 unknown bytes */
|
||||
13: 4
|
||||
[testdata.Ext.more]: <
|
||||
data: "Big gobs for big rats"
|
||||
>
|
||||
[testdata.greeting]: "adg"
|
||||
[testdata.greeting]: "easy"
|
||||
[testdata.greeting]: "cow"
|
||||
/* 13 unknown bytes */
|
||||
201: "\t3G skiing"
|
||||
/* 3 unknown bytes */
|
||||
202: 19
|
||||
`
|
||||
|
||||
func TestMarshalText(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, newTestMessage()); err != nil {
|
||||
t.Fatalf("proto.MarshalText: %v", err)
|
||||
}
|
||||
s := buf.String()
|
||||
if s != text {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalTextCustomMessage(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
|
||||
t.Fatalf("proto.MarshalText: %v", err)
|
||||
}
|
||||
s := buf.String()
|
||||
if s != "custom" {
|
||||
t.Errorf("Got %q, expected %q", s, "custom")
|
||||
}
|
||||
}
|
||||
func TestMarshalTextNil(t *testing.T) {
|
||||
want := "<nil>"
|
||||
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
|
||||
for i, test := range tests {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, test); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := buf.String(); got != want {
|
||||
t.Errorf("%d: got %q want %q", i, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalTextUnknownEnum(t *testing.T) {
|
||||
// The Color enum only specifies values 0-2.
|
||||
m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
|
||||
got := m.String()
|
||||
const want = `bikeshed:3 `
|
||||
if got != want {
|
||||
t.Errorf("\n got %q\nwant %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTextOneof(t *testing.T) {
|
||||
tests := []struct {
|
||||
m proto.Message
|
||||
want string
|
||||
}{
|
||||
// zero message
|
||||
{&pb.Communique{}, ``},
|
||||
// scalar field
|
||||
{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
|
||||
// message field
|
||||
{&pb.Communique{Union: &pb.Communique_Msg{
|
||||
&pb.Strings{StringField: proto.String("why hello!")},
|
||||
}}, `msg:<string_field:"why hello!" >`},
|
||||
// bad oneof (should not panic)
|
||||
{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
|
||||
}
|
||||
for _, test := range tests {
|
||||
got := strings.TrimSpace(test.m.String())
|
||||
if got != test.want {
|
||||
t.Errorf("\n got %s\nwant %s", got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalTextBuffered(b *testing.B) {
|
||||
buf := new(bytes.Buffer)
|
||||
m := newTestMessage()
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Reset()
|
||||
proto.MarshalText(buf, m)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalTextUnbuffered(b *testing.B) {
|
||||
w := ioutil.Discard
|
||||
m := newTestMessage()
|
||||
for i := 0; i < b.N; i++ {
|
||||
proto.MarshalText(w, m)
|
||||
}
|
||||
}
|
||||
|
||||
func compact(src string) string {
|
||||
// s/[ \n]+/ /g; s/ $//;
|
||||
dst := make([]byte, len(src))
|
||||
space, comment := false, false
|
||||
j := 0
|
||||
for i := 0; i < len(src); i++ {
|
||||
if strings.HasPrefix(src[i:], "/*") {
|
||||
comment = true
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if comment && strings.HasPrefix(src[i:], "*/") {
|
||||
comment = false
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if comment {
|
||||
continue
|
||||
}
|
||||
c := src[i]
|
||||
if c == ' ' || c == '\n' {
|
||||
space = true
|
||||
continue
|
||||
}
|
||||
if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
|
||||
space = false
|
||||
}
|
||||
if c == '{' {
|
||||
space = false
|
||||
}
|
||||
if space {
|
||||
dst[j] = ' '
|
||||
j++
|
||||
space = false
|
||||
}
|
||||
dst[j] = c
|
||||
j++
|
||||
}
|
||||
if space {
|
||||
dst[j] = ' '
|
||||
j++
|
||||
}
|
||||
return string(dst[0:j])
|
||||
}
|
||||
|
||||
var compactText = compact(text)
|
||||
|
||||
func TestCompactText(t *testing.T) {
|
||||
s := proto.CompactTextString(newTestMessage())
|
||||
if s != compactText {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringEscaping(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in *pb.Strings
|
||||
out string
|
||||
}{
|
||||
{
|
||||
// Test data from C++ test (TextFormatTest.StringEscape).
|
||||
// Single divergence: we don't escape apostrophes.
|
||||
&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
|
||||
"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
|
||||
},
|
||||
{
|
||||
// Test data from the same C++ test.
|
||||
&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
|
||||
"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
|
||||
},
|
||||
{
|
||||
// Some UTF-8.
|
||||
&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
|
||||
`string_field: "\000\001\377\201"` + "\n",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
var buf bytes.Buffer
|
||||
if err := proto.MarshalText(&buf, tc.in); err != nil {
|
||||
t.Errorf("proto.MarsalText: %v", err)
|
||||
continue
|
||||
}
|
||||
s := buf.String()
|
||||
if s != tc.out {
|
||||
t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check round-trip.
|
||||
pb := new(pb.Strings)
|
||||
if err := proto.UnmarshalText(s, pb); err != nil {
|
||||
t.Errorf("#%d: UnmarshalText: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !proto.Equal(pb, tc.in) {
|
||||
t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A limitedWriter accepts some output before it fails.
|
||||
// This is a proxy for something like a nearly-full or imminently-failing disk,
|
||||
// or a network connection that is about to die.
|
||||
type limitedWriter struct {
|
||||
b bytes.Buffer
|
||||
limit int
|
||||
}
|
||||
|
||||
var outOfSpace = errors.New("proto: insufficient space")
|
||||
|
||||
func (w *limitedWriter) Write(p []byte) (n int, err error) {
|
||||
var avail = w.limit - w.b.Len()
|
||||
if avail <= 0 {
|
||||
return 0, outOfSpace
|
||||
}
|
||||
if len(p) <= avail {
|
||||
return w.b.Write(p)
|
||||
}
|
||||
n, _ = w.b.Write(p[:avail])
|
||||
return n, outOfSpace
|
||||
}
|
||||
|
||||
func TestMarshalTextFailing(t *testing.T) {
|
||||
// Try lots of different sizes to exercise more error code-paths.
|
||||
for lim := 0; lim < len(text); lim++ {
|
||||
buf := new(limitedWriter)
|
||||
buf.limit = lim
|
||||
err := proto.MarshalText(buf, newTestMessage())
|
||||
// We expect a certain error, but also some partial results in the buffer.
|
||||
if err != outOfSpace {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
|
||||
}
|
||||
s := buf.b.String()
|
||||
x := text[:buf.limit]
|
||||
if s != x {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFloats(t *testing.T) {
|
||||
tests := []struct {
|
||||
f float64
|
||||
want string
|
||||
}{
|
||||
{0, "0"},
|
||||
{4.7, "4.7"},
|
||||
{math.Inf(1), "inf"},
|
||||
{math.Inf(-1), "-inf"},
|
||||
{math.NaN(), "nan"},
|
||||
}
|
||||
for _, test := range tests {
|
||||
msg := &pb.FloatingPoint{F: &test.f}
|
||||
got := strings.TrimSpace(msg.String())
|
||||
want := `f:` + test.want
|
||||
if got != want {
|
||||
t.Errorf("f=%f: got %q, want %q", test.f, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepeatedNilText(t *testing.T) {
|
||||
m := &pb.MessageList{
|
||||
Message: []*pb.MessageList_Message{
|
||||
nil,
|
||||
&pb.MessageList_Message{
|
||||
Name: proto.String("Horse"),
|
||||
},
|
||||
nil,
|
||||
},
|
||||
}
|
||||
want := `Message <nil>
|
||||
Message {
|
||||
name: "Horse"
|
||||
}
|
||||
Message <nil>
|
||||
`
|
||||
if s := proto.MarshalTextString(m); s != want {
|
||||
t.Errorf(" got: %s\nwant: %s", s, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProto3Text(t *testing.T) {
|
||||
tests := []struct {
|
||||
m proto.Message
|
||||
want string
|
||||
}{
|
||||
// zero message
|
||||
{&proto3pb.Message{}, ``},
|
||||
// zero message except for an empty byte slice
|
||||
{&proto3pb.Message{Data: []byte{}}, ``},
|
||||
// trivial case
|
||||
{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
|
||||
// empty map
|
||||
{&pb.MessageWithMap{}, ``},
|
||||
// non-empty map; map format is the same as a repeated struct,
|
||||
// and they are sorted by key (numerically for numeric keys).
|
||||
{
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{
|
||||
-1: "Negatory",
|
||||
7: "Lucky",
|
||||
1234: "Feist",
|
||||
6345789: "Otis",
|
||||
}},
|
||||
`name_mapping:<key:-1 value:"Negatory" > ` +
|
||||
`name_mapping:<key:7 value:"Lucky" > ` +
|
||||
`name_mapping:<key:1234 value:"Feist" > ` +
|
||||
`name_mapping:<key:6345789 value:"Otis" >`,
|
||||
},
|
||||
// map with nil value; not well-defined, but we shouldn't crash
|
||||
{
|
||||
&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
|
||||
`msg_mapping:<key:7 >`,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
got := strings.TrimSpace(test.m.String())
|
||||
if got != test.want {
|
||||
t.Errorf("\n got %s\nwant %s", got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
.idea
|
||||
testdata
|
||||
vendor/
|
|
@ -1,9 +0,0 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
- tip
|
||||
|
||||
install:
|
||||
- go get -u github.com/golang/dep/...
|
||||
- dep ensure -vendor-only
|
|
@ -1,21 +0,0 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/boltdb/bolt"
|
||||
packages = ["."]
|
||||
revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
revision = "7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "568758fa97eac2946d6043bc72b7e728f802aeba2169c527f8fbad2222e74700"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
|
@ -1,3 +0,0 @@
|
|||
[[constraint]]
|
||||
name = "github.com/boltdb/bolt"
|
||||
version = "^1.0.0"
|
|
@ -1,53 +0,0 @@
|
|||
# Nuts - BoltDB Utilities
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/jmank88/nuts?status.svg)](https://godoc.org/github.com/jmank88/nuts) [![Go Report Card](https://goreportcard.com/badge/github.com/jmank88/nuts)](https://goreportcard.com/report/github.com/jmank88/nuts) [![Coverage Status](https://coveralls.io/repos/github/jmank88/nuts/badge.svg?branch=master)](https://coveralls.io/github/jmank88/nuts?branch=master)
|
||||
|
||||
A collection of [BoltDB](https://github.com/boltdb/bolt) utilities.
|
||||
|
||||
## Path Prefix Scans
|
||||
|
||||
The prefix scanning functions `SeekPathConflict` and `SeekPathMatch` facilitate maintenance and access to buckets of
|
||||
paths supporting *variable elements* with *exclusive matches*. Paths are `/` delimited, must begin with a `/`, and
|
||||
elements beginning with `:` or `*` are variable.
|
||||
|
||||
Examples:
|
||||
|
||||
```
|
||||
/
|
||||
/blogs/
|
||||
/blogs/:blog_id
|
||||
```
|
||||
|
||||
### Variable Elements
|
||||
|
||||
Path elements beginning with a `:` match any single element. Path elements beginning with `*` match any remaining
|
||||
elements, and therefore must be last.
|
||||
|
||||
Examples:
|
||||
|
||||
```
|
||||
Path: /blogs/:blog_id
|
||||
Match: /blogs/someblog
|
||||
```
|
||||
|
||||
```
|
||||
Path: /blogs/:blog_id/comments/:comment_id/*suffix
|
||||
Match: /blogs/42/comments/100/edit
|
||||
```
|
||||
|
||||
### Exclusive Matches
|
||||
|
||||
Using `SeekPathConflict` before putting new paths keeps the bucket conflict-free, which guarantees that `SeekPathMatch`
|
||||
will never match more than one path.
|
||||
|
||||
Examples:
|
||||
|
||||
```
|
||||
Conflicts: `/blogs/:blog_id`, `/blogs/golang`
|
||||
Match: `/blogs/golang`
|
||||
```
|
||||
|
||||
```
|
||||
Conflicts: `/blogs/*`, `/blogs/:blog_id/comments`
|
||||
Match: `/blogs/42/comments`
|
||||
```
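
Taken together, the two helpers suggest a register-then-route workflow: reject a new path if `SeekPathConflict` finds an existing entry, and resolve an incoming request with `SeekPathMatch`. The sketch below is a minimal illustration assembled from the `Example` functions elsewhere in this changeset; it assumes both functions take a `*bolt.Cursor` and a `[]byte` path and return the matching key (or `nil`), and the database file, bucket name, and handler values are hypothetical.

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
	"github.com/jmank88/nuts"
)

var routes = []byte("routes") // hypothetical bucket name

func main() {
	db, err := bolt.Open("routes.db", 0666, nil) // hypothetical file name
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Register a variable route, but only if it cannot conflict with an
	// existing one; this is what keeps SeekPathMatch unambiguous.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(routes)
		if err != nil {
			return err
		}
		path := []byte("/blogs/:blog_id")
		if k, _ := nuts.SeekPathConflict(b.Cursor(), path); k != nil {
			return fmt.Errorf("conflicts with existing route %q", k)
		}
		return b.Put(path, []byte("blog handler")) // hypothetical value
	}); err != nil {
		log.Fatal(err)
	}

	// Resolve a concrete request path back to the registered route.
	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(routes)
		k, v := nuts.SeekPathMatch(b.Cursor(), []byte("/blogs/42"))
		fmt.Printf("matched %q -> handler %q\n", k, v)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```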
|
|
@ -1,22 +0,0 @@
|
|||
package nuts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func ExampleKey_UUID() {
|
||||
type uuid struct{ a, b uint64 }
|
||||
|
||||
u := uuid{
|
||||
a: 0xaaaaaaaaaaaaaaaa,
|
||||
b: 0xbbbbbbbbbbbbbbbb,
|
||||
}
|
||||
|
||||
key := make(Key, 16)
|
||||
key[:8].Put(u.a)
|
||||
key[8:].Put(u.b)
|
||||
fmt.Printf("%#x", key)
|
||||
|
||||
// Output:
|
||||
// 0xaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbb
|
||||
}
|
|
@ -1,114 +0,0 @@
|
|||
package nuts
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestKeyLen(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
x uint64
|
||||
exp int
|
||||
}{
|
||||
{0, 1},
|
||||
{1, 1},
|
||||
{1 << 8, 2},
|
||||
{1 << 16, 3},
|
||||
{1 << 24, 4},
|
||||
{1 << 32, 5},
|
||||
{1 << 40, 6},
|
||||
{1 << 48, 7},
|
||||
{1 << 56, 8},
|
||||
} {
|
||||
got := KeyLen(test.x)
|
||||
if got != test.exp {
|
||||
t.Errorf("%d: expected length %d but got %d", test.x, test.exp, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKey(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
max int
|
||||
xs []uint64
|
||||
bs [][]byte
|
||||
}{
|
||||
{
|
||||
max: 1 << 7,
|
||||
xs: []uint64{0, 1, (1 << 8) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00}, {0x01}, {0xFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
max: 1 << 15,
|
||||
xs: []uint64{0, 1, (1 << 16) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00, 0x00}, {0x00, 0x01}, {0xFF, 0xFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
max: 1 << 23,
|
||||
xs: []uint64{0, 1, (1 << 24) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00, 0x00, 0x00}, {0x00, 0x00, 0x01}, {0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
max: 1 << 31,
|
||||
xs: []uint64{0, 1, (1 << 32) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00, 0x00, 0x00, 0x00},
|
||||
{0x00, 0x00, 0x00, 0x01},
|
||||
{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
max: 1 << 39,
|
||||
xs: []uint64{0, 1, (1 << 40) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
{0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
max: 1 << 47,
|
||||
xs: []uint64{0, 1, (1 << 48) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
max: 1 << 55,
|
||||
xs: []uint64{0, 1, (1 << 56) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
max: 1 << 60,
|
||||
xs: []uint64{0, 1, (1 << 60) - 1},
|
||||
bs: [][]byte{
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
{0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(strconv.Itoa(test.max), func(t *testing.T) {
|
||||
k := make(Key, KeyLen(uint64(test.max)))
|
||||
for i, x := range test.xs {
|
||||
k.Put(x)
|
||||
if !bytes.Equal(k, test.bs[i]) {
|
||||
t.Errorf("unexpected serialized integer %d:\n\t(GOT): %#x\n\t(WNT): %#x", x, k, test.bs[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,338 +0,0 @@
|
|||
//go:generate rm -rf testdata
|
||||
//
|
||||
//go:generate go run cmd/testpaths/main.go testdata standard 10 100 1000 10000 100000 1000000
|
||||
//go:generate go run cmd/testpaths/main.go testdata segmentCount 1 5 10 50 100
|
||||
//go:generate go run cmd/testpaths/main.go testdata branchFactor 1 5 10 50 100 500 1000 5000 10000
|
||||
//go:generate go run cmd/testpaths/main.go testdata segmentSize 1 5 10 50 100 200
|
||||
//
|
||||
//go:generate go run cmd/testdb/main.go testdata
|
||||
package nuts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
var bucketName = []byte("testBucket")
|
||||
|
||||
func exDB(f func(db *bolt.DB)) {
|
||||
tmp := tempfile()
|
||||
defer os.Remove(tmp)
|
||||
db, err := bolt.Open(tmp, 0666, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
f(db)
|
||||
}
|
||||
|
||||
func ExampleSeekPathMatch() {
|
||||
exDB(func(db *bolt.DB) {
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket(bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Put a variable path.
|
||||
return b.Put([]byte("/blogs/:blog_id/comments/:comment_id"), []byte{})
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket(bucketName)
|
||||
|
||||
// Match path.
|
||||
path, _ := SeekPathMatch(b.Cursor(), []byte("/blogs/asdf/comments/42"))
|
||||
fmt.Println(string(path))
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Output: /blogs/:blog_id/comments/:comment_id
|
||||
}
|
||||
|
||||
func ExampleSeekPathConflict() {
|
||||
exDB(func(db *bolt.DB) {
|
||||
insert := func(path string) {
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists(bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for conflicts.
|
||||
if k, _ := SeekPathConflict(b.Cursor(), []byte(path)); k != nil {
|
||||
fmt.Printf("Put(%s) blocked - conflict: %s\n", path, string(k))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put.
|
||||
if err := b.Put([]byte(path), []byte{}); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("Put(%s)\n", path)
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
// Put
|
||||
insert("/blogs/")
|
||||
// Put
|
||||
insert("/blogs/:blog_id")
|
||||
// Conflict
|
||||
insert("/blogs/a_blog")
|
||||
})
|
||||
|
||||
// Output:
|
||||
// Put(/blogs/)
|
||||
// Put(/blogs/:blog_id)
|
||||
// Put(/blogs/a_blog) blocked - conflict: /blogs/:blog_id
|
||||
}
|
||||
|
||||
var matchTests = []struct {
|
||||
path string
|
||||
matches []string
|
||||
}{
|
||||
{`/blogs`, []string{`/blogs`}},
|
||||
{`/blogs/`, []string{`/blogs/`}},
|
||||
{`/blogs/:blog_id`, []string{`/blogs/123`}},
|
||||
{`/blogs/:blog_id/comments`, []string{`/blogs/123/comments`}},
|
||||
{`/blogs/:blog_id/comments/`, []string{`/blogs/123/comments/`}},
|
||||
{`/blogs/:blog_id/comments/:comment_id`, []string{`/blogs/123/comments/456`}},
|
||||
{`/blogs/:blog_id/comments/:comment_id/*suffix`,
|
||||
[]string{`/blogs/123/comments/456/test`, `/blogs/123/comments/456/test/test`}},
|
||||
}
|
||||
|
||||
func TestMatchPath(t *testing.T) {
|
||||
testDB(t, func(db *bolt.DB) {
|
||||
bucketName := []byte("testBucket")
|
||||
|
||||
// Setup - Put all paths
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket(bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, test := range matchTests {
|
||||
err := b.Put([]byte(test.path), []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal("failed to insert paths:", err)
|
||||
}
|
||||
|
||||
// Test - Match each
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket(bucketName)
|
||||
for _, test := range matchTests {
|
||||
for _, match := range test.matches {
|
||||
k, _ := SeekPathMatch(b.Cursor(), []byte(match))
|
||||
if k == nil {
|
||||
t.Errorf("expected %q to match %q but got none", match, test.path)
|
||||
} else if !bytes.Equal(k, []byte(test.path)) {
|
||||
t.Errorf("expected %q to match %q but got %q", match, test.path, string(k))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal("tests failed:", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestConflicts(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
conflicts []string
|
||||
}{
|
||||
{`/test/test`, []string{`/test/test`, `/:test`, `/*test`, `/test/:test`, `/test/*test`, `/:test/test`}},
|
||||
{`/:test`, []string{`/:tst`, `/test`, `/*test`}},
|
||||
{`/test/*test`, []string{`/test/*tst`, `/test/test`, `/test/:tst`, `/test/test/test`, `/test/test/:test`, `/test/test/*test`}},
|
||||
} {
|
||||
testDB(t, func(db *bolt.DB) {
|
||||
// Setup - Put path
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket(bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return b.Put([]byte(test.path), []byte{})
|
||||
}); err != nil {
|
||||
t.Fatal("failed to insert path", err)
|
||||
}
|
||||
|
||||
// Test - Verify all conflicts
|
||||
if err := db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket(bucketName)
|
||||
for _, c := range test.conflicts {
|
||||
k, _ := SeekPathConflict(b.Cursor(), []byte(c))
|
||||
kStr := string(k)
|
||||
if kStr != test.path {
|
||||
t.Errorf("expected %q to match %q but got %q", c, test.path, kStr)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal("failed to run tests", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Attempts to put all matchTests w/o conflict.
|
||||
func TestNoConflicts(t *testing.T) {
|
||||
testDB(t, func(db *bolt.DB) {
|
||||
bucketName := []byte("testBucket")
|
||||
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket(bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c := b.Cursor()
|
||||
for _, test := range matchTests {
|
||||
pathB := []byte(test.path)
|
||||
if k, _ := SeekPathConflict(c, pathB); k != nil {
|
||||
t.Errorf("unexpected conflict with %q: %s", test.path, string(k))
|
||||
}
|
||||
|
||||
if err := b.Put(pathB, []byte{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal("failed to insert paths:", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func Benchmark(b *testing.B) {
|
||||
b.Run("standard", forEachDB("standard", strings.NewReplacer(":", "", "*", "").Replace))
|
||||
b.Run("branchFactor", forEachDB("branchFactor", nil))
|
||||
b.Run("segmentCount", forEachDB("segmentCount", nil))
|
||||
b.Run("segmentSize", forEachDB("segmentSize", nil))
|
||||
}
|
||||
|
||||
func forEachDB(testname string, fn func(path string) string) func(*testing.B) {
|
||||
return func(b *testing.B) {
|
||||
dir := filepath.Join("testdata", testname)
|
||||
err := filepath.Walk(dir, func(testfile string, info os.FileInfo, err error) error {
|
||||
if !info.IsDir() && filepath.Ext(testfile) == ".db" {
|
||||
arg := strings.TrimSuffix(filepath.Base(testfile), ".db")
|
||||
|
||||
b.Run(arg, benchMatch(testfile, fn))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchMatch(testdb string, pathFn func(path string) string) func(b *testing.B) {
|
||||
return func(b *testing.B) {
|
||||
db, err := bolt.Open(testdb, 0666, nil)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to open database %s: %s", testdb, err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
testtxt := strings.TrimSuffix(testdb, ".db") + ".txt"
|
||||
f, err := os.Open(testtxt)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to open file %s: %s", testtxt, err)
|
||||
}
|
||||
|
||||
var paths [][]byte
|
||||
func() {
|
||||
defer f.Close()
|
||||
|
||||
// Use default, ScanLines
|
||||
s := bufio.NewScanner(f)
|
||||
|
||||
paths = make([][]byte, 0, b.N)
|
||||
for s.Scan() {
|
||||
if pathFn == nil {
|
||||
paths = append(paths, s.Bytes())
|
||||
} else {
|
||||
paths = append(paths, []byte(pathFn(s.Text())))
|
||||
}
|
||||
|
||||
if len(paths) == cap(paths) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.Err() != nil {
|
||||
b.Fatal("failed to read text paths:", s.Err())
|
||||
}
|
||||
}()
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
lookup := func(path []byte) error {
|
||||
return db.View(func(tx *bolt.Tx) error {
|
||||
bk := tx.Bucket([]byte("paths"))
|
||||
k, _ := SeekPathMatch(bk.Cursor(), path)
|
||||
if k == nil {
|
||||
return errors.New("no match found")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
path := paths[i%len(paths)]
|
||||
|
||||
if err := lookup(path); err != nil {
|
||||
b.Fatalf("failed to match %q: %s", string(path), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testDB(t *testing.T, f func(db *bolt.DB)) {
|
||||
tmp := tempfile()
|
||||
defer os.Remove(tmp)
|
||||
db, err := bolt.Open(tmp, 0666, nil)
|
||||
if err != nil {
|
||||
t.Fatal("failed to open db:", err)
|
||||
}
|
||||
defer db.Close()
|
||||
f(db)
|
||||
}
|
||||
|
||||
func tempfile() string {
|
||||
f, err := ioutil.TempFile("", "nuts-bolt-")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := os.Remove(f.Name()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return f.Name()
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# popular temporaries
|
||||
.err
|
||||
.out
|
||||
.diff
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
|
@ -1,3 +0,0 @@
|
|||
[submodule "git-hooks"]
|
||||
path = git-hooks
|
||||
url = https://github.com/nightlyone/git-hooks
|
|
@ -1,14 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.4.3
|
||||
- 1.6.2
|
||||
- tip
|
||||
|
||||
# Only test commits to production branch and all pull requests
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
|
@ -1,52 +0,0 @@
|
|||
lockfile
|
||||
=========
|
||||
Handle locking via pid files.
|
||||
|
||||
[![Build Status Unix][1]][2]
|
||||
[![Build status Windows][3]][4]
|
||||
|
||||
[1]: https://secure.travis-ci.org/nightlyone/lockfile.png
|
||||
[2]: https://travis-ci.org/nightlyone/lockfile
|
||||
[3]: https://ci.appveyor.com/api/projects/status/7mojkmauj81uvp8u/branch/master?svg=true
|
||||
[4]: https://ci.appveyor.com/project/nightlyone/lockfile/branch/master
|
||||
|
||||
|
||||
|
||||
install
|
||||
-------
|
||||
Install [Go 1][5], either [from source][6] or [with a prepackaged binary][7].
|
||||
For Windows support, Go 1.4 or newer is required.
|
||||
|
||||
Then run
|
||||
|
||||
go get github.com/nightlyone/lockfile
|
||||
|
||||
[5]: http://golang.org
|
||||
[6]: http://golang.org/doc/install/source
|
||||
[7]: http://golang.org/doc/install
|
||||
|
||||
LICENSE
|
||||
-------
|
||||
MIT
|
||||
|
||||
documentation
|
||||
-------------
|
||||
[package documentation at godoc.org](http://godoc.org/github.com/nightlyone/lockfile)
|
||||
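A minimal usage sketch (based on the `New`, `TryLock` and `Unlock` calls from the package's example test; the file name and error handling here are illustrative only):

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/nightlyone/lockfile"
)

func main() {
	// Create a lockfile handle; this does not take the lock yet.
	lock, err := lockfile.New(filepath.Join(os.TempDir(), "app.lck"))
	if err != nil {
		log.Fatalf("cannot init lock: %v", err)
	}

	// TryLock only attempts to acquire the lock, so the error must be checked.
	if err := lock.TryLock(); err != nil {
		log.Fatalf("cannot lock %q: %v", lock, err)
	}
	defer lock.Unlock()

	// ... do work while holding the lock ...
}
```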
|
||||
|
||||
|
||||
contributing
|
||||
============
|
||||
|
||||
Contributions are welcome. Please open an issue or send me a pull request for a dedicated branch.
|
||||
Make sure the git commit hooks show that it works.
|
||||
|
||||
git commit hooks
|
||||
-----------------------
|
||||
enable commit hooks via
|
||||
|
||||
cd .git ; rm -rf hooks; ln -s ../git-hooks hooks ; cd ..
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
clone_folder: c:\gopath\src\github.com\nightlyone\lockfile
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- go version
|
||||
- go env
|
||||
- go get -v -t ./...
|
||||
|
||||
build_script:
|
||||
- go test -v ./...
|
|
@ -1,308 +0,0 @@
|
|||
package lockfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func ExampleLockfile() {
|
||||
lock, err := New(filepath.Join(os.TempDir(), "lock.me.now.lck"))
|
||||
if err != nil {
|
||||
fmt.Printf("Cannot init lock. reason: %v", err)
|
||||
panic(err) // handle properly please!
|
||||
}
|
||||
err = lock.TryLock()
|
||||
|
||||
// Error handling is essential, as we only try to get the lock.
|
||||
if err != nil {
|
||||
fmt.Printf("Cannot lock %q, reason: %v", lock, err)
|
||||
panic(err) // handle properly please!
|
||||
}
|
||||
|
||||
defer lock.Unlock()
|
||||
|
||||
fmt.Println("Do stuff under lock")
|
||||
// Output: Do stuff under lock
|
||||
}
|
||||
|
||||
func TestBasicLockUnlock(t *testing.T) {
|
||||
path, err := filepath.Abs("test_lockfile.pid")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
lf, err := New(path)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
fmt.Println("Error making lockfile: ", err)
|
||||
return
|
||||
}
|
||||
|
||||
err = lf.TryLock()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
fmt.Println("Error locking lockfile: ", err)
|
||||
return
|
||||
}
|
||||
|
||||
err = lf.Unlock()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
fmt.Println("Error unlocking lockfile: ", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func GetDeadPID() int {
|
||||
// I have no idea how Windows handles large PIDs, or if they even exist.
|
||||
// So limit it to less than 4096 to be safe.
|
||||
|
||||
const maxPid = 4095
|
||||
|
||||
// limited iteration, so we finish one day
|
||||
seen := map[int]bool{}
|
||||
for len(seen) < maxPid {
|
||||
pid := rand.Intn(maxPid + 1) // see https://godoc.org/math/rand#Intn why
|
||||
if seen[pid] {
|
||||
continue
|
||||
}
|
||||
seen[pid] = true
|
||||
running, err := isRunning(pid)
|
||||
if err != nil {
|
||||
fmt.Println("Error checking PID: ", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !running {
|
||||
return pid
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("all pids lower than %d are used, cannot test this", maxPid))
|
||||
}
|
||||
|
||||
func TestBusy(t *testing.T) {
|
||||
path, err := filepath.Abs("test_lockfile.pid")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
pid := os.Getppid()
|
||||
|
||||
if err := ioutil.WriteFile(path, []byte(strconv.Itoa(pid)+"\n"), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
defer os.Remove(path)
|
||||
|
||||
lf, err := New(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
got := lf.TryLock()
|
||||
if got != ErrBusy {
|
||||
t.Fatalf("expected error %q, got %v", ErrBusy, got)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestRogueDeletion(t *testing.T) {
|
||||
path, err := filepath.Abs("test_lockfile.pid")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
lf, err := New(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
err = lf.TryLock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
err = os.Remove(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
got := lf.Unlock()
|
||||
if got != ErrRogueDeletion {
|
||||
t.Fatalf("unexpected error: %v", got)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestRogueDeletionDeadPid(t *testing.T) {
|
||||
path, err := filepath.Abs("test_lockfile.pid")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
lf, err := New(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
err = lf.TryLock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
pid := GetDeadPID()
|
||||
if err := ioutil.WriteFile(path, []byte(strconv.Itoa(pid)+"\n"), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
defer os.Remove(path)
|
||||
|
||||
err = lf.Unlock()
|
||||
if err != ErrRogueDeletion {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
t.Fatal("lockfile should not be deleted by us, if we didn't create it")
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovesStaleLockOnDeadOwner(t *testing.T) {
|
||||
path, err := filepath.Abs("test_lockfile.pid")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
lf, err := New(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
pid := GetDeadPID()
|
||||
if err := ioutil.WriteFile(path, []byte(strconv.Itoa(pid)+"\n"), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
err = lf.TryLock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := lf.Unlock(); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidPidLeadToReplacedLockfileAndSuccess(t *testing.T) {
|
||||
path, err := filepath.Abs("test_lockfile.pid")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
if err := ioutil.WriteFile(path, []byte("\n"), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
defer os.Remove(path)
|
||||
|
||||
lf, err := New(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := lf.TryLock(); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// now check if file exists and contains the correct content
|
||||
got, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
return
|
||||
}
|
||||
want := fmt.Sprintf("%d\n", os.Getpid())
|
||||
if string(got) != want {
|
||||
t.Fatalf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanPidLine(t *testing.T) {
|
||||
tests := [...]struct {
|
||||
input []byte
|
||||
pid int
|
||||
xfail error
|
||||
}{
|
||||
{
|
||||
xfail: ErrInvalidPid,
|
||||
},
|
||||
{
|
||||
input: []byte(""),
|
||||
xfail: ErrInvalidPid,
|
||||
},
|
||||
{
|
||||
input: []byte("\n"),
|
||||
xfail: ErrInvalidPid,
|
||||
},
|
||||
{
|
||||
input: []byte("-1\n"),
|
||||
xfail: ErrInvalidPid,
|
||||
},
|
||||
{
|
||||
input: []byte("0\n"),
|
||||
xfail: ErrInvalidPid,
|
||||
},
|
||||
{
|
||||
input: []byte("a\n"),
|
||||
xfail: ErrInvalidPid,
|
||||
},
|
||||
{
|
||||
input: []byte("1\n"),
|
||||
pid: 1,
|
||||
},
|
||||
}
|
||||
|
||||
// test positive cases first
|
||||
for step, tc := range tests {
|
||||
if tc.xfail != nil {
|
||||
continue
|
||||
}
|
||||
want := tc.pid
|
||||
got, err := scanPidLine(tc.input)
|
||||
if err != nil {
|
||||
t.Fatalf("%d: unexpected error %v", step, err)
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("%d: expected pid %d, got %d", step, want, got)
|
||||
}
|
||||
}
|
||||
|
||||
// test negative cases now
|
||||
for step, tc := range tests {
|
||||
if tc.xfail == nil {
|
||||
continue
|
||||
}
|
||||
want := tc.xfail
|
||||
_, got := scanPidLine(tc.input)
|
||||
if got != want {
|
||||
t.Errorf("%d: expected error %v, got %v", step, want, got)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,2 +0,0 @@
|
|||
test_program/test_program_bin
|
||||
fuzz/
|
|
@ -1,22 +0,0 @@
|
|||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.8.4
|
||||
- 1.9.1
|
||||
- tip
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
script:
|
||||
- if [ -n "$(go fmt ./...)" ]; then exit 1; fi
|
||||
- ./test.sh
|
||||
- ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git
|
||||
before_install:
|
||||
- go get github.com/axw/gocov/gocov
|
||||
- go get github.com/mattn/goveralls
|
||||
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
|
||||
branches:
|
||||
only: [master]
|
||||
after_success:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN
|
|
@ -1,131 +0,0 @@
|
|||
# go-toml
|
||||
|
||||
Go library for the [TOML](https://github.com/mojombo/toml) format.
|
||||
|
||||
This library supports TOML version
|
||||
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
|
||||
[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
|
||||
[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
|
||||
[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
|
||||
|
||||
## Features
|
||||
|
||||
Go-toml provides the following features for using data parsed from TOML documents:
|
||||
|
||||
* Load TOML documents from files and string data
|
||||
* Easily navigate TOML structure using Tree
|
||||
* Marshaling and unmarshaling to and from data structures
|
||||
* Line & column position data for all parsed elements
|
||||
* [Query support similar to JSON-Path](query/)
|
||||
* Syntax errors contain line and column numbers
|
||||
|
||||
## Import
|
||||
|
||||
```go
|
||||
import "github.com/pelletier/go-toml"
|
||||
```
|
||||
|
||||
## Usage example
|
||||
|
||||
Read a TOML document:
|
||||
|
||||
```go
|
||||
config, _ := toml.Load(`
|
||||
[postgres]
|
||||
user = "pelletier"
|
||||
password = "mypassword"`)
|
||||
// retrieve data directly
|
||||
user := config.Get("postgres.user").(string)
|
||||
|
||||
// or using an intermediate object
|
||||
postgresConfig := config.Get("postgres").(*toml.Tree)
|
||||
password := postgresConfig.Get("password").(string)
|
||||
```
|
||||
|
||||
Or use Unmarshal:
|
||||
|
||||
```go
|
||||
type Postgres struct {
|
||||
User string
|
||||
Password string
|
||||
}
|
||||
type Config struct {
|
||||
Postgres Postgres
|
||||
}
|
||||
|
||||
doc := []byte(`
|
||||
[Postgres]
|
||||
User = "pelletier"
|
||||
Password = "mypassword"`)
|
||||
|
||||
config := Config{}
|
||||
toml.Unmarshal(doc, &config)
|
||||
fmt.Println("user=", config.Postgres.User)
|
||||
```
|
||||
|
||||
Or use a query:
|
||||
|
||||
```go
|
||||
// use a query to gather elements without walking the tree
// (the query API lives in the query subpackage: github.com/pelletier/go-toml/query)
|
||||
q, _ := query.Compile("$..[user,password]")
|
||||
results := q.Execute(config)
|
||||
for ii, item := range results.Values() {
|
||||
fmt.Println("Query result %d: %v", ii, item)
|
||||
}
|
||||
```
|
||||
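Line and column positions of parsed elements can also be retrieved; a minimal sketch, assuming the `Tree.GetPosition` accessor used in the package's example code:

```go
config, _ := toml.Load(`
[postgres]
user = "pelletier"
password = "mypassword"`)

postgresConfig := config.Get("postgres").(*toml.Tree)

// GetPosition reports where a key was defined in the source document.
fmt.Printf("user position: %v\n", postgresConfig.GetPosition("user"))
```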
|
||||
## Documentation
|
||||
|
||||
The documentation and additional examples are available at
|
||||
[godoc.org](http://godoc.org/github.com/pelletier/go-toml).
|
||||
|
||||
## Tools
|
||||
|
||||
Go-toml provides two handy command line tools:
|
||||
|
||||
* `tomll`: Reads TOML files and lints them.
|
||||
|
||||
```
|
||||
go install github.com/pelletier/go-toml/cmd/tomll
|
||||
tomll --help
|
||||
```
|
||||
* `tomljson`: Reads a TOML file and outputs its JSON representation.
|
||||
|
||||
```
|
||||
go install github.com/pelletier/go-toml/cmd/tomljson
|
||||
tomljson --help
|
||||
```
|
||||
|
||||
## Contribute
|
||||
|
||||
Feel free to report bugs and submit patches using GitHub's issue and pull request system on
|
||||
[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be
|
||||
much appreciated!
|
||||
|
||||
### Run tests
|
||||
|
||||
You have to make sure two kinds of tests run:
|
||||
|
||||
1. The Go unit tests
|
||||
2. The TOML examples base
|
||||
|
||||
You can run both of them using `./test.sh`.
|
||||
|
||||
### Fuzzing
|
||||
|
||||
The script `./fuzz.sh` is available to
|
||||
run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
|
||||
|
||||
## Versioning
|
||||
|
||||
Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
|
||||
of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
|
||||
this document. The last two major versions of Go are supported
|
||||
(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
|
||||
|
||||
## License
|
||||
|
||||
The MIT License (MIT). Read [LICENSE](LICENSE).
|
|
@ -1,164 +0,0 @@
|
|||
{
|
||||
"array": {
|
||||
"key1": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
],
|
||||
"key2": [
|
||||
"red",
|
||||
"yellow",
|
||||
"green"
|
||||
],
|
||||
"key3": [
|
||||
[
|
||||
1,
|
||||
2
|
||||
],
|
||||
[
|
||||
3,
|
||||
4,
|
||||
5
|
||||
]
|
||||
],
|
||||
"key4": [
|
||||
[
|
||||
1,
|
||||
2
|
||||
],
|
||||
[
|
||||
"a",
|
||||
"b",
|
||||
"c"
|
||||
]
|
||||
],
|
||||
"key5": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
],
|
||||
"key6": [
|
||||
1,
|
||||
2
|
||||
]
|
||||
},
|
||||
"boolean": {
|
||||
"False": false,
|
||||
"True": true
|
||||
},
|
||||
"datetime": {
|
||||
"key1": "1979-05-27T07:32:00Z",
|
||||
"key2": "1979-05-27T00:32:00-07:00",
|
||||
"key3": "1979-05-27T00:32:00.999999-07:00"
|
||||
},
|
||||
"float": {
|
||||
"both": {
|
||||
"key": 6.626e-34
|
||||
},
|
||||
"exponent": {
|
||||
"key1": 5e+22,
|
||||
"key2": 1000000,
|
||||
"key3": -0.02
|
||||
},
|
||||
"fractional": {
|
||||
"key1": 1,
|
||||
"key2": 3.1415,
|
||||
"key3": -0.01
|
||||
},
|
||||
"underscores": {
|
||||
"key1": 9224617.445991227,
|
||||
"key2": 1e+100
|
||||
}
|
||||
},
|
||||
"fruit": [{
|
||||
"name": "apple",
|
||||
"physical": {
|
||||
"color": "red",
|
||||
"shape": "round"
|
||||
},
|
||||
"variety": [{
|
||||
"name": "red delicious"
|
||||
},
|
||||
{
|
||||
"name": "granny smith"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "banana",
|
||||
"variety": [{
|
||||
"name": "plantain"
|
||||
}]
|
||||
}
|
||||
],
|
||||
"integer": {
|
||||
"key1": 99,
|
||||
"key2": 42,
|
||||
"key3": 0,
|
||||
"key4": -17,
|
||||
"underscores": {
|
||||
"key1": 1000,
|
||||
"key2": 5349221,
|
||||
"key3": 12345
|
||||
}
|
||||
},
|
||||
"products": [{
|
||||
"name": "Hammer",
|
||||
"sku": 738594937
|
||||
},
|
||||
{},
|
||||
{
|
||||
"color": "gray",
|
||||
"name": "Nail",
|
||||
"sku": 284758393
|
||||
}
|
||||
],
|
||||
"string": {
|
||||
"basic": {
|
||||
"basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
|
||||
},
|
||||
"literal": {
|
||||
"multiline": {
|
||||
"lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n",
|
||||
"regex2": "I [dw]on't need \\d{2} apples"
|
||||
},
|
||||
"quoted": "Tom \"Dubs\" Preston-Werner",
|
||||
"regex": "\u003c\\i\\c*\\s*\u003e",
|
||||
"winpath": "C:\\Users\\nodejs\\templates",
|
||||
"winpath2": "\\\\ServerX\\admin$\\system32\\"
|
||||
},
|
||||
"multiline": {
|
||||
"continued": {
|
||||
"key1": "The quick brown fox jumps over the lazy dog.",
|
||||
"key2": "The quick brown fox jumps over the lazy dog.",
|
||||
"key3": "The quick brown fox jumps over the lazy dog."
|
||||
},
|
||||
"key1": "One\nTwo",
|
||||
"key2": "One\nTwo",
|
||||
"key3": "One\nTwo"
|
||||
}
|
||||
},
|
||||
"table": {
|
||||
"inline": {
|
||||
"name": {
|
||||
"first": "Tom",
|
||||
"last": "Preston-Werner"
|
||||
},
|
||||
"point": {
|
||||
"x": 1,
|
||||
"y": 2
|
||||
}
|
||||
},
|
||||
"key": "value",
|
||||
"subtable": {
|
||||
"key": "another value"
|
||||
}
|
||||
},
|
||||
"x": {
|
||||
"y": {
|
||||
"z": {
|
||||
"w": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,32 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
reference_ref=${1:-master}
|
||||
reference_git=${2:-.}
|
||||
|
||||
if ! hash benchstat 2>/dev/null; then
|
||||
echo "Installing benchstat"
|
||||
go get golang.org/x/perf/cmd/benchstat
|
||||
go install golang.org/x/perf/cmd/benchstat
|
||||
fi
|
||||
|
||||
tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
|
||||
ref_tempdir="${tempdir}/ref"
|
||||
ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
|
||||
local_benchmark="`pwd`/benchmark-local.txt"
|
||||
|
||||
echo "=== ${reference_ref} (${ref_tempdir})"
|
||||
git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
|
||||
pushd ${ref_tempdir} >/dev/null
|
||||
git checkout ${reference_ref} >/dev/null 2>/dev/null
|
||||
go test -bench=. -benchmem | tee ${ref_benchmark}
|
||||
popd >/dev/null
|
||||
|
||||
echo ""
|
||||
echo "=== local"
|
||||
go test -bench=. -benchmem | tee ${local_benchmark}
|
||||
|
||||
echo ""
|
||||
echo "=== diff"
|
||||
benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
|
|
@ -1,244 +0,0 @@
|
|||
################################################################################
|
||||
## Comment
|
||||
|
||||
# Speak your mind with the hash symbol. They go from the symbol to the end of
|
||||
# the line.
|
||||
|
||||
|
||||
################################################################################
|
||||
## Table
|
||||
|
||||
# Tables (also known as hash tables or dictionaries) are collections of
|
||||
# key/value pairs. They appear in square brackets on a line by themselves.
|
||||
|
||||
[table]
|
||||
|
||||
key = "value" # Yeah, you can do this.
|
||||
|
||||
# Nested tables are denoted by table names with dots in them. Name your tables
|
||||
# whatever crap you please, just don't use #, ., [ or ].
|
||||
|
||||
[table.subtable]
|
||||
|
||||
key = "another value"
|
||||
|
||||
# You don't need to specify all the super-tables if you don't want to. TOML
|
||||
# knows how to do it for you.
|
||||
|
||||
# [x] you
|
||||
# [x.y] don't
|
||||
# [x.y.z] need these
|
||||
[x.y.z.w] # for this to work
|
||||
|
||||
|
||||
################################################################################
|
||||
## Inline Table
|
||||
|
||||
# Inline tables provide a more compact syntax for expressing tables. They are
|
||||
# especially useful for grouped data that can otherwise quickly become verbose.
|
||||
# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
|
||||
# allowed between the curly braces unless they are valid within a value.
|
||||
|
||||
[table.inline]
|
||||
|
||||
name = { first = "Tom", last = "Preston-Werner" }
|
||||
point = { x = 1, y = 2 }
|
||||
|
||||
|
||||
################################################################################
|
||||
## String
|
||||
|
||||
# There are four ways to express strings: basic, multi-line basic, literal, and
|
||||
# multi-line literal. All strings must contain only valid UTF-8 characters.
|
||||
|
||||
[string.basic]
|
||||
|
||||
basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
|
||||
|
||||
[string.multiline]
|
||||
|
||||
# The following strings are byte-for-byte equivalent:
|
||||
key1 = "One\nTwo"
|
||||
key2 = """One\nTwo"""
|
||||
key3 = """
|
||||
One
|
||||
Two"""
|
||||
|
||||
[string.multiline.continued]
|
||||
|
||||
# The following strings are byte-for-byte equivalent:
|
||||
key1 = "The quick brown fox jumps over the lazy dog."
|
||||
|
||||
key2 = """
|
||||
The quick brown \
|
||||
|
||||
|
||||
fox jumps over \
|
||||
the lazy dog."""
|
||||
|
||||
key3 = """\
|
||||
The quick brown \
|
||||
fox jumps over \
|
||||
the lazy dog.\
|
||||
"""
|
||||
|
||||
[string.literal]
|
||||
|
||||
# What you see is what you get.
|
||||
winpath = 'C:\Users\nodejs\templates'
|
||||
winpath2 = '\\ServerX\admin$\system32\'
|
||||
quoted = 'Tom "Dubs" Preston-Werner'
|
||||
regex = '<\i\c*\s*>'
|
||||
|
||||
|
||||
[string.literal.multiline]
|
||||
|
||||
regex2 = '''I [dw]on't need \d{2} apples'''
|
||||
lines = '''
|
||||
The first newline is
|
||||
trimmed in raw strings.
|
||||
All other whitespace
|
||||
is preserved.
|
||||
'''
|
||||
|
||||
|
||||
################################################################################
|
||||
## Integer
|
||||
|
||||
# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
|
||||
# Negative numbers are prefixed with a minus sign.
|
||||
|
||||
[integer]
|
||||
|
||||
key1 = +99
|
||||
key2 = 42
|
||||
key3 = 0
|
||||
key4 = -17
|
||||
|
||||
[integer.underscores]
|
||||
|
||||
# For large numbers, you may use underscores to enhance readability. Each
|
||||
# underscore must be surrounded by at least one digit.
|
||||
key1 = 1_000
|
||||
key2 = 5_349_221
|
||||
key3 = 1_2_3_4_5 # valid but inadvisable
|
||||
|
||||
|
||||
################################################################################
|
||||
## Float
|
||||
|
||||
# A float consists of an integer part (which may be prefixed with a plus or
|
||||
# minus sign) followed by a fractional part and/or an exponent part.
|
||||
|
||||
[float.fractional]
|
||||
|
||||
key1 = +1.0
|
||||
key2 = 3.1415
|
||||
key3 = -0.01
|
||||
|
||||
[float.exponent]
|
||||
|
||||
key1 = 5e+22
|
||||
key2 = 1e6
|
||||
key3 = -2E-2
|
||||
|
||||
[float.both]
|
||||
|
||||
key = 6.626e-34
|
||||
|
||||
[float.underscores]
|
||||
|
||||
key1 = 9_224_617.445_991_228_313
|
||||
key2 = 1e1_00
|
||||
|
||||
|
||||
################################################################################
|
||||
## Boolean
|
||||
|
||||
# Booleans are just the tokens you're used to. Always lowercase.
|
||||
|
||||
[boolean]
|
||||
|
||||
True = true
|
||||
False = false
|
||||
|
||||
|
||||
################################################################################
|
||||
## Datetime
|
||||
|
||||
# Datetimes are RFC 3339 dates.
|
||||
|
||||
[datetime]
|
||||
|
||||
key1 = 1979-05-27T07:32:00Z
|
||||
key2 = 1979-05-27T00:32:00-07:00
|
||||
key3 = 1979-05-27T00:32:00.999999-07:00
|
||||
|
||||
|
||||
################################################################################
|
||||
## Array
|
||||
|
||||
# Arrays are square brackets with other primitives inside. Whitespace is
|
||||
# ignored. Elements are separated by commas. Data types may not be mixed.
|
||||
|
||||
[array]
|
||||
|
||||
key1 = [ 1, 2, 3 ]
|
||||
key2 = [ "red", "yellow", "green" ]
|
||||
key3 = [ [ 1, 2 ], [3, 4, 5] ]
|
||||
#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
|
||||
|
||||
# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
|
||||
# also ignore newlines between the brackets. Terminating commas are ok before
|
||||
# the closing bracket.
|
||||
|
||||
key5 = [
|
||||
1, 2, 3
|
||||
]
|
||||
key6 = [
|
||||
1,
|
||||
2, # this is ok
|
||||
]
|
||||
|
||||
|
||||
################################################################################
|
||||
## Array of Tables
|
||||
|
||||
# These can be expressed by using a table name in double brackets. Each table
|
||||
# with the same double bracketed name will be an element in the array. The
|
||||
# tables are inserted in the order encountered.
|
||||
|
||||
[[products]]
|
||||
|
||||
name = "Hammer"
|
||||
sku = 738594937
|
||||
|
||||
[[products]]
|
||||
|
||||
[[products]]
|
||||
|
||||
name = "Nail"
|
||||
sku = 284758393
|
||||
color = "gray"
|
||||
|
||||
|
||||
# You can create nested arrays of tables as well.
|
||||
|
||||
[[fruit]]
|
||||
name = "apple"
|
||||
|
||||
[fruit.physical]
|
||||
color = "red"
|
||||
shape = "round"
|
||||
|
||||
[[fruit.variety]]
|
||||
name = "red delicious"
|
||||
|
||||
[[fruit.variety]]
|
||||
name = "granny smith"
|
||||
|
||||
[[fruit]]
|
||||
name = "banana"
|
||||
|
||||
[[fruit.variety]]
|
||||
name = "plantain"
|
|
@ -1,121 +0,0 @@
|
|||
---
|
||||
array:
|
||||
key1:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
key2:
|
||||
- red
|
||||
- yellow
|
||||
- green
|
||||
key3:
|
||||
- - 1
|
||||
- 2
|
||||
- - 3
|
||||
- 4
|
||||
- 5
|
||||
key4:
|
||||
- - 1
|
||||
- 2
|
||||
- - a
|
||||
- b
|
||||
- c
|
||||
key5:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
key6:
|
||||
- 1
|
||||
- 2
|
||||
boolean:
|
||||
'False': false
|
||||
'True': true
|
||||
datetime:
|
||||
key1: '1979-05-27T07:32:00Z'
|
||||
key2: '1979-05-27T00:32:00-07:00'
|
||||
key3: '1979-05-27T00:32:00.999999-07:00'
|
||||
float:
|
||||
both:
|
||||
key: 6.626e-34
|
||||
exponent:
|
||||
key1: 5.0e+22
|
||||
key2: 1000000
|
||||
key3: -0.02
|
||||
fractional:
|
||||
key1: 1
|
||||
key2: 3.1415
|
||||
key3: -0.01
|
||||
underscores:
|
||||
key1: 9224617.445991227
|
||||
key2: 1.0e+100
|
||||
fruit:
|
||||
- name: apple
|
||||
physical:
|
||||
color: red
|
||||
shape: round
|
||||
variety:
|
||||
- name: red delicious
|
||||
- name: granny smith
|
||||
- name: banana
|
||||
variety:
|
||||
- name: plantain
|
||||
integer:
|
||||
key1: 99
|
||||
key2: 42
|
||||
key3: 0
|
||||
key4: -17
|
||||
underscores:
|
||||
key1: 1000
|
||||
key2: 5349221
|
||||
key3: 12345
|
||||
products:
|
||||
- name: Hammer
|
||||
sku: 738594937
|
||||
- {}
|
||||
- color: gray
|
||||
name: Nail
|
||||
sku: 284758393
|
||||
string:
|
||||
basic:
|
||||
basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
|
||||
literal:
|
||||
multiline:
|
||||
lines: |
|
||||
The first newline is
|
||||
trimmed in raw strings.
|
||||
All other whitespace
|
||||
is preserved.
|
||||
regex2: I [dw]on't need \d{2} apples
|
||||
quoted: Tom "Dubs" Preston-Werner
|
||||
regex: "<\\i\\c*\\s*>"
|
||||
winpath: C:\Users\nodejs\templates
|
||||
winpath2: "\\\\ServerX\\admin$\\system32\\"
|
||||
multiline:
|
||||
continued:
|
||||
key1: The quick brown fox jumps over the lazy dog.
|
||||
key2: The quick brown fox jumps over the lazy dog.
|
||||
key3: The quick brown fox jumps over the lazy dog.
|
||||
key1: |-
|
||||
One
|
||||
Two
|
||||
key2: |-
|
||||
One
|
||||
Two
|
||||
key3: |-
|
||||
One
|
||||
Two
|
||||
table:
|
||||
inline:
|
||||
name:
|
||||
first: Tom
|
||||
last: Preston-Werner
|
||||
point:
|
||||
x: 1
|
||||
y: 2
|
||||
key: value
|
||||
subtable:
|
||||
key: another value
|
||||
x:
|
||||
y:
|
||||
z:
|
||||
w: {}
|
|
@ -1,192 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
burntsushi "github.com/BurntSushi/toml"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type benchmarkDoc struct {
|
||||
Table struct {
|
||||
Key string
|
||||
Subtable struct {
|
||||
Key string
|
||||
}
|
||||
Inline struct {
|
||||
Name struct {
|
||||
First string
|
||||
Last string
|
||||
}
|
||||
Point struct {
|
||||
X int64
|
||||
Y int64
|
||||
}
|
||||
}
|
||||
}
|
||||
String struct {
|
||||
Basic struct {
|
||||
Basic string
|
||||
}
|
||||
Multiline struct {
|
||||
Key1 string
|
||||
Key2 string
|
||||
Key3 string
|
||||
Continued struct {
|
||||
Key1 string
|
||||
Key2 string
|
||||
Key3 string
|
||||
}
|
||||
}
|
||||
Literal struct {
|
||||
Winpath string
|
||||
Winpath2 string
|
||||
Quoted string
|
||||
Regex string
|
||||
Multiline struct {
|
||||
Regex2 string
|
||||
Lines string
|
||||
}
|
||||
}
|
||||
}
|
||||
Integer struct {
|
||||
Key1 int64
|
||||
Key2 int64
|
||||
Key3 int64
|
||||
Key4 int64
|
||||
Underscores struct {
|
||||
Key1 int64
|
||||
Key2 int64
|
||||
Key3 int64
|
||||
}
|
||||
}
|
||||
Float struct {
|
||||
Fractional struct {
|
||||
Key1 float64
|
||||
Key2 float64
|
||||
Key3 float64
|
||||
}
|
||||
Exponent struct {
|
||||
Key1 float64
|
||||
Key2 float64
|
||||
Key3 float64
|
||||
}
|
||||
Both struct {
|
||||
Key float64
|
||||
}
|
||||
Underscores struct {
|
||||
Key1 float64
|
||||
Key2 float64
|
||||
}
|
||||
}
|
||||
Boolean struct {
|
||||
True bool
|
||||
False bool
|
||||
}
|
||||
Datetime struct {
|
||||
Key1 time.Time
|
||||
Key2 time.Time
|
||||
Key3 time.Time
|
||||
}
|
||||
Array struct {
|
||||
Key1 []int64
|
||||
Key2 []string
|
||||
Key3 [][]int64
|
||||
// TODO: Key4 not supported by go-toml's Unmarshal
|
||||
Key5 []int64
|
||||
Key6 []int64
|
||||
}
|
||||
Products []struct {
|
||||
Name string
|
||||
Sku int64
|
||||
Color string
|
||||
}
|
||||
Fruit []struct {
|
||||
Name string
|
||||
Physical struct {
|
||||
Color string
|
||||
Shape string
|
||||
Variety []struct {
|
||||
Name string
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseToml(b *testing.B) {
|
||||
fileBytes, err := ioutil.ReadFile("benchmark.toml")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := LoadReader(bytes.NewReader(fileBytes))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalToml(b *testing.B) {
|
||||
bytes, err := ioutil.ReadFile("benchmark.toml")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
target := benchmarkDoc{}
|
||||
err := Unmarshal(bytes, &target)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalBurntSushiToml(b *testing.B) {
|
||||
bytes, err := ioutil.ReadFile("benchmark.toml")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
target := benchmarkDoc{}
|
||||
err := burntsushi.Unmarshal(bytes, &target)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalJson(b *testing.B) {
|
||||
bytes, err := ioutil.ReadFile("benchmark.json")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
target := benchmarkDoc{}
|
||||
err := json.Unmarshal(bytes, &target)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalYaml(b *testing.B) {
|
||||
bytes, err := ioutil.ReadFile("benchmark.yml")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
target := benchmarkDoc{}
|
||||
err := yaml.Unmarshal(bytes, &target)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,105 +0,0 @@
|
|||
// code examples for godoc
|
||||
|
||||
package toml_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
toml "github.com/pelletier/go-toml"
|
||||
)
|
||||
|
||||
func Example_tree() {
|
||||
config, err := toml.LoadFile("config.toml")
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Error ", err.Error())
|
||||
} else {
|
||||
// retrieve data directly
|
||||
user := config.Get("postgres.user").(string)
|
||||
password := config.Get("postgres.password").(string)
|
||||
|
||||
// or using an intermediate object
|
||||
configTree := config.Get("postgres").(*toml.Tree)
|
||||
user = configTree.Get("user").(string)
|
||||
password = configTree.Get("password").(string)
|
||||
fmt.Println("User is", user, " and password is", password)
|
||||
|
||||
// show where elements are in the file
|
||||
fmt.Printf("User position: %v\n", configTree.GetPosition("user"))
|
||||
fmt.Printf("Password position: %v\n", configTree.GetPosition("password"))
|
||||
}
|
||||
}
|
||||
|
||||
func Example_unmarshal() {
|
||||
type Employer struct {
|
||||
Name string
|
||||
Phone string
|
||||
}
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int64
|
||||
Employer Employer
|
||||
}
|
||||
|
||||
document := []byte(`
|
||||
name = "John"
|
||||
age = 30
|
||||
[employer]
|
||||
name = "Company Inc."
|
||||
phone = "+1 234 567 89012"
|
||||
`)
|
||||
|
||||
person := Person{}
|
||||
toml.Unmarshal(document, &person)
|
||||
fmt.Println(person.Name, "is", person.Age, "and works at", person.Employer.Name)
|
||||
// Output:
|
||||
// John is 30 and works at Company Inc.
|
||||
}
|
||||
|
||||
func ExampleMarshal() {
|
||||
type Postgres struct {
|
||||
User string `toml:"user"`
|
||||
Password string `toml:"password"`
|
||||
Database string `toml:"db" commented:"true" comment:"not used anymore"`
|
||||
}
|
||||
type Config struct {
|
||||
Postgres Postgres `toml:"postgres" comment:"Postgres configuration"`
|
||||
}
|
||||
|
||||
config := Config{Postgres{User: "pelletier", Password: "mypassword", Database: "old_database"}}
|
||||
b, err := toml.Marshal(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
// Output:
|
||||
// # Postgres configuration
|
||||
// [postgres]
|
||||
//
|
||||
// # not used anymore
|
||||
// # db = "old_database"
|
||||
// password = "mypassword"
|
||||
// user = "pelletier"
|
||||
}
|
||||
|
||||
func ExampleUnmarshal() {
|
||||
type Postgres struct {
|
||||
User string
|
||||
Password string
|
||||
}
|
||||
type Config struct {
|
||||
Postgres Postgres
|
||||
}
|
||||
|
||||
doc := []byte(`
|
||||
[postgres]
|
||||
user = "pelletier"
|
||||
password = "mypassword"`)
|
||||
|
||||
config := Config{}
|
||||
toml.Unmarshal(doc, &config)
|
||||
fmt.Println("user=", config.Postgres.User)
|
||||
// Output:
|
||||
// user= pelletier
|
||||
}
|
|
@ -1,29 +0,0 @@
|
|||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
|
@ -1,29 +0,0 @@
|
|||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
|
@ -1,15 +0,0 @@
|
|||
#! /bin/sh
|
||||
set -eu
|
||||
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
if [ ! -e toml-fuzz.zip ]; then
|
||||
go-fuzz-build github.com/pelletier/go-toml
|
||||
fi
|
||||
|
||||
rm -fr fuzz
|
||||
mkdir -p fuzz/corpus
|
||||
cp *.toml fuzz/corpus
|
||||
|
||||
go-fuzz -bin=toml-fuzz.zip -workdir=fuzz
|
|
@ -1,70 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testResult(t *testing.T, key string, expected []string) {
|
||||
parsed, err := parseKey(key)
|
||||
t.Logf("key=%s expected=%s parsed=%s", key, expected, parsed)
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error:", err)
|
||||
}
|
||||
if len(expected) != len(parsed) {
|
||||
t.Fatal("Expected length", len(expected), "but", len(parsed), "parsed")
|
||||
}
|
||||
for index, expectedKey := range expected {
|
||||
if expectedKey != parsed[index] {
|
||||
t.Fatal("Expected", expectedKey, "at index", index, "but found", parsed[index])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testError(t *testing.T, key string, expectedError string) {
|
||||
res, err := parseKey(key)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error, but succesfully parsed key %s", res)
|
||||
}
|
||||
if fmt.Sprintf("%s", err) != expectedError {
|
||||
t.Fatalf("Expected error \"%s\", but got \"%s\".", expectedError, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBareKeyBasic(t *testing.T) {
|
||||
testResult(t, "test", []string{"test"})
|
||||
}
|
||||
|
||||
func TestBareKeyDotted(t *testing.T) {
|
||||
testResult(t, "this.is.a.key", []string{"this", "is", "a", "key"})
|
||||
}
|
||||
|
||||
func TestDottedKeyBasic(t *testing.T) {
|
||||
testResult(t, "\"a.dotted.key\"", []string{"a.dotted.key"})
|
||||
}
|
||||
|
||||
func TestBaseKeyPound(t *testing.T) {
|
||||
testError(t, "hello#world", "invalid bare character: #")
|
||||
}
|
||||
|
||||
func TestQuotedKeys(t *testing.T) {
|
||||
testResult(t, `hello."foo".bar`, []string{"hello", "foo", "bar"})
|
||||
testResult(t, `"hello!"`, []string{"hello!"})
|
||||
testResult(t, `"hello\tworld"`, []string{"hello\tworld"})
|
||||
testResult(t, `"\U0001F914"`, []string{"\U0001F914"})
|
||||
testResult(t, `"\u2764"`, []string{"\u2764"})
|
||||
|
||||
testResult(t, `hello.'foo'.bar`, []string{"hello", "foo", "bar"})
|
||||
testResult(t, `'hello!'`, []string{"hello!"})
|
||||
testResult(t, `'hello\tworld'`, []string{`hello\tworld`})
|
||||
|
||||
testError(t, `"\w"`, `invalid escape sequence \w`)
|
||||
testError(t, `"\`, `unfinished escape sequence`)
|
||||
testError(t, `"\t`, `mismatched quotes`)
|
||||
}
|
||||
|
||||
func TestEmptyKey(t *testing.T) {
|
||||
testError(t, "", "empty key")
|
||||
testError(t, " ", "empty key")
|
||||
testResult(t, `""`, []string{""})
|
||||
}
|
|
@ -1,750 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testFlow(t *testing.T, input string, expectedFlow []token) {
|
||||
tokens := lexToml([]byte(input))
|
||||
if !reflect.DeepEqual(tokens, expectedFlow) {
|
||||
t.Fatal("Different flows. Expected\n", expectedFlow, "\nGot:\n", tokens)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidKeyGroup(t *testing.T) {
|
||||
testFlow(t, "[hello world]", []token{
|
||||
{Position{1, 1}, tokenLeftBracket, "["},
|
||||
{Position{1, 2}, tokenKeyGroup, "hello world"},
|
||||
{Position{1, 13}, tokenRightBracket, "]"},
|
||||
{Position{1, 14}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNestedQuotedUnicodeKeyGroup(t *testing.T) {
|
||||
testFlow(t, `[ j . "ʞ" . l ]`, []token{
|
||||
{Position{1, 1}, tokenLeftBracket, "["},
|
||||
{Position{1, 2}, tokenKeyGroup, ` j . "ʞ" . l `},
|
||||
{Position{1, 15}, tokenRightBracket, "]"},
|
||||
{Position{1, 16}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestUnclosedKeyGroup(t *testing.T) {
|
||||
testFlow(t, "[hello world", []token{
|
||||
{Position{1, 1}, tokenLeftBracket, "["},
|
||||
{Position{1, 2}, tokenError, "unclosed table key"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestComment(t *testing.T) {
|
||||
testFlow(t, "# blahblah", []token{
|
||||
{Position{1, 11}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyGroupComment(t *testing.T) {
|
||||
testFlow(t, "[hello world] # blahblah", []token{
|
||||
{Position{1, 1}, tokenLeftBracket, "["},
|
||||
{Position{1, 2}, tokenKeyGroup, "hello world"},
|
||||
{Position{1, 13}, tokenRightBracket, "]"},
|
||||
{Position{1, 25}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultipleKeyGroupsComment(t *testing.T) {
|
||||
testFlow(t, "[hello world] # blahblah\n[test]", []token{
|
||||
{Position{1, 1}, tokenLeftBracket, "["},
|
||||
{Position{1, 2}, tokenKeyGroup, "hello world"},
|
||||
{Position{1, 13}, tokenRightBracket, "]"},
|
||||
{Position{2, 1}, tokenLeftBracket, "["},
|
||||
{Position{2, 2}, tokenKeyGroup, "test"},
|
||||
{Position{2, 6}, tokenRightBracket, "]"},
|
||||
{Position{2, 7}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestSimpleWindowsCRLF(t *testing.T) {
|
||||
testFlow(t, "a=4\r\nb=2", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 2}, tokenEqual, "="},
|
||||
{Position{1, 3}, tokenInteger, "4"},
|
||||
{Position{2, 1}, tokenKey, "b"},
|
||||
{Position{2, 2}, tokenEqual, "="},
|
||||
{Position{2, 3}, tokenInteger, "2"},
|
||||
{Position{2, 4}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestBasicKey(t *testing.T) {
|
||||
testFlow(t, "hello", []token{
|
||||
{Position{1, 1}, tokenKey, "hello"},
|
||||
{Position{1, 6}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestBasicKeyWithUnderscore(t *testing.T) {
|
||||
testFlow(t, "hello_hello", []token{
|
||||
{Position{1, 1}, tokenKey, "hello_hello"},
|
||||
{Position{1, 12}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestBasicKeyWithDash(t *testing.T) {
|
||||
testFlow(t, "hello-world", []token{
|
||||
{Position{1, 1}, tokenKey, "hello-world"},
|
||||
{Position{1, 12}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestBasicKeyWithUppercaseMix(t *testing.T) {
|
||||
testFlow(t, "helloHELLOHello", []token{
|
||||
{Position{1, 1}, tokenKey, "helloHELLOHello"},
|
||||
{Position{1, 16}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestBasicKeyWithInternationalCharacters(t *testing.T) {
|
||||
testFlow(t, "héllÖ", []token{
|
||||
{Position{1, 1}, tokenKey, "héllÖ"},
|
||||
{Position{1, 6}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestBasicKeyAndEqual(t *testing.T) {
|
||||
testFlow(t, "hello =", []token{
|
||||
{Position{1, 1}, tokenKey, "hello"},
|
||||
{Position{1, 7}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyWithSharpAndEqual(t *testing.T) {
|
||||
testFlow(t, "key#name = 5", []token{
|
||||
{Position{1, 1}, tokenError, "keys cannot contain # character"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyWithSymbolsAndEqual(t *testing.T) {
|
||||
testFlow(t, "~!@$^&*()_+-`1234567890[]\\|/?><.,;:' = 5", []token{
|
||||
{Position{1, 1}, tokenError, "keys cannot contain ~ character"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualStringEscape(t *testing.T) {
|
||||
testFlow(t, `foo = "hello\""`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "hello\""},
|
||||
{Position{1, 16}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualStringUnfinished(t *testing.T) {
|
||||
testFlow(t, `foo = "bar`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenError, "unclosed string"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualString(t *testing.T) {
|
||||
testFlow(t, `foo = "bar"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "bar"},
|
||||
{Position{1, 12}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualTrue(t *testing.T) {
|
||||
testFlow(t, "foo = true", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenTrue, "true"},
|
||||
{Position{1, 11}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualFalse(t *testing.T) {
|
||||
testFlow(t, "foo = false", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenFalse, "false"},
|
||||
{Position{1, 12}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestArrayNestedString(t *testing.T) {
|
||||
testFlow(t, `a = [ ["hello", "world"] ]`, []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenLeftBracket, "["},
|
||||
{Position{1, 7}, tokenLeftBracket, "["},
|
||||
{Position{1, 9}, tokenString, "hello"},
|
||||
{Position{1, 15}, tokenComma, ","},
|
||||
{Position{1, 18}, tokenString, "world"},
|
||||
{Position{1, 24}, tokenRightBracket, "]"},
|
||||
{Position{1, 26}, tokenRightBracket, "]"},
|
||||
{Position{1, 27}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestArrayNestedInts(t *testing.T) {
|
||||
testFlow(t, "a = [ [42, 21], [10] ]", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenLeftBracket, "["},
|
||||
{Position{1, 7}, tokenLeftBracket, "["},
|
||||
{Position{1, 8}, tokenInteger, "42"},
|
||||
{Position{1, 10}, tokenComma, ","},
|
||||
{Position{1, 12}, tokenInteger, "21"},
|
||||
{Position{1, 14}, tokenRightBracket, "]"},
|
||||
{Position{1, 15}, tokenComma, ","},
|
||||
{Position{1, 17}, tokenLeftBracket, "["},
|
||||
{Position{1, 18}, tokenInteger, "10"},
|
||||
{Position{1, 20}, tokenRightBracket, "]"},
|
||||
{Position{1, 22}, tokenRightBracket, "]"},
|
||||
{Position{1, 23}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestArrayInts(t *testing.T) {
|
||||
testFlow(t, "a = [ 42, 21, 10, ]", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenLeftBracket, "["},
|
||||
{Position{1, 7}, tokenInteger, "42"},
|
||||
{Position{1, 9}, tokenComma, ","},
|
||||
{Position{1, 11}, tokenInteger, "21"},
|
||||
{Position{1, 13}, tokenComma, ","},
|
||||
{Position{1, 15}, tokenInteger, "10"},
|
||||
{Position{1, 17}, tokenComma, ","},
|
||||
{Position{1, 19}, tokenRightBracket, "]"},
|
||||
{Position{1, 20}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultilineArrayComments(t *testing.T) {
|
||||
testFlow(t, "a = [1, # wow\n2, # such items\n3, # so array\n]", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenLeftBracket, "["},
|
||||
{Position{1, 6}, tokenInteger, "1"},
|
||||
{Position{1, 7}, tokenComma, ","},
|
||||
{Position{2, 1}, tokenInteger, "2"},
|
||||
{Position{2, 2}, tokenComma, ","},
|
||||
{Position{3, 1}, tokenInteger, "3"},
|
||||
{Position{3, 2}, tokenComma, ","},
|
||||
{Position{4, 1}, tokenRightBracket, "]"},
|
||||
{Position{4, 2}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNestedArraysComment(t *testing.T) {
|
||||
toml := `
|
||||
someArray = [
|
||||
# does not work
|
||||
["entry1"]
|
||||
]`
|
||||
testFlow(t, toml, []token{
|
||||
{Position{2, 1}, tokenKey, "someArray"},
|
||||
{Position{2, 11}, tokenEqual, "="},
|
||||
{Position{2, 13}, tokenLeftBracket, "["},
|
||||
{Position{4, 1}, tokenLeftBracket, "["},
|
||||
{Position{4, 3}, tokenString, "entry1"},
|
||||
{Position{4, 10}, tokenRightBracket, "]"},
|
||||
{Position{5, 1}, tokenRightBracket, "]"},
|
||||
{Position{5, 2}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualArrayBools(t *testing.T) {
|
||||
testFlow(t, "foo = [true, false, true]", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenLeftBracket, "["},
|
||||
{Position{1, 8}, tokenTrue, "true"},
|
||||
{Position{1, 12}, tokenComma, ","},
|
||||
{Position{1, 14}, tokenFalse, "false"},
|
||||
{Position{1, 19}, tokenComma, ","},
|
||||
{Position{1, 21}, tokenTrue, "true"},
|
||||
{Position{1, 25}, tokenRightBracket, "]"},
|
||||
{Position{1, 26}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualArrayBoolsWithComments(t *testing.T) {
|
||||
testFlow(t, "foo = [true, false, true] # YEAH", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenLeftBracket, "["},
|
||||
{Position{1, 8}, tokenTrue, "true"},
|
||||
{Position{1, 12}, tokenComma, ","},
|
||||
{Position{1, 14}, tokenFalse, "false"},
|
||||
{Position{1, 19}, tokenComma, ","},
|
||||
{Position{1, 21}, tokenTrue, "true"},
|
||||
{Position{1, 25}, tokenRightBracket, "]"},
|
||||
{Position{1, 33}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestDateRegexp(t *testing.T) {
|
||||
if dateRegexp.FindString("1979-05-27T07:32:00Z") == "" {
|
||||
t.Error("basic lexing")
|
||||
}
|
||||
if dateRegexp.FindString("1979-05-27T00:32:00-07:00") == "" {
|
||||
t.Error("offset lexing")
|
||||
}
|
||||
if dateRegexp.FindString("1979-05-27T00:32:00.999999-07:00") == "" {
|
||||
t.Error("nano precision lexing")
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyEqualDate(t *testing.T) {
|
||||
testFlow(t, "foo = 1979-05-27T07:32:00Z", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenDate, "1979-05-27T07:32:00Z"},
|
||||
{Position{1, 27}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, "foo = 1979-05-27T00:32:00-07:00", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenDate, "1979-05-27T00:32:00-07:00"},
|
||||
{Position{1, 32}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, "foo = 1979-05-27T00:32:00.999999-07:00", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenDate, "1979-05-27T00:32:00.999999-07:00"},
|
||||
{Position{1, 39}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestFloatEndingWithDot(t *testing.T) {
|
||||
testFlow(t, "foo = 42.", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenError, "float cannot end with a dot"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestFloatWithTwoDots(t *testing.T) {
|
||||
testFlow(t, "foo = 4.2.", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenError, "cannot have two dots in one float"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestFloatWithExponent1(t *testing.T) {
|
||||
testFlow(t, "a = 5e+22", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenFloat, "5e+22"},
|
||||
{Position{1, 10}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestFloatWithExponent2(t *testing.T) {
|
||||
testFlow(t, "a = 5E+22", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenFloat, "5E+22"},
|
||||
{Position{1, 10}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestFloatWithExponent3(t *testing.T) {
|
||||
testFlow(t, "a = -5e+22", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenFloat, "-5e+22"},
|
||||
{Position{1, 11}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestFloatWithExponent4(t *testing.T) {
|
||||
testFlow(t, "a = -5e-22", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenFloat, "-5e-22"},
|
||||
{Position{1, 11}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestFloatWithExponent5(t *testing.T) {
|
||||
testFlow(t, "a = 6.626e-34", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenFloat, "6.626e-34"},
|
||||
{Position{1, 14}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestInvalidEsquapeSequence(t *testing.T) {
|
||||
testFlow(t, `foo = "\x"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenError, "invalid escape sequence: \\x"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNestedArrays(t *testing.T) {
|
||||
testFlow(t, "foo = [[[]]]", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenLeftBracket, "["},
|
||||
{Position{1, 8}, tokenLeftBracket, "["},
|
||||
{Position{1, 9}, tokenLeftBracket, "["},
|
||||
{Position{1, 10}, tokenRightBracket, "]"},
|
||||
{Position{1, 11}, tokenRightBracket, "]"},
|
||||
{Position{1, 12}, tokenRightBracket, "]"},
|
||||
{Position{1, 13}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualNumber(t *testing.T) {
|
||||
testFlow(t, "foo = 42", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenInteger, "42"},
|
||||
{Position{1, 9}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = +42", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenInteger, "+42"},
|
||||
{Position{1, 10}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = -42", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenInteger, "-42"},
|
||||
{Position{1, 10}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = 4.2", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenFloat, "4.2"},
|
||||
{Position{1, 10}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = +4.2", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenFloat, "+4.2"},
|
||||
{Position{1, 11}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = -4.2", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenFloat, "-4.2"},
|
||||
{Position{1, 11}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = 1_000", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenInteger, "1_000"},
|
||||
{Position{1, 12}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = 5_349_221", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenInteger, "5_349_221"},
|
||||
{Position{1, 16}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = 1_2_3_4_5", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenInteger, "1_2_3_4_5"},
|
||||
{Position{1, 16}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "flt8 = 9_224_617.445_991_228_313", []token{
|
||||
{Position{1, 1}, tokenKey, "flt8"},
|
||||
{Position{1, 6}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenFloat, "9_224_617.445_991_228_313"},
|
||||
{Position{1, 33}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = +", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenError, "no digit in that number"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultiline(t *testing.T) {
|
||||
testFlow(t, "foo = 42\nbar=21", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 7}, tokenInteger, "42"},
|
||||
{Position{2, 1}, tokenKey, "bar"},
|
||||
{Position{2, 4}, tokenEqual, "="},
|
||||
{Position{2, 5}, tokenInteger, "21"},
|
||||
{Position{2, 7}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualStringUnicodeEscape(t *testing.T) {
|
||||
testFlow(t, `foo = "hello \u2665"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "hello ♥"},
|
||||
{Position{1, 21}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = "hello \U000003B4"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "hello δ"},
|
||||
{Position{1, 25}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = "\uabcd"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "\uabcd"},
|
||||
{Position{1, 15}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = "\uABCD"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "\uABCD"},
|
||||
{Position{1, 15}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = "\U000bcdef"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "\U000bcdef"},
|
||||
{Position{1, 19}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = "\U000BCDEF"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "\U000BCDEF"},
|
||||
{Position{1, 19}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = "\u2"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenError, "unfinished unicode escape"},
|
||||
})
|
||||
testFlow(t, `foo = "\U2"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenError, "unfinished unicode escape"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyEqualStringNoEscape(t *testing.T) {
|
||||
testFlow(t, "foo = \"hello \u0002\"", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenError, "unescaped control character U+0002"},
|
||||
})
|
||||
testFlow(t, "foo = \"hello \u001F\"", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenError, "unescaped control character U+001F"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestLiteralString(t *testing.T) {
|
||||
testFlow(t, `foo = 'C:\Users\nodejs\templates'`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, `C:\Users\nodejs\templates`},
|
||||
{Position{1, 34}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = '\\ServerX\admin$\system32\'`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, `\\ServerX\admin$\system32\`},
|
||||
{Position{1, 35}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = 'Tom "Dubs" Preston-Werner'`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, `Tom "Dubs" Preston-Werner`},
|
||||
{Position{1, 34}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = '<\i\c*\s*>'`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, `<\i\c*\s*>`},
|
||||
{Position{1, 19}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, `foo = 'C:\Users\nodejs\unfinis`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenError, "unclosed string"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultilineLiteralString(t *testing.T) {
|
||||
testFlow(t, `foo = '''hello 'literal' world'''`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 10}, tokenString, `hello 'literal' world`},
|
||||
{Position{1, 34}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = '''\nhello\n'literal'\nworld'''", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{2, 1}, tokenString, "hello\n'literal'\nworld"},
|
||||
{Position{4, 9}, tokenEOF, ""},
|
||||
})
|
||||
testFlow(t, "foo = '''\r\nhello\r\n'literal'\r\nworld'''", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{2, 1}, tokenString, "hello\r\n'literal'\r\nworld"},
|
||||
{Position{4, 9}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultilineString(t *testing.T) {
|
||||
testFlow(t, `foo = """hello "literal" world"""`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 10}, tokenString, `hello "literal" world`},
|
||||
{Position{1, 34}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = \"\"\"\r\nhello\\\r\n\"literal\"\\\nworld\"\"\"", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{2, 1}, tokenString, "hello\"literal\"world"},
|
||||
{Position{4, 9}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "foo = \"\"\"\\\n \\\n \\\n hello\\\nmultiline\\\nworld\"\"\"", []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 10}, tokenString, "hellomultilineworld"},
|
||||
{Position{6, 9}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "key2 = \"\"\"\nThe quick brown \\\n\n\n fox jumps over \\\n the lazy dog.\"\"\"", []token{
|
||||
{Position{1, 1}, tokenKey, "key2"},
|
||||
{Position{1, 6}, tokenEqual, "="},
|
||||
{Position{2, 1}, tokenString, "The quick brown fox jumps over the lazy dog."},
|
||||
{Position{6, 21}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "key2 = \"\"\"\\\n The quick brown \\\n fox jumps over \\\n the lazy dog.\\\n \"\"\"", []token{
|
||||
{Position{1, 1}, tokenKey, "key2"},
|
||||
{Position{1, 6}, tokenEqual, "="},
|
||||
{Position{1, 11}, tokenString, "The quick brown fox jumps over the lazy dog."},
|
||||
{Position{5, 11}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, `key2 = "Roses are red\nViolets are blue"`, []token{
|
||||
{Position{1, 1}, tokenKey, "key2"},
|
||||
{Position{1, 6}, tokenEqual, "="},
|
||||
{Position{1, 9}, tokenString, "Roses are red\nViolets are blue"},
|
||||
{Position{1, 41}, tokenEOF, ""},
|
||||
})
|
||||
|
||||
testFlow(t, "key2 = \"\"\"\nRoses are red\nViolets are blue\"\"\"", []token{
|
||||
{Position{1, 1}, tokenKey, "key2"},
|
||||
{Position{1, 6}, tokenEqual, "="},
|
||||
{Position{2, 1}, tokenString, "Roses are red\nViolets are blue"},
|
||||
{Position{3, 20}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestUnicodeString(t *testing.T) {
|
||||
testFlow(t, `foo = "hello ♥ world"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "hello ♥ world"},
|
||||
{Position{1, 22}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
func TestEscapeInString(t *testing.T) {
|
||||
testFlow(t, `foo = "\b\f\/"`, []token{
|
||||
{Position{1, 1}, tokenKey, "foo"},
|
||||
{Position{1, 5}, tokenEqual, "="},
|
||||
{Position{1, 8}, tokenString, "\b\f/"},
|
||||
{Position{1, 15}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyGroupArray(t *testing.T) {
|
||||
testFlow(t, "[[foo]]", []token{
|
||||
{Position{1, 1}, tokenDoubleLeftBracket, "[["},
|
||||
{Position{1, 3}, tokenKeyGroupArray, "foo"},
|
||||
{Position{1, 6}, tokenDoubleRightBracket, "]]"},
|
||||
{Position{1, 8}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestQuotedKey(t *testing.T) {
|
||||
testFlow(t, "\"a b\" = 42", []token{
|
||||
{Position{1, 1}, tokenKey, "\"a b\""},
|
||||
{Position{1, 7}, tokenEqual, "="},
|
||||
{Position{1, 9}, tokenInteger, "42"},
|
||||
{Position{1, 11}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyNewline(t *testing.T) {
|
||||
testFlow(t, "a\n= 4", []token{
|
||||
{Position{1, 1}, tokenError, "keys cannot contain new lines"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestInvalidFloat(t *testing.T) {
|
||||
testFlow(t, "a=7e1_", []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 2}, tokenEqual, "="},
|
||||
{Position{1, 3}, tokenFloat, "7e1_"},
|
||||
{Position{1, 7}, tokenEOF, ""},
|
||||
})
|
||||
}
|
||||
|
||||
func TestLexUnknownRvalue(t *testing.T) {
|
||||
testFlow(t, `a = !b`, []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenError, "no value can start with !"},
|
||||
})
|
||||
|
||||
testFlow(t, `a = \b`, []token{
|
||||
{Position{1, 1}, tokenKey, "a"},
|
||||
{Position{1, 3}, tokenEqual, "="},
|
||||
{Position{1, 5}, tokenError, `no value can start with \`},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLexer(b *testing.B) {
|
||||
sample := `title = "Hugo: A Fast and Flexible Website Generator"
|
||||
baseurl = "http://gohugo.io/"
|
||||
MetaDataFormat = "yaml"
|
||||
pluralizeListTitles = false
|
||||
|
||||
[params]
|
||||
description = "Documentation of Hugo, a fast and flexible static site generator built with love by spf13, bep and friends in Go"
|
||||
author = "Steve Francia (spf13) and friends"
|
||||
release = "0.22-DEV"
|
||||
|
||||
[[menu.main]]
|
||||
name = "Download Hugo"
|
||||
pre = "<i class='fa fa-download'></i>"
|
||||
url = "https://github.com/spf13/hugo/releases"
|
||||
weight = -200
|
||||
`
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
lexToml([]byte(sample))
|
||||
}
|
||||
}
@@ -1,806 +0,0 @@
package toml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type basicMarshalTestStruct struct {
|
||||
String string `toml:"string"`
|
||||
StringList []string `toml:"strlist"`
|
||||
Sub basicMarshalTestSubStruct `toml:"subdoc"`
|
||||
SubList []basicMarshalTestSubStruct `toml:"sublist"`
|
||||
}
|
||||
|
||||
type basicMarshalTestSubStruct struct {
|
||||
String2 string
|
||||
}
|
||||
|
||||
var basicTestData = basicMarshalTestStruct{
|
||||
String: "Hello",
|
||||
StringList: []string{"Howdy", "Hey There"},
|
||||
Sub: basicMarshalTestSubStruct{"One"},
|
||||
SubList: []basicMarshalTestSubStruct{{"Two"}, {"Three"}},
|
||||
}
|
||||
|
||||
var basicTestToml = []byte(`string = "Hello"
|
||||
strlist = ["Howdy","Hey There"]
|
||||
|
||||
[subdoc]
|
||||
String2 = "One"
|
||||
|
||||
[[sublist]]
|
||||
String2 = "Two"
|
||||
|
||||
[[sublist]]
|
||||
String2 = "Three"
|
||||
`)
|
||||
|
||||
func TestBasicMarshal(t *testing.T) {
|
||||
result, err := Marshal(basicTestData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := basicTestToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasicUnmarshal(t *testing.T) {
|
||||
result := basicMarshalTestStruct{}
|
||||
err := Unmarshal(basicTestToml, &result)
|
||||
expected := basicTestData
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("Bad unmarshal: expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
type testDoc struct {
|
||||
Title string `toml:"title"`
|
||||
Basics testDocBasics `toml:"basic"`
|
||||
BasicLists testDocBasicLists `toml:"basic_lists"`
|
||||
BasicMap map[string]string `toml:"basic_map"`
|
||||
Subdocs testDocSubs `toml:"subdoc"`
|
||||
SubDocList []testSubDoc `toml:"subdoclist"`
|
||||
SubDocPtrs []*testSubDoc `toml:"subdocptrs"`
|
||||
err int `toml:"shouldntBeHere"`
|
||||
unexported int `toml:"shouldntBeHere"`
|
||||
Unexported2 int `toml:"-"`
|
||||
}
|
||||
|
||||
type testDocBasics struct {
|
||||
Bool bool `toml:"bool"`
|
||||
Date time.Time `toml:"date"`
|
||||
Float float32 `toml:"float"`
|
||||
Int int `toml:"int"`
|
||||
Uint uint `toml:"uint"`
|
||||
String *string `toml:"string"`
|
||||
unexported int `toml:"shouldntBeHere"`
|
||||
}
|
||||
|
||||
type testDocBasicLists struct {
|
||||
Bools []bool `toml:"bools"`
|
||||
Dates []time.Time `toml:"dates"`
|
||||
Floats []*float32 `toml:"floats"`
|
||||
Ints []int `toml:"ints"`
|
||||
Strings []string `toml:"strings"`
|
||||
UInts []uint `toml:"uints"`
|
||||
}
|
||||
|
||||
type testDocSubs struct {
|
||||
First testSubDoc `toml:"first"`
|
||||
Second *testSubDoc `toml:"second"`
|
||||
}
|
||||
|
||||
type testSubDoc struct {
|
||||
Name string `toml:"name"`
|
||||
unexported int `toml:"shouldntBeHere"`
|
||||
}
|
||||
|
||||
var biteMe = "Bite me"
|
||||
var float1 float32 = 12.3
|
||||
var float2 float32 = 45.6
|
||||
var float3 float32 = 78.9
|
||||
var subdoc = testSubDoc{"Second", 0}
|
||||
|
||||
var docData = testDoc{
|
||||
Title: "TOML Marshal Testing",
|
||||
unexported: 0,
|
||||
Unexported2: 0,
|
||||
Basics: testDocBasics{
|
||||
Bool: true,
|
||||
Date: time.Date(1979, 5, 27, 7, 32, 0, 0, time.UTC),
|
||||
Float: 123.4,
|
||||
Int: 5000,
|
||||
Uint: 5001,
|
||||
String: &biteMe,
|
||||
unexported: 0,
|
||||
},
|
||||
BasicLists: testDocBasicLists{
|
||||
Bools: []bool{true, false, true},
|
||||
Dates: []time.Time{
|
||||
time.Date(1979, 5, 27, 7, 32, 0, 0, time.UTC),
|
||||
time.Date(1980, 5, 27, 7, 32, 0, 0, time.UTC),
|
||||
},
|
||||
Floats: []*float32{&float1, &float2, &float3},
|
||||
Ints: []int{8001, 8001, 8002},
|
||||
Strings: []string{"One", "Two", "Three"},
|
||||
UInts: []uint{5002, 5003},
|
||||
},
|
||||
BasicMap: map[string]string{
|
||||
"one": "one",
|
||||
"two": "two",
|
||||
},
|
||||
Subdocs: testDocSubs{
|
||||
First: testSubDoc{"First", 0},
|
||||
Second: &subdoc,
|
||||
},
|
||||
SubDocList: []testSubDoc{
|
||||
testSubDoc{"List.First", 0},
|
||||
testSubDoc{"List.Second", 0},
|
||||
},
|
||||
SubDocPtrs: []*testSubDoc{&subdoc},
|
||||
}
|
||||
|
||||
func TestDocMarshal(t *testing.T) {
|
||||
result, err := Marshal(docData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected, _ := ioutil.ReadFile("marshal_test.toml")
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocUnmarshal(t *testing.T) {
|
||||
result := testDoc{}
|
||||
tomlData, _ := ioutil.ReadFile("marshal_test.toml")
|
||||
err := Unmarshal(tomlData, &result)
|
||||
expected := docData
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
resStr, _ := json.MarshalIndent(result, "", " ")
|
||||
expStr, _ := json.MarshalIndent(expected, "", " ")
|
||||
t.Errorf("Bad unmarshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expStr, resStr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocPartialUnmarshal(t *testing.T) {
|
||||
result := testDocSubs{}
|
||||
|
||||
tree, _ := LoadFile("marshal_test.toml")
|
||||
subTree := tree.Get("subdoc").(*Tree)
|
||||
err := subTree.Unmarshal(&result)
|
||||
expected := docData.Subdocs
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
resStr, _ := json.MarshalIndent(result, "", " ")
|
||||
expStr, _ := json.MarshalIndent(expected, "", " ")
|
||||
t.Errorf("Bad partial unmartial: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expStr, resStr)
|
||||
}
|
||||
}
|
||||
|
||||
type tomlTypeCheckTest struct {
|
||||
name string
|
||||
item interface{}
|
||||
typ int //0=primitive, 1=otherslice, 2=treeslice, 3=tree
|
||||
}
|
||||
|
||||
func TestTypeChecks(t *testing.T) {
|
||||
tests := []tomlTypeCheckTest{
|
||||
{"integer", 2, 0},
|
||||
{"time", time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC), 0},
|
||||
{"stringlist", []string{"hello", "hi"}, 1},
|
||||
{"timelist", []time.Time{time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, 1},
|
||||
{"objectlist", []tomlTypeCheckTest{}, 2},
|
||||
{"object", tomlTypeCheckTest{}, 3},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
expected := []bool{false, false, false, false}
|
||||
expected[test.typ] = true
|
||||
result := []bool{
|
||||
isPrimitive(reflect.TypeOf(test.item)),
|
||||
isOtherSlice(reflect.TypeOf(test.item)),
|
||||
isTreeSlice(reflect.TypeOf(test.item)),
|
||||
isTree(reflect.TypeOf(test.item)),
|
||||
}
|
||||
if !reflect.DeepEqual(expected, result) {
|
||||
t.Errorf("Bad type check on %q: expected %v, got %v", test.name, expected, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type unexportedMarshalTestStruct struct {
|
||||
String string `toml:"string"`
|
||||
StringList []string `toml:"strlist"`
|
||||
Sub basicMarshalTestSubStruct `toml:"subdoc"`
|
||||
SubList []basicMarshalTestSubStruct `toml:"sublist"`
|
||||
unexported int `toml:"shouldntBeHere"`
|
||||
Unexported2 int `toml:"-"`
|
||||
}
|
||||
|
||||
var unexportedTestData = unexportedMarshalTestStruct{
|
||||
String: "Hello",
|
||||
StringList: []string{"Howdy", "Hey There"},
|
||||
Sub: basicMarshalTestSubStruct{"One"},
|
||||
SubList: []basicMarshalTestSubStruct{{"Two"}, {"Three"}},
|
||||
unexported: 0,
|
||||
Unexported2: 0,
|
||||
}
|
||||
|
||||
var unexportedTestToml = []byte(`string = "Hello"
|
||||
strlist = ["Howdy","Hey There"]
|
||||
unexported = 1
|
||||
shouldntBeHere = 2
|
||||
|
||||
[subdoc]
|
||||
String2 = "One"
|
||||
|
||||
[[sublist]]
|
||||
String2 = "Two"
|
||||
|
||||
[[sublist]]
|
||||
String2 = "Three"
|
||||
`)
|
||||
|
||||
func TestUnexportedUnmarshal(t *testing.T) {
|
||||
result := unexportedMarshalTestStruct{}
|
||||
err := Unmarshal(unexportedTestToml, &result)
|
||||
expected := unexportedTestData
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("Bad unexported unmarshal: expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
type errStruct struct {
|
||||
Bool bool `toml:"bool"`
|
||||
Date time.Time `toml:"date"`
|
||||
Float float64 `toml:"float"`
|
||||
Int int16 `toml:"int"`
|
||||
String *string `toml:"string"`
|
||||
}
|
||||
|
||||
var errTomls = []string{
|
||||
"bool = truly\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1979-05-27T07:3200Z\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123a4\nint = 5000\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = j000\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = Bite me",
|
||||
"bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = Bite me",
|
||||
"bool = 1\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1979-05-27T07:32:00Z\n\"sorry\"\nint = 5000\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = \"sorry\"\nstring = \"Bite me\"",
|
||||
"bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = 1",
|
||||
}
|
||||
|
||||
type mapErr struct {
|
||||
Vals map[string]float64
|
||||
}
|
||||
|
||||
type intErr struct {
|
||||
Int1 int
|
||||
Int2 int8
|
||||
Int3 int16
|
||||
Int4 int32
|
||||
Int5 int64
|
||||
UInt1 uint
|
||||
UInt2 uint8
|
||||
UInt3 uint16
|
||||
UInt4 uint32
|
||||
UInt5 uint64
|
||||
Flt1 float32
|
||||
Flt2 float64
|
||||
}
|
||||
|
||||
var intErrTomls = []string{
|
||||
"Int1 = []\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = []\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = []\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = []\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = []\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = []\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = []\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = []\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = []\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = []\nFlt1 = 1.0\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = []\nFlt2 = 2.0",
|
||||
"Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = []",
|
||||
}
|
||||
|
||||
func TestErrUnmarshal(t *testing.T) {
|
||||
for ind, toml := range errTomls {
|
||||
result := errStruct{}
|
||||
err := Unmarshal([]byte(toml), &result)
|
||||
if err == nil {
|
||||
t.Errorf("Expected err from case %d\n", ind)
|
||||
}
|
||||
}
|
||||
result2 := mapErr{}
|
||||
err := Unmarshal([]byte("[Vals]\nfred=\"1.2\""), &result2)
|
||||
if err == nil {
|
||||
t.Errorf("Expected err from map")
|
||||
}
|
||||
for ind, toml := range intErrTomls {
|
||||
result3 := intErr{}
|
||||
err := Unmarshal([]byte(toml), &result3)
|
||||
if err == nil {
|
||||
t.Errorf("Expected int err from case %d\n", ind)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type emptyMarshalTestStruct struct {
|
||||
Title string `toml:"title"`
|
||||
Bool bool `toml:"bool"`
|
||||
Int int `toml:"int"`
|
||||
String string `toml:"string"`
|
||||
StringList []string `toml:"stringlist"`
|
||||
Ptr *basicMarshalTestStruct `toml:"ptr"`
|
||||
Map map[string]string `toml:"map"`
|
||||
}
|
||||
|
||||
var emptyTestData = emptyMarshalTestStruct{
|
||||
Title: "Placeholder",
|
||||
Bool: false,
|
||||
Int: 0,
|
||||
String: "",
|
||||
StringList: []string{},
|
||||
Ptr: nil,
|
||||
Map: map[string]string{},
|
||||
}
|
||||
|
||||
var emptyTestToml = []byte(`bool = false
|
||||
int = 0
|
||||
string = ""
|
||||
stringlist = []
|
||||
title = "Placeholder"
|
||||
|
||||
[map]
|
||||
`)
|
||||
|
||||
type emptyMarshalTestStruct2 struct {
|
||||
Title string `toml:"title"`
|
||||
Bool bool `toml:"bool,omitempty"`
|
||||
Int int `toml:"int, omitempty"`
|
||||
String string `toml:"string,omitempty "`
|
||||
StringList []string `toml:"stringlist,omitempty"`
|
||||
Ptr *basicMarshalTestStruct `toml:"ptr,omitempty"`
|
||||
Map map[string]string `toml:"map,omitempty"`
|
||||
}
|
||||
|
||||
var emptyTestData2 = emptyMarshalTestStruct2{
|
||||
Title: "Placeholder",
|
||||
Bool: false,
|
||||
Int: 0,
|
||||
String: "",
|
||||
StringList: []string{},
|
||||
Ptr: nil,
|
||||
Map: map[string]string{},
|
||||
}
|
||||
|
||||
var emptyTestToml2 = []byte(`title = "Placeholder"
|
||||
`)
|
||||
|
||||
func TestEmptyMarshal(t *testing.T) {
|
||||
result, err := Marshal(emptyTestData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := emptyTestToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad empty marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyMarshalOmit(t *testing.T) {
|
||||
result, err := Marshal(emptyTestData2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := emptyTestToml2
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad empty omit marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyUnmarshal(t *testing.T) {
|
||||
result := emptyMarshalTestStruct{}
|
||||
err := Unmarshal(emptyTestToml, &result)
|
||||
expected := emptyTestData
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("Bad empty unmarshal: expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyUnmarshalOmit(t *testing.T) {
|
||||
result := emptyMarshalTestStruct2{}
|
||||
err := Unmarshal(emptyTestToml, &result)
|
||||
expected := emptyTestData2
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("Bad empty omit unmarshal: expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
type pointerMarshalTestStruct struct {
|
||||
Str *string
|
||||
List *[]string
|
||||
ListPtr *[]*string
|
||||
Map *map[string]string
|
||||
MapPtr *map[string]*string
|
||||
EmptyStr *string
|
||||
EmptyList *[]string
|
||||
EmptyMap *map[string]string
|
||||
DblPtr *[]*[]*string
|
||||
}
|
||||
|
||||
var pointerStr = "Hello"
|
||||
var pointerList = []string{"Hello back"}
|
||||
var pointerListPtr = []*string{&pointerStr}
|
||||
var pointerMap = map[string]string{"response": "Goodbye"}
|
||||
var pointerMapPtr = map[string]*string{"alternate": &pointerStr}
|
||||
var pointerTestData = pointerMarshalTestStruct{
|
||||
Str: &pointerStr,
|
||||
List: &pointerList,
|
||||
ListPtr: &pointerListPtr,
|
||||
Map: &pointerMap,
|
||||
MapPtr: &pointerMapPtr,
|
||||
EmptyStr: nil,
|
||||
EmptyList: nil,
|
||||
EmptyMap: nil,
|
||||
}
|
||||
|
||||
var pointerTestToml = []byte(`List = ["Hello back"]
|
||||
ListPtr = ["Hello"]
|
||||
Str = "Hello"
|
||||
|
||||
[Map]
|
||||
response = "Goodbye"
|
||||
|
||||
[MapPtr]
|
||||
alternate = "Hello"
|
||||
`)
|
||||
|
||||
func TestPointerMarshal(t *testing.T) {
|
||||
result, err := Marshal(pointerTestData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := pointerTestToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad pointer marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointerUnmarshal(t *testing.T) {
|
||||
result := pointerMarshalTestStruct{}
|
||||
err := Unmarshal(pointerTestToml, &result)
|
||||
expected := pointerTestData
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("Bad pointer unmarshal: expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalTypeMismatch(t *testing.T) {
|
||||
result := pointerMarshalTestStruct{}
|
||||
err := Unmarshal([]byte("List = 123"), &result)
|
||||
if !strings.HasPrefix(err.Error(), "(1, 1): Can't convert 123(int64) to []string(slice)") {
|
||||
t.Errorf("Type mismatch must be reported: got %v", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type nestedMarshalTestStruct struct {
|
||||
String [][]string
|
||||
//Struct [][]basicMarshalTestSubStruct
|
||||
StringPtr *[]*[]*string
|
||||
// StructPtr *[]*[]*basicMarshalTestSubStruct
|
||||
}
|
||||
|
||||
var str1 = "Three"
|
||||
var str2 = "Four"
|
||||
var strPtr = []*string{&str1, &str2}
|
||||
var strPtr2 = []*[]*string{&strPtr}
|
||||
|
||||
var nestedTestData = nestedMarshalTestStruct{
|
||||
String: [][]string{[]string{"Five", "Six"}, []string{"One", "Two"}},
|
||||
StringPtr: &strPtr2,
|
||||
}
|
||||
|
||||
var nestedTestToml = []byte(`String = [["Five","Six"],["One","Two"]]
|
||||
StringPtr = [["Three","Four"]]
|
||||
`)
|
||||
|
||||
func TestNestedMarshal(t *testing.T) {
|
||||
result, err := Marshal(nestedTestData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := nestedTestToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad nested marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNestedUnmarshal(t *testing.T) {
|
||||
result := nestedMarshalTestStruct{}
|
||||
err := Unmarshal(nestedTestToml, &result)
|
||||
expected := nestedTestData
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("Bad nested unmarshal: expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
type customMarshalerParent struct {
|
||||
Self customMarshaler `toml:"me"`
|
||||
Friends []customMarshaler `toml:"friends"`
|
||||
}
|
||||
|
||||
type customMarshaler struct {
|
||||
FirsName string
|
||||
LastName string
|
||||
}
|
||||
|
||||
func (c customMarshaler) MarshalTOML() ([]byte, error) {
|
||||
fullName := fmt.Sprintf("%s %s", c.FirsName, c.LastName)
|
||||
return []byte(fullName), nil
|
||||
}
|
||||
|
||||
var customMarshalerData = customMarshaler{FirsName: "Sally", LastName: "Fields"}
|
||||
var customMarshalerToml = []byte(`Sally Fields`)
|
||||
var nestedCustomMarshalerData = customMarshalerParent{
|
||||
Self: customMarshaler{FirsName: "Maiku", LastName: "Suteda"},
|
||||
Friends: []customMarshaler{customMarshalerData},
|
||||
}
|
||||
var nestedCustomMarshalerToml = []byte(`friends = ["Sally Fields"]
|
||||
me = "Maiku Suteda"
|
||||
`)
|
||||
|
||||
func TestCustomMarshaler(t *testing.T) {
|
||||
result, err := Marshal(customMarshalerData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := customMarshalerToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad custom marshaler: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNestedCustomMarshaler(t *testing.T) {
|
||||
result, err := Marshal(nestedCustomMarshalerData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := nestedCustomMarshalerToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad nested custom marshaler: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
var commentTestToml = []byte(`
|
||||
# it's a comment on type
|
||||
[postgres]
|
||||
# isCommented = "dvalue"
|
||||
noComment = "cvalue"
|
||||
|
||||
# A comment on AttrB with a
|
||||
# break line
|
||||
password = "bvalue"
|
||||
|
||||
# A comment on AttrA
|
||||
user = "avalue"
|
||||
|
||||
[[postgres.My]]
|
||||
|
||||
# a comment on my on typeC
|
||||
My = "Foo"
|
||||
|
||||
[[postgres.My]]
|
||||
|
||||
# a comment on my on typeC
|
||||
My = "Baar"
|
||||
`)
|
||||
|
||||
func TestMarshalComment(t *testing.T) {
|
||||
type TypeC struct {
|
||||
My string `comment:"a comment on my on typeC"`
|
||||
}
|
||||
type TypeB struct {
|
||||
AttrA string `toml:"user" comment:"A comment on AttrA"`
|
||||
AttrB string `toml:"password" comment:"A comment on AttrB with a\n break line"`
|
||||
AttrC string `toml:"noComment"`
|
||||
AttrD string `toml:"isCommented" commented:"true"`
|
||||
My []TypeC
|
||||
}
|
||||
type TypeA struct {
|
||||
TypeB TypeB `toml:"postgres" comment:"it's a comment on type"`
|
||||
}
|
||||
|
||||
ta := []TypeC{{My: "Foo"}, {My: "Baar"}}
|
||||
config := TypeA{TypeB{AttrA: "avalue", AttrB: "bvalue", AttrC: "cvalue", AttrD: "dvalue", My: ta}}
|
||||
result, err := Marshal(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := commentTestToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
type mapsTestStruct struct {
|
||||
Simple map[string]string
|
||||
Paths map[string]string
|
||||
Other map[string]float64
|
||||
X struct {
|
||||
Y struct {
|
||||
Z map[string]bool
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var mapsTestData = mapsTestStruct{
|
||||
Simple: map[string]string{
|
||||
"one plus one": "two",
|
||||
"next": "three",
|
||||
},
|
||||
Paths: map[string]string{
|
||||
"/this/is/a/path": "/this/is/also/a/path",
|
||||
"/heloo.txt": "/tmp/lololo.txt",
|
||||
},
|
||||
Other: map[string]float64{
|
||||
"testing": 3.9999,
|
||||
},
|
||||
X: struct{ Y struct{ Z map[string]bool } }{
|
||||
Y: struct{ Z map[string]bool }{
|
||||
Z: map[string]bool{
|
||||
"is.Nested": true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var mapsTestToml = []byte(`
|
||||
[Other]
|
||||
"testing" = 3.9999
|
||||
|
||||
[Paths]
|
||||
"/heloo.txt" = "/tmp/lololo.txt"
|
||||
"/this/is/a/path" = "/this/is/also/a/path"
|
||||
|
||||
[Simple]
|
||||
"next" = "three"
|
||||
"one plus one" = "two"
|
||||
|
||||
[X]
|
||||
|
||||
[X.Y]
|
||||
|
||||
[X.Y.Z]
|
||||
"is.Nested" = true
|
||||
`)
|
||||
|
||||
func TestEncodeQuotedMapKeys(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
if err := NewEncoder(&buf).QuoteMapKeys(true).Encode(mapsTestData); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
result := buf.Bytes()
|
||||
expected := mapsTestToml
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Bad maps marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeQuotedMapKeys(t *testing.T) {
|
||||
result := mapsTestStruct{}
|
||||
err := NewDecoder(bytes.NewBuffer(mapsTestToml)).Decode(&result)
|
||||
expected := mapsTestData
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("Bad maps unmarshal: expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
type structArrayNoTag struct {
|
||||
A struct {
|
||||
B []int64
|
||||
C []int64
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArray(t *testing.T) {
|
||||
expected := []byte(`
|
||||
[A]
|
||||
B = [1,2,3]
|
||||
C = [1]
|
||||
`)
|
||||
|
||||
m := structArrayNoTag{
|
||||
A: struct {
|
||||
B []int64
|
||||
C []int64
|
||||
}{
|
||||
B: []int64{1, 2, 3},
|
||||
C: []int64{1},
|
||||
},
|
||||
}
|
||||
|
||||
b, err := Marshal(m)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(b, expected) {
|
||||
t.Errorf("Bad arrays marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArrayOnePerLine(t *testing.T) {
|
||||
expected := []byte(`
|
||||
[A]
|
||||
B = [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
C = [1]
|
||||
`)
|
||||
|
||||
m := structArrayNoTag{
|
||||
A: struct {
|
||||
B []int64
|
||||
C []int64
|
||||
}{
|
||||
B: []int64{1, 2, 3},
|
||||
C: []int64{1},
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
encoder := NewEncoder(&buf).ArraysWithOneElementPerLine(true)
|
||||
err := encoder.Encode(m)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
|
||||
if !bytes.Equal(b, expected) {
|
||||
t.Errorf("Bad arrays marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, b)
|
||||
}
|
||||
}
@@ -1,38 +0,0 @@
title = "TOML Marshal Testing"

[basic]
bool = true
date = 1979-05-27T07:32:00Z
float = 123.4
int = 5000
string = "Bite me"
uint = 5001

[basic_lists]
bools = [true,false,true]
dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
floats = [12.3,45.6,78.9]
ints = [8001,8001,8002]
strings = ["One","Two","Three"]
uints = [5002,5003]

[basic_map]
one = "one"
two = "two"

[subdoc]

[subdoc.first]
name = "First"

[subdoc.second]
name = "Second"

[[subdoclist]]
name = "List.First"

[[subdoclist]]
name = "List.Second"

[[subdocptrs]]
name = "Second"