Mirror of https://github.com/microsoft/docker.git
Merge pull request #27998 from dnephin/compose-on-swarm
Support `docker stack deploy` from a Compose file
This commit is contained in:
Commit 750d634d62
@@ -297,7 +297,7 @@ func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec {
	ports, portBindings, _ := nat.ParsePortSpecs(e.ports.GetAll())

	for port := range ports {
-		portConfigs = append(portConfigs, convertPortToPortConfig(port, portBindings)...)
+		portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...)
	}

	return &swarm.EndpointSpec{
@@ -306,7 +306,8 @@ func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec {
	}
}

-func convertPortToPortConfig(
+// ConvertPortToPortConfig converts ports to the swarm type
+func ConvertPortToPortConfig(
	port nat.Port,
	portBindings map[nat.Port][]nat.PortBinding,
) []swarm.PortConfig {
@@ -639,7 +639,7 @@ func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error {
	ports, portBindings, _ := nat.ParsePortSpecs(values)

	for port := range ports {
-		newConfigs := convertPortToPortConfig(port, portBindings)
+		newConfigs := ConvertPortToPortConfig(port, portBindings)
		for _, entry := range newConfigs {
			if v, ok := portSet[portConfigToString(&entry)]; ok && v != entry {
				return fmt.Errorf("conflicting port mapping between %v:%v/%s and %v:%v/%s", entry.PublishedPort, entry.TargetPort, entry.Protocol, v.PublishedPort, v.TargetPort, v.Protocol)
@@ -19,7 +19,6 @@ func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command {
		Tags: map[string]string{"experimental": "", "version": "1.25"},
	}
	cmd.AddCommand(
-		newConfigCommand(dockerCli),
		newDeployCommand(dockerCli),
		newListCommand(dockerCli),
		newRemoveCommand(dockerCli),
@@ -46,3 +46,11 @@ func getNetworks(
		ctx,
		types.NetworkListOptions{Filters: getStackFilter(namespace)})
}
+
+type namespace struct {
+	name string
+}
+
+func (n namespace) scope(name string) string {
+	return n.name + "_" + name
+}
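The namespace helper added above is what gives every stack resource its "<stack>_<name>" prefix. A minimal runnable sketch of that behavior (the main wrapper and the stack name are illustrative only):

	package main

	import "fmt"

	// namespace mirrors the type introduced in the hunk above.
	type namespace struct {
		name string
	}

	// scope prefixes a resource name with the stack name.
	func (n namespace) scope(name string) string {
		return n.name + "_" + name
	}

	func main() {
		ns := namespace{name: "testdeploy"}
		fmt.Println(ns.scope("web")) // testdeploy_web
		fmt.Println(ns.scope("db"))  // testdeploy_db
	}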
@ -1,39 +0,0 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/cli/command/bundlefile"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type configOptions struct {
|
||||
bundlefile string
|
||||
namespace string
|
||||
}
|
||||
|
||||
func newConfigCommand(dockerCli *command.DockerCli) *cobra.Command {
|
||||
var opts configOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "config [OPTIONS] STACK",
|
||||
Short: "Print the stack configuration",
|
||||
Args: cli.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts.namespace = args[0]
|
||||
return runConfig(dockerCli, opts)
|
||||
},
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
addBundlefileFlag(&opts.bundlefile, flags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runConfig(dockerCli *command.DockerCli, opts configOptions) error {
|
||||
bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bundlefile.Print(dockerCli.Out(), bundle)
|
||||
}
|
|
@@ -2,16 +2,26 @@ package stack

import (
	"fmt"
+	"io/ioutil"
+	"os"
+	"sort"
	"strings"

	"github.com/spf13/cobra"
	"golang.org/x/net/context"

+	"github.com/aanand/compose-file/loader"
+	composetypes "github.com/aanand/compose-file/types"
	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/mount"
+	networktypes "github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/cli/command/bundlefile"
+	servicecmd "github.com/docker/docker/cli/command/service"
+	"github.com/docker/docker/opts"
+	runconfigopts "github.com/docker/docker/runconfig/opts"
	"github.com/docker/go-connections/nat"
)

const (
@@ -20,6 +30,7 @@ const (

type deployOptions struct {
	bundlefile       string
+	composefile      string
	namespace        string
	sendRegistryAuth bool
}
@@ -30,10 +41,10 @@ func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command {
	cmd := &cobra.Command{
		Use:     "deploy [OPTIONS] STACK",
		Aliases: []string{"up"},
-		Short:   "Create and update a stack from a Distributed Application Bundle (DAB)",
+		Short:   "Deploy a new stack or update an existing stack",
		Args:    cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.namespace = strings.TrimSuffix(args[0], ".dab")
+			opts.namespace = args[0]
			return runDeploy(dockerCli, opts)
		},
		Tags: map[string]string{"experimental": "", "version": "1.25"},
@@ -41,57 +52,160 @@ func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command {

	flags := cmd.Flags()
	addBundlefileFlag(&opts.bundlefile, flags)
+	addComposefileFlag(&opts.composefile, flags)
	addRegistryAuthFlag(&opts.sendRegistryAuth, flags)
	return cmd
}

func runDeploy(dockerCli *command.DockerCli, opts deployOptions) error {
-	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
-	if err != nil {
-		return err
+	switch {
+	case opts.bundlefile == "" && opts.composefile == "":
+		return fmt.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).")
+	case opts.bundlefile != "" && opts.composefile != "":
+		return fmt.Errorf("You cannot specify both a bundle file and a Compose file.")
+	case opts.bundlefile != "":
+		return deployBundle(dockerCli, opts)
+	default:
+		return deployCompose(dockerCli, opts)
	}
-
-	info, err := dockerCli.Client().Info(context.Background())
-	if err != nil {
-		return err
-	}
-	if !info.Swarm.ControlAvailable {
-		return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
-	}
-
-	networks := getUniqueNetworkNames(bundle.Services)
-	ctx := context.Background()
-
-	if err := updateNetworks(ctx, dockerCli, networks, opts.namespace); err != nil {
-		return err
-	}
-	return deployServices(ctx, dockerCli, bundle.Services, opts.namespace, opts.sendRegistryAuth)
}

-func getUniqueNetworkNames(services map[string]bundlefile.Service) []string {
-	networkSet := make(map[string]bool)
-	for _, service := range services {
-		for _, network := range service.Networks {
-			networkSet[network] = true
+func deployCompose(dockerCli *command.DockerCli, opts deployOptions) error {
+	configDetails, err := getConfigDetails(opts)
+	if err != nil {
+		return err
	}

+	config, err := loader.Load(configDetails)
+	if err != nil {
+		if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok {
+			return fmt.Errorf("Compose file contains unsupported options:\n\n%s\n",
+				propertyWarnings(fpe.Properties))
+		}
+
+		return err
+	}
+
-	networks := []string{}
-	for network := range networkSet {
-		networks = append(networks, network)
+	unsupportedProperties := loader.GetUnsupportedProperties(configDetails)
+	if len(unsupportedProperties) > 0 {
+		fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n",
+			strings.Join(unsupportedProperties, ", "))
	}
-	return networks
+
+	deprecatedProperties := loader.GetDeprecatedProperties(configDetails)
+	if len(deprecatedProperties) > 0 {
+		fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n",
+			propertyWarnings(deprecatedProperties))
+	}
+
+	ctx := context.Background()
+	namespace := namespace{name: opts.namespace}
+
+	networks := convertNetworks(namespace, config.Networks)
+	if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil {
+		return err
+	}
+	services, err := convertServices(namespace, config)
+	if err != nil {
+		return err
+	}
+	return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth)
}

-func updateNetworks(
+func propertyWarnings(properties map[string]string) string {
+	var msgs []string
+	for name, description := range properties {
+		msgs = append(msgs, fmt.Sprintf("%s: %s", name, description))
+	}
+	sort.Strings(msgs)
+	return strings.Join(msgs, "\n\n")
+}
+
+func getConfigDetails(opts deployOptions) (composetypes.ConfigDetails, error) {
+	var details composetypes.ConfigDetails
+	var err error
+
+	details.WorkingDir, err = os.Getwd()
+	if err != nil {
+		return details, err
+	}
+
+	configFile, err := getConfigFile(opts.composefile)
+	if err != nil {
+		return details, err
+	}
+	// TODO: support multiple files
+	details.ConfigFiles = []composetypes.ConfigFile{*configFile}
+	return details, nil
+}
+
+func getConfigFile(filename string) (*composetypes.ConfigFile, error) {
+	bytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := loader.ParseYAML(bytes)
+	if err != nil {
+		return nil, err
+	}
+	return &composetypes.ConfigFile{
+		Filename: filename,
+		Config:   config,
+	}, nil
+}
+
+func convertNetworks(
+	namespace namespace,
+	networks map[string]composetypes.NetworkConfig,
+) map[string]types.NetworkCreate {
+	if networks == nil {
+		networks = make(map[string]composetypes.NetworkConfig)
+	}
+
+	// TODO: only add default network if it's used
+	networks["default"] = composetypes.NetworkConfig{}
+
+	result := make(map[string]types.NetworkCreate)
+
+	for internalName, network := range networks {
+		if network.External.Name != "" {
+			continue
+		}
+
+		createOpts := types.NetworkCreate{
+			Labels:  getStackLabels(namespace.name, network.Labels),
+			Driver:  network.Driver,
+			Options: network.DriverOpts,
+		}
+
+		if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 {
+			createOpts.IPAM = &networktypes.IPAM{}
+		}
+
+		if network.Ipam.Driver != "" {
+			createOpts.IPAM.Driver = network.Ipam.Driver
+		}
+		for _, ipamConfig := range network.Ipam.Config {
+			config := networktypes.IPAMConfig{
+				Subnet: ipamConfig.Subnet,
+			}
+			createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
+		}
+		result[internalName] = createOpts
+	}
+
+	return result
+}
+
+func createNetworks(
	ctx context.Context,
	dockerCli *command.DockerCli,
-	networks []string,
-	namespace string,
+	namespace namespace,
+	networks map[string]types.NetworkCreate,
) error {
	client := dockerCli.Client()

-	existingNetworks, err := getNetworks(ctx, client, namespace)
+	existingNetworks, err := getNetworks(ctx, client, namespace.name)
	if err != nil {
		return err
	}
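With the runDeploy dispatch above in place, a single command covers both input formats; for example (stack and file names are illustrative):

	docker stack deploy --compose-file docker-compose.yml mystack
	docker stack deploy --bundle-file mystack.dab mystack

Passing neither flag, or both at once, is rejected up front by the switch.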
@@ -101,47 +215,170 @@ func updateNetworks(
		existingNetworkMap[network.Name] = network
	}

-	createOpts := types.NetworkCreate{
-		Labels: getStackLabels(namespace, nil),
-		Driver: defaultNetworkDriver,
-	}
-
-	for _, internalName := range networks {
-		name := fmt.Sprintf("%s_%s", namespace, internalName)
+	for internalName, createOpts := range networks {
+		name := namespace.scope(internalName)
		if _, exists := existingNetworkMap[name]; exists {
			continue
		}

+		if createOpts.Driver == "" {
+			createOpts.Driver = defaultNetworkDriver
+		}
+
		fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name)
		if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil {
			return err
		}
	}

	return nil
}

-func convertNetworks(networks []string, namespace string, name string) []swarm.NetworkAttachmentConfig {
+func convertServiceNetworks(
+	networks map[string]*composetypes.ServiceNetworkConfig,
+	namespace namespace,
+	name string,
+) []swarm.NetworkAttachmentConfig {
+	if len(networks) == 0 {
+		return []swarm.NetworkAttachmentConfig{
+			{
+				Target:  namespace.scope("default"),
+				Aliases: []string{name},
+			},
+		}
+	}
+
	nets := []swarm.NetworkAttachmentConfig{}
-	for _, network := range networks {
+	for networkName, network := range networks {
		nets = append(nets, swarm.NetworkAttachmentConfig{
-			Target:  namespace + "_" + network,
-			Aliases: []string{name},
+			Target:  namespace.scope(networkName),
+			Aliases: append(network.Aliases, name),
		})
	}
	return nets
}

+func convertVolumes(
+	serviceVolumes []string,
+	stackVolumes map[string]composetypes.VolumeConfig,
+	namespace namespace,
+) ([]mount.Mount, error) {
+	var mounts []mount.Mount
+
+	for _, volumeSpec := range serviceVolumes {
+		mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace)
+		if err != nil {
+			return nil, err
+		}
+		mounts = append(mounts, mount)
+	}
+	return mounts, nil
+}
+
+func convertVolumeToMount(
+	volumeSpec string,
+	stackVolumes map[string]composetypes.VolumeConfig,
+	namespace namespace,
+) (mount.Mount, error) {
+	var source, target string
+	var mode []string
+
+	// TODO: split Windows path mappings properly
+	parts := strings.SplitN(volumeSpec, ":", 3)
+
+	switch len(parts) {
+	case 3:
+		source = parts[0]
+		target = parts[1]
+		mode = strings.Split(parts[2], ",")
+	case 2:
+		source = parts[0]
+		target = parts[1]
+	case 1:
+		target = parts[0]
+	default:
+		return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec)
+	}
+
+	// TODO: catch Windows paths here
+	if strings.HasPrefix(source, "/") {
+		return mount.Mount{
+			Type:        mount.TypeBind,
+			Source:      source,
+			Target:      target,
+			ReadOnly:    isReadOnly(mode),
+			BindOptions: getBindOptions(mode),
+		}, nil
+	}
+
+	stackVolume, exists := stackVolumes[source]
+	if !exists {
+		return mount.Mount{}, fmt.Errorf("undefined volume: %s", source)
+	}
+
+	var volumeOptions *mount.VolumeOptions
+	if stackVolume.External.Name != "" {
+		source = stackVolume.External.Name
+	} else {
+		volumeOptions = &mount.VolumeOptions{
+			Labels: stackVolume.Labels,
+			NoCopy: isNoCopy(mode),
+		}
+
+		if stackVolume.Driver != "" {
+			volumeOptions.DriverConfig = &mount.Driver{
+				Name:    stackVolume.Driver,
+				Options: stackVolume.DriverOpts,
+			}
+		}
+		source = namespace.scope(source)
+	}
+	return mount.Mount{
+		Type:          mount.TypeVolume,
+		Source:        source,
+		Target:        target,
+		ReadOnly:      isReadOnly(mode),
+		VolumeOptions: volumeOptions,
+	}, nil
+}
+
+func modeHas(mode []string, field string) bool {
+	for _, item := range mode {
+		if item == field {
+			return true
+		}
+	}
+	return false
+}
+
+func isReadOnly(mode []string) bool {
+	return modeHas(mode, "ro")
+}
+
+func isNoCopy(mode []string) bool {
+	return modeHas(mode, "nocopy")
+}
+
+func getBindOptions(mode []string) *mount.BindOptions {
+	for _, item := range mode {
+		if strings.Contains(item, "private") || strings.Contains(item, "shared") || strings.Contains(item, "slave") {
+			return &mount.BindOptions{Propagation: mount.Propagation(item)}
+		}
+	}
+	return nil
+}
+
func deployServices(
	ctx context.Context,
	dockerCli *command.DockerCli,
-	services map[string]bundlefile.Service,
-	namespace string,
+	services map[string]swarm.ServiceSpec,
+	namespace namespace,
	sendAuth bool,
) error {
	apiClient := dockerCli.Client()
	out := dockerCli.Out()

-	existingServices, err := getServices(ctx, apiClient, namespace)
+	existingServices, err := getServices(ctx, apiClient, namespace.name)
	if err != nil {
		return err
	}
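The SplitN switch in convertVolumeToMount above distinguishes three volume-spec shapes. A small self-contained sketch of just that splitting step (the spec strings are illustrative, not taken from the fixtures):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// The three shapes convertVolumeToMount accepts:
		// source:target:mode, source:target, and a bare target.
		for _, spec := range []string{
			"/host/logs:/var/log:ro", // bind mount with a mode
			"data:/var/lib/db",       // named volume, scoped to the stack
			"/var/lib/db",            // target only: anonymous volume
		} {
			parts := strings.SplitN(spec, ":", 3)
			fmt.Println(len(parts), parts)
		}
	}

A source starting with "/" selects a bind mount; any other source is treated as a named volume and, unless it is external, scoped to "<stack>_<volume>" via namespace.scope.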
@@ -151,47 +388,8 @@ func deployServices(
		existingServiceMap[service.Spec.Name] = service
	}

-	for internalName, service := range services {
-		name := fmt.Sprintf("%s_%s", namespace, internalName)
-
-		var ports []swarm.PortConfig
-		for _, portSpec := range service.Ports {
-			ports = append(ports, swarm.PortConfig{
-				Protocol:   swarm.PortConfigProtocol(portSpec.Protocol),
-				TargetPort: portSpec.Port,
-			})
-		}
-
-		serviceSpec := swarm.ServiceSpec{
-			Annotations: swarm.Annotations{
-				Name:   name,
-				Labels: getStackLabels(namespace, service.Labels),
-			},
-			TaskTemplate: swarm.TaskSpec{
-				ContainerSpec: swarm.ContainerSpec{
-					Image:   service.Image,
-					Command: service.Command,
-					Args:    service.Args,
-					Env:     service.Env,
-					// Service Labels will not be copied to Containers
-					// automatically during the deployment so we apply
-					// it here.
-					Labels: getStackLabels(namespace, nil),
-				},
-			},
-			EndpointSpec: &swarm.EndpointSpec{
-				Ports: ports,
-			},
-			Networks: convertNetworks(service.Networks, namespace, internalName),
-		}
-
-		cspec := &serviceSpec.TaskTemplate.ContainerSpec
-		if service.WorkingDir != nil {
-			cspec.Dir = *service.WorkingDir
-		}
-		if service.User != nil {
-			cspec.User = *service.User
-		}
+	for internalName, serviceSpec := range services {
+		name := namespace.scope(internalName)

		encodedAuth := ""
		if sendAuth {
@@ -234,3 +432,202 @@ func deployServices(

	return nil
}
+
+func convertServices(
+	namespace namespace,
+	config *composetypes.Config,
+) (map[string]swarm.ServiceSpec, error) {
+	result := make(map[string]swarm.ServiceSpec)
+
+	services := config.Services
+	volumes := config.Volumes
+
+	for _, service := range services {
+		serviceSpec, err := convertService(namespace, service, volumes)
+		if err != nil {
+			return nil, err
+		}
+		result[service.Name] = serviceSpec
+	}
+
+	return result, nil
+}
+
+func convertService(
+	namespace namespace,
+	service composetypes.ServiceConfig,
+	volumes map[string]composetypes.VolumeConfig,
+) (swarm.ServiceSpec, error) {
+	name := namespace.scope(service.Name)
+
+	endpoint, err := convertEndpointSpec(service.Ports)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	mounts, err := convertVolumes(service.Volumes, volumes, namespace)
+	if err != nil {
+		// TODO: better error message (include service name)
+		return swarm.ServiceSpec{}, err
+	}
+
+	resources, err := convertResources(service.Deploy.Resources)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	restartPolicy, err := convertRestartPolicy(
+		service.Restart, service.Deploy.RestartPolicy)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	serviceSpec := swarm.ServiceSpec{
+		Annotations: swarm.Annotations{
+			Name:   name,
+			Labels: getStackLabels(namespace.name, service.Deploy.Labels),
+		},
+		TaskTemplate: swarm.TaskSpec{
+			ContainerSpec: swarm.ContainerSpec{
+				Image:           service.Image,
+				Command:         service.Entrypoint,
+				Args:            service.Command,
+				Hostname:        service.Hostname,
+				Env:             convertEnvironment(service.Environment),
+				Labels:          getStackLabels(namespace.name, service.Labels),
+				Dir:             service.WorkingDir,
+				User:            service.User,
+				Mounts:          mounts,
+				StopGracePeriod: service.StopGracePeriod,
+			},
+			Resources:     resources,
+			RestartPolicy: restartPolicy,
+			Placement: &swarm.Placement{
+				Constraints: service.Deploy.Placement.Constraints,
+			},
+		},
+		EndpointSpec: endpoint,
+		Mode:         mode,
+		Networks:     convertServiceNetworks(service.Networks, namespace, service.Name),
+		UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig),
+	}
+
+	return serviceSpec, nil
+}
+
+func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
+	// TODO: log if restart is being ignored
+	if source == nil {
+		policy, err := runconfigopts.ParseRestartPolicy(restart)
+		if err != nil {
+			return nil, err
+		}
+		// TODO: is this an accurate conversion?
+		switch {
+		case policy.IsNone():
+			return nil, nil
+		case policy.IsAlways(), policy.IsUnlessStopped():
+			return &swarm.RestartPolicy{
+				Condition: swarm.RestartPolicyConditionAny,
+			}, nil
+		case policy.IsOnFailure():
+			attempts := uint64(policy.MaximumRetryCount)
+			return &swarm.RestartPolicy{
+				Condition:   swarm.RestartPolicyConditionOnFailure,
+				MaxAttempts: &attempts,
+			}, nil
+		}
+	}
+	return &swarm.RestartPolicy{
+		Condition:   swarm.RestartPolicyCondition(source.Condition),
+		Delay:       source.Delay,
+		MaxAttempts: source.MaxAttempts,
+		Window:      source.Window,
+	}, nil
+}
+
+func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig {
+	if source == nil {
+		return nil
+	}
+	return &swarm.UpdateConfig{
+		Parallelism:     source.Parallelism,
+		Delay:           source.Delay,
+		FailureAction:   source.FailureAction,
+		Monitor:         source.Monitor,
+		MaxFailureRatio: source.MaxFailureRatio,
+	}
+}
+
+func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) {
+	resources := &swarm.ResourceRequirements{}
+	if source.Limits != nil {
+		cpus, err := opts.ParseCPUs(source.Limits.NanoCPUs)
+		if err != nil {
+			return nil, err
+		}
+		resources.Limits = &swarm.Resources{
+			NanoCPUs:    cpus,
+			MemoryBytes: int64(source.Limits.MemoryBytes),
+		}
+	}
+	if source.Reservations != nil {
+		cpus, err := opts.ParseCPUs(source.Reservations.NanoCPUs)
+		if err != nil {
+			return nil, err
+		}
+		resources.Reservations = &swarm.Resources{
+			NanoCPUs:    cpus,
+			MemoryBytes: int64(source.Reservations.MemoryBytes),
+		}
+	}
+	return resources, nil
+}
+
+func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) {
+	portConfigs := []swarm.PortConfig{}
+	ports, portBindings, err := nat.ParsePortSpecs(source)
+	if err != nil {
+		return nil, err
+	}
+
+	for port := range ports {
+		portConfigs = append(
+			portConfigs,
+			servicecmd.ConvertPortToPortConfig(port, portBindings)...)
+	}
+
+	return &swarm.EndpointSpec{Ports: portConfigs}, nil
+}
+
+func convertEnvironment(source map[string]string) []string {
+	var output []string
+
+	for name, value := range source {
+		output = append(output, fmt.Sprintf("%s=%s", name, value))
+	}
+
+	return output
+}
+
+func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) {
+	serviceMode := swarm.ServiceMode{}
+
+	switch mode {
+	case "global":
+		if replicas != nil {
+			return serviceMode, fmt.Errorf("replicas can only be used with replicated mode")
+		}
+		serviceMode.Global = &swarm.GlobalService{}
+	case "replicated", "":
+		serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
+	default:
+		return serviceMode, fmt.Errorf("Unknown mode: %s", mode)
+	}
+	return serviceMode, nil
+}
@@ -0,0 +1,80 @@
package stack

import (
	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/cli/command"
)

func deployBundle(dockerCli *command.DockerCli, opts deployOptions) error {
	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
	if err != nil {
		return err
	}

	namespace := namespace{name: opts.namespace}

	networks := make(map[string]types.NetworkCreate)
	for _, service := range bundle.Services {
		for _, networkName := range service.Networks {
			networks[networkName] = types.NetworkCreate{
				Labels: getStackLabels(namespace.name, nil),
			}
		}
	}

	services := make(map[string]swarm.ServiceSpec)
	for internalName, service := range bundle.Services {
		name := namespace.scope(internalName)

		var ports []swarm.PortConfig
		for _, portSpec := range service.Ports {
			ports = append(ports, swarm.PortConfig{
				Protocol:   swarm.PortConfigProtocol(portSpec.Protocol),
				TargetPort: portSpec.Port,
			})
		}

		nets := []swarm.NetworkAttachmentConfig{}
		for _, networkName := range service.Networks {
			nets = append(nets, swarm.NetworkAttachmentConfig{
				Target:  namespace.scope(networkName),
				Aliases: []string{networkName},
			})
		}

		serviceSpec := swarm.ServiceSpec{
			Annotations: swarm.Annotations{
				Name:   name,
				Labels: getStackLabels(namespace.name, service.Labels),
			},
			TaskTemplate: swarm.TaskSpec{
				ContainerSpec: swarm.ContainerSpec{
					Image:   service.Image,
					Command: service.Command,
					Args:    service.Args,
					Env:     service.Env,
					// Service Labels will not be copied to Containers
					// automatically during the deployment so we apply
					// it here.
					Labels: getStackLabels(namespace.name, nil),
				},
			},
			EndpointSpec: &swarm.EndpointSpec{
				Ports: ports,
			},
			Networks: nets,
		}

		services[internalName] = serviceSpec
	}

	ctx := context.Background()

	if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil {
		return err
	}
	return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth)
}
@@ -9,11 +9,12 @@ import (
	"github.com/spf13/pflag"
)

+func addComposefileFlag(opt *string, flags *pflag.FlagSet) {
+	flags.StringVar(opt, "compose-file", "", "Path to a Compose file")
+}
+
func addBundlefileFlag(opt *string, flags *pflag.FlagSet) {
-	flags.StringVar(
-		opt,
-		"file", "",
-		"Path to a Distributed Application Bundle file (Default: STACK.dab)")
+	flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file")
}

func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) {
@@ -141,8 +141,8 @@ func (c *containerConfig) config() *enginecontainer.Config {
		Labels: c.labels(),
		Tty: c.spec().TTY,
		User: c.spec().User,
-		Hostname: c.spec().Hostname,
		Env: c.spec().Env,
+		Hostname: c.spec().Hostname,
		WorkingDir: c.spec().Dir,
		Image: c.image(),
		Volumes: c.volumes(),
@@ -41,6 +41,30 @@ func (s *DockerSwarmSuite) TestStackServices(c *check.C) {
	c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n")
}

+func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) {
+	testRequires(c, ExperimentalDaemon)
+	d := s.AddDaemon(c, true, true)
+
+	testStackName := "testdeploy"
+	stackArgs := []string{
+		"stack", "deploy",
+		"--compose-file", "fixtures/deploy/default.yaml",
+		testStackName,
+	}
+	out, err := d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd([]string{"stack", "ls"}...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, check.Equals, "NAME SERVICES\n"+"testdeploy 2\n")
+
+	out, err = d.Cmd([]string{"stack", "rm", testStackName}...)
+	c.Assert(err, checker.IsNil)
+	out, err = d.Cmd([]string{"stack", "ls"}...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, check.Equals, "NAME SERVICES\n")
+}
+
// testDAB is the DAB JSON used for testing.
// TODO: Use template/text and substitute "Image" with the result of
// `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest`
@@ -59,7 +83,7 @@ const testDAB = `{
	}
}`

-func (s *DockerSwarmSuite) TestStackWithDAB(c *check.C) {
+func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) {
	testRequires(c, ExperimentalDaemon)
	// setup
	testStackName := "test"
@@ -69,7 +93,11 @@ func (s *DockerSwarmSuite) TestStackWithDAB(c *check.C) {
	c.Assert(err, checker.IsNil)
	d := s.AddDaemon(c, true, true)
	// deploy
-	stackArgs := []string{"stack", "deploy", testStackName}
+	stackArgs := []string{
+		"stack", "deploy",
+		"--bundle-file", testDABFileName,
+		testStackName,
+	}
	out, err := d.Cmd(stackArgs...)
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Contains, "Loading bundle from test.dab\n")
@@ -92,21 +120,3 @@ func (s *DockerSwarmSuite) TestStackWithDAB(c *check.C) {
	c.Assert(err, checker.IsNil)
	c.Assert(out, check.Equals, "NAME SERVICES\n")
}
-
-func (s *DockerSwarmSuite) TestStackWithDABExtension(c *check.C) {
-	testRequires(c, ExperimentalDaemon)
-	// setup
-	testStackName := "test.dab"
-	testDABFileName := testStackName
-	defer os.RemoveAll(testDABFileName)
-	err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444)
-	c.Assert(err, checker.IsNil)
-	d := s.AddDaemon(c, true, true)
-	// deploy
-	stackArgs := []string{"stack", "deploy", testStackName}
-	out, err := d.Cmd(stackArgs...)
-	c.Assert(err, checker.IsNil)
-	c.Assert(out, checker.Contains, "Loading bundle from test.dab\n")
-	c.Assert(out, checker.Contains, "Creating service test_srv1\n")
-	c.Assert(out, checker.Contains, "Creating service test_srv2\n")
-}
@@ -0,0 +1,9 @@
version: "3"
services:
  web:
    image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0
    command: top
  db:
    image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0
    command: "tail -f /dev/null"
opts/opts.go
@@ -331,16 +331,9 @@ func (c *NanoCPUs) String() string {

// Set sets the value of the NanoCPU by passing a string
func (c *NanoCPUs) Set(value string) error {
-	cpu, ok := new(big.Rat).SetString(value)
-	if !ok {
-		return fmt.Errorf("Failed to parse %v as a rational number", value)
-	}
-	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
-	if !nano.IsInt() {
-		return fmt.Errorf("value is too precise")
-	}
-	*c = NanoCPUs(nano.Num().Int64())
-	return nil
+	cpus, err := ParseCPUs(value)
+	*c = NanoCPUs(cpus)
+	return err
}

// Type returns the type
@@ -352,3 +345,16 @@ func (c *NanoCPUs) Type() string {
func (c *NanoCPUs) Value() int64 {
	return int64(*c)
}
+
+// ParseCPUs takes a string ratio and returns an integer value of nano cpus
+func ParseCPUs(value string) (int64, error) {
+	cpu, ok := new(big.Rat).SetString(value)
+	if !ok {
+		return 0, fmt.Errorf("failed to parse %v as a rational number", value)
+	}
+	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
+	if !nano.IsInt() {
+		return 0, fmt.Errorf("value is too precise")
+	}
+	return nano.Num().Int64(), nil
+}
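A quick check of the arithmetic in the new helper: ParseCPUs scales a decimal CPU count by 10^9 and rejects anything finer than one nano-CPU. A runnable sketch that mirrors it (the local parseCPUs copy and the sample values are illustrative):

	package main

	import (
		"fmt"
		"math/big"
	)

	// parseCPUs mirrors the ParseCPUs helper added above.
	func parseCPUs(value string) (int64, error) {
		cpu, ok := new(big.Rat).SetString(value)
		if !ok {
			return 0, fmt.Errorf("failed to parse %v as a rational number", value)
		}
		nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
		if !nano.IsInt() {
			return 0, fmt.Errorf("value is too precise")
		}
		return nano.Num().Int64(), nil
	}

	func main() {
		n, _ := parseCPUs("0.5")
		fmt.Println(n) // 500000000 nano-CPUs

		_, err := parseCPUs("0.0000000001") // finer than 1e-9
		fmt.Println(err)                    // value is too precise
	}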
@@ -130,3 +130,11 @@ github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff

# metrics
github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72
+
+# composefile
+github.com/aanand/compose-file 480a79677acccb83b52c41161c22eaf4358460cc
+github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
+github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
+github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
+github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
+gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
@@ -0,0 +1,191 @@
                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2016 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/github.com/aanand/compose-file/interpolation/interpolation.go (generated, vendored, new file)
@@ -0,0 +1,89 @@
package interpolation

import (
	"fmt"

	"github.com/aanand/compose-file/template"
	"github.com/aanand/compose-file/types"
)

func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) {
	out := types.Dict{}

	for name, item := range config {
		if item == nil {
			out[name] = nil
			continue
		}
		interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping)
		if err != nil {
			return nil, err
		}
		out[name] = interpolatedItem
	}

	return out, nil
}

func interpolateSectionItem(
	name string,
	item types.Dict,
	section string,
	mapping template.Mapping,
) (types.Dict, error) {

	out := types.Dict{}

	for key, value := range item {
		interpolatedValue, err := recursiveInterpolate(value, mapping)
		if err != nil {
			return nil, fmt.Errorf(
				"Invalid interpolation format for %#v option in %s %#v: %#v",
				key, section, name, err.Template,
			)
		}
		out[key] = interpolatedValue
	}

	return out, nil

}

func recursiveInterpolate(
	value interface{},
	mapping template.Mapping,
) (interface{}, *template.InvalidTemplateError) {

	switch value := value.(type) {

	case string:
		return template.Substitute(value, mapping)

	case types.Dict:
		out := types.Dict{}
		for key, elem := range value {
			interpolatedElem, err := recursiveInterpolate(elem, mapping)
			if err != nil {
				return nil, err
			}
			out[key] = interpolatedElem
		}
		return out, nil

	case []interface{}:
		out := make([]interface{}, len(value))
		for i, elem := range value {
			interpolatedElem, err := recursiveInterpolate(elem, mapping)
			if err != nil {
				return nil, err
			}
			out[i] = interpolatedElem
		}
		return out, nil

	default:
		return value, nil

	}

}
@@ -0,0 +1,597 @@
package loader

import (
	"fmt"
	"os"
	"path"
	"reflect"
	"regexp"
	"sort"
	"strings"

	"github.com/aanand/compose-file/interpolation"
	"github.com/aanand/compose-file/schema"
	"github.com/aanand/compose-file/types"
	"github.com/docker/docker/runconfig/opts"
	units "github.com/docker/go-units"
	shellwords "github.com/mattn/go-shellwords"
	"github.com/mitchellh/mapstructure"
	yaml "gopkg.in/yaml.v2"
)

var (
	fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+")
)

// ParseYAML reads the bytes from a file, parses the bytes into a mapping
// structure, and returns it.
func ParseYAML(source []byte) (types.Dict, error) {
	var cfg interface{}
	if err := yaml.Unmarshal(source, &cfg); err != nil {
		return nil, err
	}
	cfgMap, ok := cfg.(map[interface{}]interface{})
	if !ok {
		return nil, fmt.Errorf("Top-level object must be a mapping")
	}
	converted, err := convertToStringKeysRecursive(cfgMap, "")
	if err != nil {
		return nil, err
	}
	return converted.(types.Dict), nil
}

// Load reads a ConfigDetails and returns a fully loaded configuration
func Load(configDetails types.ConfigDetails) (*types.Config, error) {
	if len(configDetails.ConfigFiles) < 1 {
		return nil, fmt.Errorf("No files specified")
	}
	if len(configDetails.ConfigFiles) > 1 {
		return nil, fmt.Errorf("Multiple files are not yet supported")
	}

	configDict := getConfigDict(configDetails)

	if services, ok := configDict["services"]; ok {
		if servicesDict, ok := services.(types.Dict); ok {
			forbidden := getProperties(servicesDict, types.ForbiddenProperties)

			if len(forbidden) > 0 {
				return nil, &ForbiddenPropertiesError{Properties: forbidden}
			}
		}
	}

	if err := schema.Validate(configDict); err != nil {
		return nil, err
	}

	cfg := types.Config{}
	version := configDict["version"].(string)
	if version != "3" {
		return nil, fmt.Errorf("Unsupported version: %#v. The only supported version is 3", version)
	}

	if services, ok := configDict["services"]; ok {
		servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv)
		if err != nil {
			return nil, err
		}

		servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir)
		if err != nil {
			return nil, err
		}

		cfg.Services = servicesList
	}

	if networks, ok := configDict["networks"]; ok {
		networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv)
		if err != nil {
			return nil, err
		}

		networksMapping, err := loadNetworks(networksConfig)
		if err != nil {
			return nil, err
		}

		cfg.Networks = networksMapping
	}

	if volumes, ok := configDict["volumes"]; ok {
		volumesConfig, err := interpolation.Interpolate(volumes.(types.Dict), "volume", os.LookupEnv)
		if err != nil {
			return nil, err
		}

		volumesMapping, err := loadVolumes(volumesConfig)
		if err != nil {
			return nil, err
		}

		cfg.Volumes = volumesMapping
	}

	return &cfg, nil
}

func GetUnsupportedProperties(configDetails types.ConfigDetails) []string {
	unsupported := map[string]bool{}

	for _, service := range getServices(getConfigDict(configDetails)) {
		serviceDict := service.(types.Dict)
		for _, property := range types.UnsupportedProperties {
			if _, isSet := serviceDict[property]; isSet {
				unsupported[property] = true
			}
		}
	}

	return sortedKeys(unsupported)
}

func sortedKeys(set map[string]bool) []string {
	var keys []string
	for key, _ := range set {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}

func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string {
	return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties)
}

func getProperties(services types.Dict, propertyMap map[string]string) map[string]string {
	output := map[string]string{}

	for _, service := range services {
		if serviceDict, ok := service.(types.Dict); ok {
			for property, description := range propertyMap {
				if _, isSet := serviceDict[property]; isSet {
					output[property] = description
				}
			}
		}
	}

	return output
}

type ForbiddenPropertiesError struct {
	Properties map[string]string
}

func (e *ForbiddenPropertiesError) Error() string {
	return "Configuration contains forbidden properties"
}

// TODO: resolve multiple files into a single config
func getConfigDict(configDetails types.ConfigDetails) types.Dict {
	return configDetails.ConfigFiles[0].Config
}

func getServices(configDict types.Dict) types.Dict {
	if services, ok := configDict["services"]; ok {
		if servicesDict, ok := services.(types.Dict); ok {
			return servicesDict
		}
	}

	return types.Dict{}
}

func transform(source map[string]interface{}, target interface{}) error {
	data := mapstructure.Metadata{}
	config := &mapstructure.DecoderConfig{
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			transformHook,
			mapstructure.StringToTimeDurationHookFunc()),
		Result:   target,
		Metadata: &data,
	}
	decoder, err := mapstructure.NewDecoder(config)
	if err != nil {
		return err
	}
	err = decoder.Decode(source)
	// TODO: log unused keys
	return err
}

func transformHook(
	source reflect.Type,
	target reflect.Type,
	data interface{},
) (interface{}, error) {
	switch target {
	case reflect.TypeOf(types.External{}):
		return transformExternal(source, target, data)
	case reflect.TypeOf(make(map[string]string, 0)):
		return transformMapStringString(source, target, data)
	case reflect.TypeOf(types.UlimitsConfig{}):
		return transformUlimits(source, target, data)
	case reflect.TypeOf(types.UnitBytes(0)):
		return loadSize(data)
	}
	switch target.Kind() {
	case reflect.Struct:
		return transformStruct(source, target, data)
	}
	return data, nil
}

// keys needs to be converted to strings for jsonschema
// TODO: don't use types.Dict
func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
	if mapping, ok := value.(map[interface{}]interface{}); ok {
		dict := make(types.Dict)
		for key, entry := range mapping {
			str, ok := key.(string)
			if !ok {
				var location string
				if keyPrefix == "" {
					location = "at top level"
				} else {
					location = fmt.Sprintf("in %s", keyPrefix)
				}
				return nil, fmt.Errorf("Non-string key %s: %#v", location, key)
			}
			var newKeyPrefix string
			if keyPrefix == "" {
				newKeyPrefix = str
			} else {
				newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
			}
			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
			if err != nil {
				return nil, err
			}
			dict[str] = convertedEntry
		}
		return dict, nil
	}
	if list, ok := value.([]interface{}); ok {
		var convertedList []interface{}
		for index, entry := range list {
			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
			if err != nil {
				return nil, err
			}
			convertedList = append(convertedList, convertedEntry)
		}
		return convertedList, nil
	}
	return value, nil
}

func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) {
	var services []types.ServiceConfig

	for name, serviceDef := range servicesDict {
		serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir)
		if err != nil {
			return nil, err
		}
		services = append(services, *serviceConfig)
	}

	return services, nil
}

func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) {
	serviceConfig := &types.ServiceConfig{}
	if err := transform(serviceDict, serviceConfig); err != nil {
		return nil, err
	}
	serviceConfig.Name = name

	if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil {
		return nil, err
	}

	if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil {
		return nil, err
	}

	return serviceConfig, nil
}

func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error {
	environment := make(map[string]string)

	if envFileVal, ok := serviceDict["env_file"]; ok {
		envFiles := loadStringOrListOfStrings(envFileVal)

		var envVars []string

		for _, file := range envFiles {
			filePath := path.Join(workingDir, file)
			fileVars, err := opts.ParseEnvFile(filePath)
			if err != nil {
				return err
			}
			envVars = append(envVars, fileVars...)
		}

		for k, v := range opts.ConvertKVStringsToMap(envVars) {
			environment[k] = v
		}
	}

	for k, v := range serviceConfig.Environment {
		environment[k] = v
	}

	serviceConfig.Environment = environment

	return nil
}

func resolveVolumePaths(volumes []string, workingDir string) error {
	for i, mapping := range volumes {
		parts := strings.SplitN(mapping, ":", 2)
		if len(parts) == 1 {
			continue
		}

		if strings.HasPrefix(parts[0], ".") {
			parts[0] = path.Join(workingDir, parts[0])
		}
		parts[0] = expandUser(parts[0])

		volumes[i] = strings.Join(parts, ":")
	}

	return nil
}

// TODO: make this more robust
func expandUser(path string) string {
	if strings.HasPrefix(path, "~") {
		return strings.Replace(path, "~", os.Getenv("HOME"), 1)
	}
	return path
}

func transformUlimits(
	source reflect.Type,
	target reflect.Type,
	data interface{},
) (interface{}, error) {
	switch value := data.(type) {
	case int:
		return types.UlimitsConfig{Single: value}, nil
	case types.Dict:
		ulimit := types.UlimitsConfig{}
		ulimit.Soft = value["soft"].(int)
		ulimit.Hard = value["hard"].(int)
		return ulimit, nil
	default:
		return data, fmt.Errorf("invalid type %T for ulimits", value)
	}
}

func loadNetworks(source types.Dict) (map[string]types.NetworkConfig, error) {
	networks := make(map[string]types.NetworkConfig)
	err := transform(source, &networks)
	if err != nil {
		return networks, err
	}
	for name, network := range networks {
		if network.External.External && network.External.Name == "" {
			network.External.Name = name
			networks[name] = network
		}
	}
	return networks, nil
}

func loadVolumes(source types.Dict) (map[string]types.VolumeConfig, error) {
	volumes := make(map[string]types.VolumeConfig)
	err := transform(source, &volumes)
	if err != nil {
		return volumes, err
	}
	for name, volume := range volumes {
		if volume.External.External && volume.External.Name == "" {
			volume.External.Name = name
			volumes[name] = volume
		}
	}
	return volumes, nil
}

func transformStruct(
	source reflect.Type,
	target reflect.Type,
	data interface{},
) (interface{}, error) {
	structValue, ok := data.(map[string]interface{})
|
||||
if !ok {
|
||||
// FIXME: this is necessary because of convertToStringKeysRecursive
|
||||
structValue, ok = data.(types.Dict)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf(
|
||||
"transformStruct called with non-map type: %T, %s", data, data))
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
for i := 0; i < target.NumField(); i++ {
|
||||
field := target.Field(i)
|
||||
fieldTag := field.Tag.Get("compose")
|
||||
|
||||
yamlName := toYAMLName(field.Name)
|
||||
value, ok := structValue[yamlName]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
structValue[yamlName], err = convertField(
|
||||
fieldTag, reflect.TypeOf(value), field.Type, value)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("field %s: %s", yamlName, err.Error())
|
||||
}
|
||||
}
|
||||
return structValue, nil
|
||||
}
|
||||
|
||||
func transformMapStringString(
|
||||
source reflect.Type,
|
||||
target reflect.Type,
|
||||
data interface{},
|
||||
) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case map[string]interface{}:
|
||||
return toMapStringString(value), nil
|
||||
case types.Dict:
|
||||
return toMapStringString(value), nil
|
||||
case map[string]string:
|
||||
return value, nil
|
||||
default:
|
||||
return data, fmt.Errorf("invalid type %T for map[string]string", value)
|
||||
}
|
||||
}
|
||||
|
||||
func convertField(
|
||||
fieldTag string,
|
||||
source reflect.Type,
|
||||
target reflect.Type,
|
||||
data interface{},
|
||||
) (interface{}, error) {
|
||||
switch fieldTag {
|
||||
case "":
|
||||
return data, nil
|
||||
case "list_or_dict_equals":
|
||||
return loadMappingOrList(data, "="), nil
|
||||
case "list_or_dict_colon":
|
||||
return loadMappingOrList(data, ":"), nil
|
||||
case "list_or_struct_map":
|
||||
return loadListOrStructMap(data, target)
|
||||
case "string_or_list":
|
||||
return loadStringOrListOfStrings(data), nil
|
||||
case "list_of_strings_or_numbers":
|
||||
return loadListOfStringsOrNumbers(data), nil
|
||||
case "shell_command":
|
||||
return loadShellCommand(data)
|
||||
case "size":
|
||||
return loadSize(data)
|
||||
case "-":
|
||||
return nil, nil
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func transformExternal(
|
||||
source reflect.Type,
|
||||
target reflect.Type,
|
||||
data interface{},
|
||||
) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case bool:
|
||||
return map[string]interface{}{"external": value}, nil
|
||||
case types.Dict:
|
||||
return map[string]interface{}{"external": true, "name": value["name"]}, nil
|
||||
case map[string]interface{}:
|
||||
return map[string]interface{}{"external": true, "name": value["name"]}, nil
|
||||
default:
|
||||
return data, fmt.Errorf("invalid type %T for external", value)
|
||||
}
|
||||
}
|
||||
|
||||
func toYAMLName(name string) string {
|
||||
nameParts := fieldNameRegexp.FindAllString(name, -1)
|
||||
for i, p := range nameParts {
|
||||
nameParts[i] = strings.ToLower(p)
|
||||
}
|
||||
return strings.Join(nameParts, "_")
|
||||
}
|
||||
|
||||
func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) {
|
||||
mapValue := reflect.MakeMap(target)
|
||||
|
||||
if list, ok := value.([]interface{}); ok {
|
||||
for _, name := range list {
|
||||
mapValue.SetMapIndex(reflect.ValueOf(name), reflect.ValueOf(nil))
|
||||
}
|
||||
return mapValue.Interface(), nil
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func loadListOfStringsOrNumbers(value interface{}) []string {
|
||||
list := value.([]interface{})
|
||||
result := make([]string, len(list))
|
||||
for i, item := range list {
|
||||
result[i] = fmt.Sprint(item)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func loadStringOrListOfStrings(value interface{}) []string {
|
||||
if list, ok := value.([]interface{}); ok {
|
||||
result := make([]string, len(list))
|
||||
for i, item := range list {
|
||||
result[i] = fmt.Sprint(item)
|
||||
}
|
||||
return result
|
||||
}
|
||||
return []string{value.(string)}
|
||||
}
|
||||
|
||||
func loadMappingOrList(mappingOrList interface{}, sep string) map[string]string {
|
||||
if mapping, ok := mappingOrList.(types.Dict); ok {
|
||||
return toMapStringString(mapping)
|
||||
}
|
||||
if list, ok := mappingOrList.([]interface{}); ok {
|
||||
result := make(map[string]string)
|
||||
for _, value := range list {
|
||||
parts := strings.SplitN(value.(string), sep, 2)
|
||||
if len(parts) == 1 {
|
||||
result[parts[0]] = ""
|
||||
} else {
|
||||
result[parts[0]] = parts[1]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
panic(fmt.Errorf("expected a map or a slice, got: %#v", mappingOrList))
|
||||
}
|
||||
|
||||
func loadShellCommand(value interface{}) (interface{}, error) {
|
||||
if str, ok := value.(string); ok {
|
||||
return shellwords.Parse(str)
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func loadSize(value interface{}) (int64, error) {
|
||||
switch value := value.(type) {
|
||||
case int:
|
||||
return int64(value), nil
|
||||
case string:
|
||||
return units.RAMInBytes(value)
|
||||
}
|
||||
panic(fmt.Errorf("invalid type for size %T", value))
|
||||
}
|
||||
|
||||
func toMapStringString(value map[string]interface{}) map[string]string {
|
||||
output := make(map[string]string)
|
||||
for key, value := range value {
|
||||
output[key] = toString(value)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func toString(value interface{}) string {
|
||||
if value == nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprint(value)
|
||||
}
|
237  vendor/github.com/aanand/compose-file/schema/bindata.go  (generated, vendored, new file)
Diff not shown because one or more lines are too long

@@ -0,0 +1,113 @@
package schema

//go:generate go-bindata -pkg schema data

import (
	"fmt"
	"strings"
	"time"

	"github.com/xeipuuv/gojsonschema"
)

type portsFormatChecker struct{}

func (checker portsFormatChecker) IsFormat(input string) bool {
	// TODO: implement this
	return true
}

type durationFormatChecker struct{}

func (checker durationFormatChecker) IsFormat(input string) bool {
	_, err := time.ParseDuration(input)
	return err == nil
}

func init() {
	gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
	gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
	gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
}

// Validate uses the jsonschema to validate the configuration
func Validate(config map[string]interface{}) error {
	schemaData, err := Asset("data/config_schema_v3.json")
	if err != nil {
		return err
	}

	schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
	dataLoader := gojsonschema.NewGoLoader(config)

	result, err := gojsonschema.Validate(schemaLoader, dataLoader)
	if err != nil {
		return err
	}

	if !result.Valid() {
		return toError(result)
	}

	return nil
}

func toError(result *gojsonschema.Result) error {
	err := getMostSpecificError(result.Errors())
	description := getDescription(err)
	return fmt.Errorf("%s %s", err.Field(), description)
}

func getDescription(err gojsonschema.ResultError) string {
	if err.Type() == "invalid_type" {
		if expectedType, ok := err.Details()["expected"].(string); ok {
			return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
		}
	}

	return err.Description()
}

func humanReadableType(definition string) string {
	if definition[0:1] == "[" {
		allTypes := strings.Split(definition[1:len(definition)-1], ",")
		for i, t := range allTypes {
			allTypes[i] = humanReadableType(t)
		}
		return fmt.Sprintf(
			"%s or %s",
			strings.Join(allTypes[0:len(allTypes)-1], ", "),
			allTypes[len(allTypes)-1],
		)
	}
	if definition == "object" {
		return "mapping"
	}
	if definition == "array" {
		return "list"
	}
	return definition
}

func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError {
	var mostSpecificError gojsonschema.ResultError

	for _, err := range errors {
		if mostSpecificError == nil {
			mostSpecificError = err
		} else if specificity(err) > specificity(mostSpecificError) {
			mostSpecificError = err
		} else if specificity(err) == specificity(mostSpecificError) {
			// Invalid type errors win in a tie-breaker for most specific field name
			if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" {
				mostSpecificError = err
			}
		}
	}

	return mostSpecificError
}

func specificity(err gojsonschema.ResultError) int {
	return len(strings.Split(err.Field(), "."))
}
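A minimal usage sketch of Validate, assuming a config map as produced by the loader's convertToStringKeysRecursive; the missing-"version" failure mode is an assumption about the bundled v3 schema, and the example itself is not part of the commit:

package main

import (
	"fmt"

	"github.com/aanand/compose-file/schema"
)

func main() {
	// Assumed to fail validation: the v3 schema requires a top-level
	// "version" key, which this map omits.
	config := map[string]interface{}{
		"services": map[string]interface{}{},
	}

	if err := schema.Validate(config); err != nil {
		fmt.Println("invalid config:", err)
	}
}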
108  vendor/github.com/aanand/compose-file/template/template.go  (generated, vendored, new file)

@@ -0,0 +1,108 @@
package template

import (
	"fmt"
	"regexp"
	"strings"
)

var delimiter = "\\$"
var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?"

var patternString = fmt.Sprintf(
	"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
	delimiter, delimiter, substitution, substitution,
)

var pattern = regexp.MustCompile(patternString)

// InvalidTemplateError is returned when a variable template cannot be parsed.
type InvalidTemplateError struct {
	Template string
}

func (e InvalidTemplateError) Error() string {
	return fmt.Sprintf("Invalid template: %#v", e.Template)
}

// Mapping is a user-supplied function which maps from variable names to values.
// It returns the value as a string and a bool indicating whether
// the value is present, to distinguish between an empty string
// and the absence of a value.
type Mapping func(string) (string, bool)

// Substitute expands variable references in template using the given mapping.
func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(*InvalidTemplateError); ok {
				err = e
			} else {
				panic(r)
			}
		}
	}()

	result = pattern.ReplaceAllStringFunc(template, func(substring string) string {
		matches := pattern.FindStringSubmatch(substring)
		groups := make(map[string]string)
		for i, name := range pattern.SubexpNames() {
			if i != 0 {
				groups[name] = matches[i]
			}
		}

		substitution := groups["named"]
		if substitution == "" {
			substitution = groups["braced"]
		}
		if substitution != "" {
			// Soft default (fall back if unset or empty)
			if strings.Contains(substitution, ":-") {
				name, defaultValue := partition(substitution, ":-")
				value, ok := mapping(name)
				if !ok || value == "" {
					return defaultValue
				}
				return value
			}

			// Hard default (fall back only if unset)
			if strings.Contains(substitution, "-") {
				name, defaultValue := partition(substitution, "-")
				value, ok := mapping(name)
				if !ok {
					return defaultValue
				}
				return value
			}

			// No default (fall back to empty string)
			value, ok := mapping(substitution)
			if !ok {
				return ""
			}
			return value
		}

		if escaped := groups["escaped"]; escaped != "" {
			return escaped
		}

		panic(&InvalidTemplateError{Template: template})
	})

	return
}

// partition splits the string at the first occurrence of sep, and returns the
// part before the separator and the part after the separator.
//
// If the separator is not found, it returns the string itself, followed by an
// empty string.
func partition(s, sep string) (string, string) {
	if strings.Contains(s, sep) {
		parts := strings.SplitN(s, sep, 2)
		return parts[0], parts[1]
	}
	return s, ""
}
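A minimal sketch of the soft- and hard-default behavior documented above, assuming the vendored import path; the variable names and values are illustrative, and the example is not part of the commit:

package main

import (
	"fmt"

	"github.com/aanand/compose-file/template"
)

func main() {
	env := map[string]string{"HOST": "db", "USER": ""}
	mapping := func(name string) (string, bool) {
		value, ok := env[name]
		return value, ok
	}

	// Soft defaults: PORT is unset and USER is empty, so both fall back.
	soft, err := template.Substitute("${HOST}:${PORT:-5432} as ${USER:-admin}", mapping)
	if err != nil {
		panic(err)
	}
	fmt.Println(soft) // db:5432 as admin

	// Hard default: USER is set (to ""), so no fallback happens.
	hard, err := template.Substitute("${USER-admin}", mapping)
	if err != nil {
		panic(err)
	}
	fmt.Println(hard) // prints an empty line
}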
@@ -0,0 +1,200 @@
package types

import (
	"time"
)

var UnsupportedProperties = []string{
	"build",
	"cap_add",
	"cap_drop",
	"cgroup_parent",
	"devices",
	"dns",
	"dns_search",
	"domainname",
	"external_links",
	"extra_hosts",
	"ipc",
	"links",
	"mac_address",
	"network_mode",
	"privileged",
	"read_only",
	"restart",
	"security_opt",
	"shm_size",
	"stdin_open",
	"stop_signal",
	"tmpfs",
}

var DeprecatedProperties = map[string]string{
	"container_name": "Setting the container name is not supported.",
	"expose":         "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.",
}

var ForbiddenProperties = map[string]string{
	"extends":       "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.",
	"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.",
	"volumes_from":  "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.",
	"cpu_quota":     "Set resource limits using deploy.resources",
	"cpu_shares":    "Set resource limits using deploy.resources",
	"cpuset":        "Set resource limits using deploy.resources",
	"mem_limit":     "Set resource limits using deploy.resources",
	"memswap_limit": "Set resource limits using deploy.resources",
}

type Dict map[string]interface{}

type ConfigFile struct {
	Filename string
	Config   Dict
}

type ConfigDetails struct {
	WorkingDir  string
	ConfigFiles []ConfigFile
	Environment map[string]string
}

type Config struct {
	Services []ServiceConfig
	Networks map[string]NetworkConfig
	Volumes  map[string]VolumeConfig
}

type ServiceConfig struct {
	Name string

	CapAdd          []string `mapstructure:"cap_add"`
	CapDrop         []string `mapstructure:"cap_drop"`
	CgroupParent    string   `mapstructure:"cgroup_parent"`
	Command         []string `compose:"shell_command"`
	ContainerName   string   `mapstructure:"container_name"`
	DependsOn       []string `mapstructure:"depends_on"`
	Deploy          DeployConfig
	Devices         []string
	Dns             []string          `compose:"string_or_list"`
	DnsSearch       []string          `mapstructure:"dns_search" compose:"string_or_list"`
	DomainName      string            `mapstructure:"domainname"`
	Entrypoint      []string          `compose:"shell_command"`
	Environment     map[string]string `compose:"list_or_dict_equals"`
	Expose          []string          `compose:"list_of_strings_or_numbers"`
	ExternalLinks   []string          `mapstructure:"external_links"`
	ExtraHosts      map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"`
	Hostname        string
	Image           string
	Ipc             string
	Labels          map[string]string `compose:"list_or_dict_equals"`
	Links           []string
	Logging         *LoggingConfig
	MacAddress      string                           `mapstructure:"mac_address"`
	NetworkMode     string                           `mapstructure:"network_mode"`
	Networks        map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"`
	Pid             string
	Ports           []string `compose:"list_of_strings_or_numbers"`
	Privileged      bool
	ReadOnly        bool           `mapstructure:"read_only"`
	Restart         string
	SecurityOpt     []string       `mapstructure:"security_opt"`
	StdinOpen       bool           `mapstructure:"stdin_open"`
	StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"`
	StopSignal      string         `mapstructure:"stop_signal"`
	Tmpfs           []string       `compose:"string_or_list"`
	Tty             bool           `mapstructure:"tty"`
	Ulimits         map[string]*UlimitsConfig
	User            string
	Volumes         []string
	WorkingDir      string `mapstructure:"working_dir"`
}

type LoggingConfig struct {
	Driver  string
	Options map[string]string
}

type DeployConfig struct {
	Mode          string
	Replicas      *uint64
	Labels        map[string]string `compose:"list_or_dict_equals"`
	UpdateConfig  *UpdateConfig     `mapstructure:"update_config"`
	Resources     Resources
	RestartPolicy *RestartPolicy `mapstructure:"restart_policy"`
	Placement     Placement
}

type UpdateConfig struct {
	Parallelism     uint64
	Delay           time.Duration
	FailureAction   string `mapstructure:"failure_action"`
	Monitor         time.Duration
	MaxFailureRatio float32 `mapstructure:"max_failure_ratio"`
}

type Resources struct {
	Limits       *Resource
	Reservations *Resource
}

type Resource struct {
	// TODO: types to convert from units and ratios
	NanoCPUs    string    `mapstructure:"cpus"`
	MemoryBytes UnitBytes `mapstructure:"memory"`
}

type UnitBytes int64

type RestartPolicy struct {
	Condition   string
	Delay       *time.Duration
	MaxAttempts *uint64 `mapstructure:"max_attempts"`
	Window      *time.Duration
}

type Placement struct {
	Constraints []string
}

type ServiceNetworkConfig struct {
	Aliases     []string
	Ipv4Address string `mapstructure:"ipv4_address"`
	Ipv6Address string `mapstructure:"ipv6_address"`
}

type UlimitsConfig struct {
	Single int
	Soft   int
	Hard   int
}

type NetworkConfig struct {
	Driver     string
	DriverOpts map[string]string `mapstructure:"driver_opts"`
	Ipam       IPAMConfig
	External   External
	Labels     map[string]string `compose:"list_or_dict_equals"`
}

type IPAMConfig struct {
	Driver string
	Config []*IPAMPool
}

type IPAMPool struct {
	Subnet string
}

type VolumeConfig struct {
	Driver     string
	DriverOpts map[string]string `mapstructure:"driver_opts"`
	External   External
	Labels     map[string]string `compose:"list_or_dict_equals"`
}

// External identifies a Volume or Network as a reference to a resource that is
// not managed, and should already exist.
type External struct {
	Name     string
	External bool
}
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 Mitchell Hashimoto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
154  vendor/github.com/mitchellh/mapstructure/decode_hooks.go  (generated, vendored, new file)

@@ -0,0 +1,154 @@
package mapstructure

import (
	"errors"
	"reflect"
	"strconv"
	"strings"
	"time"
)

// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
	// Create variables here so we can reference them with the reflect pkg
	var f1 DecodeHookFuncType
	var f2 DecodeHookFuncKind

	// Fill in the variables into this interface and the rest is done
	// automatically using the reflect package.
	potential := []interface{}{f1, f2}

	v := reflect.ValueOf(h)
	vt := v.Type()
	for _, raw := range potential {
		pt := reflect.ValueOf(raw).Type()
		if vt.ConvertibleTo(pt) {
			return v.Convert(pt).Interface()
		}
	}

	return nil
}

// DecodeHookExec executes the given decode hook. This should be used
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
// that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
	raw DecodeHookFunc,
	from reflect.Type, to reflect.Type,
	data interface{}) (interface{}, error) {
	// Build our arguments that reflect expects
	argVals := make([]reflect.Value, 3)
	argVals[0] = reflect.ValueOf(from)
	argVals[1] = reflect.ValueOf(to)
	argVals[2] = reflect.ValueOf(data)

	switch f := typedDecodeHook(raw).(type) {
	case DecodeHookFuncType:
		return f(from, to, data)
	case DecodeHookFuncKind:
		return f(from.Kind(), to.Kind(), data)
	default:
		return nil, errors.New("invalid decode hook signature")
	}
}

// ComposeDecodeHookFunc creates a single DecodeHookFunc that
// automatically composes multiple DecodeHookFuncs.
//
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
	return func(
		f reflect.Type,
		t reflect.Type,
		data interface{}) (interface{}, error) {
		var err error
		for _, f1 := range fs {
			data, err = DecodeHookExec(f1, f, t, data)
			if err != nil {
				return nil, err
			}

			// Modify the from kind to be correct with the new data
			f = nil
			if val := reflect.ValueOf(data); val.IsValid() {
				f = val.Type()
			}
		}

		return data, nil
	}
}

// StringToSliceHookFunc returns a DecodeHookFunc that converts
// string to []string by splitting on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {
	return func(
		f reflect.Kind,
		t reflect.Kind,
		data interface{}) (interface{}, error) {
		if f != reflect.String || t != reflect.Slice {
			return data, nil
		}

		raw := data.(string)
		if raw == "" {
			return []string{}, nil
		}

		return strings.Split(raw, sep), nil
	}
}

// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
	return func(
		f reflect.Type,
		t reflect.Type,
		data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String {
			return data, nil
		}
		if t != reflect.TypeOf(time.Duration(5)) {
			return data, nil
		}

		// Convert it by parsing
		return time.ParseDuration(data.(string))
	}
}

// WeaklyTypedHook is a DecodeHookFunc which performs "weak" conversions of
// booleans, numbers, and byte slices to string.
func WeaklyTypedHook(
	f reflect.Kind,
	t reflect.Kind,
	data interface{}) (interface{}, error) {
	dataVal := reflect.ValueOf(data)
	switch t {
	case reflect.String:
		switch f {
		case reflect.Bool:
			if dataVal.Bool() {
				return "1", nil
			}
			return "0", nil
		case reflect.Float32:
			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
		case reflect.Int:
			return strconv.FormatInt(dataVal.Int(), 10), nil
		case reflect.Slice:
			dataType := dataVal.Type()
			elemKind := dataType.Elem().Kind()
			if elemKind == reflect.Uint8 {
				return string(dataVal.Interface().([]uint8)), nil
			}
		case reflect.Uint:
			return strconv.FormatUint(dataVal.Uint(), 10), nil
		}
	}

	return data, nil
}
@@ -0,0 +1,50 @@
package mapstructure

import (
	"errors"
	"fmt"
	"sort"
	"strings"
)

// Error implements the error interface and can represent multiple
// errors that occur in the course of a single decode.
type Error struct {
	Errors []string
}

func (e *Error) Error() string {
	points := make([]string, len(e.Errors))
	for i, err := range e.Errors {
		points[i] = fmt.Sprintf("* %s", err)
	}

	sort.Strings(points)
	return fmt.Sprintf(
		"%d error(s) decoding:\n\n%s",
		len(e.Errors), strings.Join(points, "\n"))
}

// WrappedErrors implements the errwrap.Wrapper interface to make this
// return value more useful with the errwrap and go-multierror libraries.
func (e *Error) WrappedErrors() []error {
	if e == nil {
		return nil
	}

	result := make([]error, len(e.Errors))
	for i, e := range e.Errors {
		result[i] = errors.New(e)
	}

	return result
}

func appendErrors(errors []string, err error) []string {
	switch e := err.(type) {
	case *Error:
		return append(errors, e.Errors...)
	default:
		return append(errors, e.Error())
	}
}
782  vendor/github.com/mitchellh/mapstructure/mapstructure.go  (generated, vendored, new file)

@@ -0,0 +1,782 @@
// The mapstructure package exposes functionality to convert an
|
||||
// abitrary map[string]interface{} into a native Go structure.
|
||||
//
|
||||
// The Go structure can be arbitrarily complex, containing slices,
|
||||
// other structs, etc. and the decoder will properly decode nested
|
||||
// maps and so on into the proper structures in the native Go struct.
|
||||
// See the examples to see what the decoder is capable of.
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DecodeHookFunc is the callback function that can be used for
|
||||
// data transformations. See "DecodeHook" in the DecoderConfig
|
||||
// struct.
|
||||
//
|
||||
// The type should be DecodeHookFuncType or DecodeHookFuncKind.
|
||||
// Either is accepted. Types are a superset of Kinds (Types can return
|
||||
// Kinds) and are generally a richer thing to use, but Kinds are simpler
|
||||
// if you only need those.
|
||||
//
|
||||
// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
|
||||
// we started with Kinds and then realized Types were the better solution,
|
||||
// but have a promise to not break backwards compat so we now support
|
||||
// both.
|
||||
type DecodeHookFunc interface{}
|
||||
|
||||
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
|
||||
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
|
||||
|
||||
// DecoderConfig is the configuration that is used to create a new decoder
|
||||
// and allows customization of various aspects of decoding.
|
||||
type DecoderConfig struct {
|
||||
// DecodeHook, if set, will be called before any decoding and any
|
||||
// type conversion (if WeaklyTypedInput is on). This lets you modify
|
||||
// the values before they're set down onto the resulting struct.
|
||||
//
|
||||
// If an error is returned, the entire decode will fail with that
|
||||
// error.
|
||||
DecodeHook DecodeHookFunc
|
||||
|
||||
// If ErrorUnused is true, then it is an error for there to exist
|
||||
// keys in the original map that were unused in the decoding process
|
||||
// (extra keys).
|
||||
ErrorUnused bool
|
||||
|
||||
// ZeroFields, if set to true, will zero fields before writing them.
|
||||
// For example, a map will be emptied before decoded values are put in
|
||||
// it. If this is false, a map will be merged.
|
||||
ZeroFields bool
|
||||
|
||||
// If WeaklyTypedInput is true, the decoder will make the following
|
||||
// "weak" conversions:
|
||||
//
|
||||
// - bools to string (true = "1", false = "0")
|
||||
// - numbers to string (base 10)
|
||||
// - bools to int/uint (true = 1, false = 0)
|
||||
// - strings to int/uint (base implied by prefix)
|
||||
// - int to bool (true if value != 0)
|
||||
// - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
|
||||
// FALSE, false, False. Anything else is an error)
|
||||
// - empty array = empty map and vice versa
|
||||
// - negative numbers to overflowed uint values (base 10)
|
||||
// - slice of maps to a merged map
|
||||
//
|
||||
WeaklyTypedInput bool
|
||||
|
||||
// Metadata is the struct that will contain extra metadata about
|
||||
// the decoding. If this is nil, then no metadata will be tracked.
|
||||
Metadata *Metadata
|
||||
|
||||
// Result is a pointer to the struct that will contain the decoded
|
||||
// value.
|
||||
Result interface{}
|
||||
|
||||
// The tag name that mapstructure reads for field names. This
|
||||
// defaults to "mapstructure"
|
||||
TagName string
|
||||
}
|
||||
|
||||
// A Decoder takes a raw interface value and turns it into structured
|
||||
// data, keeping track of rich error information along the way in case
|
||||
// anything goes wrong. Unlike the basic top-level Decode method, you can
|
||||
// more finely control how the Decoder behaves using the DecoderConfig
|
||||
// structure. The top-level Decode method is just a convenience that sets
|
||||
// up the most basic Decoder.
|
||||
type Decoder struct {
|
||||
config *DecoderConfig
|
||||
}
|
||||
|
||||
// Metadata contains information about decoding a structure that
|
||||
// is tedious or difficult to get otherwise.
|
||||
type Metadata struct {
|
||||
// Keys are the keys of the structure which were successfully decoded
|
||||
Keys []string
|
||||
|
||||
// Unused is a slice of keys that were found in the raw value but
|
||||
// weren't decoded since there was no matching field in the result interface
|
||||
Unused []string
|
||||
}
|
||||
|
||||
// Decode takes a map and uses reflection to convert it into the
|
||||
// given Go native structure. val must be a pointer to a struct.
|
||||
func Decode(m interface{}, rawVal interface{}) error {
|
||||
config := &DecoderConfig{
|
||||
Metadata: nil,
|
||||
Result: rawVal,
|
||||
}
|
||||
|
||||
decoder, err := NewDecoder(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return decoder.Decode(m)
|
||||
}
|
||||
|
||||
// WeakDecode is the same as Decode but is shorthand to enable
|
||||
// WeaklyTypedInput. See DecoderConfig for more info.
|
||||
func WeakDecode(input, output interface{}) error {
|
||||
config := &DecoderConfig{
|
||||
Metadata: nil,
|
||||
Result: output,
|
||||
WeaklyTypedInput: true,
|
||||
}
|
||||
|
||||
decoder, err := NewDecoder(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return decoder.Decode(input)
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder for the given configuration. Once
|
||||
// a decoder has been returned, the same configuration must not be used
|
||||
// again.
|
||||
func NewDecoder(config *DecoderConfig) (*Decoder, error) {
|
||||
val := reflect.ValueOf(config.Result)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return nil, errors.New("result must be a pointer")
|
||||
}
|
||||
|
||||
val = val.Elem()
|
||||
if !val.CanAddr() {
|
||||
return nil, errors.New("result must be addressable (a pointer)")
|
||||
}
|
||||
|
||||
if config.Metadata != nil {
|
||||
if config.Metadata.Keys == nil {
|
||||
config.Metadata.Keys = make([]string, 0)
|
||||
}
|
||||
|
||||
if config.Metadata.Unused == nil {
|
||||
config.Metadata.Unused = make([]string, 0)
|
||||
}
|
||||
}
|
||||
|
||||
if config.TagName == "" {
|
||||
config.TagName = "mapstructure"
|
||||
}
|
||||
|
||||
result := &Decoder{
|
||||
config: config,
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Decode decodes the given raw interface to the target pointer specified
|
||||
// by the configuration.
|
||||
func (d *Decoder) Decode(raw interface{}) error {
|
||||
return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
|
||||
}
|
||||
|
||||
// Decodes an unknown data type into a specific reflection value.
|
||||
func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
|
||||
if data == nil {
|
||||
// If the data is nil, then we don't set anything.
|
||||
return nil
|
||||
}
|
||||
|
||||
dataVal := reflect.ValueOf(data)
|
||||
if !dataVal.IsValid() {
|
||||
// If the data value is invalid, then we just set the value
|
||||
// to be the zero value.
|
||||
val.Set(reflect.Zero(val.Type()))
|
||||
return nil
|
||||
}
|
||||
|
||||
if d.config.DecodeHook != nil {
|
||||
// We have a DecodeHook, so let's pre-process the data.
|
||||
var err error
|
||||
data, err = DecodeHookExec(
|
||||
d.config.DecodeHook,
|
||||
dataVal.Type(), val.Type(), data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
dataKind := getKind(val)
|
||||
switch dataKind {
|
||||
case reflect.Bool:
|
||||
err = d.decodeBool(name, data, val)
|
||||
case reflect.Interface:
|
||||
err = d.decodeBasic(name, data, val)
|
||||
case reflect.String:
|
||||
err = d.decodeString(name, data, val)
|
||||
case reflect.Int:
|
||||
err = d.decodeInt(name, data, val)
|
||||
case reflect.Uint:
|
||||
err = d.decodeUint(name, data, val)
|
||||
case reflect.Float32:
|
||||
err = d.decodeFloat(name, data, val)
|
||||
case reflect.Struct:
|
||||
err = d.decodeStruct(name, data, val)
|
||||
case reflect.Map:
|
||||
err = d.decodeMap(name, data, val)
|
||||
case reflect.Ptr:
|
||||
err = d.decodePtr(name, data, val)
|
||||
case reflect.Slice:
|
||||
err = d.decodeSlice(name, data, val)
|
||||
default:
|
||||
// If we reached this point then we weren't able to decode it
|
||||
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
|
||||
}
|
||||
|
||||
// If we reached here, then we successfully decoded SOMETHING, so
|
||||
// mark the key as used if we're tracking metadata.
|
||||
if d.config.Metadata != nil && name != "" {
|
||||
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// This decodes a basic type (bool, int, string, etc.) and sets the
|
||||
// value to "data" of that type.
|
||||
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
if !dataVal.IsValid() {
|
||||
dataVal = reflect.Zero(val.Type())
|
||||
}
|
||||
|
||||
dataValType := dataVal.Type()
|
||||
if !dataValType.AssignableTo(val.Type()) {
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got '%s'",
|
||||
name, val.Type(), dataValType)
|
||||
}
|
||||
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
|
||||
converted := true
|
||||
switch {
|
||||
case dataKind == reflect.String:
|
||||
val.SetString(dataVal.String())
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetString("1")
|
||||
} else {
|
||||
val.SetString("0")
|
||||
}
|
||||
case dataKind == reflect.Int && d.config.WeaklyTypedInput:
|
||||
val.SetString(strconv.FormatInt(dataVal.Int(), 10))
|
||||
case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
|
||||
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
|
||||
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
|
||||
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
|
||||
case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
|
||||
dataType := dataVal.Type()
|
||||
elemKind := dataType.Elem().Kind()
|
||||
switch {
|
||||
case elemKind == reflect.Uint8:
|
||||
val.SetString(string(dataVal.Interface().([]uint8)))
|
||||
default:
|
||||
converted = false
|
||||
}
|
||||
default:
|
||||
converted = false
|
||||
}
|
||||
|
||||
if !converted {
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
val.SetInt(dataVal.Int())
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetInt(int64(dataVal.Uint()))
|
||||
case dataKind == reflect.Float32:
|
||||
val.SetInt(int64(dataVal.Float()))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetInt(1)
|
||||
} else {
|
||||
val.SetInt(0)
|
||||
}
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
|
||||
if err == nil {
|
||||
val.SetInt(i)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Int64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetInt(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
i := dataVal.Int()
|
||||
if i < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %d overflows uint",
|
||||
name, i)
|
||||
}
|
||||
val.SetUint(uint64(i))
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetUint(dataVal.Uint())
|
||||
case dataKind == reflect.Float32:
|
||||
f := dataVal.Float()
|
||||
if f < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %f overflows uint",
|
||||
name, f)
|
||||
}
|
||||
val.SetUint(uint64(f))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetUint(1)
|
||||
} else {
|
||||
val.SetUint(0)
|
||||
}
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
|
||||
if err == nil {
|
||||
val.SetUint(i)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Bool:
|
||||
val.SetBool(dataVal.Bool())
|
||||
case dataKind == reflect.Int && d.config.WeaklyTypedInput:
|
||||
val.SetBool(dataVal.Int() != 0)
|
||||
case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
|
||||
val.SetBool(dataVal.Uint() != 0)
|
||||
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
|
||||
val.SetBool(dataVal.Float() != 0)
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
b, err := strconv.ParseBool(dataVal.String())
|
||||
if err == nil {
|
||||
val.SetBool(b)
|
||||
} else if dataVal.String() == "" {
|
||||
val.SetBool(false)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
val.SetFloat(float64(dataVal.Int()))
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetFloat(float64(dataVal.Uint()))
|
||||
case dataKind == reflect.Float32:
|
||||
val.SetFloat(float64(dataVal.Float()))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetFloat(1)
|
||||
} else {
|
||||
val.SetFloat(0)
|
||||
}
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
|
||||
if err == nil {
|
||||
val.SetFloat(f)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Float64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetFloat(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
|
||||
valType := val.Type()
|
||||
valKeyType := valType.Key()
|
||||
valElemType := valType.Elem()
|
||||
|
||||
// By default we overwrite keys in the current map
|
||||
valMap := val
|
||||
|
||||
// If the map is nil or we're purposely zeroing fields, make a new map
|
||||
if valMap.IsNil() || d.config.ZeroFields {
|
||||
// Make a new map to hold our result
|
||||
mapType := reflect.MapOf(valKeyType, valElemType)
|
||||
valMap = reflect.MakeMap(mapType)
|
||||
}
|
||||
|
||||
// Check input type
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
if dataVal.Kind() != reflect.Map {
|
||||
// In weak mode, we accept a slice of maps as an input...
|
||||
if d.config.WeaklyTypedInput {
|
||||
switch dataVal.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Special case for BC reasons (covered by tests)
|
||||
if dataVal.Len() == 0 {
|
||||
val.Set(valMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
err := d.decode(
|
||||
fmt.Sprintf("%s[%d]", name, i),
|
||||
dataVal.Index(i).Interface(), val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||
}
|
||||
|
||||
// Accumulate errors
|
||||
errors := make([]string, 0)
|
||||
|
||||
for _, k := range dataVal.MapKeys() {
|
||||
fieldName := fmt.Sprintf("%s[%s]", name, k)
|
||||
|
||||
// First decode the key into the proper type
|
||||
currentKey := reflect.Indirect(reflect.New(valKeyType))
|
||||
if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
|
||||
errors = appendErrors(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Next decode the data into the proper type
|
||||
v := dataVal.MapIndex(k).Interface()
|
||||
currentVal := reflect.Indirect(reflect.New(valElemType))
|
||||
if err := d.decode(fieldName, v, currentVal); err != nil {
|
||||
errors = appendErrors(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
valMap.SetMapIndex(currentKey, currentVal)
|
||||
}
|
||||
|
||||
// Set the built up map to the value
|
||||
val.Set(valMap)
|
||||
|
||||
// If we had errors, return those
|
||||
if len(errors) > 0 {
|
||||
return &Error{errors}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
valType := val.Type()
|
||||
valElemType := valType.Elem()
|
||||
realVal := reflect.New(valElemType)
|
||||
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
val.Set(realVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
dataValKind := dataVal.Kind()
|
||||
valType := val.Type()
|
||||
valElemType := valType.Elem()
|
||||
sliceType := reflect.SliceOf(valElemType)
|
||||
|
||||
// Check input type
|
||||
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
||||
// Accept empty map instead of array/slice in weakly typed mode
|
||||
if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
|
||||
val.Set(reflect.MakeSlice(sliceType, 0, 0))
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf(
|
||||
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
||||
}
|
||||
}
|
||||
|
||||
// Make a new slice to hold our result, same size as the original data.
|
||||
valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
||||
|
||||
// Accumulate any errors
|
||||
errors := make([]string, 0)
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
currentData := dataVal.Index(i).Interface()
|
||||
currentField := valSlice.Index(i)
|
||||
|
||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||
if err := d.decode(fieldName, currentData, currentField); err != nil {
|
||||
errors = appendErrors(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, set the value to the slice we built up
|
||||
val.Set(valSlice)
|
||||
|
||||
// If there were errors, we return those
|
||||
if len(errors) > 0 {
|
||||
return &Error{errors}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
|
||||
// If the type of the value to write to and the data match directly,
|
||||
// then we just set it directly instead of recursing into the structure.
|
||||
if dataVal.Type() == val.Type() {
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
dataValKind := dataVal.Kind()
|
||||
if dataValKind != reflect.Map {
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
|
||||
}
|
||||
|
||||
dataValType := dataVal.Type()
|
||||
if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
|
||||
return fmt.Errorf(
|
||||
"'%s' needs a map with string keys, has '%s' keys",
|
||||
name, dataValType.Key().Kind())
|
||||
}
|
||||
|
||||
dataValKeys := make(map[reflect.Value]struct{})
|
||||
dataValKeysUnused := make(map[interface{}]struct{})
|
||||
for _, dataValKey := range dataVal.MapKeys() {
|
||||
dataValKeys[dataValKey] = struct{}{}
|
||||
dataValKeysUnused[dataValKey.Interface()] = struct{}{}
|
||||
}
|
||||
|
||||
errors := make([]string, 0)
|
||||
|
||||
// This slice will keep track of all the structs we'll be decoding.
|
||||
// There can be more than one struct if there are embedded structs
|
||||
// that are squashed.
|
||||
structs := make([]reflect.Value, 1, 5)
|
||||
structs[0] = val
|
||||
|
||||
// Compile the list of all the fields that we're going to be decoding
|
||||
// from all the structs.
|
||||
fields := make(map[*reflect.StructField]reflect.Value)
|
||||
for len(structs) > 0 {
|
||||
structVal := structs[0]
|
||||
structs = structs[1:]
|
||||
|
||||
structType := structVal.Type()
|
||||
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
fieldType := structType.Field(i)
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
|
||||
// If "squash" is specified in the tag, we squash the field down.
|
            squash := false
            tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
            for _, tag := range tagParts[1:] {
                if tag == "squash" {
                    squash = true
                    break
                }
            }

            if squash {
                if fieldKind != reflect.Struct {
                    errors = appendErrors(errors,
                        fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
                } else {
                    structs = append(structs, val.FieldByName(fieldType.Name))
                }
                continue
            }

            // Normal struct field, store it away
            fields[&fieldType] = structVal.Field(i)
        }
    }

    for fieldType, field := range fields {
        fieldName := fieldType.Name

        tagValue := fieldType.Tag.Get(d.config.TagName)
        tagValue = strings.SplitN(tagValue, ",", 2)[0]
        if tagValue != "" {
            fieldName = tagValue
        }

        rawMapKey := reflect.ValueOf(fieldName)
        rawMapVal := dataVal.MapIndex(rawMapKey)
        if !rawMapVal.IsValid() {
            // Do a slower search by iterating over each key and
            // doing case-insensitive search.
            for dataValKey := range dataValKeys {
                mK, ok := dataValKey.Interface().(string)
                if !ok {
                    // Not a string key
                    continue
                }

                if strings.EqualFold(mK, fieldName) {
                    rawMapKey = dataValKey
                    rawMapVal = dataVal.MapIndex(dataValKey)
                    break
                }
            }

            if !rawMapVal.IsValid() {
                // There was no matching key in the map for the value in
                // the struct. Just ignore.
                continue
            }
        }

        // Delete the key we're using from the unused map so we stop tracking
        delete(dataValKeysUnused, rawMapKey.Interface())

        if !field.IsValid() {
            // This should never happen
            panic("field is not valid")
        }

        // If we can't set the field, then it is unexported or something,
        // and we just continue onwards.
        if !field.CanSet() {
            continue
        }

        // If the name is empty string, then we're at the root, and we
        // don't dot-join the fields.
        if name != "" {
            fieldName = fmt.Sprintf("%s.%s", name, fieldName)
        }

        if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
            errors = appendErrors(errors, err)
        }
    }

    if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
        keys := make([]string, 0, len(dataValKeysUnused))
        for rawKey := range dataValKeysUnused {
            keys = append(keys, rawKey.(string))
        }
        sort.Strings(keys)

        err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
        errors = appendErrors(errors, err)
    }

    if len(errors) > 0 {
        return &Error{errors}
    }

    // Add the unused keys to the list of unused keys if we're tracking metadata
    if d.config.Metadata != nil {
        for rawKey := range dataValKeysUnused {
            key := rawKey.(string)
            if name != "" {
                key = fmt.Sprintf("%s.%s", name, key)
            }

            d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
        }
    }

    return nil
}

func getKind(val reflect.Value) reflect.Kind {
    kind := val.Kind()

    switch {
    case kind >= reflect.Int && kind <= reflect.Int64:
        return reflect.Int
    case kind >= reflect.Uint && kind <= reflect.Uint64:
        return reflect.Uint
    case kind >= reflect.Float32 && kind <= reflect.Float64:
        return reflect.Float32
    default:
        return kind
    }
}

@@ -0,0 +1,217 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonpointer
// repository-desc  An implementation of JSON Pointer - Go language
//
// description      Main and unique file.
//
// created          25-02-2013

package gojsonpointer

import (
    "errors"
    "fmt"
    "reflect"
    "strconv"
    "strings"
)

const (
    const_empty_pointer     = ``
    const_pointer_separator = `/`

    const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
)

type implStruct struct {
    mode string // "SET" or "GET"

    inDocument interface{}

    setInValue interface{}

    getOutNode interface{}
    getOutKind reflect.Kind
    outError   error
}

func NewJsonPointer(jsonPointerString string) (JsonPointer, error) {

    var p JsonPointer
    err := p.parse(jsonPointerString)
    return p, err

}

type JsonPointer struct {
    referenceTokens []string
}

// "Constructor", parses the given string JSON pointer
func (p *JsonPointer) parse(jsonPointerString string) error {

    var err error

    if jsonPointerString != const_empty_pointer {
        if !strings.HasPrefix(jsonPointerString, const_pointer_separator) {
            err = errors.New(const_invalid_start)
        } else {
            referenceTokens := strings.Split(jsonPointerString, const_pointer_separator)
            for _, referenceToken := range referenceTokens[1:] {
                p.referenceTokens = append(p.referenceTokens, referenceToken)
            }
        }
    }

    return err
}

// Uses the pointer to retrieve a value from a JSON document
func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {

    is := &implStruct{mode: "GET", inDocument: document}
    p.implementation(is)
    return is.getOutNode, is.getOutKind, is.outError

}

// Uses the pointer to update a value from a JSON document
func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {

    is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
    p.implementation(is)
    return document, is.outError

}

// Both Get and Set functions use the same implementation to avoid code duplication
func (p *JsonPointer) implementation(i *implStruct) {

    kind := reflect.Invalid

    // Full document when empty
    if len(p.referenceTokens) == 0 {
        i.getOutNode = i.inDocument
        i.outError = nil
        i.getOutKind = kind
        i.outError = nil
        return
    }

    node := i.inDocument

    for ti, token := range p.referenceTokens {

        decodedToken := decodeReferenceToken(token)
        isLastToken := ti == len(p.referenceTokens)-1

        rValue := reflect.ValueOf(node)
        kind = rValue.Kind()

        switch kind {

        case reflect.Map:
            m := node.(map[string]interface{})
            if _, ok := m[decodedToken]; ok {
                node = m[decodedToken]
                if isLastToken && i.mode == "SET" {
                    m[decodedToken] = i.setInValue
                }
            } else {
                i.outError = errors.New(fmt.Sprintf("Object has no key '%s'", token))
                i.getOutKind = kind
                i.getOutNode = nil
                return
            }

        case reflect.Slice:
            s := node.([]interface{})
            tokenIndex, err := strconv.Atoi(token)
            if err != nil {
                i.outError = errors.New(fmt.Sprintf("Invalid array index '%s'", token))
                i.getOutKind = kind
                i.getOutNode = nil
                return
            }
            sLength := len(s)
            if tokenIndex < 0 || tokenIndex >= sLength {
                i.outError = errors.New(fmt.Sprintf("Out of bound array[0,%d] index '%d'", sLength, tokenIndex))
                i.getOutKind = kind
                i.getOutNode = nil
                return
            }

            node = s[tokenIndex]
            if isLastToken && i.mode == "SET" {
                s[tokenIndex] = i.setInValue
            }

        default:
            i.outError = errors.New(fmt.Sprintf("Invalid token reference '%s'", token))
            i.getOutKind = kind
            i.getOutNode = nil
            return
        }

    }

    rValue := reflect.ValueOf(node)
    kind = rValue.Kind()

    i.getOutNode = node
    i.getOutKind = kind
    i.outError = nil
}

// Pointer to string representation function
func (p *JsonPointer) String() string {

    if len(p.referenceTokens) == 0 {
        return const_empty_pointer
    }

    pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)

    return pointerString
}

// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
// ... and vice versa

const (
    const_encoded_reference_token_0 = `~0`
    const_encoded_reference_token_1 = `~1`
    const_decoded_reference_token_0 = `~`
    const_decoded_reference_token_1 = `/`
)

func decodeReferenceToken(token string) string {
    step1 := strings.Replace(token, const_encoded_reference_token_1, const_decoded_reference_token_1, -1)
    step2 := strings.Replace(step1, const_encoded_reference_token_0, const_decoded_reference_token_0, -1)
    return step2
}

func encodeReferenceToken(token string) string {
    step1 := strings.Replace(token, const_decoded_reference_token_1, const_encoded_reference_token_1, -1)
    step2 := strings.Replace(step1, const_decoded_reference_token_0, const_encoded_reference_token_0, -1)
    return step2
}
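
For orientation, a minimal sketch of how this pointer API is used against a decoded document. The document literal and the printed output are illustrative assumptions; NewJsonPointer and Get are the functions defined above, and the "~1" escape follows the encoding table at the end of the file.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/xeipuuv/gojsonpointer"
    )

    func main() {
        var doc map[string]interface{}
        // "b~1c" decodes to the key "b/c" via decodeReferenceToken.
        json.Unmarshal([]byte(`{"a": {"b/c": [10, 20]}}`), &doc)

        pointer, err := gojsonpointer.NewJsonPointer("/a/b~1c/1")
        if err != nil {
            panic(err)
        }

        value, kind, err := pointer.Get(doc)
        fmt.Println(value, kind, err) // 20 float64 <nil>
    }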

@@ -0,0 +1,141 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonreference
// repository-desc  An implementation of JSON Reference - Go language
//
// description      Main and unique file.
//
// created          26-02-2013

package gojsonreference

import (
    "errors"
    "github.com/xeipuuv/gojsonpointer"
    "net/url"
    "path/filepath"
    "runtime"
    "strings"
)

const (
    const_fragment_char = `#`
)

func NewJsonReference(jsonReferenceString string) (JsonReference, error) {

    var r JsonReference
    err := r.parse(jsonReferenceString)
    return r, err

}

type JsonReference struct {
    referenceUrl     *url.URL
    referencePointer gojsonpointer.JsonPointer

    HasFullUrl      bool
    HasUrlPathOnly  bool
    HasFragmentOnly bool
    HasFileScheme   bool
    HasFullFilePath bool
}

func (r *JsonReference) GetUrl() *url.URL {
    return r.referenceUrl
}

func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
    return &r.referencePointer
}

func (r *JsonReference) String() string {

    if r.referenceUrl != nil {
        return r.referenceUrl.String()
    }

    if r.HasFragmentOnly {
        return const_fragment_char + r.referencePointer.String()
    }

    return r.referencePointer.String()
}

func (r *JsonReference) IsCanonical() bool {
    return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
}

// "Constructor", parses the given string JSON reference
func (r *JsonReference) parse(jsonReferenceString string) (err error) {

    r.referenceUrl, err = url.Parse(jsonReferenceString)
    if err != nil {
        return
    }
    refUrl := r.referenceUrl

    if refUrl.Scheme != "" && refUrl.Host != "" {
        r.HasFullUrl = true
    } else {
        if refUrl.Path != "" {
            r.HasUrlPathOnly = true
        } else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
            r.HasFragmentOnly = true
        }
    }

    r.HasFileScheme = refUrl.Scheme == "file"
    if runtime.GOOS == "windows" {
        // on Windows, a file URL may have an extra leading slash, and if it
        // doesn't then its first component will be treated as the host by the
        // Go runtime
        if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
            r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
        } else {
            r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
        }
    } else {
        r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
    }

    // invalid json-pointer error means url has no json-pointer fragment. simply ignore error
    r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)

    return
}

// Creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
    childUrl := child.GetUrl()
    parentUrl := r.GetUrl()
    if childUrl == nil {
        return nil, errors.New("childUrl is nil!")
    }
    if parentUrl == nil {
        return nil, errors.New("parentUrl is nil!")
    }

    ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String())
    if err != nil {
        return nil, err
    }
    return &ref, err
}
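
A minimal sketch of Inherits in action; the URLs are illustrative assumptions, and resolution follows url.ResolveReference as shown above.

    package main

    import (
        "fmt"

        "github.com/xeipuuv/gojsonreference"
    )

    func main() {
        parent, _ := gojsonreference.NewJsonReference("http://example.com/schemas/base.json")
        child, _ := gojsonreference.NewJsonReference("address.json#/definitions/street")

        // The child is resolved relative to the parent's URL.
        resolved, err := parent.Inherits(child)
        if err != nil {
            panic(err)
        }
        fmt.Println(resolved.String())
        // http://example.com/schemas/address.json#/definitions/street
    }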

@@ -0,0 +1,242 @@
package gojsonschema

import (
    "fmt"
    "strings"
)

type (
    // RequiredError. ErrorDetails: property string
    RequiredError struct {
        ResultErrorFields
    }

    // InvalidTypeError. ErrorDetails: expected, given
    InvalidTypeError struct {
        ResultErrorFields
    }

    // NumberAnyOfError. ErrorDetails: -
    NumberAnyOfError struct {
        ResultErrorFields
    }

    // NumberOneOfError. ErrorDetails: -
    NumberOneOfError struct {
        ResultErrorFields
    }

    // NumberAllOfError. ErrorDetails: -
    NumberAllOfError struct {
        ResultErrorFields
    }

    // NumberNotError. ErrorDetails: -
    NumberNotError struct {
        ResultErrorFields
    }

    // MissingDependencyError. ErrorDetails: dependency
    MissingDependencyError struct {
        ResultErrorFields
    }

    // InternalError. ErrorDetails: error
    InternalError struct {
        ResultErrorFields
    }

    // EnumError. ErrorDetails: allowed
    EnumError struct {
        ResultErrorFields
    }

    // ArrayNoAdditionalItemsError. ErrorDetails: -
    ArrayNoAdditionalItemsError struct {
        ResultErrorFields
    }

    // ArrayMinItemsError. ErrorDetails: min
    ArrayMinItemsError struct {
        ResultErrorFields
    }

    // ArrayMaxItemsError. ErrorDetails: max
    ArrayMaxItemsError struct {
        ResultErrorFields
    }

    // ItemsMustBeUniqueError. ErrorDetails: type
    ItemsMustBeUniqueError struct {
        ResultErrorFields
    }

    // ArrayMinPropertiesError. ErrorDetails: min
    ArrayMinPropertiesError struct {
        ResultErrorFields
    }

    // ArrayMaxPropertiesError. ErrorDetails: max
    ArrayMaxPropertiesError struct {
        ResultErrorFields
    }

    // AdditionalPropertyNotAllowedError. ErrorDetails: property
    AdditionalPropertyNotAllowedError struct {
        ResultErrorFields
    }

    // InvalidPropertyPatternError. ErrorDetails: property, pattern
    InvalidPropertyPatternError struct {
        ResultErrorFields
    }

    // StringLengthGTEError. ErrorDetails: min
    StringLengthGTEError struct {
        ResultErrorFields
    }

    // StringLengthLTEError. ErrorDetails: max
    StringLengthLTEError struct {
        ResultErrorFields
    }

    // DoesNotMatchPatternError. ErrorDetails: pattern
    DoesNotMatchPatternError struct {
        ResultErrorFields
    }

    // DoesNotMatchFormatError. ErrorDetails: format
    DoesNotMatchFormatError struct {
        ResultErrorFields
    }

    // MultipleOfError. ErrorDetails: multiple
    MultipleOfError struct {
        ResultErrorFields
    }

    // NumberGTEError. ErrorDetails: min
    NumberGTEError struct {
        ResultErrorFields
    }

    // NumberGTError. ErrorDetails: min
    NumberGTError struct {
        ResultErrorFields
    }

    // NumberLTEError. ErrorDetails: max
    NumberLTEError struct {
        ResultErrorFields
    }

    // NumberLTError. ErrorDetails: max
    NumberLTError struct {
        ResultErrorFields
    }
)

// newError takes a ResultError type and sets the type, context, description, details, value, and field
func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) {
    var t string
    var d string
    switch err.(type) {
    case *RequiredError:
        t = "required"
        d = locale.Required()
    case *InvalidTypeError:
        t = "invalid_type"
        d = locale.InvalidType()
    case *NumberAnyOfError:
        t = "number_any_of"
        d = locale.NumberAnyOf()
    case *NumberOneOfError:
        t = "number_one_of"
        d = locale.NumberOneOf()
    case *NumberAllOfError:
        t = "number_all_of"
        d = locale.NumberAllOf()
    case *NumberNotError:
        t = "number_not"
        d = locale.NumberNot()
    case *MissingDependencyError:
        t = "missing_dependency"
        d = locale.MissingDependency()
    case *InternalError:
        t = "internal"
        d = locale.Internal()
    case *EnumError:
        t = "enum"
        d = locale.Enum()
    case *ArrayNoAdditionalItemsError:
        t = "array_no_additional_items"
        d = locale.ArrayNoAdditionalItems()
    case *ArrayMinItemsError:
        t = "array_min_items"
        d = locale.ArrayMinItems()
    case *ArrayMaxItemsError:
        t = "array_max_items"
        d = locale.ArrayMaxItems()
    case *ItemsMustBeUniqueError:
        t = "unique"
        d = locale.Unique()
    case *ArrayMinPropertiesError:
        t = "array_min_properties"
        d = locale.ArrayMinProperties()
    case *ArrayMaxPropertiesError:
        t = "array_max_properties"
        d = locale.ArrayMaxProperties()
    case *AdditionalPropertyNotAllowedError:
        t = "additional_property_not_allowed"
        d = locale.AdditionalPropertyNotAllowed()
    case *InvalidPropertyPatternError:
        t = "invalid_property_pattern"
        d = locale.InvalidPropertyPattern()
    case *StringLengthGTEError:
        t = "string_gte"
        d = locale.StringGTE()
    case *StringLengthLTEError:
        t = "string_lte"
        d = locale.StringLTE()
    case *DoesNotMatchPatternError:
        t = "pattern"
        d = locale.DoesNotMatchPattern()
    case *DoesNotMatchFormatError:
        t = "format"
        d = locale.DoesNotMatchFormat()
    case *MultipleOfError:
        t = "multiple_of"
        d = locale.MultipleOf()
    case *NumberGTEError:
        t = "number_gte"
        d = locale.NumberGTE()
    case *NumberGTError:
        t = "number_gt"
        d = locale.NumberGT()
    case *NumberLTEError:
        t = "number_lte"
        d = locale.NumberLTE()
    case *NumberLTError:
        t = "number_lt"
        d = locale.NumberLT()
    }

    err.SetType(t)
    err.SetContext(context)
    err.SetValue(value)
    err.SetDetails(details)
    details["field"] = err.Field()
    err.SetDescription(formatErrorDescription(d, details))
}

// formatErrorDescription takes a string in this format: %field% is required
// and converts it to a string with replacements. The fields come from
// the ErrorDetails struct and vary for each type of error.
func formatErrorDescription(s string, details ErrorDetails) string {
    for name, val := range details {
        s = strings.Replace(s, "%"+strings.ToLower(name)+"%", fmt.Sprintf("%v", val), -1)
    }

    return s
}
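
formatErrorDescription is unexported, so it cannot be called from outside the package; the following standalone sketch merely mirrors its %name% substitution so the template mechanism is easy to try in isolation. The template string is one of the DefaultLocale messages defined later in this diff.

    package main

    import (
        "fmt"
        "strings"
    )

    // format mirrors the unexported formatErrorDescription above: each
    // %key% placeholder is replaced by the matching detail value.
    func format(template string, details map[string]interface{}) string {
        for name, val := range details {
            template = strings.Replace(template, "%"+strings.ToLower(name)+"%", fmt.Sprintf("%v", val), -1)
        }
        return template
    }

    func main() {
        msg := format("String length must be greater than or equal to %min%",
            map[string]interface{}{"min": 3})
        fmt.Println(msg) // String length must be greater than or equal to 3
    }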

178  vendor/github.com/xeipuuv/gojsonschema/format_checkers.go  generated  vendored  Normal file

@@ -0,0 +1,178 @@
package gojsonschema

import (
    "net"
    "net/url"
    "reflect"
    "regexp"
    "strings"
    "time"
)

type (
    // FormatChecker is the interface all formatters added to FormatCheckerChain must implement
    FormatChecker interface {
        IsFormat(input string) bool
    }

    // FormatCheckerChain holds the formatters
    FormatCheckerChain struct {
        formatters map[string]FormatChecker
    }

    // EmailFormatChecker verifies email address formats
    EmailFormatChecker struct{}

    // IPV4FormatChecker verifies IP addresses in the ipv4 format
    IPV4FormatChecker struct{}

    // IPV6FormatChecker verifies IP addresses in the ipv6 format
    IPV6FormatChecker struct{}

    // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
    //
    // Valid formats:
    //     Partial Time: HH:MM:SS
    //     Full Date: YYYY-MM-DD
    //     Full Time: HH:MM:SSZ-07:00
    //     Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
    //
    // Where
    //     YYYY = 4DIGIT year
    //     MM   = 2DIGIT month ; 01-12
    //     DD   = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
    //     HH   = 2DIGIT hour ; 00-23
    //     MM   = 2DIGIT ; 00-59
    //     SS   = 2DIGIT ; 00-58, 00-60 based on leap second rules
    //     T    = Literal
    //     Z    = Literal
    //
    // Note: Nanoseconds are also supported in all formats
    //
    // http://tools.ietf.org/html/rfc3339#section-5.6
    DateTimeFormatChecker struct{}

    // URIFormatChecker validates a URI with a valid Scheme per RFC3986
    URIFormatChecker struct{}

    // HostnameFormatChecker validates a hostname is in the correct format
    HostnameFormatChecker struct{}

    // UUIDFormatChecker validates a UUID is in the correct format
    UUIDFormatChecker struct{}
)

var (
    // FormatCheckers holds the valid formatters, and is a public variable
    // so library users can add custom formatters
    FormatCheckers = FormatCheckerChain{
        formatters: map[string]FormatChecker{
            "date-time": DateTimeFormatChecker{},
            "hostname":  HostnameFormatChecker{},
            "email":     EmailFormatChecker{},
            "ipv4":      IPV4FormatChecker{},
            "ipv6":      IPV6FormatChecker{},
            "uri":       URIFormatChecker{},
            "uuid":      UUIDFormatChecker{},
        },
    }

    // Regex credit: https://github.com/asaskevich/govalidator
    rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")

    // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
    rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)

    rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
)

// Add adds a FormatChecker to the FormatCheckerChain
// The name used will be the value used for the format key in your json schema
func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
    c.formatters[name] = f

    return c
}

// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
    delete(c.formatters, name)

    return c
}

// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
func (c *FormatCheckerChain) Has(name string) bool {
    _, ok := c.formatters[name]

    return ok
}

// IsFormat will check an input against a FormatChecker with the given name
// to see if it is the correct format
func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
    f, ok := c.formatters[name]

    if !ok {
        return false
    }

    if !isKind(input, reflect.String) {
        return false
    }

    inputString := input.(string)

    return f.IsFormat(inputString)
}

func (f EmailFormatChecker) IsFormat(input string) bool {
    return rxEmail.MatchString(input)
}

// Credit: https://github.com/asaskevich/govalidator
func (f IPV4FormatChecker) IsFormat(input string) bool {
    ip := net.ParseIP(input)
    return ip != nil && strings.Contains(input, ".")
}

// Credit: https://github.com/asaskevich/govalidator
func (f IPV6FormatChecker) IsFormat(input string) bool {
    ip := net.ParseIP(input)
    return ip != nil && strings.Contains(input, ":")
}

func (f DateTimeFormatChecker) IsFormat(input string) bool {
    formats := []string{
        "15:04:05",
        "15:04:05Z07:00",
        "2006-01-02",
        time.RFC3339,
        time.RFC3339Nano,
    }

    for _, format := range formats {
        if _, err := time.Parse(format, input); err == nil {
            return true
        }
    }

    return false
}

func (f URIFormatChecker) IsFormat(input string) bool {
    u, err := url.Parse(input)
    if err != nil || u.Scheme == "" {
        return false
    }

    return true
}

func (f HostnameFormatChecker) IsFormat(input string) bool {
    return rxHostname.MatchString(input) && len(input) < 256
}

func (f UUIDFormatChecker) IsFormat(input string) bool {
    return rxUUID.MatchString(input)
}
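
Because FormatCheckers is a public variable, callers can register custom checkers. A minimal sketch, assuming a naive hypothetical "semver" checker (any type with an IsFormat(string) bool method satisfies FormatChecker):

    package main

    import (
        "strings"

        "github.com/xeipuuv/gojsonschema"
    )

    // SemverFormatChecker is a hypothetical, deliberately naive checker
    // used only to demonstrate registration.
    type SemverFormatChecker struct{}

    func (c SemverFormatChecker) IsFormat(input string) bool {
        return len(strings.Split(input, ".")) == 3
    }

    func main() {
        // Schemas loaded afterwards may use {"format": "semver"}.
        gojsonschema.FormatCheckers.Add("semver", SemverFormatChecker{})
    }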

@@ -0,0 +1,37 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Very simple log wrapper.
//                  Used for debugging/testing purposes.
//
// created          01-01-2015

package gojsonschema

import (
    "log"
)

const internalLogEnabled = false

func internalLog(format string, v ...interface{}) {
    log.Printf(format, v...)
}

@@ -0,0 +1,72 @@
// Copyright 2013 MongoDB, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           tolsen
// author-github    https://github.com/tolsen
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
//
// created          04-09-2013

package gojsonschema

import "bytes"

// jsonContext implements a persistent linked-list of strings
type jsonContext struct {
    head string
    tail *jsonContext
}

func newJsonContext(head string, tail *jsonContext) *jsonContext {
    return &jsonContext{head, tail}
}

// String displays the context in reverse.
// This plays well with the data structure's persistent nature with
// Cons and a json document's tree structure.
func (c *jsonContext) String(del ...string) string {
    byteArr := make([]byte, 0, c.stringLen())
    buf := bytes.NewBuffer(byteArr)
    c.writeStringToBuffer(buf, del)

    return buf.String()
}

func (c *jsonContext) stringLen() int {
    length := 0
    if c.tail != nil {
        length = c.tail.stringLen() + 1 // add 1 for "."
    }

    length += len(c.head)
    return length
}

func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
    if c.tail != nil {
        c.tail.writeStringToBuffer(buf, del)

        if len(del) > 0 {
            buf.WriteString(del[0])
        } else {
            buf.WriteString(".")
        }
    }

    buf.WriteString(c.head)
}
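
jsonContext is unexported, so the following is only a standalone sketch of the same persistent-list idea: new nodes share their tail, so extending a context never mutates the parent, which is what lets every subSchema carry its own path cheaply.

    package main

    import "fmt"

    // ctx mirrors the shape of jsonContext above.
    type ctx struct {
        head string
        tail *ctx
    }

    func (c *ctx) String() string {
        if c.tail == nil {
            return c.head
        }
        // Rendered in reverse, like jsonContext.String.
        return c.tail.String() + "." + c.head
    }

    func main() {
        root := &ctx{head: "(root)"}
        a := &ctx{head: "a", tail: root}
        b := &ctx{head: "b", tail: a}
        fmt.Println(b.String()) // (root).a.b
        fmt.Println(a.String()) // (root).a  -- unaffected by b
    }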

@@ -0,0 +1,286 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Different strategies to load JSON files.
//                  Includes References (file and HTTP), JSON strings and Go types.
//
// created          01-02-2015

package gojsonschema

import (
    "bytes"
    "encoding/json"
    "errors"
    "io"
    "io/ioutil"
    "net/http"
    "path/filepath"
    "runtime"
    "strings"

    "github.com/xeipuuv/gojsonreference"
)

// JSON loader interface

type JSONLoader interface {
    jsonSource() interface{}
    loadJSON() (interface{}, error)
    loadSchema() (*Schema, error)
}

// JSON Reference loader
// references are used to load JSONs from files and HTTP

type jsonReferenceLoader struct {
    source string
}

func (l *jsonReferenceLoader) jsonSource() interface{} {
    return l.source
}

func NewReferenceLoader(source string) *jsonReferenceLoader {
    return &jsonReferenceLoader{source: source}
}

func (l *jsonReferenceLoader) loadJSON() (interface{}, error) {

    var err error

    reference, err := gojsonreference.NewJsonReference(l.jsonSource().(string))
    if err != nil {
        return nil, err
    }

    refToUrl := reference
    refToUrl.GetUrl().Fragment = ""

    var document interface{}

    if reference.HasFileScheme {

        filename := strings.Replace(refToUrl.String(), "file://", "", -1)
        if runtime.GOOS == "windows" {
            // on Windows, a file URL may have an extra leading slash, use slashes
            // instead of backslashes, and have spaces escaped
            if strings.HasPrefix(filename, "/") {
                filename = filename[1:]
            }
            filename = filepath.FromSlash(filename)
            filename = strings.Replace(filename, "%20", " ", -1)
        }

        document, err = l.loadFromFile(filename)
        if err != nil {
            return nil, err
        }

    } else {

        document, err = l.loadFromHTTP(refToUrl.String())
        if err != nil {
            return nil, err
        }

    }

    return document, nil

}

func (l *jsonReferenceLoader) loadSchema() (*Schema, error) {

    var err error

    d := Schema{}
    d.pool = newSchemaPool()
    d.referencePool = newSchemaReferencePool()

    d.documentReference, err = gojsonreference.NewJsonReference(l.jsonSource().(string))
    if err != nil {
        return nil, err
    }

    spd, err := d.pool.GetDocument(d.documentReference)
    if err != nil {
        return nil, err
    }

    err = d.parse(spd.Document)
    if err != nil {
        return nil, err
    }

    return &d, nil

}

func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {

    resp, err := http.Get(address)
    if err != nil {
        return nil, err
    }

    // must return HTTP Status 200 OK
    if resp.StatusCode != http.StatusOK {
        return nil, errors.New(formatErrorDescription(Locale.httpBadStatus(), ErrorDetails{"status": resp.Status}))
    }

    bodyBuff, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }

    return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))

}

func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {

    bodyBuff, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, err
    }

    return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))

}

// JSON string loader

type jsonStringLoader struct {
    source string
}

func (l *jsonStringLoader) jsonSource() interface{} {
    return l.source
}

func NewStringLoader(source string) *jsonStringLoader {
    return &jsonStringLoader{source: source}
}

func (l *jsonStringLoader) loadJSON() (interface{}, error) {

    return decodeJsonUsingNumber(strings.NewReader(l.jsonSource().(string)))

}

func (l *jsonStringLoader) loadSchema() (*Schema, error) {

    var err error

    document, err := l.loadJSON()
    if err != nil {
        return nil, err
    }

    d := Schema{}
    d.pool = newSchemaPool()
    d.referencePool = newSchemaReferencePool()
    d.documentReference, err = gojsonreference.NewJsonReference("#")
    d.pool.SetStandaloneDocument(document)
    if err != nil {
        return nil, err
    }

    err = d.parse(document)
    if err != nil {
        return nil, err
    }

    return &d, nil

}

// JSON Go (types) loader
// used to load JSONs from the code as maps, interface{}, structs ...

type jsonGoLoader struct {
    source interface{}
}

func (l *jsonGoLoader) jsonSource() interface{} {
    return l.source
}

func NewGoLoader(source interface{}) *jsonGoLoader {
    return &jsonGoLoader{source: source}
}

func (l *jsonGoLoader) loadJSON() (interface{}, error) {

    // convert it to a compliant JSON first to avoid types "mismatches"

    jsonBytes, err := json.Marshal(l.jsonSource())
    if err != nil {
        return nil, err
    }

    return decodeJsonUsingNumber(bytes.NewReader(jsonBytes))

}

func (l *jsonGoLoader) loadSchema() (*Schema, error) {

    var err error

    document, err := l.loadJSON()
    if err != nil {
        return nil, err
    }

    d := Schema{}
    d.pool = newSchemaPool()
    d.referencePool = newSchemaReferencePool()
    d.documentReference, err = gojsonreference.NewJsonReference("#")
    d.pool.SetStandaloneDocument(document)
    if err != nil {
        return nil, err
    }

    err = d.parse(document)
    if err != nil {
        return nil, err
    }

    return &d, nil

}

func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {

    var document interface{}

    decoder := json.NewDecoder(r)
    decoder.UseNumber()

    err := decoder.Decode(&document)
    if err != nil {
        return nil, err
    }

    return document, nil

}
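
A minimal sketch of loading a schema from a string; the schema literal is an illustrative assumption, while NewStringLoader is defined above and NewSchema and SetRootSchemaName appear in the schema file later in this diff.

    package main

    import (
        "fmt"

        "github.com/xeipuuv/gojsonschema"
    )

    func main() {
        loader := gojsonschema.NewStringLoader(`{"type": "object", "properties": {"name": {"type": "string"}}}`)

        schema, err := gojsonschema.NewSchema(loader)
        if err != nil {
            panic(err)
        }
        schema.SetRootSchemaName("mySchema")
        fmt.Println("schema loaded:", schema != nil)
    }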

@@ -0,0 +1,275 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Contains const string and messages.
//
// created          01-01-2015

package gojsonschema

type (
    // locale is an interface for defining custom error strings
    locale interface {
        Required() string
        InvalidType() string
        NumberAnyOf() string
        NumberOneOf() string
        NumberAllOf() string
        NumberNot() string
        MissingDependency() string
        Internal() string
        Enum() string
        ArrayNoAdditionalItems() string
        ArrayMinItems() string
        ArrayMaxItems() string
        Unique() string
        ArrayMinProperties() string
        ArrayMaxProperties() string
        AdditionalPropertyNotAllowed() string
        InvalidPropertyPattern() string
        StringGTE() string
        StringLTE() string
        DoesNotMatchPattern() string
        DoesNotMatchFormat() string
        MultipleOf() string
        NumberGTE() string
        NumberGT() string
        NumberLTE() string
        NumberLT() string

        // Schema validations
        RegexPattern() string
        GreaterThanZero() string
        MustBeOfA() string
        MustBeOfAn() string
        CannotBeUsedWithout() string
        CannotBeGT() string
        MustBeOfType() string
        MustBeValidRegex() string
        MustBeValidFormat() string
        MustBeGTEZero() string
        KeyCannotBeGreaterThan() string
        KeyItemsMustBeOfType() string
        KeyItemsMustBeUnique() string
        ReferenceMustBeCanonical() string
        NotAValidType() string
        Duplicated() string
        httpBadStatus() string

        // ErrorFormat
        ErrorFormat() string
    }

    // DefaultLocale is the default locale for this package
    DefaultLocale struct{}
)

func (l DefaultLocale) Required() string {
    return `%property% is required`
}

func (l DefaultLocale) InvalidType() string {
    return `Invalid type. Expected: %expected%, given: %given%`
}

func (l DefaultLocale) NumberAnyOf() string {
    return `Must validate at least one schema (anyOf)`
}

func (l DefaultLocale) NumberOneOf() string {
    return `Must validate one and only one schema (oneOf)`
}

func (l DefaultLocale) NumberAllOf() string {
    return `Must validate all the schemas (allOf)`
}

func (l DefaultLocale) NumberNot() string {
    return `Must not validate the schema (not)`
}

func (l DefaultLocale) MissingDependency() string {
    return `Has a dependency on %dependency%`
}

func (l DefaultLocale) Internal() string {
    return `Internal Error %error%`
}

func (l DefaultLocale) Enum() string {
    return `%field% must be one of the following: %allowed%`
}

func (l DefaultLocale) ArrayNoAdditionalItems() string {
    return `No additional items allowed on array`
}

func (l DefaultLocale) ArrayMinItems() string {
    return `Array must have at least %min% items`
}

func (l DefaultLocale) ArrayMaxItems() string {
    return `Array must have at most %max% items`
}

func (l DefaultLocale) Unique() string {
    return `%type% items must be unique`
}

func (l DefaultLocale) ArrayMinProperties() string {
    return `Must have at least %min% properties`
}

func (l DefaultLocale) ArrayMaxProperties() string {
    return `Must have at most %max% properties`
}

func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
    return `Additional property %property% is not allowed`
}

func (l DefaultLocale) InvalidPropertyPattern() string {
    return `Property "%property%" does not match pattern %pattern%`
}

func (l DefaultLocale) StringGTE() string {
    return `String length must be greater than or equal to %min%`
}

func (l DefaultLocale) StringLTE() string {
    return `String length must be less than or equal to %max%`
}

func (l DefaultLocale) DoesNotMatchPattern() string {
    return `Does not match pattern '%pattern%'`
}

func (l DefaultLocale) DoesNotMatchFormat() string {
    return `Does not match format '%format%'`
}

func (l DefaultLocale) MultipleOf() string {
    return `Must be a multiple of %multiple%`
}

func (l DefaultLocale) NumberGTE() string {
    return `Must be greater than or equal to %min%`
}

func (l DefaultLocale) NumberGT() string {
    return `Must be greater than %min%`
}

func (l DefaultLocale) NumberLTE() string {
    return `Must be less than or equal to %max%`
}

func (l DefaultLocale) NumberLT() string {
    return `Must be less than %max%`
}

// Schema validators
func (l DefaultLocale) RegexPattern() string {
    return `Invalid regex pattern '%pattern%'`
}

func (l DefaultLocale) GreaterThanZero() string {
    return `%number% must be strictly greater than 0`
}

func (l DefaultLocale) MustBeOfA() string {
    return `%x% must be of a %y%`
}

func (l DefaultLocale) MustBeOfAn() string {
    return `%x% must be of an %y%`
}

func (l DefaultLocale) CannotBeUsedWithout() string {
    return `%x% cannot be used without %y%`
}

func (l DefaultLocale) CannotBeGT() string {
    return `%x% cannot be greater than %y%`
}

func (l DefaultLocale) MustBeOfType() string {
    return `%key% must be of type %type%`
}

func (l DefaultLocale) MustBeValidRegex() string {
    return `%key% must be a valid regex`
}

func (l DefaultLocale) MustBeValidFormat() string {
    return `%key% must be a valid format %given%`
}

func (l DefaultLocale) MustBeGTEZero() string {
    return `%key% must be greater than or equal to 0`
}

func (l DefaultLocale) KeyCannotBeGreaterThan() string {
    return `%key% cannot be greater than %y%`
}

func (l DefaultLocale) KeyItemsMustBeOfType() string {
    return `%key% items must be %type%`
}

func (l DefaultLocale) KeyItemsMustBeUnique() string {
    return `%key% items must be unique`
}

func (l DefaultLocale) ReferenceMustBeCanonical() string {
    return `Reference %reference% must be canonical`
}

func (l DefaultLocale) NotAValidType() string {
    return `%type% is not a valid type -- `
}

func (l DefaultLocale) Duplicated() string {
    return `%type% type is duplicated`
}

func (l DefaultLocale) httpBadStatus() string {
    return `Could not read schema from HTTP, response status is %status%`
}

// Replacement options: field, description, context, value
func (l DefaultLocale) ErrorFormat() string {
    return `%field%: %description%`
}

const (
    STRING_NUMBER                     = "number"
    STRING_ARRAY_OF_STRINGS           = "array of strings"
    STRING_ARRAY_OF_SCHEMAS           = "array of schemas"
    STRING_SCHEMA                     = "schema"
    STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
    STRING_PROPERTIES                 = "properties"
    STRING_DEPENDENCY                 = "dependency"
    STRING_PROPERTY                   = "property"
    STRING_UNDEFINED                  = "undefined"
    STRING_CONTEXT_ROOT               = "(root)"
    STRING_ROOT_SCHEMA_PROPERTY       = "(root)"
)

@@ -0,0 +1,171 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Result and ResultError implementations.
//
// created          01-01-2015

package gojsonschema

import (
    "fmt"
    "strings"
)

type (
    // ErrorDetails is a map of details specific to each error.
    // While the values will vary, every error will contain a "field" value
    ErrorDetails map[string]interface{}

    // ResultError is the interface that library errors must implement
    ResultError interface {
        Field() string
        SetType(string)
        Type() string
        SetContext(*jsonContext)
        Context() *jsonContext
        SetDescription(string)
        Description() string
        SetValue(interface{})
        Value() interface{}
        SetDetails(ErrorDetails)
        Details() ErrorDetails
    }

    // ResultErrorFields holds the fields for each ResultError implementation.
    // ResultErrorFields implements the ResultError interface, so custom errors
    // can be defined by just embedding this type
    ResultErrorFields struct {
        errorType   string       // A string with the type of error (i.e. invalid_type)
        context     *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
        description string       // A human readable error message
        value       interface{}  // Value given by the JSON file that is the source of the error
        details     ErrorDetails
    }

    Result struct {
        errors []ResultError
        // Scores how well the validation matched. Useful in generating
        // better error messages for anyOf and oneOf.
        score int
    }
)

// Field outputs the field name without the root context
// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
func (v *ResultErrorFields) Field() string {
    if p, ok := v.Details()["property"]; ok {
        if str, isString := p.(string); isString {
            return str
        }
    }

    return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
}

func (v *ResultErrorFields) SetType(errorType string) {
    v.errorType = errorType
}

func (v *ResultErrorFields) Type() string {
    return v.errorType
}

func (v *ResultErrorFields) SetContext(context *jsonContext) {
    v.context = context
}

func (v *ResultErrorFields) Context() *jsonContext {
    return v.context
}

func (v *ResultErrorFields) SetDescription(description string) {
    v.description = description
}

func (v *ResultErrorFields) Description() string {
    return v.description
}

func (v *ResultErrorFields) SetValue(value interface{}) {
    v.value = value
}

func (v *ResultErrorFields) Value() interface{} {
    return v.value
}

func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
    v.details = details
}

func (v *ResultErrorFields) Details() ErrorDetails {
    return v.details
}

func (v ResultErrorFields) String() string {
    // as a fallback, the value is displayed go style
    valueString := fmt.Sprintf("%v", v.value)

    // marshal the go value to json
    if v.value == nil {
        valueString = TYPE_NULL
    } else {
        if vs, err := marshalToJsonString(v.value); err == nil {
            if vs == nil {
                valueString = TYPE_NULL
            } else {
                valueString = *vs
            }
        }
    }

    return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
        "context":     v.context.String(),
        "description": v.description,
        "value":       valueString,
        "field":       v.Field(),
    })
}

func (v *Result) Valid() bool {
    return len(v.errors) == 0
}

func (v *Result) Errors() []ResultError {
    return v.errors
}

func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) {
    newError(err, context, value, Locale, details)
    v.errors = append(v.errors, err)
    v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
}

// Used to copy errors from a sub-schema to the main one
func (v *Result) mergeErrors(otherResult *Result) {
    v.errors = append(v.errors, otherResult.Errors()...)
    v.score += otherResult.score
}

func (v *Result) incrementScore() {
    v.score++
}
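
A minimal sketch of consuming a Result; the schema and document literals are illustrative assumptions, and Validate is part of gojsonschema's public API, though its definition is not included in this hunk.

    package main

    import (
        "fmt"

        "github.com/xeipuuv/gojsonschema"
    )

    func main() {
        schemaLoader := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
        documentLoader := gojsonschema.NewStringLoader(`{}`)

        result, err := gojsonschema.Validate(schemaLoader, documentLoader)
        if err != nil {
            panic(err)
        }
        if !result.Valid() {
            for _, e := range result.Errors() {
                fmt.Println(e.Description()) // e.g. "name is required"
            }
        }
    }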
|
|
@ -0,0 +1,908 @@
|
|||
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// author xeipuuv
|
||||
// author-github https://github.com/xeipuuv
|
||||
// author-mail xeipuuv@gmail.com
|
||||
//
|
||||
// repository-name gojsonschema
|
||||
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
|
||||
//
|
||||
// description Defines Schema, the main entry to every subSchema.
|
||||
// Contains the parsing logic and error checking.
|
||||
//
|
||||
// created 26-02-2013
|
||||
|
||||
package gojsonschema
|
||||
|
||||
import (
|
||||
// "encoding/json"
|
||||
"errors"
|
||||
"reflect"
|
||||
"regexp"
|
||||
|
||||
"github.com/xeipuuv/gojsonreference"
|
||||
)
|
||||
|
||||
var (
|
||||
// Locale is the default locale to use
|
||||
// Library users can overwrite with their own implementation
|
||||
Locale locale = DefaultLocale{}
|
||||
)
|
||||
|
||||
func NewSchema(l JSONLoader) (*Schema, error) {
|
||||
return l.loadSchema()
|
||||
}
|
||||
|
||||
type Schema struct {
|
||||
documentReference gojsonreference.JsonReference
|
||||
rootSchema *subSchema
|
||||
pool *schemaPool
|
||||
referencePool *schemaReferencePool
|
||||
}
|
||||
|
||||
func (d *Schema) parse(document interface{}) error {
|
||||
d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY}
|
||||
return d.parseSchema(document, d.rootSchema)
|
||||
}
|
||||
|
||||
func (d *Schema) SetRootSchemaName(name string) {
|
||||
d.rootSchema.property = name
|
||||
}
|
||||
|
||||
// Parses a subSchema
|
||||
//
|
||||
// Pretty long function ( sorry :) )... but pretty straight forward, repetitive and boring
|
||||
// Not much magic involved here, most of the job is to validate the key names and their values,
|
||||
// then the values are copied into subSchema struct
|
||||
//
|
||||
func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error {
|
||||
|
||||
if !isKind(documentNode, reflect.Map) {
|
||||
return errors.New(formatErrorDescription(
|
||||
Locale.InvalidType(),
|
||||
ErrorDetails{
|
||||
"expected": TYPE_OBJECT,
|
||||
"given": STRING_SCHEMA,
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
m := documentNode.(map[string]interface{})
|
||||
|
||||
if currentSchema == d.rootSchema {
|
||||
currentSchema.ref = &d.documentReference
|
||||
}
|
||||
|
||||
// $subSchema
|
||||
if existsMapKey(m, KEY_SCHEMA) {
|
||||
if !isKind(m[KEY_SCHEMA], reflect.String) {
|
||||
return errors.New(formatErrorDescription(
|
||||
Locale.InvalidType(),
|
||||
ErrorDetails{
|
||||
"expected": TYPE_STRING,
|
||||
"given": KEY_SCHEMA,
|
||||
},
|
||||
))
|
||||
}
|
||||
schemaRef := m[KEY_SCHEMA].(string)
|
||||
schemaReference, err := gojsonreference.NewJsonReference(schemaRef)
|
||||
currentSchema.subSchema = &schemaReference
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// $ref
|
||||
if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
|
||||
return errors.New(formatErrorDescription(
|
||||
Locale.InvalidType(),
|
||||
ErrorDetails{
|
||||
"expected": TYPE_STRING,
|
||||
"given": KEY_REF,
|
||||
},
|
||||
))
|
||||
}
|
||||
if k, ok := m[KEY_REF].(string); ok {
|
||||
|
||||
if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok {
|
||||
|
||||
currentSchema.refSchema = sch
|
||||
|
||||
} else {
|
||||
|
||||
var err error
|
||||
err = d.parseReference(documentNode, currentSchema, k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
	// definitions
	if existsMapKey(m, KEY_DEFINITIONS) {
		if isKind(m[KEY_DEFINITIONS], reflect.Map) {
			currentSchema.definitions = make(map[string]*subSchema)
			for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
				if isKind(dv, reflect.Map) {
					newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref}
					currentSchema.definitions[dk] = newSchema
					err := d.parseSchema(dv, newSchema)
					if err != nil {
						return err
					}
				} else {
					return errors.New(formatErrorDescription(
						Locale.InvalidType(),
						ErrorDetails{
							"expected": STRING_ARRAY_OF_SCHEMAS,
							"given":    KEY_DEFINITIONS,
						},
					))
				}
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.InvalidType(),
				ErrorDetails{
					"expected": STRING_ARRAY_OF_SCHEMAS,
					"given":    KEY_DEFINITIONS,
				},
			))
		}
	}

	// id
	if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
		return errors.New(formatErrorDescription(
			Locale.InvalidType(),
			ErrorDetails{
				"expected": TYPE_STRING,
				"given":    KEY_ID,
			},
		))
	}
	if k, ok := m[KEY_ID].(string); ok {
		currentSchema.id = &k
	}

	// title
	if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
		return errors.New(formatErrorDescription(
			Locale.InvalidType(),
			ErrorDetails{
				"expected": TYPE_STRING,
				"given":    KEY_TITLE,
			},
		))
	}
	if k, ok := m[KEY_TITLE].(string); ok {
		currentSchema.title = &k
	}

	// description
	if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) {
		return errors.New(formatErrorDescription(
			Locale.InvalidType(),
			ErrorDetails{
				"expected": TYPE_STRING,
				"given":    KEY_DESCRIPTION,
			},
		))
	}
	if k, ok := m[KEY_DESCRIPTION].(string); ok {
		currentSchema.description = &k
	}

	// type
	if existsMapKey(m, KEY_TYPE) {
		if isKind(m[KEY_TYPE], reflect.String) {
			if k, ok := m[KEY_TYPE].(string); ok {
				err := currentSchema.types.Add(k)
				if err != nil {
					return err
				}
			}
		} else {
			if isKind(m[KEY_TYPE], reflect.Slice) {
				arrayOfTypes := m[KEY_TYPE].([]interface{})
				for _, typeInArray := range arrayOfTypes {
					if reflect.ValueOf(typeInArray).Kind() != reflect.String {
						return errors.New(formatErrorDescription(
							Locale.InvalidType(),
							ErrorDetails{
								"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
								"given":    KEY_TYPE,
							},
						))
					} else {
						currentSchema.types.Add(typeInArray.(string))
					}
				}
			} else {
				return errors.New(formatErrorDescription(
					Locale.InvalidType(),
					ErrorDetails{
						"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
						"given":    KEY_TYPE,
					},
				))
			}
		}
	}

	// properties
	if existsMapKey(m, KEY_PROPERTIES) {
		err := d.parseProperties(m[KEY_PROPERTIES], currentSchema)
		if err != nil {
			return err
		}
	}

	// additionalProperties
	if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) {
		if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) {
			currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool)
		} else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) {
			newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref}
			currentSchema.additionalProperties = newSchema
			err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema)
			if err != nil {
				return err
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.InvalidType(),
				ErrorDetails{
					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
					"given":    KEY_ADDITIONAL_PROPERTIES,
				},
			))
		}
	}

	// patternProperties
	if existsMapKey(m, KEY_PATTERN_PROPERTIES) {
		if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) {
			patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{})
			if len(patternPropertiesMap) > 0 {
				currentSchema.patternProperties = make(map[string]*subSchema)
				for k, v := range patternPropertiesMap {
					_, err := regexp.MatchString(k, "")
					if err != nil {
						return errors.New(formatErrorDescription(
							Locale.RegexPattern(),
							ErrorDetails{"pattern": k},
						))
					}
					newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
					err = d.parseSchema(v, newSchema)
					if err != nil {
						return err
					}
					currentSchema.patternProperties[k] = newSchema
				}
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.InvalidType(),
				ErrorDetails{
					"expected": STRING_SCHEMA,
					"given":    KEY_PATTERN_PROPERTIES,
				},
			))
		}
	}

	// dependencies
	if existsMapKey(m, KEY_DEPENDENCIES) {
		err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
		if err != nil {
			return err
		}
	}

	// items
	if existsMapKey(m, KEY_ITEMS) {
		if isKind(m[KEY_ITEMS], reflect.Slice) {
			for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
				if isKind(itemElement, reflect.Map) {
					newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
					newSchema.ref = currentSchema.ref
					currentSchema.AddItemsChild(newSchema)
					err := d.parseSchema(itemElement, newSchema)
					if err != nil {
						return err
					}
				} else {
					return errors.New(formatErrorDescription(
						Locale.InvalidType(),
						ErrorDetails{
							"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
							"given":    KEY_ITEMS,
						},
					))
				}
				currentSchema.itemsChildrenIsSingleSchema = false
			}
		} else if isKind(m[KEY_ITEMS], reflect.Map) {
			newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
			newSchema.ref = currentSchema.ref
			currentSchema.AddItemsChild(newSchema)
			err := d.parseSchema(m[KEY_ITEMS], newSchema)
			if err != nil {
				return err
			}
			currentSchema.itemsChildrenIsSingleSchema = true
		} else {
			return errors.New(formatErrorDescription(
				Locale.InvalidType(),
				ErrorDetails{
					"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
					"given":    KEY_ITEMS,
				},
			))
		}
	}

	// additionalItems
	if existsMapKey(m, KEY_ADDITIONAL_ITEMS) {
		if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) {
			currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool)
		} else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) {
			newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref}
			currentSchema.additionalItems = newSchema
			err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema)
			if err != nil {
				return err
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.InvalidType(),
				ErrorDetails{
					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
					"given":    KEY_ADDITIONAL_ITEMS,
				},
			))
		}
	}

	// validation : number / integer

	if existsMapKey(m, KEY_MULTIPLE_OF) {
		multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF])
		if multipleOfValue == nil {
			return errors.New(formatErrorDescription(
				Locale.InvalidType(),
				ErrorDetails{
					"expected": STRING_NUMBER,
					"given":    KEY_MULTIPLE_OF,
				},
			))
		}
		if *multipleOfValue <= 0 {
			return errors.New(formatErrorDescription(
				Locale.GreaterThanZero(),
				ErrorDetails{"number": KEY_MULTIPLE_OF},
			))
		}
		currentSchema.multipleOf = multipleOfValue
	}

	if existsMapKey(m, KEY_MINIMUM) {
		minimumValue := mustBeNumber(m[KEY_MINIMUM])
		if minimumValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfA(),
				ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER},
			))
		}
		currentSchema.minimum = minimumValue
	}

	if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
		if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
			if currentSchema.minimum == nil {
				return errors.New(formatErrorDescription(
					Locale.CannotBeUsedWithout(),
					ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
				))
			}
			exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool)
			currentSchema.exclusiveMinimum = exclusiveMinimumValue
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfA(),
				ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN},
			))
		}
	}

	if existsMapKey(m, KEY_MAXIMUM) {
		maximumValue := mustBeNumber(m[KEY_MAXIMUM])
		if maximumValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfA(),
				ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER},
			))
		}
		currentSchema.maximum = maximumValue
	}

	if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) {
		if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
			if currentSchema.maximum == nil {
				return errors.New(formatErrorDescription(
					Locale.CannotBeUsedWithout(),
					ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
				))
			}
			exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool)
			currentSchema.exclusiveMaximum = exclusiveMaximumValue
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfA(),
				ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": TYPE_BOOLEAN},
			))
		}
	}

	if currentSchema.minimum != nil && currentSchema.maximum != nil {
		if *currentSchema.minimum > *currentSchema.maximum {
			return errors.New(formatErrorDescription(
				Locale.CannotBeGT(),
				ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM},
			))
		}
	}

	// validation : string

	if existsMapKey(m, KEY_MIN_LENGTH) {
		minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH])
		if minLengthIntegerValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER},
			))
		}
		if *minLengthIntegerValue < 0 {
			return errors.New(formatErrorDescription(
				Locale.MustBeGTEZero(),
				ErrorDetails{"key": KEY_MIN_LENGTH},
			))
		}
		currentSchema.minLength = minLengthIntegerValue
	}

	if existsMapKey(m, KEY_MAX_LENGTH) {
		maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH])
		if maxLengthIntegerValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER},
			))
		}
		if *maxLengthIntegerValue < 0 {
			return errors.New(formatErrorDescription(
				Locale.MustBeGTEZero(),
				ErrorDetails{"key": KEY_MAX_LENGTH},
			))
		}
		currentSchema.maxLength = maxLengthIntegerValue
	}

	if currentSchema.minLength != nil && currentSchema.maxLength != nil {
		if *currentSchema.minLength > *currentSchema.maxLength {
			return errors.New(formatErrorDescription(
				Locale.CannotBeGT(),
				ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH},
			))
		}
	}

	if existsMapKey(m, KEY_PATTERN) {
		if isKind(m[KEY_PATTERN], reflect.String) {
			regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string))
			if err != nil {
				return errors.New(formatErrorDescription(
					Locale.MustBeValidRegex(),
					ErrorDetails{"key": KEY_PATTERN},
				))
			}
			currentSchema.pattern = regexpObject
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfA(),
				ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING},
			))
		}
	}

	if existsMapKey(m, KEY_FORMAT) {
		formatString, ok := m[KEY_FORMAT].(string)
		if ok && FormatCheckers.Has(formatString) {
			currentSchema.format = formatString
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeValidFormat(),
				ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]},
			))
		}
	}

	// validation : object

	if existsMapKey(m, KEY_MIN_PROPERTIES) {
		minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES])
		if minPropertiesIntegerValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER},
			))
		}
		if *minPropertiesIntegerValue < 0 {
			return errors.New(formatErrorDescription(
				Locale.MustBeGTEZero(),
				ErrorDetails{"key": KEY_MIN_PROPERTIES},
			))
		}
		currentSchema.minProperties = minPropertiesIntegerValue
	}

	if existsMapKey(m, KEY_MAX_PROPERTIES) {
		maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES])
		if maxPropertiesIntegerValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER},
			))
		}
		if *maxPropertiesIntegerValue < 0 {
			return errors.New(formatErrorDescription(
				Locale.MustBeGTEZero(),
				ErrorDetails{"key": KEY_MAX_PROPERTIES},
			))
		}
		currentSchema.maxProperties = maxPropertiesIntegerValue
	}

	if currentSchema.minProperties != nil && currentSchema.maxProperties != nil {
		if *currentSchema.minProperties > *currentSchema.maxProperties {
			return errors.New(formatErrorDescription(
				Locale.KeyCannotBeGreaterThan(),
				ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES},
			))
		}
	}

	if existsMapKey(m, KEY_REQUIRED) {
		if isKind(m[KEY_REQUIRED], reflect.Slice) {
			requiredValues := m[KEY_REQUIRED].([]interface{})
			for _, requiredValue := range requiredValues {
				if isKind(requiredValue, reflect.String) {
					err := currentSchema.AddRequired(requiredValue.(string))
					if err != nil {
						return err
					}
				} else {
					return errors.New(formatErrorDescription(
						Locale.KeyItemsMustBeOfType(),
						ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING},
					))
				}
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY},
			))
		}
	}

	// validation : array

	if existsMapKey(m, KEY_MIN_ITEMS) {
		minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS])
		if minItemsIntegerValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER},
			))
		}
		if *minItemsIntegerValue < 0 {
			return errors.New(formatErrorDescription(
				Locale.MustBeGTEZero(),
				ErrorDetails{"key": KEY_MIN_ITEMS},
			))
		}
		currentSchema.minItems = minItemsIntegerValue
	}

	if existsMapKey(m, KEY_MAX_ITEMS) {
		maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS])
		if maxItemsIntegerValue == nil {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER},
			))
		}
		if *maxItemsIntegerValue < 0 {
			return errors.New(formatErrorDescription(
				Locale.MustBeGTEZero(),
				ErrorDetails{"key": KEY_MAX_ITEMS},
			))
		}
		currentSchema.maxItems = maxItemsIntegerValue
	}

	if existsMapKey(m, KEY_UNIQUE_ITEMS) {
		if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) {
			currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool)
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfA(),
				ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN},
			))
		}
	}

	// validation : all

	if existsMapKey(m, KEY_ENUM) {
		if isKind(m[KEY_ENUM], reflect.Slice) {
			for _, v := range m[KEY_ENUM].([]interface{}) {
				err := currentSchema.AddEnum(v)
				if err != nil {
					return err
				}
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY},
			))
		}
	}

	// validation : subSchema

	if existsMapKey(m, KEY_ONE_OF) {
		if isKind(m[KEY_ONE_OF], reflect.Slice) {
			for _, v := range m[KEY_ONE_OF].([]interface{}) {
				newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref}
				currentSchema.AddOneOf(newSchema)
				err := d.parseSchema(v, newSchema)
				if err != nil {
					return err
				}
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY},
			))
		}
	}

	if existsMapKey(m, KEY_ANY_OF) {
		if isKind(m[KEY_ANY_OF], reflect.Slice) {
			for _, v := range m[KEY_ANY_OF].([]interface{}) {
				newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref}
				currentSchema.AddAnyOf(newSchema)
				err := d.parseSchema(v, newSchema)
				if err != nil {
					return err
				}
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
			))
		}
	}

	if existsMapKey(m, KEY_ALL_OF) {
		if isKind(m[KEY_ALL_OF], reflect.Slice) {
			for _, v := range m[KEY_ALL_OF].([]interface{}) {
				newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref}
				currentSchema.AddAllOf(newSchema)
				err := d.parseSchema(v, newSchema)
				if err != nil {
					return err
				}
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_ALL_OF, "y": TYPE_ARRAY},
			))
		}
	}

	if existsMapKey(m, KEY_NOT) {
		if isKind(m[KEY_NOT], reflect.Map) {
			newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
			currentSchema.SetNot(newSchema)
			err := d.parseSchema(m[KEY_NOT], newSchema)
			if err != nil {
				return err
			}
		} else {
			return errors.New(formatErrorDescription(
				Locale.MustBeOfAn(),
				ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT},
			))
		}
	}

	return nil
}
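
// Illustrative sketch, not part of the vendored file: parseSchema walks a
// generic map produced by encoding/json, so a caller only ever reaches it
// through a loader and NewSchema. Assuming the package's NewStringLoader
// constructor (defined with the other JSONLoaders, not shown in this hunk):
//
//	loader := NewStringLoader(`{"type": "object", "properties": {"name": {"type": "string"}}}`)
//	schema, err := NewSchema(loader) // parseSchema runs on the root document here
//	if err != nil {
//		// a malformed keyword, e.g. "minLength": -1, surfaces as this error
//	}
//	_ = schema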

func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) (e error) {

	jsonReference, err := gojsonreference.NewJsonReference(reference)
	if err != nil {
		return err
	}

	standaloneDocument := d.pool.GetStandaloneDocument()

	if jsonReference.HasFullUrl {
		currentSchema.ref = &jsonReference
	} else {
		inheritedReference, err := currentSchema.ref.Inherits(jsonReference)
		if err != nil {
			return err
		}
		currentSchema.ref = inheritedReference
	}

	jsonPointer := currentSchema.ref.GetPointer()

	var refdDocumentNode interface{}

	if standaloneDocument != nil {
		var err error
		refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument)
		if err != nil {
			return err
		}
	} else {
		dsp, err := d.pool.GetDocument(*currentSchema.ref)
		if err != nil {
			return err
		}
		refdDocumentNode, _, err = jsonPointer.Get(dsp.Document)
		if err != nil {
			return err
		}
	}

	if !isKind(refdDocumentNode, reflect.Map) {
		return errors.New(formatErrorDescription(
			Locale.MustBeOfType(),
			ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
		))
	}

	// parse the referenced subSchema and hand it back to the caller via refSchema
	newSchemaDocument := refdDocumentNode.(map[string]interface{})

	newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
	d.referencePool.Add(currentSchema.ref.String()+reference, newSchema)

	err = d.parseSchema(newSchemaDocument, newSchema)
	if err != nil {
		return err
	}

	currentSchema.refSchema = newSchema

	return nil
}

func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error {

	if !isKind(documentNode, reflect.Map) {
		return errors.New(formatErrorDescription(
			Locale.MustBeOfType(),
			ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT},
		))
	}

	m := documentNode.(map[string]interface{})
	for k := range m {
		schemaProperty := k
		newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref}
		currentSchema.AddPropertiesChild(newSchema)
		err := d.parseSchema(m[k], newSchema)
		if err != nil {
			return err
		}
	}

	return nil
}

func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error {

	if !isKind(documentNode, reflect.Map) {
		return errors.New(formatErrorDescription(
			Locale.MustBeOfType(),
			ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT},
		))
	}

	m := documentNode.(map[string]interface{})
	currentSchema.dependencies = make(map[string]interface{})

	for k := range m {
		switch reflect.ValueOf(m[k]).Kind() {

		case reflect.Slice:
			values := m[k].([]interface{})
			var valuesToRegister []string

			for _, value := range values {
				if !isKind(value, reflect.String) {
					return errors.New(formatErrorDescription(
						Locale.MustBeOfType(),
						ErrorDetails{
							"key":  STRING_DEPENDENCY,
							"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
						},
					))
				} else {
					valuesToRegister = append(valuesToRegister, value.(string))
				}
				currentSchema.dependencies[k] = valuesToRegister
			}

		case reflect.Map:
			depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
			err := d.parseSchema(m[k], depSchema)
			if err != nil {
				return err
			}
			currentSchema.dependencies[k] = depSchema

		default:
			return errors.New(formatErrorDescription(
				Locale.MustBeOfType(),
				ErrorDetails{
					"key":  STRING_DEPENDENCY,
					"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
				},
			))
		}
	}

	return nil
}
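
// Illustrative note, not upstream code: parseDependencies accepts the two
// forms draft v4 allows and stores both in currentSchema.dependencies keyed
// by the trigger property, which validateSchema later dispatches on by type:
//
//	{"dependencies": {"credit_card": ["billing_address"]}}   // kept as []string
//	{"dependencies": {"credit_card": {"required": ["cvv"]}}} // kept as *subSchema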

@ -0,0 +1,107 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Defines resources pooling.
//                  Eases referencing and avoids downloading the same resource twice.
//
// created          26-02-2013

package gojsonschema

import (
	"errors"

	"github.com/xeipuuv/gojsonreference"
)

type schemaPoolDocument struct {
	Document interface{}
}

type schemaPool struct {
	schemaPoolDocuments map[string]*schemaPoolDocument
	standaloneDocument  interface{}
}

func newSchemaPool() *schemaPool {

	p := &schemaPool{}
	p.schemaPoolDocuments = make(map[string]*schemaPoolDocument)
	p.standaloneDocument = nil

	return p
}

func (p *schemaPool) SetStandaloneDocument(document interface{}) {
	p.standaloneDocument = document
}

func (p *schemaPool) GetStandaloneDocument() (document interface{}) {
	return p.standaloneDocument
}

func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {

	if internalLogEnabled {
		internalLog("Get Document ( %s )", reference.String())
	}

	// It is not possible to load anything that is not canonical...
	if !reference.IsCanonical() {
		return nil, errors.New(formatErrorDescription(
			Locale.ReferenceMustBeCanonical(),
			ErrorDetails{"reference": reference},
		))
	}

	refToUrl := reference
	refToUrl.GetUrl().Fragment = ""

	var spd *schemaPoolDocument

	// Try to find the requested document in the pool
	for k := range p.schemaPoolDocuments {
		if k == refToUrl.String() {
			spd = p.schemaPoolDocuments[k]
		}
	}

	if spd != nil {
		if internalLogEnabled {
			internalLog(" From pool")
		}
		return spd, nil
	}

	jsonReferenceLoader := NewReferenceLoader(reference.String())
	document, err := jsonReferenceLoader.loadJSON()
	if err != nil {
		return nil, err
	}

	spd = &schemaPoolDocument{Document: document}
	// add the document to the pool for potential later use
	p.schemaPoolDocuments[refToUrl.String()] = spd

	return spd, nil
}
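
// Illustrative sketch, not upstream code: the pool memoizes remote schemas by
// their fragment-less URL, so repeated $ref resolution fetches each document
// once. Assuming a reachable, canonical reference (example.com is
// hypothetical):
//
//	pool := newSchemaPool()
//	ref, _ := gojsonreference.NewJsonReference("http://example.com/schema.json")
//	first, err := pool.GetDocument(ref)   // loaded via NewReferenceLoader
//	second, err2 := pool.GetDocument(ref) // served from schemaPoolDocuments
//	_, _, _, _ = first, err, second, err2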

67 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go generated vendored Normal file
@ -0,0 +1,67 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Pool of referenced schemas.
//
// created          25-06-2013

package gojsonschema

import (
	"fmt"
)

type schemaReferencePool struct {
	documents map[string]*subSchema
}

func newSchemaReferencePool() *schemaReferencePool {

	p := &schemaReferencePool{}
	p.documents = make(map[string]*subSchema)

	return p
}

func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) {

	if internalLogEnabled {
		internalLog(fmt.Sprintf("Schema Reference ( %s )", ref))
	}

	if sch, ok := p.documents[ref]; ok {
		if internalLogEnabled {
			internalLog(" From pool")
		}
		return sch, true
	}

	return nil, false
}

func (p *schemaReferencePool) Add(ref string, sch *subSchema) {

	if internalLogEnabled {
		internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref))
	}

	p.documents[ref] = sch
}
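
// Illustrative sketch, not upstream code: the reference pool is a plain map
// keyed by the absolute reference string. Because parseReference calls Add
// before recursing into parseSchema, a self-referencing schema finds itself
// in the pool instead of recursing forever:
//
//	pool := newSchemaReferencePool()
//	sch := &subSchema{property: KEY_REF}
//	pool.Add("http://example.com/schema.json#/definitions/a", sch) // URL is hypothetical
//	if got, ok := pool.Get("http://example.com/schema.json#/definitions/a"); ok {
//		_ = got // same *subSchema that was added
//	}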

@ -0,0 +1,83 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Helper structure to handle schema types, and the combination of them.
//
// created          28-02-2013

package gojsonschema

import (
	"errors"
	"fmt"
	"strings"
)

type jsonSchemaType struct {
	types []string
}

// IsTyped reports whether the schema declares at least one type.
// An untyped schema needs no type validation.
func (t *jsonSchemaType) IsTyped() bool {
	return len(t.types) > 0
}

func (t *jsonSchemaType) Add(etype string) error {

	if !isStringInSlice(JSON_TYPES, etype) {
		return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"type": etype}))
	}

	if t.Contains(etype) {
		return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype}))
	}

	t.types = append(t.types, etype)

	return nil
}

func (t *jsonSchemaType) Contains(etype string) bool {

	for _, v := range t.types {
		if v == etype {
			return true
		}
	}

	return false
}

func (t *jsonSchemaType) String() string {

	if len(t.types) == 0 {
		return STRING_UNDEFINED // should never happen
	}

	// Displayed as a list [type1,type2,...]
	if len(t.types) > 1 {
		return fmt.Sprintf("[%s]", strings.Join(t.types, ","))
	}

	// Only one type: name only
	return t.types[0]
}
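
// Illustrative sketch, not upstream code: jsonSchemaType rejects unknown and
// duplicate type names and renders itself for error messages:
//
//	var t jsonSchemaType
//	_ = t.Add(TYPE_STRING)      // ok
//	bad := t.Add("str")         // error: not a valid type
//	dup := t.Add(TYPE_STRING)   // error: duplicated
//	_ = t.Contains(TYPE_STRING) // true
//	_ = t.String()              // "string"; several types render as "[a,b]"
//	_, _ = bad, dup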

@ -0,0 +1,227 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Defines the structure of a sub-subSchema.
//                  A sub-subSchema can contain other sub-schemas.
//
// created          27-02-2013

package gojsonschema

import (
	"errors"
	"regexp"
	"strings"

	"github.com/xeipuuv/gojsonreference"
)

const (
	KEY_SCHEMA                = "$subSchema"
	KEY_ID                    = "$id"
	KEY_REF                   = "$ref"
	KEY_TITLE                 = "title"
	KEY_DESCRIPTION           = "description"
	KEY_TYPE                  = "type"
	KEY_ITEMS                 = "items"
	KEY_ADDITIONAL_ITEMS      = "additionalItems"
	KEY_PROPERTIES            = "properties"
	KEY_PATTERN_PROPERTIES    = "patternProperties"
	KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
	KEY_DEFINITIONS           = "definitions"
	KEY_MULTIPLE_OF           = "multipleOf"
	KEY_MINIMUM               = "minimum"
	KEY_MAXIMUM               = "maximum"
	KEY_EXCLUSIVE_MINIMUM     = "exclusiveMinimum"
	KEY_EXCLUSIVE_MAXIMUM     = "exclusiveMaximum"
	KEY_MIN_LENGTH            = "minLength"
	KEY_MAX_LENGTH            = "maxLength"
	KEY_PATTERN               = "pattern"
	KEY_FORMAT                = "format"
	KEY_MIN_PROPERTIES        = "minProperties"
	KEY_MAX_PROPERTIES        = "maxProperties"
	KEY_DEPENDENCIES          = "dependencies"
	KEY_REQUIRED              = "required"
	KEY_MIN_ITEMS             = "minItems"
	KEY_MAX_ITEMS             = "maxItems"
	KEY_UNIQUE_ITEMS          = "uniqueItems"
	KEY_ENUM                  = "enum"
	KEY_ONE_OF                = "oneOf"
	KEY_ANY_OF                = "anyOf"
	KEY_ALL_OF                = "allOf"
	KEY_NOT                   = "not"
)

type subSchema struct {

	// basic subSchema meta properties
	id          *string
	title       *string
	description *string

	property string

	// Types associated with the subSchema
	types jsonSchemaType

	// Reference url
	ref *gojsonreference.JsonReference
	// Schema referenced
	refSchema *subSchema
	// Json reference
	subSchema *gojsonreference.JsonReference

	// hierarchy
	parent                      *subSchema
	definitions                 map[string]*subSchema
	definitionsChildren         []*subSchema
	itemsChildren               []*subSchema
	itemsChildrenIsSingleSchema bool
	propertiesChildren          []*subSchema

	// validation : number / integer
	multipleOf       *float64
	maximum          *float64
	exclusiveMaximum bool
	minimum          *float64
	exclusiveMinimum bool

	// validation : string
	minLength *int
	maxLength *int
	pattern   *regexp.Regexp
	format    string

	// validation : object
	minProperties *int
	maxProperties *int
	required      []string

	dependencies         map[string]interface{}
	additionalProperties interface{}
	patternProperties    map[string]*subSchema

	// validation : array
	minItems    *int
	maxItems    *int
	uniqueItems bool

	additionalItems interface{}

	// validation : all
	enum []string

	// validation : subSchema
	oneOf []*subSchema
	anyOf []*subSchema
	allOf []*subSchema
	not   *subSchema
}

func (s *subSchema) AddEnum(i interface{}) error {

	is, err := marshalToJsonString(i)
	if err != nil {
		return err
	}

	if isStringInSlice(s.enum, *is) {
		return errors.New(formatErrorDescription(
			Locale.KeyItemsMustBeUnique(),
			ErrorDetails{"key": KEY_ENUM},
		))
	}

	s.enum = append(s.enum, *is)

	return nil
}

func (s *subSchema) ContainsEnum(i interface{}) (bool, error) {

	is, err := marshalToJsonString(i)
	if err != nil {
		return false, err
	}

	return isStringInSlice(s.enum, *is), nil
}

func (s *subSchema) AddOneOf(subSchema *subSchema) {
	s.oneOf = append(s.oneOf, subSchema)
}

func (s *subSchema) AddAllOf(subSchema *subSchema) {
	s.allOf = append(s.allOf, subSchema)
}

func (s *subSchema) AddAnyOf(subSchema *subSchema) {
	s.anyOf = append(s.anyOf, subSchema)
}

func (s *subSchema) SetNot(subSchema *subSchema) {
	s.not = subSchema
}

func (s *subSchema) AddRequired(value string) error {

	if isStringInSlice(s.required, value) {
		return errors.New(formatErrorDescription(
			Locale.KeyItemsMustBeUnique(),
			ErrorDetails{"key": KEY_REQUIRED},
		))
	}

	s.required = append(s.required, value)

	return nil
}

func (s *subSchema) AddDefinitionChild(child *subSchema) {
	s.definitionsChildren = append(s.definitionsChildren, child)
}

func (s *subSchema) AddItemsChild(child *subSchema) {
	s.itemsChildren = append(s.itemsChildren, child)
}

func (s *subSchema) AddPropertiesChild(child *subSchema) {
	s.propertiesChildren = append(s.propertiesChildren, child)
}

func (s *subSchema) PatternPropertiesString() string {

	if len(s.patternProperties) == 0 {
		return STRING_UNDEFINED // should never happen
	}

	patternPropertiesKeySlice := []string{}
	for pk := range s.patternProperties {
		patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`)
	}

	if len(patternPropertiesKeySlice) == 1 {
		return patternPropertiesKeySlice[0]
	}

	return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]"
}
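
// Illustrative sketch, not upstream code: enum membership is tracked by the
// JSON encoding of each value (via marshalToJsonString), so the number 1 and
// the string "1" stay distinct while true duplicates are rejected:
//
//	s := &subSchema{}
//	_ = s.AddEnum(1)    // stored as `1`
//	_ = s.AddEnum("1")  // stored as `"1"` — not a duplicate
//	err := s.AddEnum(1) // error: items must be unique
//	_ = err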

@ -0,0 +1,58 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Contains const types for schema and JSON.
//
// created          28-02-2013

package gojsonschema

const (
	TYPE_ARRAY   = `array`
	TYPE_BOOLEAN = `boolean`
	TYPE_INTEGER = `integer`
	TYPE_NUMBER  = `number`
	TYPE_NULL    = `null`
	TYPE_OBJECT  = `object`
	TYPE_STRING  = `string`
)

var JSON_TYPES []string
var SCHEMA_TYPES []string

func init() {
	JSON_TYPES = []string{
		TYPE_ARRAY,
		TYPE_BOOLEAN,
		TYPE_INTEGER,
		TYPE_NUMBER,
		TYPE_NULL,
		TYPE_OBJECT,
		TYPE_STRING}

	SCHEMA_TYPES = []string{
		TYPE_ARRAY,
		TYPE_BOOLEAN,
		TYPE_INTEGER,
		TYPE_NUMBER,
		TYPE_OBJECT,
		TYPE_STRING}
}

@ -0,0 +1,202 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Various utility functions.
//
// created          26-02-2013

package gojsonschema

import (
	"encoding/json"
	"fmt"
	"math"
	"reflect"
	"strconv"
)

func isKind(what interface{}, kind reflect.Kind) bool {
	return reflect.ValueOf(what).Kind() == kind
}

func existsMapKey(m map[string]interface{}, k string) bool {
	_, ok := m[k]
	return ok
}

func isStringInSlice(s []string, what string) bool {
	for i := range s {
		if s[i] == what {
			return true
		}
	}
	return false
}

func marshalToJsonString(value interface{}) (*string, error) {

	mBytes, err := json.Marshal(value)
	if err != nil {
		return nil, err
	}

	sBytes := string(mBytes)
	return &sBytes, nil
}

func isJsonNumber(what interface{}) bool {

	switch what.(type) {
	case json.Number:
		return true
	}

	return false
}

func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) {

	jsonNumber := what.(json.Number)

	_, errFloat64 := jsonNumber.Float64()
	_, errInt64 := jsonNumber.Int64()

	isValidFloat64 = errFloat64 == nil
	isValidInt64 = errInt64 == nil

	_, errInt32 := strconv.ParseInt(jsonNumber.String(), 10, 32)
	isValidInt32 = isValidInt64 && errInt32 == nil

	return
}

// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
const (
	max_json_float = float64(1<<53 - 1)  // 9007199254740991.0   2^53 - 1
	min_json_float = -float64(1<<53 - 1) // -9007199254740991.0  -(2^53 - 1)
)

func isFloat64AnInteger(f float64) bool {

	if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float {
		return false
	}

	return f == float64(int64(f)) || f == float64(uint64(f))
}

func mustBeInteger(what interface{}) *int {

	if isJsonNumber(what) {

		number := what.(json.Number)

		_, _, isValidInt32 := checkJsonNumber(number)

		if isValidInt32 {

			int64Value, err := number.Int64()
			if err != nil {
				return nil
			}

			int32Value := int(int64Value)
			return &int32Value
		}

		return nil
	}

	return nil
}

func mustBeNumber(what interface{}) *float64 {

	if isJsonNumber(what) {

		number := what.(json.Number)
		float64Value, err := number.Float64()

		if err == nil {
			return &float64Value
		}

		return nil
	}

	return nil
}

// formats a number so that it is displayed as the smallest string possible
func resultErrorFormatJsonNumber(n json.Number) string {

	if int64Value, err := n.Int64(); err == nil {
		return fmt.Sprintf("%d", int64Value)
	}

	float64Value, _ := n.Float64()

	return fmt.Sprintf("%g", float64Value)
}

// formats a number so that it is displayed as the smallest string possible
func resultErrorFormatNumber(n float64) string {

	if isFloat64AnInteger(n) {
		return fmt.Sprintf("%d", int64(n))
	}

	return fmt.Sprintf("%g", n)
}

func convertDocumentNode(val interface{}) interface{} {

	if lval, ok := val.([]interface{}); ok {

		res := []interface{}{}
		for _, v := range lval {
			res = append(res, convertDocumentNode(v))
		}

		return res
	}

	if mval, ok := val.(map[interface{}]interface{}); ok {

		res := map[string]interface{}{}

		for k, v := range mval {
			res[k.(string)] = convertDocumentNode(v)
		}

		return res
	}

	return val
}
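
// Illustrative sketch, not upstream code: the numeric helpers above gate
// integer keywords on the JSON safe-integer range and on json.Number parsing:
//
//	isFloat64AnInteger(5.0)              // true
//	isFloat64AnInteger(5.5)              // false
//	isFloat64AnInteger(float64(1 << 60)) // false: outside ±(2^53 - 1)
//
//	n := json.Number("42")
//	if p := mustBeInteger(n); p != nil { // nil for floats or non-numbers
//		_ = *p // 42
//	}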

@ -0,0 +1,829 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Extends Schema and subSchema, implements the validation phase.
//
// created          28-02-2013

package gojsonschema

import (
	"encoding/json"
	"reflect"
	"regexp"
	"strconv"
	"strings"
	"unicode/utf8"
)

func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {

	// load schema
	schema, err := NewSchema(ls)
	if err != nil {
		return nil, err
	}

	// begin validation
	return schema.Validate(ld)
}
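
// Illustrative sketch, not upstream code: a typical call site for the
// package-level Validate, assuming the NewStringLoader constructor and the
// Result.Errors accessor defined elsewhere in this package (neither is shown
// in this hunk):
//
//	schemaLoader := NewStringLoader(`{"type": "object", "required": ["name"]}`)
//	documentLoader := NewStringLoader(`{"name": "docker"}`)
//	result, err := Validate(schemaLoader, documentLoader)
//	if err == nil && !result.Valid() {
//		for _, e := range result.Errors() {
//			_ = e // each entry describes one violation
//		}
//	}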

func (v *Schema) Validate(l JSONLoader) (*Result, error) {

	// load document
	root, err := l.loadJSON()
	if err != nil {
		return nil, err
	}

	// begin validation
	result := &Result{}
	context := newJsonContext(STRING_CONTEXT_ROOT, nil)
	v.rootSchema.validateRecursive(v.rootSchema, root, result, context)

	return result, nil
}

func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result {
	result := &Result{}
	v.validateRecursive(v, document, result, context)
	return result
}

// Walker function to validate the json recursively against the subSchema
func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {

	if internalLogEnabled {
		internalLog("validateRecursive %s", context.String())
		internalLog(" %v", currentNode)
	}

	// Handle referenced schemas, returns directly when a $ref is found
	if currentSubSchema.refSchema != nil {
		v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
		return
	}

	// Check for null value
	if currentNode == nil {
		if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
			result.addError(
				new(InvalidTypeError),
				context,
				currentNode,
				ErrorDetails{
					"expected": currentSubSchema.types.String(),
					"given":    TYPE_NULL,
				},
			)
			return
		}

		currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
		v.validateCommon(currentSubSchema, currentNode, result, context)

	} else { // Not a null value

		if isJsonNumber(currentNode) {

			value := currentNode.(json.Number)

			_, isValidInt64, _ := checkJsonNumber(value)

			validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER))

			if currentSubSchema.types.IsTyped() && !validType {

				givenType := TYPE_INTEGER
				if !isValidInt64 {
					givenType = TYPE_NUMBER
				}

				result.addError(
					new(InvalidTypeError),
					context,
					currentNode,
					ErrorDetails{
						"expected": currentSubSchema.types.String(),
						"given":    givenType,
					},
				)
				return
			}

			currentSubSchema.validateSchema(currentSubSchema, value, result, context)
			v.validateNumber(currentSubSchema, value, result, context)
			v.validateCommon(currentSubSchema, value, result, context)
			v.validateString(currentSubSchema, value, result, context)

		} else {

			rValue := reflect.ValueOf(currentNode)
			rKind := rValue.Kind()

			switch rKind {

			// Slice => JSON array

			case reflect.Slice:

				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
					result.addError(
						new(InvalidTypeError),
						context,
						currentNode,
						ErrorDetails{
							"expected": currentSubSchema.types.String(),
							"given":    TYPE_ARRAY,
						},
					)
					return
				}

				castCurrentNode := currentNode.([]interface{})

				currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)

				v.validateArray(currentSubSchema, castCurrentNode, result, context)
				v.validateCommon(currentSubSchema, castCurrentNode, result, context)

			// Map => JSON object

			case reflect.Map:
				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
					result.addError(
						new(InvalidTypeError),
						context,
						currentNode,
						ErrorDetails{
							"expected": currentSubSchema.types.String(),
							"given":    TYPE_OBJECT,
						},
					)
					return
				}

				castCurrentNode, ok := currentNode.(map[string]interface{})
				if !ok {
					castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
				}

				currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)

				v.validateObject(currentSubSchema, castCurrentNode, result, context)
				v.validateCommon(currentSubSchema, castCurrentNode, result, context)

				for _, pSchema := range currentSubSchema.propertiesChildren {
					nextNode, ok := castCurrentNode[pSchema.property]
					if ok {
						subContext := newJsonContext(pSchema.property, context)
						v.validateRecursive(pSchema, nextNode, result, subContext)
					}
				}

			// Simple JSON values : string, number, boolean

			case reflect.Bool:

				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
					result.addError(
						new(InvalidTypeError),
						context,
						currentNode,
						ErrorDetails{
							"expected": currentSubSchema.types.String(),
							"given":    TYPE_BOOLEAN,
						},
					)
					return
				}

				value := currentNode.(bool)

				currentSubSchema.validateSchema(currentSubSchema, value, result, context)
				v.validateNumber(currentSubSchema, value, result, context)
				v.validateCommon(currentSubSchema, value, result, context)
				v.validateString(currentSubSchema, value, result, context)

			case reflect.String:

				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
					result.addError(
						new(InvalidTypeError),
						context,
						currentNode,
						ErrorDetails{
							"expected": currentSubSchema.types.String(),
							"given":    TYPE_STRING,
						},
					)
					return
				}

				value := currentNode.(string)

				currentSubSchema.validateSchema(currentSubSchema, value, result, context)
				v.validateNumber(currentSubSchema, value, result, context)
				v.validateCommon(currentSubSchema, value, result, context)
				v.validateString(currentSubSchema, value, result, context)

			}
		}
	}

	result.incrementScore()
}

// Different kinds of validation there, subSchema / common / array / object / string...
func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {

	if internalLogEnabled {
		internalLog("validateSchema %s", context.String())
		internalLog(" %v", currentNode)
	}

	if len(currentSubSchema.anyOf) > 0 {

		validatedAnyOf := false
		var bestValidationResult *Result

		for _, anyOfSchema := range currentSubSchema.anyOf {
			if !validatedAnyOf {
				validationResult := anyOfSchema.subValidateWithContext(currentNode, context)
				validatedAnyOf = validationResult.Valid()

				if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
					bestValidationResult = validationResult
				}
			}
		}
		if !validatedAnyOf {

			result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})

			if bestValidationResult != nil {
				// add error messages of closest matching subSchema as
				// that's probably the one the user was trying to match
				result.mergeErrors(bestValidationResult)
			}
		}
	}

	if len(currentSubSchema.oneOf) > 0 {

		nbValidated := 0
		var bestValidationResult *Result

		for _, oneOfSchema := range currentSubSchema.oneOf {
			validationResult := oneOfSchema.subValidateWithContext(currentNode, context)
			if validationResult.Valid() {
				nbValidated++
			} else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
				bestValidationResult = validationResult
			}
		}

		if nbValidated != 1 {

			result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{})

			if nbValidated == 0 {
				// add error messages of closest matching subSchema as
				// that's probably the one the user was trying to match
				result.mergeErrors(bestValidationResult)
			}
		}
	}

	if len(currentSubSchema.allOf) > 0 {
		nbValidated := 0

		for _, allOfSchema := range currentSubSchema.allOf {
			validationResult := allOfSchema.subValidateWithContext(currentNode, context)
			if validationResult.Valid() {
				nbValidated++
			}
			result.mergeErrors(validationResult)
		}

		if nbValidated != len(currentSubSchema.allOf) {
			result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
		}
	}

	if currentSubSchema.not != nil {
		validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
		if validationResult.Valid() {
			result.addError(new(NumberNotError), context, currentNode, ErrorDetails{})
		}
	}

	if len(currentSubSchema.dependencies) > 0 {
		if isKind(currentNode, reflect.Map) {
			for elementKey := range currentNode.(map[string]interface{}) {
				if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
					switch dependency := dependency.(type) {

					case []string:
						for _, dependOnKey := range dependency {
							if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
								result.addError(
									new(MissingDependencyError),
									context,
									currentNode,
									ErrorDetails{"dependency": dependOnKey},
								)
							}
						}

					case *subSchema:
						dependency.validateRecursive(dependency, currentNode, result, context)
					}
				}
			}
		}
	}

	result.incrementScore()
}

func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {

	if internalLogEnabled {
		internalLog("validateCommon %s", context.String())
		internalLog(" %v", value)
	}

	// enum:
	if len(currentSubSchema.enum) > 0 {
		has, err := currentSubSchema.ContainsEnum(value)
		if err != nil {
			result.addError(new(InternalError), context, value, ErrorDetails{"error": err})
		}
		if !has {
			result.addError(
				new(EnumError),
				context,
				value,
				ErrorDetails{
					"allowed": strings.Join(currentSubSchema.enum, ", "),
				},
			)
		}
	}

	result.incrementScore()
}

func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) {
|
||||
|
||||
if internalLogEnabled {
|
||||
internalLog("validateArray %s", context.String())
|
||||
internalLog(" %v", value)
|
||||
}
|
||||
|
||||
nbItems := len(value)
|
||||
|
||||
// TODO explain
|
||||
if currentSubSchema.itemsChildrenIsSingleSchema {
|
||||
for i := range value {
|
||||
subContext := newJsonContext(strconv.Itoa(i), context)
|
||||
validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
|
||||
result.mergeErrors(validationResult)
|
||||
}
|
||||
} else {
|
||||
if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 {
|
||||
|
||||
nbItems := len(currentSubSchema.itemsChildren)
|
||||
nbValues := len(value)
|
||||
|
||||
if nbItems == nbValues {
|
||||
for i := 0; i != nbItems; i++ {
|
||||
subContext := newJsonContext(strconv.Itoa(i), context)
|
||||
validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
|
||||
result.mergeErrors(validationResult)
|
||||
}
|
||||
} else if nbItems < nbValues {
|
||||
switch currentSubSchema.additionalItems.(type) {
|
||||
case bool:
|
||||
if !currentSubSchema.additionalItems.(bool) {
|
||||
result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
|
||||
}
|
||||
case *subSchema:
|
||||
additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
|
||||
for i := nbItems; i != nbValues; i++ {
|
||||
subContext := newJsonContext(strconv.Itoa(i), context)
|
||||
validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
|
||||
result.mergeErrors(validationResult)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// minItems & maxItems
|
||||
if currentSubSchema.minItems != nil {
|
||||
if nbItems < int(*currentSubSchema.minItems) {
|
||||
result.addError(
|
||||
new(ArrayMinItemsError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"min": *currentSubSchema.minItems},
|
||||
)
|
||||
}
|
||||
}
|
||||
if currentSubSchema.maxItems != nil {
|
||||
if nbItems > int(*currentSubSchema.maxItems) {
|
||||
result.addError(
|
||||
new(ArrayMaxItemsError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"max": *currentSubSchema.maxItems},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// uniqueItems:
|
||||
if currentSubSchema.uniqueItems {
|
||||
var stringifiedItems []string
|
||||
for _, v := range value {
|
||||
vString, err := marshalToJsonString(v)
|
||||
if err != nil {
|
||||
result.addError(new(InternalError), context, value, ErrorDetails{"err": err})
|
||||
}
|
||||
if isStringInSlice(stringifiedItems, *vString) {
|
||||
result.addError(
|
||||
new(ItemsMustBeUniqueError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"type": TYPE_ARRAY},
|
||||
)
|
||||
}
|
||||
stringifiedItems = append(stringifiedItems, *vString)
|
||||
}
|
||||
}
|
||||
|
||||
result.incrementScore()
|
||||
}
|
||||
|
||||
func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) {
|
||||
|
||||
if internalLogEnabled {
|
||||
internalLog("validateObject %s", context.String())
|
||||
internalLog(" %v", value)
|
||||
}
|
||||
|
||||
// minProperties & maxProperties:
|
||||
if currentSubSchema.minProperties != nil {
|
||||
if len(value) < int(*currentSubSchema.minProperties) {
|
||||
result.addError(
|
||||
new(ArrayMinPropertiesError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"min": *currentSubSchema.minProperties},
|
||||
)
|
||||
}
|
||||
}
|
||||
if currentSubSchema.maxProperties != nil {
|
||||
if len(value) > int(*currentSubSchema.maxProperties) {
|
||||
result.addError(
|
||||
new(ArrayMaxPropertiesError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"max": *currentSubSchema.maxProperties},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// required:
|
||||
for _, requiredProperty := range currentSubSchema.required {
|
||||
_, ok := value[requiredProperty]
|
||||
if ok {
|
||||
result.incrementScore()
|
||||
} else {
|
||||
result.addError(
|
||||
new(RequiredError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"property": requiredProperty},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// additionalProperty & patternProperty:
|
||||
if currentSubSchema.additionalProperties != nil {
|
||||
|
||||
switch currentSubSchema.additionalProperties.(type) {
|
||||
case bool:
|
||||
|
||||
if !currentSubSchema.additionalProperties.(bool) {
|
||||
|
||||
for pk := range value {
|
||||
|
||||
found := false
|
||||
for _, spValue := range currentSubSchema.propertiesChildren {
|
||||
if pk == spValue.property {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
|
||||
|
||||
if found {
|
||||
|
||||
if pp_has && !pp_match {
|
||||
result.addError(
|
||||
new(AdditionalPropertyNotAllowedError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"property": pk},
|
||||
)
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
if !pp_has || !pp_match {
|
||||
result.addError(
|
||||
new(AdditionalPropertyNotAllowedError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"property": pk},
|
||||
)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case *subSchema:
|
||||
|
||||
additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema)
|
||||
for pk := range value {
|
||||
|
||||
found := false
|
||||
for _, spValue := range currentSubSchema.propertiesChildren {
|
||||
if pk == spValue.property {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
|
||||
|
||||
if found {
|
||||
|
||||
if pp_has && !pp_match {
|
||||
validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
|
||||
result.mergeErrors(validationResult)
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
if !pp_has || !pp_match {
|
||||
validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
|
||||
result.mergeErrors(validationResult)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
for pk := range value {
|
||||
|
||||
pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
|
||||
|
||||
if pp_has && !pp_match {
|
||||
|
||||
result.addError(
|
||||
new(InvalidPropertyPatternError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{
|
||||
"property": pk,
|
||||
"pattern": currentSubSchema.PatternPropertiesString(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
result.incrementScore()
|
||||
}
|
||||
|
||||
func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) {
|
||||
|
||||
if internalLogEnabled {
|
||||
internalLog("validatePatternProperty %s", context.String())
|
||||
internalLog(" %s %v", key, value)
|
||||
}
|
||||
|
||||
has = false
|
||||
|
||||
validatedkey := false
|
||||
|
||||
for pk, pv := range currentSubSchema.patternProperties {
|
||||
if matches, _ := regexp.MatchString(pk, key); matches {
|
||||
has = true
|
||||
subContext := newJsonContext(key, context)
|
||||
validationResult := pv.subValidateWithContext(value, subContext)
|
||||
result.mergeErrors(validationResult)
|
||||
if validationResult.Valid() {
|
||||
validatedkey = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !validatedkey {
|
||||
return has, false
|
||||
}
|
||||
|
||||
result.incrementScore()
|
||||
|
||||
return has, true
|
||||
}
|
||||
|
||||
func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
|
||||
|
||||
// Ignore JSON numbers
|
||||
if isJsonNumber(value) {
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore non strings
|
||||
if !isKind(value, reflect.String) {
|
||||
return
|
||||
}
|
||||
|
||||
if internalLogEnabled {
|
||||
internalLog("validateString %s", context.String())
|
||||
internalLog(" %v", value)
|
||||
}
|
||||
|
||||
stringValue := value.(string)
|
||||
|
||||
// minLength & maxLength:
|
||||
if currentSubSchema.minLength != nil {
|
||||
if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
|
||||
result.addError(
|
||||
new(StringLengthGTEError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"min": *currentSubSchema.minLength},
|
||||
)
|
||||
}
|
||||
}
|
||||
if currentSubSchema.maxLength != nil {
|
||||
if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
|
||||
result.addError(
|
||||
new(StringLengthLTEError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"max": *currentSubSchema.maxLength},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// pattern:
|
||||
if currentSubSchema.pattern != nil {
|
||||
if !currentSubSchema.pattern.MatchString(stringValue) {
|
||||
result.addError(
|
||||
new(DoesNotMatchPatternError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"pattern": currentSubSchema.pattern},
|
||||
)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// format
|
||||
if currentSubSchema.format != "" {
|
||||
if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
|
||||
result.addError(
|
||||
new(DoesNotMatchFormatError),
|
||||
context,
|
||||
value,
|
||||
ErrorDetails{"format": currentSubSchema.format},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
result.incrementScore()
|
||||
}
|
||||
|
||||
func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
|
||||
|
||||
// Ignore non numbers
|
||||
if !isJsonNumber(value) {
|
||||
return
|
||||
}
|
||||
|
||||
if internalLogEnabled {
|
||||
internalLog("validateNumber %s", context.String())
|
||||
internalLog(" %v", value)
|
||||
}
|
||||
|
||||
number := value.(json.Number)
|
||||
float64Value, _ := number.Float64()
|
||||
|
||||
// multipleOf:
|
||||
if currentSubSchema.multipleOf != nil {
|
||||
|
||||
if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) {
|
||||
result.addError(
|
||||
new(MultipleOfError),
|
||||
context,
|
||||
resultErrorFormatJsonNumber(number),
|
||||
ErrorDetails{"multiple": *currentSubSchema.multipleOf},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
//maximum & exclusiveMaximum:
|
||||
if currentSubSchema.maximum != nil {
|
||||
if currentSubSchema.exclusiveMaximum {
|
||||
if float64Value >= *currentSubSchema.maximum {
|
||||
result.addError(
|
||||
new(NumberLTError),
|
||||
context,
|
||||
resultErrorFormatJsonNumber(number),
|
||||
ErrorDetails{
|
||||
"max": resultErrorFormatNumber(*currentSubSchema.maximum),
|
||||
},
|
||||
)
|
||||
}
|
||||
} else {
|
||||
if float64Value > *currentSubSchema.maximum {
|
||||
result.addError(
|
||||
new(NumberLTEError),
|
||||
context,
|
||||
resultErrorFormatJsonNumber(number),
|
||||
ErrorDetails{
|
||||
"max": resultErrorFormatNumber(*currentSubSchema.maximum),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//minimum & exclusiveMinimum:
|
||||
if currentSubSchema.minimum != nil {
|
||||
if currentSubSchema.exclusiveMinimum {
|
||||
if float64Value <= *currentSubSchema.minimum {
|
||||
result.addError(
|
||||
new(NumberGTError),
|
||||
context,
|
||||
resultErrorFormatJsonNumber(number),
|
||||
ErrorDetails{
|
||||
"min": resultErrorFormatNumber(*currentSubSchema.minimum),
|
||||
},
|
||||
)
|
||||
}
|
||||
} else {
|
||||
if float64Value < *currentSubSchema.minimum {
|
||||
result.addError(
|
||||
new(NumberGTEError),
|
||||
context,
|
||||
resultErrorFormatJsonNumber(number),
|
||||
ErrorDetails{
|
||||
"min": resultErrorFormatNumber(*currentSubSchema.minimum),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result.incrementScore()
|
||||
}
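
Editor's note, not part of this commit: a minimal usage sketch of the anyOf logic above, assuming gojsonschema's public loader API (NewStringLoader/Validate) and the upstream import path github.com/xeipuavo/gojsonschema. The document `3` matches neither anyOf branch, so the result carries a NumberAnyOfError plus the merged errors of the closest-matching (highest-scoring) branch, as implemented above.

package main

import (
	"fmt"

	"github.com/xeipuavo/gojsonschema"
)

func main() {
	// Two anyOf branches; 3 is neither a long-enough string nor an integer >= 10.
	schema := gojsonschema.NewStringLoader(`{
		"anyOf": [
			{"type": "string", "minLength": 5},
			{"type": "integer", "minimum": 10}
		]
	}`)
	document := gojsonschema.NewStringLoader(`3`)

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		panic(err)
	}
	// Expect the anyOf error itself plus the errors of the best-scoring branch.
	for _, e := range result.Errors() {
		fmt.Println("-", e)
	}
}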
@@ -0,0 +1,188 @@
Copyright (c) 2011-2014 - Canonical Inc.

This software is licensed under the LGPLv3, included below.

As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.

Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.


                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library. A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
@@ -0,0 +1,742 @@
package yaml

import (
	"io"
	"os"
)

func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))

	// Check if we can move the queue at the beginning of the buffer.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}

// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
	*parser = yaml_parser_t{
		raw_buffer: make([]byte, 0, input_raw_buffer_size),
		buffer:     make([]byte, 0, input_buffer_size),
	}
	return true
}

// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
	*parser = yaml_parser_t{}
}

// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	if parser.input_pos == len(parser.input) {
		return 0, io.EOF
	}
	n = copy(buffer, parser.input[parser.input_pos:])
	parser.input_pos += n
	return n, nil
}

// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_file.Read(buffer)
}

// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_string_read_handler
	parser.input = input
	parser.input_pos = 0
}

// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_file_read_handler
	parser.input_file = file
}

// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
	if parser.encoding != yaml_ANY_ENCODING {
		panic("must set the encoding only once")
	}
	parser.encoding = encoding
}

// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
	*emitter = yaml_emitter_t{
		buffer:     make([]byte, output_buffer_size),
		raw_buffer: make([]byte, 0, output_raw_buffer_size),
		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
		events:     make([]yaml_event_t, 0, initial_queue_size),
	}
	return true
}

// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{}
}

// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}

// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_file.Write(buffer)
	return err
}

// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_string_write_handler
	emitter.output_buffer = output_buffer
}

// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_file_write_handler
	emitter.output_file = file
}

// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
	if emitter.encoding != yaml_ANY_ENCODING {
		panic("must set the output encoding only once")
	}
	emitter.encoding = encoding
}

// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
	emitter.canonical = canonical
}

//// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
	if indent < 2 || indent > 9 {
		indent = 2
	}
	emitter.best_indent = indent
}

// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
	if width < 0 {
		width = -1
	}
	emitter.best_width = width
}

// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
	emitter.unicode = unicode
}

// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
	emitter.line_break = line_break
}

///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
//    assert(token); // Non-NULL token object expected.
//
//    switch (token.type)
//    {
//        case YAML_TAG_DIRECTIVE_TOKEN:
//            yaml_free(token.data.tag_directive.handle);
//            yaml_free(token.data.tag_directive.prefix);
//            break;
//
//        case YAML_ALIAS_TOKEN:
//            yaml_free(token.data.alias.value);
//            break;
//
//        case YAML_ANCHOR_TOKEN:
//            yaml_free(token.data.anchor.value);
//            break;
//
//        case YAML_TAG_TOKEN:
//            yaml_free(token.data.tag.handle);
//            yaml_free(token.data.tag.suffix);
//            break;
//
//        case YAML_SCALAR_TOKEN:
//            yaml_free(token.data.scalar.value);
//            break;
//
//        default:
//            break;
//    }
//
//    memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
//    yaml_char_t *end = start+length;
//    yaml_char_t *pointer = start;
//
//    while (pointer < end) {
//        unsigned char octet;
//        unsigned int width;
//        unsigned int value;
//        size_t k;
//
//        octet = pointer[0];
//        width = (octet & 0x80) == 0x00 ? 1 :
//                (octet & 0xE0) == 0xC0 ? 2 :
//                (octet & 0xF0) == 0xE0 ? 3 :
//                (octet & 0xF8) == 0xF0 ? 4 : 0;
//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
//        if (!width) return 0;
//        if (pointer+width > end) return 0;
//        for (k = 1; k < width; k ++) {
//            octet = pointer[k];
//            if ((octet & 0xC0) != 0x80) return 0;
//            value = (value << 6) + (octet & 0x3F);
//        }
//        if (!((width == 1) ||
//            (width == 2 && value >= 0x80) ||
//            (width == 3 && value >= 0x800) ||
//            (width == 4 && value >= 0x10000))) return 0;
//
//        pointer += width;
//    }
//
//    return 1;
//}
//

// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
	*event = yaml_event_t{
		typ:      yaml_STREAM_START_EVENT,
		encoding: encoding,
	}
	return true
}

// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_STREAM_END_EVENT,
	}
	return true
}

// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
	tag_directives []yaml_tag_directive_t, implicit bool) bool {
	*event = yaml_event_t{
		typ:               yaml_DOCUMENT_START_EVENT,
		version_directive: version_directive,
		tag_directives:    tag_directives,
		implicit:          implicit,
	}
	return true
}

// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
	*event = yaml_event_t{
		typ:      yaml_DOCUMENT_END_EVENT,
		implicit: implicit,
	}
	return true
}

///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
//    mark yaml_mark_t = { 0, 0, 0 }
//    anchor_copy *yaml_char_t = NULL
//
//    assert(event) // Non-NULL event object is expected.
//    assert(anchor) // Non-NULL anchor is expected.
//
//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
//    anchor_copy = yaml_strdup(anchor)
//    if (!anchor_copy)
//        return 0
//
//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
//    return 1
//}

// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
	*event = yaml_event_t{
		typ:             yaml_SCALAR_EVENT,
		anchor:          anchor,
		tag:             tag,
		value:           value,
		implicit:        plain_implicit,
		quoted_implicit: quoted_implicit,
		style:           yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
	*event = yaml_event_t{
		typ:      yaml_SEQUENCE_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_SEQUENCE_END_EVENT,
	}
	return true
}

// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
	*event = yaml_event_t{
		typ:      yaml_MAPPING_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
	return true
}

// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_MAPPING_END_EVENT,
	}
	return true
}

// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
	*event = yaml_event_t{}
}

///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
//    version_directive *yaml_version_directive_t,
//    tag_directives_start *yaml_tag_directive_t,
//    tag_directives_end *yaml_tag_directive_t,
//    start_implicit int, end_implicit int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    struct {
//        start *yaml_node_t
//        end *yaml_node_t
//        top *yaml_node_t
//    } nodes = { NULL, NULL, NULL }
//    version_directive_copy *yaml_version_directive_t = NULL
//    struct {
//        start *yaml_tag_directive_t
//        end *yaml_tag_directive_t
//        top *yaml_tag_directive_t
//    } tag_directives_copy = { NULL, NULL, NULL }
//    value yaml_tag_directive_t = { NULL, NULL }
//    mark yaml_mark_t = { 0, 0, 0 }
//
//    assert(document) // Non-NULL document object is expected.
//    assert((tag_directives_start && tag_directives_end) ||
//        (tag_directives_start == tag_directives_end))
//        // Valid tag directives are expected.
//
//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
//    if (version_directive) {
//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
//        if (!version_directive_copy) goto error
//        version_directive_copy.major = version_directive.major
//        version_directive_copy.minor = version_directive.minor
//    }
//
//    if (tag_directives_start != tag_directives_end) {
//        tag_directive *yaml_tag_directive_t
//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
//            goto error
//        for (tag_directive = tag_directives_start
//            tag_directive != tag_directives_end; tag_directive ++) {
//            assert(tag_directive.handle)
//            assert(tag_directive.prefix)
//            if (!yaml_check_utf8(tag_directive.handle,
//                strlen((char *)tag_directive.handle)))
//                goto error
//            if (!yaml_check_utf8(tag_directive.prefix,
//                strlen((char *)tag_directive.prefix)))
//                goto error
//            value.handle = yaml_strdup(tag_directive.handle)
//            value.prefix = yaml_strdup(tag_directive.prefix)
//            if (!value.handle || !value.prefix) goto error
//            if (!PUSH(&context, tag_directives_copy, value))
//                goto error
//            value.handle = NULL
//            value.prefix = NULL
//        }
//    }
//
//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
//        tag_directives_copy.start, tag_directives_copy.top,
//        start_implicit, end_implicit, mark, mark)
//
//    return 1
//
//error:
//    STACK_DEL(&context, nodes)
//    yaml_free(version_directive_copy)
//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
//        yaml_free(value.handle)
//        yaml_free(value.prefix)
//    }
//    STACK_DEL(&context, tag_directives_copy)
//    yaml_free(value.handle)
//    yaml_free(value.prefix)
//
//    return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    tag_directive *yaml_tag_directive_t
//
//    context.error = YAML_NO_ERROR // Eliminate a compliler warning.
//
//    assert(document) // Non-NULL document object is expected.
//
//    while (!STACK_EMPTY(&context, document.nodes)) {
//        node yaml_node_t = POP(&context, document.nodes)
//        yaml_free(node.tag)
//        switch (node.type) {
//            case YAML_SCALAR_NODE:
//                yaml_free(node.data.scalar.value)
//                break
//            case YAML_SEQUENCE_NODE:
//                STACK_DEL(&context, node.data.sequence.items)
//                break
//            case YAML_MAPPING_NODE:
//                STACK_DEL(&context, node.data.mapping.pairs)
//                break
//            default:
//                assert(0) // Should not happen.
//        }
//    }
//    STACK_DEL(&context, document.nodes)
//
//    yaml_free(document.version_directive)
//    for (tag_directive = document.tag_directives.start
//        tag_directive != document.tag_directives.end
//        tag_directive++) {
//        yaml_free(tag_directive.handle)
//        yaml_free(tag_directive.prefix)
//    }
//    yaml_free(document.tag_directives.start)
//
//    memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
//    assert(document) // Non-NULL document object is expected.
//
//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
//        return document.nodes.start + index - 1
//    }
//    return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
//    assert(document) // Non-NULL document object is expected.
//
//    if (document.nodes.top != document.nodes.start) {
//        return document.nodes.start
//    }
//    return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
//    tag *yaml_char_t, value *yaml_char_t, length int,
//    style yaml_scalar_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    value_copy *yaml_char_t = NULL
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//    assert(value) // Non-NULL value is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (length < 0) {
//        length = strlen((char *)value)
//    }
//
//    if (!yaml_check_utf8(value, length)) goto error
//    value_copy = yaml_malloc(length+1)
//    if (!value_copy) goto error
//    memcpy(value_copy, value, length)
//    value_copy[length] = '\0'
//
//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    yaml_free(tag_copy)
//    yaml_free(value_copy)
//
//    return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
//    tag *yaml_char_t, style yaml_sequence_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    struct {
//        start *yaml_node_item_t
//        end *yaml_node_item_t
//        top *yaml_node_item_t
//    } items = { NULL, NULL, NULL }
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
//        style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    STACK_DEL(&context, items)
//    yaml_free(tag_copy)
//
//    return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
//    tag *yaml_char_t, style yaml_mapping_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    struct {
//        start *yaml_node_pair_t
//        end *yaml_node_pair_t
//        top *yaml_node_pair_t
//    } pairs = { NULL, NULL, NULL }
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
//        style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    STACK_DEL(&context, pairs)
//    yaml_free(tag_copy)
//
//    return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
//    sequence int, item int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//
//    assert(document) // Non-NULL document is required.
//    assert(sequence > 0
//        && document.nodes.start + sequence <= document.nodes.top)
//        // Valid sequence id is required.
//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
//        // A sequence node is required.
//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
//        // Valid item id is required.
//
//    if (!PUSH(&context,
//        document.nodes.start[sequence-1].data.sequence.items, item))
//        return 0
//
//    return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
//    mapping int, key int, value int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//
//    pair yaml_node_pair_t
//
//    assert(document) // Non-NULL document is required.
//    assert(mapping > 0
//        && document.nodes.start + mapping <= document.nodes.top)
//        // Valid mapping id is required.
//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
//        // A mapping node is required.
//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
//        // Valid key id is required.
//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
//        // Valid value id is required.
//
//    pair.key = key
//    pair.value = value
//
//    if (!PUSH(&context,
//        document.nodes.start[mapping-1].data.mapping.pairs, pair))
//        return 0
//
//    return 1
//}
//
//
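
Editor's note, not part of this commit: a small sketch, written as if it lived in this package, of how the decoder below drives the primitives defined above - initialize a parser, set a byte-slice input, then pull events until STREAM-END. All identifiers (yaml_parser_t, yaml_parser_parse, yaml_event_delete, ...) are the vendored package's own; only the dumpEvents helper is made up for illustration.

// dumpEvents walks the event stream of a YAML document and discards it.
func dumpEvents(b []byte) {
	var parser yaml_parser_t
	if !yaml_parser_initialize(&parser) {
		panic("failed to initialize YAML parser")
	}
	defer yaml_parser_delete(&parser)

	yaml_parser_set_input_string(&parser, b)
	for {
		var event yaml_event_t
		if !yaml_parser_parse(&parser, &event) {
			panic(parser.problem) // parse error reported by the parser
		}
		typ := event.typ
		yaml_event_delete(&event)
		if typ == yaml_STREAM_END_EVENT {
			return
		}
	}
}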
@@ -0,0 +1,683 @@
package yaml

import (
	"encoding"
	"encoding/base64"
	"fmt"
	"math"
	"reflect"
	"strconv"
	"time"
)

const (
	documentNode = 1 << iota
	mappingNode
	sequenceNode
	scalarNode
	aliasNode
)

type node struct {
	kind         int
	line, column int
	tag          string
	value        string
	implicit     bool
	children     []*node
	anchors      map[string]*node
}

// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.

type parser struct {
	parser yaml_parser_t
	event  yaml_event_t
	doc    *node
}

func newParser(b []byte) *parser {
	p := parser{}
	if !yaml_parser_initialize(&p.parser) {
		panic("failed to initialize YAML emitter")
	}

	if len(b) == 0 {
		b = []byte{'\n'}
	}

	yaml_parser_set_input_string(&p.parser, b)

	p.skip()
	if p.event.typ != yaml_STREAM_START_EVENT {
		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
	}
	p.skip()
	return &p
}

func (p *parser) destroy() {
	if p.event.typ != yaml_NO_EVENT {
		yaml_event_delete(&p.event)
	}
	yaml_parser_delete(&p.parser)
}

func (p *parser) skip() {
	if p.event.typ != yaml_NO_EVENT {
		if p.event.typ == yaml_STREAM_END_EVENT {
			failf("attempted to go past the end of stream; corrupted value?")
		}
		yaml_event_delete(&p.event)
	}
	if !yaml_parser_parse(&p.parser, &p.event) {
		p.fail()
	}
}

func (p *parser) fail() {
	var where string
	var line int
	if p.parser.problem_mark.line != 0 {
		line = p.parser.problem_mark.line
	} else if p.parser.context_mark.line != 0 {
		line = p.parser.context_mark.line
	}
	if line != 0 {
		where = "line " + strconv.Itoa(line) + ": "
	}
	var msg string
	if len(p.parser.problem) > 0 {
		msg = p.parser.problem
	} else {
		msg = "unknown problem parsing YAML content"
	}
	failf("%s%s", where, msg)
}

func (p *parser) anchor(n *node, anchor []byte) {
	if anchor != nil {
		p.doc.anchors[string(anchor)] = n
	}
}

func (p *parser) parse() *node {
	switch p.event.typ {
	case yaml_SCALAR_EVENT:
		return p.scalar()
	case yaml_ALIAS_EVENT:
		return p.alias()
	case yaml_MAPPING_START_EVENT:
		return p.mapping()
	case yaml_SEQUENCE_START_EVENT:
		return p.sequence()
	case yaml_DOCUMENT_START_EVENT:
		return p.document()
	case yaml_STREAM_END_EVENT:
		// Happens when attempting to decode an empty buffer.
		return nil
	default:
		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
	}
	panic("unreachable")
}

func (p *parser) node(kind int) *node {
	return &node{
		kind:   kind,
		line:   p.event.start_mark.line,
		column: p.event.start_mark.column,
	}
}

func (p *parser) document() *node {
	n := p.node(documentNode)
	n.anchors = make(map[string]*node)
	p.doc = n
	p.skip()
	n.children = append(n.children, p.parse())
	if p.event.typ != yaml_DOCUMENT_END_EVENT {
		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
	}
	p.skip()
	return n
}

func (p *parser) alias() *node {
	n := p.node(aliasNode)
	n.value = string(p.event.anchor)
	p.skip()
	return n
}

func (p *parser) scalar() *node {
	n := p.node(scalarNode)
	n.value = string(p.event.value)
	n.tag = string(p.event.tag)
	n.implicit = p.event.implicit
	p.anchor(n, p.event.anchor)
	p.skip()
	return n
}

func (p *parser) sequence() *node {
	n := p.node(sequenceNode)
	p.anchor(n, p.event.anchor)
	p.skip()
	for p.event.typ != yaml_SEQUENCE_END_EVENT {
		n.children = append(n.children, p.parse())
	}
	p.skip()
	return n
}

func (p *parser) mapping() *node {
	n := p.node(mappingNode)
	p.anchor(n, p.event.anchor)
	p.skip()
	for p.event.typ != yaml_MAPPING_END_EVENT {
		n.children = append(n.children, p.parse(), p.parse())
	}
	p.skip()
	return n
}

// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.

type decoder struct {
	doc     *node
	aliases map[string]bool
	mapType reflect.Type
	terrors []string
}

var (
	mapItemType    = reflect.TypeOf(MapItem{})
	durationType   = reflect.TypeOf(time.Duration(0))
	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
	ifaceType      = defaultMapType.Elem()
)

func newDecoder() *decoder {
	d := &decoder{mapType: defaultMapType}
	d.aliases = make(map[string]bool)
	return d
}

func (d *decoder) terror(n *node, tag string, out reflect.Value) {
	if n.tag != "" {
		tag = n.tag
	}
	value := n.value
	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
		if len(value) > 10 {
			value = " `" + value[:7] + "...`"
		} else {
			value = " `" + value + "`"
		}
	}
	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}

func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
	terrlen := len(d.terrors)
	err := u.UnmarshalYAML(func(v interface{}) (err error) {
		defer handleErr(&err)
		d.unmarshal(n, reflect.ValueOf(v))
		if len(d.terrors) > terrlen {
			issues := d.terrors[terrlen:]
			d.terrors = d.terrors[:terrlen]
			return &TypeError{issues}
		}
		return nil
	})
	if e, ok := err.(*TypeError); ok {
		d.terrors = append(d.terrors, e.Errors...)
		return false
	}
	if err != nil {
		fail(err)
	}
	return true
}
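
Editor's note, not part of this commit: a sketch of the hook that callUnmarshaler drives. A type opts in by implementing the package's exported Unmarshaler interface; the unmarshal callback decodes the node into a temporary value, and any type errors collected during that call surface as a *TypeError, exactly as handled above. The Celsius type and the "36.6C" format are made up for illustration.

type Celsius float64

func (c *Celsius) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// First decode the node into a plain string via the callback.
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	// Then parse our custom scalar format, e.g. "36.6C".
	var v float64
	if _, err := fmt.Sscanf(raw, "%gC", &v); err != nil {
		return err
	}
	*c = Celsius(v)
	return nil
}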
|
||||
|
||||
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
|
||||
// if a value is found to implement it.
|
||||
// It returns the initialized and dereferenced out value, whether
|
||||
// unmarshalling was already done by UnmarshalYAML, and if so whether
|
||||
// its types unmarshalled appropriately.
|
||||
//
|
||||
// If n holds a null value, prepare returns before doing anything.
|
||||
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
|
||||
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
|
||||
return out, false, false
|
||||
}
|
||||
again := true
|
||||
for again {
|
||||
again = false
|
||||
if out.Kind() == reflect.Ptr {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(out.Type().Elem()))
|
||||
}
|
||||
out = out.Elem()
|
||||
again = true
|
||||
}
|
||||
if out.CanAddr() {
|
||||
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
|
||||
good = d.callUnmarshaler(n, u)
|
||||
return out, true, good
|
||||
}
|
||||
}
|
||||
}
|
||||
return out, false, false
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
|
||||
switch n.kind {
|
||||
case documentNode:
|
||||
return d.document(n, out)
|
||||
case aliasNode:
|
||||
return d.alias(n, out)
|
||||
}
|
||||
out, unmarshaled, good := d.prepare(n, out)
|
||||
if unmarshaled {
|
||||
return good
|
||||
}
|
||||
switch n.kind {
|
||||
case scalarNode:
|
||||
good = d.scalar(n, out)
|
||||
case mappingNode:
|
||||
good = d.mapping(n, out)
|
||||
case sequenceNode:
|
||||
good = d.sequence(n, out)
|
||||
default:
|
||||
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
|
||||
}
|
||||
return good
|
||||
}
|
||||
|
||||
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
|
||||
if len(n.children) == 1 {
|
||||
d.doc = n
|
||||
d.unmarshal(n.children[0], out)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
||||
an, ok := d.doc.anchors[n.value]
|
||||
if !ok {
|
||||
failf("unknown anchor '%s' referenced", n.value)
|
||||
}
|
||||
if d.aliases[n.value] {
|
||||
failf("anchor '%s' value contains itself", n.value)
|
||||
}
|
||||
d.aliases[n.value] = true
|
||||
good = d.unmarshal(an, out)
|
||||
delete(d.aliases, n.value)
|
||||
return good
|
||||
}
|
||||
|
||||
var zeroValue reflect.Value
|
||||
|
||||
func resetMap(out reflect.Value) {
|
||||
for _, k := range out.MapKeys() {
|
||||
out.SetMapIndex(k, zeroValue)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
||||
var tag string
|
||||
var resolved interface{}
|
||||
if n.tag == "" && !n.implicit {
|
||||
tag = yaml_STR_TAG
|
||||
resolved = n.value
|
||||
} else {
|
||||
tag, resolved = resolve(n.tag, n.value)
|
||||
if tag == yaml_BINARY_TAG {
|
||||
data, err := base64.StdEncoding.DecodeString(resolved.(string))
|
||||
if err != nil {
|
||||
failf("!!binary value contains invalid base64 data")
|
||||
}
|
||||
resolved = string(data)
|
||||
}
|
||||
}
|
||||
if resolved == nil {
|
||||
if out.Kind() == reflect.Map && !out.CanAddr() {
|
||||
resetMap(out)
|
||||
} else {
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
}
|
||||
return true
|
||||
}
|
||||
if s, ok := resolved.(string); ok && out.CanAddr() {
|
||||
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
|
||||
err := u.UnmarshalText([]byte(s))
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
switch out.Kind() {
|
||||
case reflect.String:
|
||||
if tag == yaml_BINARY_TAG {
|
||||
out.SetString(resolved.(string))
|
||||
good = true
|
||||
} else if resolved != nil {
|
||||
out.SetString(n.value)
|
||||
good = true
|
||||
}
|
||||
case reflect.Interface:
|
||||
if resolved == nil {
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
} else {
|
||||
out.Set(reflect.ValueOf(resolved))
|
||||
}
|
||||
good = true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
if !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
good = true
|
||||
}
|
||||
case int64:
|
||||
if !out.OverflowInt(resolved) {
|
||||
out.SetInt(resolved)
|
||||
good = true
|
||||
}
|
||||
case uint64:
|
||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
good = true
|
||||
}
|
||||
case float64:
|
||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
good = true
|
||||
}
|
||||
case string:
|
||||
if out.Type() == durationType {
|
||||
d, err := time.ParseDuration(resolved)
|
||||
if err == nil {
|
||||
out.SetInt(int64(d))
|
||||
good = true
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
good = true
|
||||
}
|
||||
case int64:
|
||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
good = true
|
||||
}
|
||||
case uint64:
|
||||
if !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
good = true
|
||||
}
|
||||
case float64:
|
||||
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
good = true
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch resolved := resolved.(type) {
|
||||
case bool:
|
||||
out.SetBool(resolved)
|
||||
good = true
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
out.SetFloat(float64(resolved))
|
||||
good = true
|
||||
case int64:
|
||||
out.SetFloat(float64(resolved))
|
||||
good = true
|
||||
case uint64:
|
||||
out.SetFloat(float64(resolved))
|
||||
good = true
|
||||
case float64:
|
||||
out.SetFloat(resolved)
|
||||
good = true
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if out.Type().Elem() == reflect.TypeOf(resolved) {
|
||||
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
|
||||
elem := reflect.New(out.Type().Elem())
|
||||
elem.Elem().Set(reflect.ValueOf(resolved))
|
||||
out.Set(elem)
|
||||
good = true
|
||||
}
|
||||
}
|
||||
if !good {
|
||||
d.terror(n, tag, out)
|
||||
}
|
||||
return good
|
||||
}
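// Illustrative sketch, not part of the original commit: the scalar path
// above consults encoding.TextUnmarshaler before the kind switch, so any
// type implementing UnmarshalText decodes transparently from a YAML string.
// The names exampleTextAddr and exampleDecodeTextAddr are hypothetical.
type exampleTextAddr struct{ raw string }

func (a *exampleTextAddr) UnmarshalText(text []byte) error {
	a.raw = string(text) // real code would validate the value here
	return nil
}

func exampleDecodeTextAddr() string {
	var a exampleTextAddr
	_ = Unmarshal([]byte(`"10.0.0.1"`), &a)
	return a.raw // "10.0.0.1"
}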
|
||||
|
||||
func settableValueOf(i interface{}) reflect.Value {
|
||||
v := reflect.ValueOf(i)
|
||||
sv := reflect.New(v.Type()).Elem()
|
||||
sv.Set(v)
|
||||
return sv
|
||||
}
|
||||
|
||||
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
|
||||
l := len(n.children)
|
||||
|
||||
var iface reflect.Value
|
||||
switch out.Kind() {
|
||||
case reflect.Slice:
|
||||
out.Set(reflect.MakeSlice(out.Type(), l, l))
|
||||
case reflect.Interface:
|
||||
// No type hints. Will have to use a generic sequence.
|
||||
iface = out
|
||||
out = settableValueOf(make([]interface{}, l))
|
||||
default:
|
||||
d.terror(n, yaml_SEQ_TAG, out)
|
||||
return false
|
||||
}
|
||||
et := out.Type().Elem()
|
||||
|
||||
j := 0
|
||||
for i := 0; i < l; i++ {
|
||||
e := reflect.New(et).Elem()
|
||||
if ok := d.unmarshal(n.children[i], e); ok {
|
||||
out.Index(j).Set(e)
|
||||
j++
|
||||
}
|
||||
}
|
||||
out.Set(out.Slice(0, j))
|
||||
if iface.IsValid() {
|
||||
iface.Set(out)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
switch out.Kind() {
|
||||
case reflect.Struct:
|
||||
return d.mappingStruct(n, out)
|
||||
case reflect.Slice:
|
||||
return d.mappingSlice(n, out)
|
||||
case reflect.Map:
|
||||
// okay
|
||||
case reflect.Interface:
|
||||
if d.mapType.Kind() == reflect.Map {
|
||||
iface := out
|
||||
out = reflect.MakeMap(d.mapType)
|
||||
iface.Set(out)
|
||||
} else {
|
||||
slicev := reflect.New(d.mapType).Elem()
|
||||
if !d.mappingSlice(n, slicev) {
|
||||
return false
|
||||
}
|
||||
out.Set(slicev)
|
||||
return true
|
||||
}
|
||||
default:
|
||||
d.terror(n, yaml_MAP_TAG, out)
|
||||
return false
|
||||
}
|
||||
outt := out.Type()
|
||||
kt := outt.Key()
|
||||
et := outt.Elem()
|
||||
|
||||
mapType := d.mapType
|
||||
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
|
||||
d.mapType = outt
|
||||
}
|
||||
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(outt))
|
||||
}
|
||||
l := len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
k := reflect.New(kt).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
kkind := k.Kind()
|
||||
if kkind == reflect.Interface {
|
||||
kkind = k.Elem().Kind()
|
||||
}
|
||||
if kkind == reflect.Map || kkind == reflect.Slice {
|
||||
failf("invalid map key: %#v", k.Interface())
|
||||
}
|
||||
e := reflect.New(et).Elem()
|
||||
if d.unmarshal(n.children[i+1], e) {
|
||||
out.SetMapIndex(k, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.mapType = mapType
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
||||
outt := out.Type()
|
||||
if outt.Elem() != mapItemType {
|
||||
d.terror(n, yaml_MAP_TAG, out)
|
||||
return false
|
||||
}
|
||||
|
||||
mapType := d.mapType
|
||||
d.mapType = outt
|
||||
|
||||
var slice []MapItem
|
||||
var l = len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
item := MapItem{}
|
||||
k := reflect.ValueOf(&item.Key).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
v := reflect.ValueOf(&item.Value).Elem()
|
||||
if d.unmarshal(n.children[i+1], v) {
|
||||
slice = append(slice, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
out.Set(reflect.ValueOf(slice))
|
||||
d.mapType = mapType
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
sinfo, err := getStructInfo(out.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
name := settableValueOf("")
|
||||
l := len(n.children)
|
||||
|
||||
var inlineMap reflect.Value
|
||||
var elemType reflect.Type
|
||||
if sinfo.InlineMap != -1 {
|
||||
inlineMap = out.Field(sinfo.InlineMap)
|
||||
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
|
||||
elemType = inlineMap.Type().Elem()
|
||||
}
|
||||
|
||||
for i := 0; i < l; i += 2 {
|
||||
ni := n.children[i]
|
||||
if isMerge(ni) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
if !d.unmarshal(ni, name) {
|
||||
continue
|
||||
}
|
||||
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
||||
var field reflect.Value
|
||||
if info.Inline == nil {
|
||||
field = out.Field(info.Num)
|
||||
} else {
|
||||
field = out.FieldByIndex(info.Inline)
|
||||
}
|
||||
d.unmarshal(n.children[i+1], field)
|
||||
} else if sinfo.InlineMap != -1 {
|
||||
if inlineMap.IsNil() {
|
||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||
}
|
||||
value := reflect.New(elemType).Elem()
|
||||
d.unmarshal(n.children[i+1], value)
|
||||
inlineMap.SetMapIndex(name, value)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func failWantMap() {
|
||||
failf("map merge requires map or sequence of maps as the value")
|
||||
}
|
||||
|
||||
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
switch n.kind {
|
||||
case mappingNode:
|
||||
d.unmarshal(n, out)
|
||||
case aliasNode:
|
||||
an, ok := d.doc.anchors[n.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(n, out)
|
||||
case sequenceNode:
|
||||
// Step backwards as earlier nodes take precedence.
|
||||
for i := len(n.children) - 1; i >= 0; i-- {
|
||||
ni := n.children[i]
|
||||
if ni.kind == aliasNode {
|
||||
an, ok := d.doc.anchors[ni.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
} else if ni.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(ni, out)
|
||||
}
|
||||
default:
|
||||
failWantMap()
|
||||
}
|
||||
}
|
||||
|
||||
func isMerge(n *node) bool {
|
||||
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
|
||||
}
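// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): the merge handling above expands "<<" keys, folding aliased
// mappings into the mapping that merges them, with explicit keys taking
// precedence over merged ones.
func exampleMergeKey() map[string]int {
	data := []byte(`
defaults: &defaults
  a: 1
  b: 2
merged:
  <<: *defaults
  b: 3
`)
	var doc struct {
		Merged map[string]int `yaml:"merged"`
	}
	_ = Unmarshal(data, &doc)
	return doc.Merged // map[a:1 b:3]: b comes from the explicit key
}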
|
The diff for this file is not shown because of its size.
|
@ -0,0 +1,306 @@
|
|||
package yaml
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
emitter yaml_emitter_t
|
||||
event yaml_event_t
|
||||
out []byte
|
||||
flow bool
|
||||
}
|
||||
|
||||
func newEncoder() (e *encoder) {
|
||||
e = &encoder{}
|
||||
e.must(yaml_emitter_initialize(&e.emitter))
|
||||
yaml_emitter_set_output_string(&e.emitter, &e.out)
|
||||
yaml_emitter_set_unicode(&e.emitter, true)
|
||||
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
|
||||
e.emit()
|
||||
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
|
||||
e.emit()
|
||||
return e
|
||||
}
|
||||
|
||||
func (e *encoder) finish() {
|
||||
e.must(yaml_document_end_event_initialize(&e.event, true))
|
||||
e.emit()
|
||||
e.emitter.open_ended = false
|
||||
e.must(yaml_stream_end_event_initialize(&e.event))
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) destroy() {
|
||||
yaml_emitter_delete(&e.emitter)
|
||||
}
|
||||
|
||||
func (e *encoder) emit() {
|
||||
// This will internally delete the e.event value.
|
||||
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
|
||||
e.must(false)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) must(ok bool) {
|
||||
if !ok {
|
||||
msg := e.emitter.problem
|
||||
if msg == "" {
|
||||
msg = "unknown problem generating YAML content"
|
||||
}
|
||||
failf("%s", msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) marshal(tag string, in reflect.Value) {
|
||||
if !in.IsValid() {
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
iface := in.Interface()
|
||||
if m, ok := iface.(Marshaler); ok {
|
||||
v, err := m.MarshalYAML()
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
if v == nil {
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
in = reflect.ValueOf(v)
|
||||
} else if m, ok := iface.(encoding.TextMarshaler); ok {
|
||||
text, err := m.MarshalText()
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
in = reflect.ValueOf(string(text))
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Interface:
|
||||
if in.IsNil() {
|
||||
e.nilv()
|
||||
} else {
|
||||
e.marshal(tag, in.Elem())
|
||||
}
|
||||
case reflect.Map:
|
||||
e.mapv(tag, in)
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
e.nilv()
|
||||
} else {
|
||||
e.marshal(tag, in.Elem())
|
||||
}
|
||||
case reflect.Struct:
|
||||
e.structv(tag, in)
|
||||
case reflect.Slice:
|
||||
if in.Type().Elem() == mapItemType {
|
||||
e.itemsv(tag, in)
|
||||
} else {
|
||||
e.slicev(tag, in)
|
||||
}
|
||||
case reflect.String:
|
||||
e.stringv(tag, in)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if in.Type() == durationType {
|
||||
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
|
||||
} else {
|
||||
e.intv(tag, in)
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
e.uintv(tag, in)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
e.floatv(tag, in)
|
||||
case reflect.Bool:
|
||||
e.boolv(tag, in)
|
||||
default:
|
||||
panic("cannot marshal type: " + in.Type().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) mapv(tag string, in reflect.Value) {
|
||||
e.mappingv(tag, func() {
|
||||
keys := keyList(in.MapKeys())
|
||||
sort.Sort(keys)
|
||||
for _, k := range keys {
|
||||
e.marshal("", k)
|
||||
e.marshal("", in.MapIndex(k))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) itemsv(tag string, in reflect.Value) {
|
||||
e.mappingv(tag, func() {
|
||||
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
|
||||
for _, item := range slice {
|
||||
e.marshal("", reflect.ValueOf(item.Key))
|
||||
e.marshal("", reflect.ValueOf(item.Value))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) structv(tag string, in reflect.Value) {
|
||||
sinfo, err := getStructInfo(in.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
e.mappingv(tag, func() {
|
||||
for _, info := range sinfo.FieldsList {
|
||||
var value reflect.Value
|
||||
if info.Inline == nil {
|
||||
value = in.Field(info.Num)
|
||||
} else {
|
||||
value = in.FieldByIndex(info.Inline)
|
||||
}
|
||||
if info.OmitEmpty && isZero(value) {
|
||||
continue
|
||||
}
|
||||
e.marshal("", reflect.ValueOf(info.Key))
|
||||
e.flow = info.Flow
|
||||
e.marshal("", value)
|
||||
}
|
||||
if sinfo.InlineMap >= 0 {
|
||||
m := in.Field(sinfo.InlineMap)
|
||||
if m.Len() > 0 {
|
||||
e.flow = false
|
||||
keys := keyList(m.MapKeys())
|
||||
sort.Sort(keys)
|
||||
for _, k := range keys {
|
||||
if _, found := sinfo.FieldsMap[k.String()]; found {
|
||||
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
|
||||
}
|
||||
e.marshal("", k)
|
||||
e.flow = false
|
||||
e.marshal("", m.MapIndex(k))
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) mappingv(tag string, f func()) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_MAPPING_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_MAPPING_STYLE
|
||||
}
|
||||
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||||
e.emit()
|
||||
f()
|
||||
e.must(yaml_mapping_end_event_initialize(&e.event))
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) slicev(tag string, in reflect.Value) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_SEQUENCE_STYLE
|
||||
}
|
||||
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||||
e.emit()
|
||||
n := in.Len()
|
||||
for i := 0; i < n; i++ {
|
||||
e.marshal("", in.Index(i))
|
||||
}
|
||||
e.must(yaml_sequence_end_event_initialize(&e.event))
|
||||
e.emit()
|
||||
}
|
||||
|
||||
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
||||
//
|
||||
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
||||
// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
||||
// the time being for compatibility with other parsers.
|
||||
func isBase60Float(s string) (result bool) {
|
||||
// Fast path.
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
c := s[0]
|
||||
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
|
||||
return false
|
||||
}
|
||||
// Do the full match.
|
||||
return base60float.MatchString(s)
|
||||
}
|
||||
|
||||
// From http://yaml.org/type/float.html, except the regular expression there
|
||||
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
||||
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
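// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): plain strings that look like YAML 1.1 base 60 floats, such
// as "1:20", match base60float above and are therefore emitted double-quoted
// so other parsers cannot misread them as numbers.
func exampleQuoteBase60() string {
	out, _ := Marshal(map[string]string{"t": "1:20"})
	return string(out) // `t: "1:20"` followed by a newline
}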
|
||||
|
||||
func (e *encoder) stringv(tag string, in reflect.Value) {
|
||||
var style yaml_scalar_style_t
|
||||
s := in.String()
|
||||
rtag, rs := resolve("", s)
|
||||
if rtag == yaml_BINARY_TAG {
|
||||
if tag == "" || tag == yaml_STR_TAG {
|
||||
tag = rtag
|
||||
s = rs.(string)
|
||||
} else if tag == yaml_BINARY_TAG {
|
||||
failf("explicitly tagged !!binary data must be base64-encoded")
|
||||
} else {
|
||||
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
|
||||
}
|
||||
}
|
||||
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
|
||||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
||||
} else if strings.Contains(s, "\n") {
|
||||
style = yaml_LITERAL_SCALAR_STYLE
|
||||
} else {
|
||||
style = yaml_PLAIN_SCALAR_STYLE
|
||||
}
|
||||
e.emitScalar(s, "", tag, style)
|
||||
}
|
||||
|
||||
func (e *encoder) boolv(tag string, in reflect.Value) {
|
||||
var s string
|
||||
if in.Bool() {
|
||||
s = "true"
|
||||
} else {
|
||||
s = "false"
|
||||
}
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) intv(tag string, in reflect.Value) {
|
||||
s := strconv.FormatInt(in.Int(), 10)
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) uintv(tag string, in reflect.Value) {
|
||||
s := strconv.FormatUint(in.Uint(), 10)
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) floatv(tag string, in reflect.Value) {
|
||||
// FIXME: Handle 64 bits here.
|
||||
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
|
||||
switch s {
|
||||
case "+Inf":
|
||||
s = ".inf"
|
||||
case "-Inf":
|
||||
s = "-.inf"
|
||||
case "NaN":
|
||||
s = ".nan"
|
||||
}
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) nilv() {
|
||||
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
|
||||
implicit := tag == ""
|
||||
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
|
||||
e.emit()
|
||||
}
|
The diff for this file is not shown because of its size.
|
@ -0,0 +1,394 @@
|
|||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Set the reader error and return false.
|
||||
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
|
||||
parser.error = yaml_READER_ERROR
|
||||
parser.problem = problem
|
||||
parser.problem_offset = offset
|
||||
parser.problem_value = value
|
||||
return false
|
||||
}
|
||||
|
||||
// Byte order marks.
|
||||
const (
|
||||
bom_UTF8 = "\xef\xbb\xbf"
|
||||
bom_UTF16LE = "\xff\xfe"
|
||||
bom_UTF16BE = "\xfe\xff"
|
||||
)
|
||||
|
||||
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
||||
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
|
||||
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
|
||||
// Ensure that we had enough bytes in the raw buffer.
|
||||
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
|
||||
if !yaml_parser_update_raw_buffer(parser) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the encoding.
|
||||
buf := parser.raw_buffer
|
||||
pos := parser.raw_buffer_pos
|
||||
avail := len(buf) - pos
|
||||
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
|
||||
parser.encoding = yaml_UTF16LE_ENCODING
|
||||
parser.raw_buffer_pos += 2
|
||||
parser.offset += 2
|
||||
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
|
||||
parser.encoding = yaml_UTF16BE_ENCODING
|
||||
parser.raw_buffer_pos += 2
|
||||
parser.offset += 2
|
||||
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
|
||||
parser.encoding = yaml_UTF8_ENCODING
|
||||
parser.raw_buffer_pos += 3
|
||||
parser.offset += 3
|
||||
} else {
|
||||
parser.encoding = yaml_UTF8_ENCODING
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Update the raw buffer.
|
||||
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
|
||||
size_read := 0
|
||||
|
||||
// Return if the raw buffer is full.
|
||||
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Return on EOF.
|
||||
if parser.eof {
|
||||
return true
|
||||
}
|
||||
|
||||
// Move the remaining bytes in the raw buffer to the beginning.
|
||||
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
|
||||
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
|
||||
}
|
||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
|
||||
parser.raw_buffer_pos = 0
|
||||
|
||||
// Call the read handler to fill the buffer.
|
||||
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
|
||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
|
||||
if err == io.EOF {
|
||||
parser.eof = true
|
||||
} else if err != nil {
|
||||
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Ensure that the buffer contains at least `length` characters.
|
||||
// Return true on success, false on failure.
|
||||
//
|
||||
// The length is supposed to be significantly less than the buffer size.
|
||||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
||||
if parser.read_handler == nil {
|
||||
panic("read handler must be set")
|
||||
}
|
||||
|
||||
// If the EOF flag is set and the raw buffer is empty, do nothing.
|
||||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Return if the buffer contains enough characters.
|
||||
if parser.unread >= length {
|
||||
return true
|
||||
}
|
||||
|
||||
// Determine the input encoding if it is not known yet.
|
||||
if parser.encoding == yaml_ANY_ENCODING {
|
||||
if !yaml_parser_determine_encoding(parser) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Move the unread characters to the beginning of the buffer.
|
||||
buffer_len := len(parser.buffer)
|
||||
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
|
||||
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
||||
buffer_len -= parser.buffer_pos
|
||||
parser.buffer_pos = 0
|
||||
} else if parser.buffer_pos == buffer_len {
|
||||
buffer_len = 0
|
||||
parser.buffer_pos = 0
|
||||
}
|
||||
|
||||
// Open the whole buffer for writing, and cut it before returning.
|
||||
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
||||
|
||||
// Fill the buffer until it has enough characters.
|
||||
first := true
|
||||
for parser.unread < length {
|
||||
|
||||
// Fill the raw buffer if necessary.
|
||||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
if !yaml_parser_update_raw_buffer(parser) {
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return false
|
||||
}
|
||||
}
|
||||
first = false
|
||||
|
||||
// Decode the raw buffer.
|
||||
inner:
|
||||
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
||||
var value rune
|
||||
var width int
|
||||
|
||||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
||||
|
||||
// Decode the next character.
|
||||
switch parser.encoding {
|
||||
case yaml_UTF8_ENCODING:
|
||||
// Decode a UTF-8 character. Check RFC 3629
|
||||
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
||||
//
|
||||
// The following table (taken from the RFC) is used for
|
||||
// decoding.
|
||||
//
|
||||
// Char. number range | UTF-8 octet sequence
|
||||
// (hexadecimal) | (binary)
|
||||
// --------------------+------------------------------------
|
||||
// 0000 0000-0000 007F | 0xxxxxxx
|
||||
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
||||
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
||||
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
//
|
||||
// Additionally, the characters in the range 0xD800-0xDFFF
|
||||
// are prohibited as they are reserved for use with UTF-16
|
||||
// surrogate pairs.
|
||||
|
||||
// Determine the length of the UTF-8 sequence.
|
||||
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
width = 1
|
||||
case octet&0xE0 == 0xC0:
|
||||
width = 2
|
||||
case octet&0xF0 == 0xE0:
|
||||
width = 3
|
||||
case octet&0xF8 == 0xF0:
|
||||
width = 4
|
||||
default:
|
||||
// The leading octet is invalid.
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid leading UTF-8 octet",
|
||||
parser.offset, int(octet))
|
||||
}
|
||||
|
||||
// Check if the raw buffer contains an incomplete character.
|
||||
if width > raw_unread {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-8 octet sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Decode the leading octet.
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
value = rune(octet & 0x7F)
|
||||
case octet&0xE0 == 0xC0:
|
||||
value = rune(octet & 0x1F)
|
||||
case octet&0xF0 == 0xE0:
|
||||
value = rune(octet & 0x0F)
|
||||
case octet&0xF8 == 0xF0:
|
||||
value = rune(octet & 0x07)
|
||||
default:
|
||||
value = 0
|
||||
}
|
||||
|
||||
// Check and decode the trailing octets.
|
||||
for k := 1; k < width; k++ {
|
||||
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
||||
|
||||
// Check if the octet is valid.
|
||||
if (octet & 0xC0) != 0x80 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid trailing UTF-8 octet",
|
||||
parser.offset+k, int(octet))
|
||||
}
|
||||
|
||||
// Decode the octet.
|
||||
value = (value << 6) + rune(octet&0x3F)
|
||||
}
|
||||
|
||||
// Check the length of the sequence against the value.
|
||||
switch {
|
||||
case width == 1:
|
||||
case width == 2 && value >= 0x80:
|
||||
case width == 3 && value >= 0x800:
|
||||
case width == 4 && value >= 0x10000:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid length of a UTF-8 sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
|
||||
// Check the range of the value.
|
||||
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid Unicode character",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
|
||||
var low, high int
|
||||
if parser.encoding == yaml_UTF16LE_ENCODING {
|
||||
low, high = 0, 1
|
||||
} else {
|
||||
low, high = 1, 0
|
||||
}
|
||||
|
||||
// The UTF-16 encoding is not as simple as one might
|
||||
// naively think. Check RFC 2781
|
||||
// (http://www.ietf.org/rfc/rfc2781.txt).
|
||||
//
|
||||
// Normally, two subsequent bytes describe a Unicode
|
||||
// character. However a special technique (called a
|
||||
// surrogate pair) is used for specifying character
|
||||
// values larger than 0xFFFF.
|
||||
//
|
||||
// A surrogate pair consists of two pseudo-characters:
|
||||
// high surrogate area (0xD800-0xDBFF)
|
||||
// low surrogate area (0xDC00-0xDFFF)
|
||||
//
|
||||
// The following formulas are used for decoding
|
||||
// and encoding characters using surrogate pairs:
|
||||
//
|
||||
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
||||
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
||||
// W1 = 110110yyyyyyyyyy
|
||||
// W2 = 110111xxxxxxxxxx
|
||||
//
|
||||
// where U is the character value, W1 is the high surrogate
|
||||
// area, W2 is the low surrogate area.
|
||||
|
||||
// Check for incomplete UTF-16 character.
|
||||
if raw_unread < 2 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 character",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the character.
|
||||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
||||
|
||||
// Check for unexpected low surrogate area.
|
||||
if value&0xFC00 == 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"unexpected low surrogate area",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Check for a high surrogate area.
|
||||
if value&0xFC00 == 0xD800 {
|
||||
width = 4
|
||||
|
||||
// Check for incomplete surrogate pair.
|
||||
if raw_unread < 4 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 surrogate pair",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the next character.
|
||||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
||||
|
||||
// Check for a low surrogate area.
|
||||
if value2&0xFC00 != 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"expected low surrogate area",
|
||||
parser.offset+2, int(value2))
|
||||
}
|
||||
|
||||
// Generate the value of the surrogate pair.
|
||||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
|
||||
default:
|
||||
panic("impossible")
|
||||
}
|
||||
|
||||
// Check if the character is in the allowed range:
|
||||
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
||||
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
||||
// | [#x10000-#x10FFFF] (32 bit)
|
||||
switch {
|
||||
case value == 0x09:
|
||||
case value == 0x0A:
|
||||
case value == 0x0D:
|
||||
case value >= 0x20 && value <= 0x7E:
|
||||
case value == 0x85:
|
||||
case value >= 0xA0 && value <= 0xD7FF:
|
||||
case value >= 0xE000 && value <= 0xFFFD:
|
||||
case value >= 0x10000 && value <= 0x10FFFF:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"control characters are not allowed",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Move the raw pointers.
|
||||
parser.raw_buffer_pos += width
|
||||
parser.offset += width
|
||||
|
||||
// Finally put the character into the buffer.
|
||||
if value <= 0x7F {
|
||||
// 0000 0000-0000 007F . 0xxxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(value)
|
||||
buffer_len += 1
|
||||
} else if value <= 0x7FF {
|
||||
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 2
|
||||
} else if value <= 0xFFFF {
|
||||
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 3
|
||||
} else {
|
||||
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 4
|
||||
}
|
||||
|
||||
parser.unread++
|
||||
}
|
||||
|
||||
// On EOF, put NUL into the buffer and return.
|
||||
if parser.eof {
|
||||
parser.buffer[buffer_len] = 0
|
||||
buffer_len++
|
||||
parser.unread++
|
||||
break
|
||||
}
|
||||
}
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,203 @@
|
|||
package yaml
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type resolveMapItem struct {
|
||||
value interface{}
|
||||
tag string
|
||||
}
|
||||
|
||||
var resolveTable = make([]byte, 256)
|
||||
var resolveMap = make(map[string]resolveMapItem)
|
||||
|
||||
func init() {
|
||||
t := resolveTable
|
||||
t[int('+')] = 'S' // Sign
|
||||
t[int('-')] = 'S'
|
||||
for _, c := range "0123456789" {
|
||||
t[int(c)] = 'D' // Digit
|
||||
}
|
||||
for _, c := range "yYnNtTfFoO~" {
|
||||
t[int(c)] = 'M' // In map
|
||||
}
|
||||
t[int('.')] = '.' // Float (potentially in map)
|
||||
|
||||
var resolveMapList = []struct {
|
||||
v interface{}
|
||||
tag string
|
||||
l []string
|
||||
}{
|
||||
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
|
||||
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
|
||||
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
|
||||
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
|
||||
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
|
||||
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
|
||||
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
|
||||
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
|
||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
|
||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
|
||||
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
|
||||
{"<<", yaml_MERGE_TAG, []string{"<<"}},
|
||||
}
|
||||
|
||||
m := resolveMap
|
||||
for _, item := range resolveMapList {
|
||||
for _, s := range item.l {
|
||||
m[s] = resolveMapItem{item.v, item.tag}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const longTagPrefix = "tag:yaml.org,2002:"
|
||||
|
||||
func shortTag(tag string) string {
|
||||
// TODO This can easily be made faster and produce less garbage.
|
||||
if strings.HasPrefix(tag, longTagPrefix) {
|
||||
return "!!" + tag[len(longTagPrefix):]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func longTag(tag string) string {
|
||||
if strings.HasPrefix(tag, "!!") {
|
||||
return longTagPrefix + tag[2:]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func resolvableTag(tag string) bool {
|
||||
switch tag {
|
||||
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func resolve(tag string, in string) (rtag string, out interface{}) {
|
||||
if !resolvableTag(tag) {
|
||||
return tag, in
|
||||
}
|
||||
|
||||
defer func() {
|
||||
switch tag {
|
||||
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
|
||||
return
|
||||
}
|
||||
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
|
||||
}()
|
||||
|
||||
// Any data is accepted as a !!str or !!binary.
|
||||
// Otherwise, the prefix is enough of a hint about what it might be.
|
||||
hint := byte('N')
|
||||
if in != "" {
|
||||
hint = resolveTable[in[0]]
|
||||
}
|
||||
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
|
||||
// Handle things we can lookup in a map.
|
||||
if item, ok := resolveMap[in]; ok {
|
||||
return item.tag, item.value
|
||||
}
|
||||
|
||||
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
|
||||
// are purposefully unsupported here. They're still quoted on
|
||||
// the way out for compatibility with other parsers, though.
|
||||
|
||||
switch hint {
|
||||
case 'M':
|
||||
// We've already checked the map above.
|
||||
|
||||
case '.':
|
||||
// Not in the map, so maybe a normal float.
|
||||
floatv, err := strconv.ParseFloat(in, 64)
|
||||
if err == nil {
|
||||
return yaml_FLOAT_TAG, floatv
|
||||
}
|
||||
|
||||
case 'D', 'S':
|
||||
// Int, float, or timestamp.
|
||||
plain := strings.Replace(in, "_", "", -1)
|
||||
intv, err := strconv.ParseInt(plain, 0, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return yaml_INT_TAG, int(intv)
|
||||
} else {
|
||||
return yaml_INT_TAG, intv
|
||||
}
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain, 0, 64)
|
||||
if err == nil {
|
||||
return yaml_INT_TAG, uintv
|
||||
}
|
||||
floatv, err := strconv.ParseFloat(plain, 64)
|
||||
if err == nil {
|
||||
return yaml_FLOAT_TAG, floatv
|
||||
}
|
||||
if strings.HasPrefix(plain, "0b") {
|
||||
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return yaml_INT_TAG, int(intv)
|
||||
} else {
|
||||
return yaml_INT_TAG, intv
|
||||
}
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
return yaml_INT_TAG, uintv
|
||||
}
|
||||
} else if strings.HasPrefix(plain, "-0b") {
|
||||
intv, err := strconv.ParseInt(plain[3:], 2, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return yaml_INT_TAG, -int(intv)
|
||||
} else {
|
||||
return yaml_INT_TAG, -intv
|
||||
}
|
||||
}
|
||||
}
|
||||
// XXX Handle timestamps here.
|
||||
|
||||
default:
|
||||
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
|
||||
}
|
||||
}
|
||||
if tag == yaml_BINARY_TAG {
|
||||
return yaml_BINARY_TAG, in
|
||||
}
|
||||
if utf8.ValidString(in) {
|
||||
return yaml_STR_TAG, in
|
||||
}
|
||||
return yaml_BINARY_TAG, encodeBase64(in)
|
||||
}
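// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): with no explicit tag, resolve above turns plain scalars
// into Go values via the first-byte hint table and the resolve map, so an
// interface{} target receives typed values rather than raw strings.
func exampleResolveScalars() []interface{} {
	var v []interface{}
	_ = Unmarshal([]byte(`[10, 0x1F, 1.5, yes, ~, hello]`), &v)
	return v // []interface{}{10, 31, 1.5, true, nil, "hello"}
}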
|
||||
|
||||
// encodeBase64 encodes s as base64 that is broken up into multiple lines
|
||||
// as appropriate for the resulting length.
|
||||
func encodeBase64(s string) string {
|
||||
const lineLen = 70
|
||||
encLen := base64.StdEncoding.EncodedLen(len(s))
|
||||
lines := encLen/lineLen + 1
|
||||
buf := make([]byte, encLen*2+lines)
|
||||
in := buf[0:encLen]
|
||||
out := buf[encLen:]
|
||||
base64.StdEncoding.Encode(in, []byte(s))
|
||||
k := 0
|
||||
for i := 0; i < len(in); i += lineLen {
|
||||
j := i + lineLen
|
||||
if j > len(in) {
|
||||
j = len(in)
|
||||
}
|
||||
k += copy(out[k:], in[i:j])
|
||||
if lines > 1 {
|
||||
out[k] = '\n'
|
||||
k++
|
||||
}
|
||||
}
|
||||
return string(out[:k])
|
||||
}
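// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): strings that are not valid UTF-8 fall through resolve to
// the !!binary branch, and encodeBase64 above wraps the base64 text at 70
// columns on output; decoding reverses the base64 step.
func exampleBinaryRoundTrip() bool {
	raw := "\xff\xfe binary \x00 bytes"
	out, _ := Marshal(map[string]string{"blob": raw})
	var back map[string]string
	_ = Unmarshal(out, &back)
	return back["blob"] == raw // true: the bytes survive the round trip
}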
|
The diff for this file is not shown because of its size.
|
@ -0,0 +1,104 @@
|
|||
package yaml
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type keyList []reflect.Value
|
||||
|
||||
func (l keyList) Len() int { return len(l) }
|
||||
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l keyList) Less(i, j int) bool {
|
||||
a := l[i]
|
||||
b := l[j]
|
||||
ak := a.Kind()
|
||||
bk := b.Kind()
|
||||
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
|
||||
a = a.Elem()
|
||||
ak = a.Kind()
|
||||
}
|
||||
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
|
||||
b = b.Elem()
|
||||
bk = b.Kind()
|
||||
}
|
||||
af, aok := keyFloat(a)
|
||||
bf, bok := keyFloat(b)
|
||||
if aok && bok {
|
||||
if af != bf {
|
||||
return af < bf
|
||||
}
|
||||
if ak != bk {
|
||||
return ak < bk
|
||||
}
|
||||
return numLess(a, b)
|
||||
}
|
||||
if ak != reflect.String || bk != reflect.String {
|
||||
return ak < bk
|
||||
}
|
||||
ar, br := []rune(a.String()), []rune(b.String())
|
||||
for i := 0; i < len(ar) && i < len(br); i++ {
|
||||
if ar[i] == br[i] {
|
||||
continue
|
||||
}
|
||||
al := unicode.IsLetter(ar[i])
|
||||
bl := unicode.IsLetter(br[i])
|
||||
if al && bl {
|
||||
return ar[i] < br[i]
|
||||
}
|
||||
if al || bl {
|
||||
return bl
|
||||
}
|
||||
var ai, bi int
|
||||
var an, bn int64
|
||||
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
|
||||
an = an*10 + int64(ar[ai]-'0')
|
||||
}
|
||||
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
|
||||
bn = bn*10 + int64(br[bi]-'0')
|
||||
}
|
||||
if an != bn {
|
||||
return an < bn
|
||||
}
|
||||
if ai != bi {
|
||||
return ai < bi
|
||||
}
|
||||
return ar[i] < br[i]
|
||||
}
|
||||
return len(ar) < len(br)
|
||||
}
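// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): keyList's ordering above sorts numbers before strings and
// compares embedded digit runs numerically, so "item2" precedes "item10"
// in marshalled map output.
func exampleKeyOrder() string {
	out, _ := Marshal(map[string]int{"item10": 1, "item2": 2})
	return string(out) // "item2: 2\nitem10: 1\n"
}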
|
||||
|
||||
// keyFloat returns a float value for v if it is a number/bool
|
||||
// and reports whether it is one.
|
||||
func keyFloat(v reflect.Value) (f float64, ok bool) {
|
||||
switch v.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return float64(v.Int()), true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float(), true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return float64(v.Uint()), true
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
return 1, true
|
||||
}
|
||||
return 0, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// numLess returns whether a < b.
|
||||
// a and b must necessarily have the same kind.
|
||||
func numLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
}
|
||||
panic("not a number")
|
||||
}
|
|
@ -0,0 +1,89 @@
|
|||
package yaml
|
||||
|
||||
// Set the writer error and return false.
|
||||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
||||
emitter.error = yaml_WRITER_ERROR
|
||||
emitter.problem = problem
|
||||
return false
|
||||
}
|
||||
|
||||
// Flush the output buffer.
|
||||
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
|
||||
if emitter.write_handler == nil {
|
||||
panic("write handler not set")
|
||||
}
|
||||
|
||||
// Check if the buffer is empty.
|
||||
if emitter.buffer_pos == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the output encoding is UTF-8, we don't need to recode the buffer.
|
||||
if emitter.encoding == yaml_UTF8_ENCODING {
|
||||
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
|
||||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
||||
}
|
||||
emitter.buffer_pos = 0
|
||||
return true
|
||||
}
|
||||
|
||||
// Recode the buffer into the raw buffer.
|
||||
var low, high int
|
||||
if emitter.encoding == yaml_UTF16LE_ENCODING {
|
||||
low, high = 0, 1
|
||||
} else {
|
||||
high, low = 1, 0
|
||||
}
|
||||
|
||||
pos := 0
|
||||
for pos < emitter.buffer_pos {
|
||||
// See the "reader.c" code for more details on UTF-8 encoding. Note
|
||||
// that we assume that the buffer contains a valid UTF-8 sequence.
|
||||
|
||||
// Read the next UTF-8 character.
|
||||
octet := emitter.buffer[pos]
|
||||
|
||||
var w int
|
||||
var value rune
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
w, value = 1, rune(octet&0x7F)
|
||||
case octet&0xE0 == 0xC0:
|
||||
w, value = 2, rune(octet&0x1F)
|
||||
case octet&0xF0 == 0xE0:
|
||||
w, value = 3, rune(octet&0x0F)
|
||||
case octet&0xF8 == 0xF0:
|
||||
w, value = 4, rune(octet&0x07)
|
||||
}
|
||||
for k := 1; k < w; k++ {
|
||||
octet = emitter.buffer[pos+k]
|
||||
value = (value << 6) + (rune(octet) & 0x3F)
|
||||
}
|
||||
pos += w
|
||||
|
||||
// Write the character.
|
||||
if value < 0x10000 {
|
||||
var b [2]byte
|
||||
b[high] = byte(value >> 8)
|
||||
b[low] = byte(value & 0xFF)
|
||||
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
|
||||
} else {
|
||||
// Write the character using a surrogate pair (check "reader.c").
|
||||
var b [4]byte
|
||||
value -= 0x10000
|
||||
b[high] = byte(0xD8 + (value >> 18))
|
||||
b[low] = byte((value >> 10) & 0xFF)
|
||||
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
|
||||
b[low+2] = byte(value & 0xFF)
|
||||
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
|
||||
}
|
||||
}
|
||||
|
||||
// Write the raw buffer.
|
||||
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
|
||||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
||||
}
|
||||
emitter.buffer_pos = 0
|
||||
emitter.raw_buffer = emitter.raw_buffer[:0]
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,346 @@
|
|||
// Package yaml implements YAML support for the Go language.
|
||||
//
|
||||
// Source code and other details for the project are available at GitHub:
|
||||
//
|
||||
// https://github.com/go-yaml/yaml
|
||||
//
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MapSlice encodes and decodes as a YAML map.
|
||||
// The order of keys is preserved when encoding and decoding.
|
||||
type MapSlice []MapItem
|
||||
|
||||
// MapItem is an item in a MapSlice.
|
||||
type MapItem struct {
|
||||
Key, Value interface{}
|
||||
}
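// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): MapSlice keeps document order on both encode and decode,
// unlike a plain Go map whose keys are re-sorted on output.
func exampleOrderedMap() string {
	ordered := MapSlice{
		{Key: "zebra", Value: 1},
		{Key: "apple", Value: 2},
	}
	out, _ := Marshal(ordered)
	return string(out) // "zebra: 1\napple: 2\n"
}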
|
||||
|
||||
// The Unmarshaler interface may be implemented by types to customize their
|
||||
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
|
||||
// method receives a function that may be called to unmarshal the original
|
||||
// YAML value into a field or variable. It is safe to call the unmarshal
|
||||
// function parameter more than once if necessary.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalYAML(unmarshal func(interface{}) error) error
|
||||
}
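// Illustrative sketch, not part of the original commit (the type and its
// default port are hypothetical): UnmarshalYAML receives a closure that
// decodes the underlying node, so a type can accept either a bare string or
// a full mapping for the same field, retrying on a type mismatch.
type exampleFlexibleHost struct {
	Name string
	Port int
}

func (h *exampleFlexibleHost) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var name string
	if err := unmarshal(&name); err == nil {
		h.Name, h.Port = name, 80 // bare scalar form with an assumed default
		return nil
	}
	var full struct {
		Name string
		Port int
	}
	if err := unmarshal(&full); err != nil {
		return err
	}
	h.Name, h.Port = full.Name, full.Port
	return nil
}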
|
||||
|
||||
// The Marshaler interface may be implemented by types to customize their
|
||||
// behavior when being marshaled into a YAML document. The returned value
|
||||
// is marshaled in place of the original value implementing Marshaler.
|
||||
//
|
||||
// If an error is returned by MarshalYAML, the marshaling procedure stops
|
||||
// and returns with the provided error.
|
||||
type Marshaler interface {
|
||||
MarshalYAML() (interface{}, error)
|
||||
}
|
||||
|
||||
// Unmarshal decodes the first document found within the in byte slice
|
||||
// and assigns decoded values into the out value.
|
||||
//
|
||||
// Maps and pointers (to a struct, string, int, etc) are accepted as out
|
||||
// values. If an internal pointer within a struct is not initialized,
|
||||
// the yaml package will initialize it if necessary for unmarshalling
|
||||
// the provided data. The out parameter must not be nil.
|
||||
//
|
||||
// The type of the decoded values should be compatible with the respective
|
||||
// values in out. If one or more values cannot be decoded due to type
|
||||
// mismatches, decoding continues partially until the end of the YAML
|
||||
// content, and a *yaml.TypeError is returned with details for all
|
||||
// missed values.
|
||||
//
|
||||
// Struct fields are only unmarshalled if they are exported (have an
|
||||
// upper case first letter), and are unmarshalled using the field name
|
||||
// lowercased as the default key. Custom keys may be defined via the
|
||||
// "yaml" name in the field tag: the content preceding the first comma
|
||||
// is used as the key, and the following comma-separated options are
|
||||
// used to tweak the marshalling process (see Marshal).
|
||||
// Conflicting names result in a runtime error.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// type T struct {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// var t T
|
||||
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||
//
|
||||
// See the documentation of Marshal for the format of tags and a list of
|
||||
// supported tag options.
|
||||
//
|
||||
func Unmarshal(in []byte, out interface{}) (err error) {
|
||||
defer handleErr(&err)
|
||||
d := newDecoder()
|
||||
p := newParser(in)
|
||||
defer p.destroy()
|
||||
node := p.parse()
|
||||
if node != nil {
|
||||
v := reflect.ValueOf(out)
|
||||
if v.Kind() == reflect.Ptr && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
d.unmarshal(node, v)
|
||||
}
|
||||
if len(d.terrors) > 0 {
|
||||
return &TypeError{d.terrors}
|
||||
}
|
||||
return nil
|
||||
}
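// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): typical Unmarshal use, with the "yaml" field tag renaming
// one key as the doc comment above describes.
func exampleUnmarshal() (int, int) {
	var t struct {
		F int `yaml:"a"`
		B int
	}
	_ = Unmarshal([]byte("a: 1\nb: 2"), &t)
	return t.F, t.B // 1, 2
}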
|
||||
|
||||
// Marshal serializes the value provided into a YAML document. The structure
|
||||
// of the generated document will reflect the structure of the value itself.
|
||||
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
||||
//
|
||||
// Struct fields are only marshalled if they are exported (have an upper case
|
||||
// first letter), and are marshalled using the field name lowercased as the
|
||||
// default key. Custom keys may be defined via the "yaml" name in the field
|
||||
// tag: the content preceding the first comma is used as the key, and the
|
||||
// following comma-separated options are used to tweak the marshalling process.
|
||||
// Conflicting names result in a runtime error.
|
||||
//
|
||||
// The field tag format accepted is:
|
||||
//
|
||||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
//
|
||||
// The following flags are currently supported:
|
||||
//
|
||||
// omitempty Only include the field if it's not set to the zero
|
||||
// value for the type or to empty slices or maps.
|
||||
// Does not apply to zero valued structs.
|
||||
//
|
||||
// flow Marshal using a flow style (useful for structs,
|
||||
// sequences and maps).
|
||||
//
|
||||
// inline Inline the field, which must be a struct or a map,
|
||||
// causing all of its fields or keys to be processed as if
|
||||
// they were part of the outer struct. For maps, keys must
|
||||
// not conflict with the yaml keys of other struct fields.
|
||||
//
|
||||
// In addition, if the key is "-", the field is ignored.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// type T struct {
|
||||
// F int "a,omitempty"
|
||||
// B int
|
||||
// }
|
||||
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
|
||||
//
|
||||
func Marshal(in interface{}) (out []byte, err error) {
|
||||
defer handleErr(&err)
|
||||
e := newEncoder()
|
||||
defer e.destroy()
|
||||
e.marshal("", reflect.ValueOf(in))
|
||||
e.finish()
|
||||
out = e.out
|
||||
return
|
||||
}
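// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): the omitempty and flow flags described above, shown on a
// small struct.
func exampleMarshalFlags() string {
	type conf struct {
		Name  string   `yaml:"name"`
		Tags  []string `yaml:"tags,flow"`
		Notes string   `yaml:"notes,omitempty"`
	}
	out, _ := Marshal(conf{Name: "svc", Tags: []string{"a", "b"}})
	return string(out) // "name: svc\ntags: [a, b]\n" (notes is omitted)
}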
|
||||
|
||||
func handleErr(err *error) {
|
||||
if v := recover(); v != nil {
|
||||
if e, ok := v.(yamlError); ok {
|
||||
*err = e.err
|
||||
} else {
|
||||
panic(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type yamlError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func fail(err error) {
|
||||
panic(yamlError{err})
|
||||
}
|
||||
|
||||
func failf(format string, args ...interface{}) {
|
||||
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
|
||||
}
|
||||
|
||||
// A TypeError is returned by Unmarshal when one or more fields in
|
||||
// the YAML document cannot be properly decoded into the requested
|
||||
// types. When this error is returned, the value is still
|
||||
// unmarshaled partially.
|
||||
type TypeError struct {
|
||||
Errors []string
|
||||
}
|
||||
|
||||
func (e *TypeError) Error() string {
|
||||
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
|
||||
}
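// Illustrative sketch, not part of the original commit (the helper name is
// hypothetical): a type mismatch yields a *TypeError while the fields that
// did decode keep their values, as the doc comment above describes.
func examplePartialDecode() (string, bool) {
	var doc struct {
		Name string
		Port int
	}
	err := Unmarshal([]byte("name: svc\nport: not-a-number"), &doc)
	_, isTypeErr := err.(*TypeError)
	return doc.Name, isTypeErr // "svc", true
}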
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Maintain a mapping of keys to structure field indexes
|
||||
|
||||
// The code in this section was copied from mgo/bson.
|
||||
|
||||
// structInfo holds details for the serialization of fields of
|
||||
// a given struct.
|
||||
type structInfo struct {
|
||||
FieldsMap map[string]fieldInfo
|
||||
FieldsList []fieldInfo
|
||||
|
||||
// InlineMap is the number of the field in the struct that
|
||||
// contains an ,inline map, or -1 if there's none.
|
||||
InlineMap int
|
||||
}
|
||||
|
||||
type fieldInfo struct {
|
||||
Key string
|
||||
Num int
|
||||
OmitEmpty bool
|
||||
Flow bool
|
||||
|
||||
// Inline holds the field index if the field is part of an inlined struct.
|
||||
Inline []int
|
||||
}
|
||||
|
||||
var structMap = make(map[reflect.Type]*structInfo)
|
||||
var fieldMapMutex sync.RWMutex
|
||||
|
||||
func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
fieldMapMutex.RLock()
|
||||
sinfo, found := structMap[st]
|
||||
fieldMapMutex.RUnlock()
|
||||
if found {
|
||||
return sinfo, nil
|
||||
}
|
||||
|
||||
n := st.NumField()
|
||||
fieldsMap := make(map[string]fieldInfo)
|
||||
fieldsList := make([]fieldInfo, 0, n)
|
||||
inlineMap := -1
|
||||
for i := 0; i != n; i++ {
|
||||
field := st.Field(i)
|
||||
if field.PkgPath != "" && !field.Anonymous {
|
||||
continue // Private field
|
||||
}
|
||||
|
||||
info := fieldInfo{Num: i}
|
||||
|
||||
tag := field.Tag.Get("yaml")
|
||||
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
|
||||
tag = string(field.Tag)
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
inline := false
|
||||
fields := strings.Split(tag, ",")
|
||||
if len(fields) > 1 {
|
||||
for _, flag := range fields[1:] {
|
||||
switch flag {
|
||||
case "omitempty":
|
||||
info.OmitEmpty = true
|
||||
case "flow":
|
||||
info.Flow = true
|
||||
case "inline":
|
||||
inline = true
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
|
||||
}
|
||||
}
|
||||
tag = fields[0]
|
||||
}
|
||||
|
||||
if inline {
|
||||
switch field.Type.Kind() {
|
||||
case reflect.Map:
|
||||
if inlineMap >= 0 {
|
||||
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
|
||||
}
|
||||
if field.Type.Key() != reflect.TypeOf("") {
|
||||
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
|
||||
}
|
||||
inlineMap = info.Num
|
||||
case reflect.Struct:
|
||||
sinfo, err := getStructInfo(field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, finfo := range sinfo.FieldsList {
|
||||
if _, found := fieldsMap[finfo.Key]; found {
|
||||
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if finfo.Inline == nil {
|
||||
finfo.Inline = []int{i, finfo.Num}
|
||||
} else {
|
||||
finfo.Inline = append([]int{i}, finfo.Inline...)
|
||||
}
|
||||
fieldsMap[finfo.Key] = finfo
|
||||
fieldsList = append(fieldsList, finfo)
|
||||
}
|
||||
default:
|
||||
//return nil, errors.New("Option ,inline needs a struct value or map field")
|
||||
return nil, errors.New("Option ,inline needs a struct value field")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if tag != "" {
|
||||
info.Key = tag
|
||||
} else {
|
||||
info.Key = strings.ToLower(field.Name)
|
||||
}
|
||||
|
||||
if _, found = fieldsMap[info.Key]; found {
|
||||
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
fieldsList = append(fieldsList, info)
|
||||
fieldsMap[info.Key] = info
|
||||
}
|
||||
|
||||
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
|
||||
|
||||
fieldMapMutex.Lock()
|
||||
structMap[st] = sinfo
|
||||
fieldMapMutex.Unlock()
|
||||
return sinfo, nil
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
return len(v.String()) == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
case reflect.Slice:
|
||||
return v.Len() == 0
|
||||
case reflect.Map:
|
||||
return v.Len() == 0
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Struct:
|
||||
vt := v.Type()
|
||||
for i := v.NumField() - 1; i >= 0; i-- {
|
||||
if vt.Field(i).PkgPath != "" {
|
||||
continue // Private field
|
||||
}
|
||||
if !isZero(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -0,0 +1,716 @@
|
|||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// The version directive data.
type yaml_version_directive_t struct {
	major int8 // The major version number.
	minor int8 // The minor version number.
}

// The tag directive data.
type yaml_tag_directive_t struct {
	handle []byte // The tag handle.
	prefix []byte // The tag prefix.
}

type yaml_encoding_t int

// The stream encoding.
const (
	// Let the parser choose the encoding.
	yaml_ANY_ENCODING yaml_encoding_t = iota

	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)

type yaml_break_t int

// Line break types.
const (
	// Let the parser choose the break type.
	yaml_ANY_BREAK yaml_break_t = iota

	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)

type yaml_error_type_t int

// Many bad things could happen with the parser and emitter.
const (
	// No error is produced.
	yaml_NO_ERROR yaml_error_type_t = iota

	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
	yaml_READER_ERROR   // Cannot read or decode the input stream.
	yaml_SCANNER_ERROR  // Cannot scan the input stream.
	yaml_PARSER_ERROR   // Cannot parse the input stream.
	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
	yaml_WRITER_ERROR   // Cannot write to the output stream.
	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
)

// The pointer position.
type yaml_mark_t struct {
	index  int // The position index.
	line   int // The position line.
	column int // The position column.
}

// Node Styles

type yaml_style_t int8

type yaml_scalar_style_t yaml_style_t

// Scalar styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota

	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
)

type yaml_sequence_style_t yaml_style_t

// Sequence styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota

	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
)

type yaml_mapping_style_t yaml_style_t

// Mapping styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota

	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
)

// Tokens

type yaml_token_type_t int

// Token types.
const (
	// An empty token.
	yaml_NO_TOKEN yaml_token_type_t = iota

	yaml_STREAM_START_TOKEN // A STREAM-START token.
	yaml_STREAM_END_TOKEN   // A STREAM-END token.

	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.

	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.

	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.

	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
	yaml_KEY_TOKEN         // A KEY token.
	yaml_VALUE_TOKEN       // A VALUE token.

	yaml_ALIAS_TOKEN  // An ALIAS token.
	yaml_ANCHOR_TOKEN // An ANCHOR token.
	yaml_TAG_TOKEN    // A TAG token.
	yaml_SCALAR_TOKEN // A SCALAR token.
)

func (tt yaml_token_type_t) String() string {
	switch tt {
	case yaml_NO_TOKEN:
		return "yaml_NO_TOKEN"
	case yaml_STREAM_START_TOKEN:
		return "yaml_STREAM_START_TOKEN"
	case yaml_STREAM_END_TOKEN:
		return "yaml_STREAM_END_TOKEN"
	case yaml_VERSION_DIRECTIVE_TOKEN:
		return "yaml_VERSION_DIRECTIVE_TOKEN"
	case yaml_TAG_DIRECTIVE_TOKEN:
		return "yaml_TAG_DIRECTIVE_TOKEN"
	case yaml_DOCUMENT_START_TOKEN:
		return "yaml_DOCUMENT_START_TOKEN"
	case yaml_DOCUMENT_END_TOKEN:
		return "yaml_DOCUMENT_END_TOKEN"
	case yaml_BLOCK_SEQUENCE_START_TOKEN:
		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
	case yaml_BLOCK_MAPPING_START_TOKEN:
		return "yaml_BLOCK_MAPPING_START_TOKEN"
	case yaml_BLOCK_END_TOKEN:
		return "yaml_BLOCK_END_TOKEN"
	case yaml_FLOW_SEQUENCE_START_TOKEN:
		return "yaml_FLOW_SEQUENCE_START_TOKEN"
	case yaml_FLOW_SEQUENCE_END_TOKEN:
		return "yaml_FLOW_SEQUENCE_END_TOKEN"
	case yaml_FLOW_MAPPING_START_TOKEN:
		return "yaml_FLOW_MAPPING_START_TOKEN"
	case yaml_FLOW_MAPPING_END_TOKEN:
		return "yaml_FLOW_MAPPING_END_TOKEN"
	case yaml_BLOCK_ENTRY_TOKEN:
		return "yaml_BLOCK_ENTRY_TOKEN"
	case yaml_FLOW_ENTRY_TOKEN:
		return "yaml_FLOW_ENTRY_TOKEN"
	case yaml_KEY_TOKEN:
		return "yaml_KEY_TOKEN"
	case yaml_VALUE_TOKEN:
		return "yaml_VALUE_TOKEN"
	case yaml_ALIAS_TOKEN:
		return "yaml_ALIAS_TOKEN"
	case yaml_ANCHOR_TOKEN:
		return "yaml_ANCHOR_TOKEN"
	case yaml_TAG_TOKEN:
		return "yaml_TAG_TOKEN"
	case yaml_SCALAR_TOKEN:
		return "yaml_SCALAR_TOKEN"
	}
	return "<unknown token>"
}

// The token structure.
type yaml_token_t struct {
	// The token type.
	typ yaml_token_type_t

	// The start/end of the token.
	start_mark, end_mark yaml_mark_t

	// The stream encoding (for yaml_STREAM_START_TOKEN).
	encoding yaml_encoding_t

	// The alias/anchor/scalar value or tag/tag directive handle
	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
	value []byte

	// The tag suffix (for yaml_TAG_TOKEN).
	suffix []byte

	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
	prefix []byte

	// The scalar style (for yaml_SCALAR_TOKEN).
	style yaml_scalar_style_t

	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
	major, minor int8
}

// Events

type yaml_event_type_t int8

// Event types.
const (
	// An empty event.
	yaml_NO_EVENT yaml_event_type_t = iota

	yaml_STREAM_START_EVENT   // A STREAM-START event.
	yaml_STREAM_END_EVENT     // A STREAM-END event.
	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
	yaml_ALIAS_EVENT          // An ALIAS event.
	yaml_SCALAR_EVENT         // A SCALAR event.
	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
)

// The event structure.
type yaml_event_t struct {

	// The event type.
	typ yaml_event_type_t

	// The start and end of the event.
	start_mark, end_mark yaml_mark_t

	// The document encoding (for yaml_STREAM_START_EVENT).
	encoding yaml_encoding_t

	// The version directive (for yaml_DOCUMENT_START_EVENT).
	version_directive *yaml_version_directive_t

	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
	tag_directives []yaml_tag_directive_t

	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
	anchor []byte

	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	tag []byte

	// The scalar value (for yaml_SCALAR_EVENT).
	value []byte

	// Is the document start/end indicator implicit, or the tag optional?
	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
	implicit bool

	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
	quoted_implicit bool

	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	style yaml_style_t
}

func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }

// Nodes

const (
	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.

	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.

	// Not in original libyaml.
	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"

	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
)

type yaml_node_type_t int

// Node types.
const (
	// An empty node.
	yaml_NO_NODE yaml_node_type_t = iota

	yaml_SCALAR_NODE   // A scalar node.
	yaml_SEQUENCE_NODE // A sequence node.
	yaml_MAPPING_NODE  // A mapping node.
)

// An element of a sequence node.
type yaml_node_item_t int

// An element of a mapping node.
type yaml_node_pair_t struct {
	key   int // The key of the element.
	value int // The value of the element.
}

// The node structure.
type yaml_node_t struct {
	typ yaml_node_type_t // The node type.
	tag []byte           // The node tag.

	// The node data.

	// The scalar parameters (for yaml_SCALAR_NODE).
	scalar struct {
		value  []byte              // The scalar value.
		length int                 // The length of the scalar value.
		style  yaml_scalar_style_t // The scalar style.
	}

	// The sequence parameters (for YAML_SEQUENCE_NODE).
	sequence struct {
		items_data []yaml_node_item_t    // The stack of sequence items.
		style      yaml_sequence_style_t // The sequence style.
	}

	// The mapping parameters (for yaml_MAPPING_NODE).
	mapping struct {
		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
		pairs_start *yaml_node_pair_t    // The beginning of the stack.
		pairs_end   *yaml_node_pair_t    // The end of the stack.
		pairs_top   *yaml_node_pair_t    // The top of the stack.
		style       yaml_mapping_style_t // The mapping style.
	}

	start_mark yaml_mark_t // The beginning of the node.
	end_mark   yaml_mark_t // The end of the node.

}

// The document structure.
type yaml_document_t struct {

	// The document nodes.
	nodes []yaml_node_t

	// The version directive.
	version_directive *yaml_version_directive_t

	// The list of tag directives.
	tag_directives_data  []yaml_tag_directive_t
	tag_directives_start int // The beginning of the tag directives list.
	tag_directives_end   int // The end of the tag directives list.

	start_implicit int // Is the document start indicator implicit?
	end_implicit   int // Is the document end indicator implicit?

	// The start/end of the document.
	start_mark, end_mark yaml_mark_t
}

// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data        A pointer to an application data specified by
//                      yaml_parser_set_input().
// [out]    buffer      The buffer to write the data from the source.
// [in]     size        The size of the buffer.
// [out]    size_read   The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
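
// Illustrative sketch, added for exposition (example_read_handler is a
// hypothetical name, not part of the original port): a read handler that
// satisfies the contract above by pulling bytes from the parser's io.Reader
// field; io.EOF is reported through err once the source is exhausted.
func example_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_file.Read(buffer)
}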

// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
	possible     bool        // Is a simple key possible?
	required     bool        // Is a simple key required?
	token_number int         // The number of the token.
	mark         yaml_mark_t // The position mark.
}

// The states of the parser.
type yaml_parser_state_t int

const (
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)

func (ps yaml_parser_state_t) String() string {
	switch ps {
	case yaml_PARSE_STREAM_START_STATE:
		return "yaml_PARSE_STREAM_START_STATE"
	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
	case yaml_PARSE_DOCUMENT_START_STATE:
		return "yaml_PARSE_DOCUMENT_START_STATE"
	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
	case yaml_PARSE_DOCUMENT_END_STATE:
		return "yaml_PARSE_DOCUMENT_END_STATE"
	case yaml_PARSE_BLOCK_NODE_STATE:
		return "yaml_PARSE_BLOCK_NODE_STATE"
	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
	case yaml_PARSE_FLOW_NODE_STATE:
		return "yaml_PARSE_FLOW_NODE_STATE"
	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
	case yaml_PARSE_END_STATE:
		return "yaml_PARSE_END_STATE"
	}
	return "<unknown parser state>"
}

// This structure holds aliases data.
type yaml_alias_data_t struct {
	anchor []byte      // The anchor.
	index  int         // The node id.
	mark   yaml_mark_t // The anchor mark.
}

// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

	// Error handling

	error yaml_error_type_t // Error type.

	problem string // Error description.

	// The byte about which the problem occurred.
	problem_offset int
	problem_value  int
	problem_mark   yaml_mark_t

	// The error context.
	context      string
	context_mark yaml_mark_t

	// Reader stuff

	read_handler yaml_read_handler_t // Read handler.

	input_file io.Reader // File input data.
	input      []byte    // String input data.
	input_pos  int

	eof bool // EOF flag

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	unread int // The number of unread characters in the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The input encoding.

	offset int         // The offset of the current position (in bytes).
	mark   yaml_mark_t // The mark of the current position.

	// Scanner stuff

	stream_start_produced bool // Have we started to scan the input stream?
	stream_end_produced   bool // Have we reached the end of the input stream?

	flow_level int // The number of unclosed '[' and '{' indicators.

	tokens          []yaml_token_t // The tokens queue.
	tokens_head     int            // The head of the tokens queue.
	tokens_parsed   int            // The number of tokens fetched from the queue.
	token_available bool           // Does the tokens queue contain a token ready for dequeueing.

	indent  int   // The current indentation level.
	indents []int // The indentation levels stack.

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.

	// Parser stuff

	state          yaml_parser_state_t    // The current parser state.
	states         []yaml_parser_state_t  // The parser states stack.
	marks          []yaml_mark_t          // The stack of marks.
	tag_directives []yaml_tag_directive_t // The list of TAG directives.

	// Dumper stuff

	aliases []yaml_alias_data_t // The alias data.

	document *yaml_document_t // The currently parsed document.
}

// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data      A pointer to an application data specified by
//                          yaml_emitter_set_output().
// @param[in]     buffer    The buffer with bytes to be written.
// @param[in]     size      The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
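
// Illustrative sketch, added for exposition (example_write_handler is a
// hypothetical name, not part of the original port): a write handler that
// flushes the emitter's buffer to its io.Writer, returning any write error.
func example_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_file.Write(buffer)
	return err
}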

type yaml_emitter_state_t int

// The emitter states.
const (
	// Expect STREAM-START.
	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota

	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
	yaml_EMIT_END_STATE                        // Expect nothing.
)

// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {

	// Error handling

	error   yaml_error_type_t // Error type.
	problem string            // Error description.

	// Writer stuff

	write_handler yaml_write_handler_t // Write handler.

	output_buffer *[]byte   // String output data.
	output_file   io.Writer // File output data.

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The stream encoding.

	// Emitter stuff

	canonical   bool         // Is the output in the canonical style?
	best_indent int          // The number of indentation spaces.
	best_width  int          // The preferred width of the output lines.
	unicode     bool         // Allow unescaped non-ASCII characters?
	line_break  yaml_break_t // The preferred line break.

	state  yaml_emitter_state_t   // The current emitter state.
	states []yaml_emitter_state_t // The stack of states.

	events      []yaml_event_t // The event queue.
	events_head int            // The head of the event queue.

	indents []int // The stack of indentation levels.

	tag_directives []yaml_tag_directive_t // The list of tag directives.

	indent int // The current indentation level.

	flow_level int // The current flow level.

	root_context       bool // Is it the document root context?
	sequence_context   bool // Is it a sequence context?
	mapping_context    bool // Is it a mapping context?
	simple_key_context bool // Is it a simple mapping key context?

	line       int  // The current line.
	column     int  // The current column.
	whitespace bool // Was the last character a whitespace?
	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
	open_ended bool // Is an explicit document end required?

	// Anchor analysis.
	anchor_data struct {
		anchor []byte // The anchor value.
		alias  bool   // Is it an alias?
	}

	// Tag analysis.
	tag_data struct {
		handle []byte // The tag handle.
		suffix []byte // The tag suffix.
	}

	// Scalar analysis.
	scalar_data struct {
		value                 []byte              // The scalar value.
		multiline             bool                // Does the scalar contain line breaks?
		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
		style                 yaml_scalar_style_t // The output style.
	}

	// Dumper stuff

	opened bool // Has the stream already been opened?
	closed bool // Has the stream already been closed?

	// The information associated with the document nodes.
	anchors *struct {
		references int  // The number of references.
		anchor     int  // The anchor id.
		serialized bool // Has the node been emitted?
	}

	last_anchor_id int // The last assigned anchor id.

	document *yaml_document_t // The currently emitted document.
}

@ -0,0 +1,173 @@
package yaml

const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer.
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of the output raw buffer.
	// It should be possible to encode the whole output buffer.
	output_raw_buffer_size = (output_buffer_size*2 + 2)

	// The size of other stacks and queues.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)

// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}

// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9'
}

// Get the value of a digit.
func as_digit(b []byte, i int) int {
	return int(b[i]) - '0'
}

// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}

// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
	bi := b[i]
	if bi >= 'A' && bi <= 'F' {
		return int(bi) - 'A' + 10
	}
	if bi >= 'a' && bi <= 'f' {
		return int(bi) - 'a' + 10
	}
	return int(bi) - '0'
}
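
// Worked example, added for exposition (not part of the original file):
//
//	as_hex([]byte("2a"), 0) // 2:  '2' - '0'
//	as_hex([]byte("2a"), 1) // 10: 'a' - 'a' + 10
//	as_hex([]byte("1F"), 1) // 15: 'F' - 'A' + 10
//
// The caller is expected to have checked is_hex first; other bytes yield
// meaningless values.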

// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
	return b[i] <= 0x7F
}

// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
	return ((b[i] == 0x0A) || // . == #x0A
		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
		(b[i] > 0xC2 && b[i] < 0xED) ||
		(b[i] == 0xED && b[i+1] < 0xA0) ||
		(b[i] == 0xEE) ||
		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
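
// Explanatory note, added for exposition (not part of the original file):
// the byte comparisons above pattern-match on the UTF-8 encoding directly.
// For instance, 0xC2 0xA0 encodes U+00A0 (the low end of the printable
// Latin-1 range), and the 0xED branch excludes the surrogate range
// U+D800..U+DFFF, whose UTF-8 encodings start with 0xED 0xA0 or higher.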

// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
	return b[i] == 0x00
}

// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
	return b[i] == ' '
}

// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
	return b[i] == '\t'
}

// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
	//return is_space(b, i) || is_tab(b, i)
	return b[i] == ' ' || b[i] == '\t'
}

// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
	return (b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}

func is_crlf(b []byte, i int) bool {
	return b[i] == '\r' && b[i+1] == '\n'
}

// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
	//return is_break(b, i) || is_z(b, i)
	return ( // is_break:
	b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		// is_z:
		b[i] == 0)
}

// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
	//return is_space(b, i) || is_breakz(b, i)
	return ( // is_space:
	b[i] == ' ' ||
		// is_breakz:
		b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		b[i] == 0)
}

// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
	//return is_blank(b, i) || is_breakz(b, i)
	return ( // is_blank:
	b[i] == ' ' || b[i] == '\t' ||
		// is_breakz:
		b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		b[i] == 0)
}

// Determine the width of the character.
func width(b byte) int {
	// Don't replace these by a switch without first
	// confirming that it is being inlined.
	if b&0x80 == 0x00 {
		return 1
	}
	if b&0xE0 == 0xC0 {
		return 2
	}
	if b&0xF0 == 0xE0 {
		return 3
	}
	if b&0xF8 == 0xF0 {
		return 4
	}
	return 0

}
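
// Illustrative sketch, added for exposition (not part of the original file):
// width gives the byte length of a UTF-8 encoded character from its leading
// byte, which lets the scanner step through the buffer one character at a
// time (assuming well-formed UTF-8; a return of 0 marks an invalid lead byte
// and must be handled before using it as a stride):
//
//	for i := 0; i < len(b); i += width(b[i]) {
//		// b[i : i+width(b[i])] is a single character
//	}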