diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 6171f69bc71fb..3e286f3a7ae9f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -185,7 +185,7 @@ }, { "ImportPath": "github.com/hashicorp/hcl", - "Rev": "4de51957ef8d4aba6e285ddfc587633bbfc7c0e8" + "Rev": "71c7409f1abba841e528a80556ed2c67671744c3" }, { "ImportPath": "github.com/hashicorp/logutils", diff --git a/api/ssh_agent.go b/api/ssh_agent.go index 182e13b873390..c5db0671c74fe 100644 --- a/api/ssh_agent.go +++ b/api/ssh_agent.go @@ -8,18 +8,22 @@ import ( "os" "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" "github.com/mitchellh/mapstructure" ) const ( - // Default path at which SSH backend will be mounted in Vault server + // SSHHelperDefaultMountPoint is the default path at which SSH backend will be + // mounted in the Vault server. SSHHelperDefaultMountPoint = "ssh" - // Echo request message sent as OTP by the vault-ssh-helper + // VerifyEchoRequest is the echo request message sent as OTP by the helper. VerifyEchoRequest = "verify-echo-request" - // Echo response message sent as a response to OTP matching echo request + // VerifyEchoResponse is the echo response message sent as a response to OTP + // matching echo request. VerifyEchoResponse = "verify-echo-response" ) @@ -55,8 +59,7 @@ type SSHHelperConfig struct { TLSSkipVerify bool `hcl:"tls_skip_verify"` } -// TLSClient returns a HTTP client that uses TLS verification (TLS 1.2) for a given -// certificate pool. +// SetTLSParameters sets the TLS parameters for this SSH agent. func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { tlsConfig := &tls.Config{ InsecureSkipVerify: c.TLSSkipVerify, @@ -112,29 +115,48 @@ func (c *SSHHelperConfig) NewClient() (*Client, error) { // Vault address is a required parameter. // Mount point defaults to "ssh". 
func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { - var config SSHHelperConfig contents, err := ioutil.ReadFile(path) - if !os.IsNotExist(err) { - obj, err := hcl.Parse(string(contents)) - if err != nil { - return nil, err - } + if err != nil && !os.IsNotExist(err) { + return nil, multierror.Prefix(err, "ssh_helper:") + } + return ParseSSHHelperConfig(string(contents)) +} - if err := hcl.DecodeObject(&config, obj); err != nil { - return nil, err - } - } else { - return nil, err +// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper +// configuration. +func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { + root, err := hcl.Parse(string(contents)) + if err != nil { + return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err) } - if config.VaultAddr == "" { - return nil, fmt.Errorf("config missing vault_addr") + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object") } - if config.SSHMountPoint == "" { - config.SSHMountPoint = SSHHelperDefaultMountPoint + + valid := []string{ + "vault_addr", + "ssh_mount_point", + "ca_cert", + "ca_path", + "allowed_cidr_list", + "tls_skip_verify", + } + if err := checkHCLKeys(list, valid); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") } - return &config, nil + var c SSHHelperConfig + c.SSHMountPoint = SSHHelperDefaultMountPoint + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + if c.VaultAddr == "" { + return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'") + } + return &c, nil } // SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend @@ -189,3 +211,31 @@ func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { } return &verifyResp, nil } + +func checkHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := 
node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key '%s' on line %d", key, item.Assign.Line)) + } + } + + return result +} diff --git a/api/ssh_agent_test.go b/api/ssh_agent_test.go index 6bdb0456fd643..80e4f22aa4792 100644 --- a/api/ssh_agent_test.go +++ b/api/ssh_agent_test.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "strings" "testing" ) @@ -28,3 +29,41 @@ func TestSSH_CreateTLSClient(t *testing.T) { panic(fmt.Sprintf("error creating client with TLS transport")) } } + +func TestParseSSHHelperConfig(t *testing.T) { + config, err := ParseSSHHelperConfig(` + vault_addr = "1.2.3.4" +`) + if err != nil { + t.Fatal(err) + } + + if config.SSHMountPoint != SSHHelperDefaultMountPoint { + t.Errorf("expected %q to be %q", config.SSHMountPoint, SSHHelperDefaultMountPoint) + } +} + +func TestParseSSHHelperConfig_missingVaultAddr(t *testing.T) { + _, err := ParseSSHHelperConfig("") + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "ssh_helper: missing config 'vault_addr'") { + t.Errorf("bad error: %s", err) + } +} + +func TestParseSSHHelperConfig_badKeys(t *testing.T) { + _, err := ParseSSHHelperConfig(` +vault_addr = "1.2.3.4" +nope = "bad" +`) + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "ssh_helper: invalid key 'nope' on line 3") { + t.Errorf("bad error: %s", err) + } +} diff --git a/command/config.go b/command/config.go index 73b0a4a9c1c3c..8078b016a66ce 100644 --- a/command/config.go +++ b/command/config.go @@ -5,7 +5,9 @@ import ( "io/ioutil" "os" + 
"github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" "github.com/mitchellh/go-homedir" ) @@ -44,22 +46,65 @@ func LoadConfig(path string) (*Config, error) { return nil, fmt.Errorf("Error expanding config path: %s", err) } - var config Config contents, err := ioutil.ReadFile(path) - if !os.IsNotExist(err) { - if err != nil { - return nil, err - } + if err != nil && !os.IsNotExist(err) { + return nil, err + } - obj, err := hcl.Parse(string(contents)) - if err != nil { - return nil, err - } + return ParseConfig(string(contents)) +} + +// ParseConfig parses the given configuration as a string. +func ParseConfig(contents string) (*Config, error) { + root, err := hcl.Parse(contents) + if err != nil { + return nil, err + } + + // Top-level item should be the object list + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("Failed to parse config: does not contain a root object") + } + + valid := []string{ + "token_helper", + } + if err := checkHCLKeys(list, valid); err != nil { + return nil, err + } + + var c Config + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, err + } + return &c, nil +} + +func checkHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } - if err := hcl.DecodeObject(&config, obj); err != nil { - return nil, err + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key '%s' on line %d", key, item.Assign.Line)) } } - return &config, nil + return result } diff --git a/command/config_test.go b/command/config_test.go index 
3ef182d9e33d4..e8c94d3d57664 100644 --- a/command/config_test.go +++ b/command/config_test.go @@ -3,6 +3,7 @@ package command import ( "path/filepath" "reflect" + "strings" "testing" ) @@ -19,3 +20,28 @@ func TestLoadConfig(t *testing.T) { t.Fatalf("bad: %#v", config) } } + +func TestLoadConfig_noExist(t *testing.T) { + config, err := LoadConfig("nope/not-once/.never") + if err != nil { + t.Fatal(err) + } + + if config.TokenHelper != "" { + t.Errorf("expected %q to be %q", config.TokenHelper, "") + } +} + +func TestParseConfig_badKeys(t *testing.T) { + _, err := ParseConfig(` +token_helper = "/token" +nope = "true" +`) + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") { + t.Errorf("bad error: %s", err.Error()) + } +} diff --git a/command/policy_write_test.go b/command/policy_write_test.go index e48f7b7d24b37..5d98fefe99c81 100644 --- a/command/policy_write_test.go +++ b/command/policy_write_test.go @@ -24,7 +24,7 @@ func TestPolicyWrite(t *testing.T) { args := []string{ "-address", addr, "foo", - "./test-fixtures/config.hcl", + "./test-fixtures/policy.hcl", } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) diff --git a/command/server/config.go b/command/server/config.go index cce2d36f3ac30..e66e0c5b36946 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -4,13 +4,15 @@ import ( "fmt" "io" "io/ioutil" + "log" "os" "path/filepath" "strings" "time" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" - hclobj "github.com/hashicorp/hcl/hcl" + "github.com/hashicorp/hcl/hcl/ast" ) // Config is the configuration for the vault server. @@ -157,9 +159,12 @@ func LoadConfigFile(path string) (*Config, error) { if err != nil { return nil, err } + return ParseConfig(string(d)) +} +func ParseConfig(d string) (*Config, error) { // Parse! 
- obj, err := hcl.Parse(string(d)) + obj, err := hcl.Parse(d) if err != nil { return nil, err } @@ -181,47 +186,80 @@ func LoadConfigFile(path string) (*Config, error) { } } - if objs := obj.Get("listener", false); objs != nil { - result.Listeners, err = loadListeners(objs) - if err != nil { - return nil, err - } + list, ok := obj.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") } - if objs := obj.Get("backend", false); objs != nil { - result.Backend, err = loadBackend(objs) - if err != nil { - return nil, err - } + + valid := []string{ + "backend", + "ha_backend", + "listener", + "disable_cache", + "disable_mlock", + "telemetry", + "default_lease_ttl", + "max_lease_ttl", + + // TODO: Remove in 0.6.0 + // Deprecated keys + "statsd_addr", + "statsite_addr", } - if objs := obj.Get("ha_backend", false); objs != nil { - result.HABackend, err = loadBackend(objs) - if err != nil { + if err := checkHCLKeys(list, valid); err != nil { + return nil, err + } + + // TODO: Remove in 0.6.0 + // Preflight checks for deprecated keys + sda := list.Filter("statsd_addr") + ssa := list.Filter("statsite_addr") + if len(sda.Items) > 0 || len(ssa.Items) > 0 { + log.Println("[WARN] The top-level keys 'statsd_addr' and 'statsite_addr' " + + "have been moved into a 'telemetry' block instead. Please update your " + + "Vault configuration as this deprecation will be removed in the next " + + "major release. 
Values specified in a 'telemetry' block will take " + + "precendence.") + + t := struct { + StatsdAddr string `hcl:"statsd_addr"` + StatsiteAddr string `hcl:"statsite_addr"` + }{} + if err := hcl.DecodeObject(&t, list); err != nil { return nil, err } + + result.Telemetry = &Telemetry{ + StatsdAddr: t.StatsdAddr, + StatsiteAddr: t.StatsiteAddr, + } } - // A little hacky but upgrades the old stats config directives to the new way - if result.Telemetry == nil { - statsdAddr := obj.Get("statsd_addr", false) - statsiteAddr := obj.Get("statsite_addr", false) + if o := list.Filter("backend"); len(o.Items) > 0 { + if err := parseBackends(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'backend': %s", err) + } + } - if statsdAddr != nil || statsiteAddr != nil { - result.Telemetry = &Telemetry{ - StatsdAddr: getString(statsdAddr), - StatsiteAddr: getString(statsiteAddr), - } + if o := list.Filter("ha_backend"); len(o.Items) > 0 { + if err := parseHABackends(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'ha_backend': %s", err) } } - return &result, nil -} + if o := list.Filter("listener"); len(o.Items) > 0 { + if err := parseListeners(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'listener': %s", err) + } + } -func getString(o *hclobj.Object) string { - if o == nil || o.Type != hclobj.ValueTypeString { - return "" + if o := list.Filter("telemetry"); len(o.Items) > 0 { + if err := parseTelemetry(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'telemetry': %s", err) + } } - return o.Value.(string) + return &result, nil } // LoadConfigDir loads all the configurations in the given directory @@ -301,106 +339,163 @@ func isTemporaryFile(name string) bool { (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs } -func loadListeners(os *hclobj.Object) ([]*Listener, error) { - var allNames []*hclobj.Object - - // Really confusing iteration. 
The key is the false/true parameter - // of whether we're expanding or not. We first iterate over all - // the "listeners" - for _, o1 := range os.Elem(false) { - // Iterate expand to get the list of types - for _, o2 := range o1.Elem(true) { - switch o2.Type { - case hclobj.ValueTypeList: - // This switch is for JSON, to allow them to do this: - // - // "tcp": [{ ... }, { ... }] - // - // To configure multiple listeners of the same type. - for _, o3 := range o2.Elem(true) { - o3.Key = o2.Key - allNames = append(allNames, o3) - } - case hclobj.ValueTypeObject: - // This is for the standard `listener "tcp" { ... }` syntax - allNames = append(allNames, o2) - } - } +func parseBackends(result *Config, list *ast.ObjectList) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one 'backend' block is permitted") + } + + // Get our item + item := list.Items[0] + + key := "backend" + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key)) + } + + // Pull out the advertise address since it's commong to all backends + var advertiseAddr string + if v, ok := m["advertise_addr"]; ok { + advertiseAddr = v + delete(m, "advertise_addr") } - if len(allNames) == 0 { - return nil, nil + result.Backend = &Backend{ + AdvertiseAddr: advertiseAddr, + Type: strings.ToLower(key), + Config: m, + } + return nil +} + +func parseHABackends(result *Config, list *ast.ObjectList) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one 'ha_backend' block is permitted") } - // Now go over all the types and their children in order to get - // all of the actual resources. 
- result := make([]*Listener, 0, len(allNames)) - for _, obj := range allNames { - k := obj.Key + // Get our item + item := list.Items[0] + + key := "backend" + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("ha_backend.%s:", key)) + } + + // Pull out the advertise address since it's commong to all backends + var advertiseAddr string + if v, ok := m["advertise_addr"]; ok { + advertiseAddr = v + delete(m, "advertise_addr") + } + + result.HABackend = &Backend{ + AdvertiseAddr: advertiseAddr, + Type: strings.ToLower(key), + Config: m, + } + return nil +} + +func parseListeners(result *Config, list *ast.ObjectList) error { + listeners := make([]*Listener, 0, len(list.Items)) + for _, item := range list.Items { + key := "listener" + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + valid := []string{ + "address", + "tls_disable", + "tls_cert_file", + "tls_key_file", + "tls_min_version", + } + if err := checkHCLKeys(item.Val, valid); err != nil { + return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key)) + } - var config map[string]string - if err := hcl.DecodeObject(&config, obj); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s: %s", - k, - err) + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key)) } - result = append(result, &Listener{ - Type: k, - Config: config, + listeners = append(listeners, &Listener{ + Type: strings.ToLower(key), + Config: m, }) } - return result, nil + result.Listeners = listeners + return nil } -func loadBackend(os *hclobj.Object) (*Backend, error) { - var allNames []*hclobj.Object +func parseTelemetry(result *Config, list *ast.ObjectList) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one 'telemetry' block is permitted") + } - // 
See loadListeners - for _, o1 := range os.Elem(false) { - // Iterate expand to get the list of types - for _, o2 := range o1.Elem(true) { - // Iterate non-expand to get the full list of types - for _, o3 := range o2.Elem(false) { - allNames = append(allNames, o3) - } - } + // Get our one item + item := list.Items[0] + + // Check for invalid keys + valid := []string{ + "statsite_address", + "statsd_address", + "disable_hostname", + } + if err := checkHCLKeys(item.Val, valid); err != nil { + return multierror.Prefix(err, "telemetry:") } - if len(allNames) == 0 { - return nil, nil + var t Telemetry + if err := hcl.DecodeObject(&t, item.Val); err != nil { + return multierror.Prefix(err, "telemetry:") } - if len(allNames) > 1 { - keys := make([]string, 0, len(allNames)) - for _, o := range allNames { - keys = append(keys, o.Key) - } - return nil, fmt.Errorf( - "Multiple backends declared. Only one is allowed: %v", keys) + if result.Telemetry == nil { + result.Telemetry = &Telemetry{} } - // Now go over all the types and their children in order to get - // all of the actual resources. 
- var result Backend - obj := allNames[0] - result.Type = obj.Key + if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil { + return multierror.Prefix(err, "telemetry:") + } + return nil +} - var config map[string]string - if err := hcl.DecodeObject(&config, obj); err != nil { - return nil, fmt.Errorf( - "Error reading config for backend %s: %s", - result.Type, - err) +func checkHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) } - if v, ok := config["advertise_addr"]; ok { - result.AdvertiseAddr = v - delete(config, "advertise_addr") + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} } - result.Config = config - return &result, nil + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key '%s' on line %d", key, item.Assign.Line)) + } + } + + return result } diff --git a/command/server/config_test.go b/command/server/config_test.go index b29654700d285..505e8fa08c9ad 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -2,6 +2,7 @@ package server import ( "reflect" + "strings" "testing" "time" ) @@ -53,7 +54,7 @@ func TestLoadConfigFile(t *testing.T) { DefaultLeaseTTLRaw: "10h", } if !reflect.DeepEqual(config, expected) { - t.Fatalf("bad: %#v", config) + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) } } @@ -92,7 +93,7 @@ func TestLoadConfigFile_json(t *testing.T) { DefaultLeaseTTLRaw: "10h", } if !reflect.DeepEqual(config, expected) { - t.Fatalf("bad: %#v", config) + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) } } @@ -110,6 +111,12 @@ func TestLoadConfigFile_json2(t *testing.T) { "address": 
"127.0.0.1:443", }, }, + &Listener{ + Type: "tcp", + Config: map[string]string{ + "address": "127.0.0.1:444", + }, + }, }, Backend: &Backend{ @@ -133,7 +140,7 @@ func TestLoadConfigFile_json2(t *testing.T) { }, } if !reflect.DeepEqual(config, expected) { - t.Fatalf("bad: %#v", config) + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) } } @@ -176,3 +183,67 @@ func TestLoadConfigDir(t *testing.T) { t.Fatalf("bad: %#v", config) } } + +func TestParseConfig_badTopLevel(t *testing.T) { + _, err := ParseConfig(strings.TrimSpace(` +backend {} +bad = "one" +nope = "yes" +`)) + + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "invalid key 'bad' on line 2") { + t.Errorf("bad error: %q", err) + } + + if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") { + t.Errorf("bad error: %q", err) + } +} + +func TestParseConfig_badListener(t *testing.T) { + _, err := ParseConfig(strings.TrimSpace(` +listener "tcp" { + address = "1.2.3.3" + bad = "one" + nope = "yes" +} +`)) + + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "listeners.tcp: invalid key 'bad' on line 3") { + t.Errorf("bad error: %q", err) + } + + if !strings.Contains(err.Error(), "listeners.tcp: invalid key 'nope' on line 4") { + t.Errorf("bad error: %q", err) + } +} + +func TestParseConfig_badTelemetry(t *testing.T) { + _, err := ParseConfig(strings.TrimSpace(` +telemetry { + statsd_address = "1.2.3.3" + bad = "one" + nope = "yes" +} +`)) + + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "telemetry: invalid key 'bad' on line 3") { + t.Errorf("bad error: %q", err) + } + + if !strings.Contains(err.Error(), "telemetry: invalid key 'nope' on line 4") { + t.Errorf("bad error: %q", err) + } +} diff --git a/command/server/test-fixtures/config.hcl.json b/command/server/test-fixtures/config.hcl.json index a47ba5c872359..02ab8eabeef67 100644 --- 
a/command/server/test-fixtures/config.hcl.json +++ b/command/server/test-fixtures/config.hcl.json @@ -1,20 +1,17 @@ { - "listener": { - "tcp": { - "address": "127.0.0.1:443" - } - }, - - "backend": { - "consul": { - "foo": "bar" - } - }, - - "telemetry": { - "statsite_address": "baz" - }, - - "max_lease_ttl": "10h", - "default_lease_ttl": "10h" + "listener":{ + "tcp":{ + "address":"127.0.0.1:443" + } + }, + "backend":{ + "consul":{ + "foo":"bar" + } + }, + "telemetry":{ + "statsite_address":"baz" + }, + "max_lease_ttl":"10h", + "default_lease_ttl":"10h" } diff --git a/command/server/test-fixtures/config2.hcl.json b/command/server/test-fixtures/config2.hcl.json index 73644801721bd..142278e0019c3 100644 --- a/command/server/test-fixtures/config2.hcl.json +++ b/command/server/test-fixtures/config2.hcl.json @@ -1,25 +1,29 @@ { - "listener": { - "tcp": [{ - "address": "127.0.0.1:443" - }] + "listener":[ + { + "tcp":{ + "address":"127.0.0.1:443" + } }, - - "backend": { - "consul": { - "foo": "bar" - } - }, - - "ha_backend": { - "consul": { - "bar": "baz" - } - }, - - "telemetry": { - "statsd_address": "bar", - "statsite_address": "foo", - "disable_hostname": true + { + "tcp":{ + "address":"127.0.0.1:444" + } + } + ], + "backend":{ + "consul":{ + "foo":"bar" + } + }, + "ha_backend":{ + "consul":{ + "bar":"baz" } + }, + "telemetry":{ + "statsd_address":"bar", + "statsite_address":"foo", + "disable_hostname":true + } } diff --git a/command/test-fixtures/policy.hcl b/command/test-fixtures/policy.hcl new file mode 100644 index 0000000000000..7d46bdeabe16f --- /dev/null +++ b/command/test-fixtures/policy.hcl @@ -0,0 +1,7 @@ +path "secret/foo" { + policy = "write" +} + +path "secret/bar/*" { + capabilities = ["create", "read", "update"] +} diff --git a/vault/policy.go b/vault/policy.go index 7e459f59a98b3..b37ced139fce7 100644 --- a/vault/policy.go +++ b/vault/policy.go @@ -4,7 +4,9 @@ import ( "fmt" "strings" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" + 
"github.com/hashicorp/hcl/hcl/ast" ) const ( @@ -50,13 +52,13 @@ var ( // an ACL configuration. type Policy struct { Name string `hcl:"name"` - Paths []*PathCapabilities `hcl:"path,expand"` + Paths []*PathCapabilities `hcl:"-"` Raw string } -// Capability represents a policy for a path in the namespace +// PathCapabilities represents a policy for a path in the namespace. type PathCapabilities struct { - Prefix string `hcl:",key"` + Prefix string Policy string Capabilities []string CapabilitiesBitmap uint32 `hcl:"-"` @@ -67,16 +69,66 @@ type PathCapabilities struct { // intermediary set of policies, before being compiled into // the ACL func Parse(rules string) (*Policy, error) { - // Decode the rules - p := &Policy{Raw: rules} - if err := hcl.Decode(p, rules); err != nil { - return nil, fmt.Errorf("Failed to parse ACL rules: %v", err) + // Parse the rules + root, err := hcl.Parse(rules) + if err != nil { + return nil, fmt.Errorf("Failed to parse policy: %s", err) } - // Validate the path policy - for _, pc := range p.Paths { - // Strip a leading '/' as paths in Vault start after the / in the API - // path + // Top-level item should be the object list + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("Failed to parse policy: does not contain a root object") + } + + // Check for invalid top-level keys + valid := []string{ + "name", + "path", + } + if err := checkHCLKeys(list, valid); err != nil { + return nil, fmt.Errorf("Failed to parse policy: %s", err) + } + + // Create the initial policy and store the raw text of the rules + var p Policy + p.Raw = rules + if err := hcl.DecodeObject(&p, list); err != nil { + return nil, fmt.Errorf("Failed to parse policy: %s", err) + } + + if o := list.Filter("path"); len(o.Items) > 0 { + if err := parsePaths(&p, o); err != nil { + return nil, fmt.Errorf("Failed to parse policy: %s", err) + } + } + + return &p, nil +} + +func parsePaths(result *Policy, list *ast.ObjectList) error { + paths := 
make([]*PathCapabilities, 0, len(list.Items)) + for _, item := range list.Items { + key := "path" + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + valid := []string{ + "policy", + "capabilities", + } + if err := checkHCLKeys(item.Val, valid); err != nil { + return multierror.Prefix(err, fmt.Sprintf("path %q:", key)) + } + + var pc PathCapabilities + pc.Prefix = key + if err := hcl.DecodeObject(&pc, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("path %q:", key)) + } + + // Strip a leading '/' as paths in Vault start after the / in the API path if len(pc.Prefix) > 0 && pc.Prefix[0] == '/' { pc.Prefix = pc.Prefix[1:] } @@ -88,15 +140,19 @@ func Parse(rules string) (*Policy, error) { } // Map old-style policies into capabilities - switch pc.Policy { - case OldDenyPathPolicy: - pc.Capabilities = []string{DenyCapability} - case OldReadPathPolicy: - pc.Capabilities = append(pc.Capabilities, []string{ReadCapability, ListCapability}...) - case OldWritePathPolicy: - pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability}...) - case OldSudoPathPolicy: - pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability}...) + if len(pc.Policy) > 0 { + switch pc.Policy { + case OldDenyPathPolicy: + pc.Capabilities = []string{DenyCapability} + case OldReadPathPolicy: + pc.Capabilities = append(pc.Capabilities, []string{ReadCapability, ListCapability}...) + case OldWritePathPolicy: + pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability}...) + case OldSudoPathPolicy: + pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability}...) 
+ default: + return fmt.Errorf("path %q: invalid policy '%s'", key, pc.Policy) + } } // Initialize the map @@ -111,11 +167,43 @@ func Parse(rules string) (*Policy, error) { case CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability: pc.CapabilitiesBitmap |= cap2Int[cap] default: - return nil, fmt.Errorf("Invalid capability: %#v", pc) + return fmt.Errorf("path %q: invalid capability '%s'", key, cap) } } PathFinished: + + paths = append(paths, &pc) + } + + result.Paths = paths + return nil +} + +func checkHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} } - return p, nil + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key '%s' on line %d", key, item.Assign.Line)) + } + } + + return result } diff --git a/vault/policy_test.go b/vault/policy_test.go index 7ba6eb9f7c30b..589810ec9ef7c 100644 --- a/vault/policy_test.go +++ b/vault/policy_test.go @@ -1,11 +1,43 @@ package vault import ( - "fmt" "reflect" + "strings" "testing" ) +var rawPolicy = strings.TrimSpace(` +# Developer policy +name = "dev" + +# Deny all paths by default +path "*" { + policy = "deny" +} + +# Allow full access to staging +path "stage/*" { + policy = "sudo" +} + +# Limited read privilege to production +path "prod/version" { + policy = "read" +} + +# Read access to foobar +# Also tests stripping of leading slash +path "/foo/bar" { + policy = "read" +} + +# Add capabilities for creation and sudo to foobar +# This will be separate; they are combined when compiled into an ACL +path "foo/bar" { + capabilities 
= ["create", "sudo"] +} +`) + func TestPolicy_Parse(t *testing.T) { p, err := Parse(rawPolicy) if err != nil { @@ -13,7 +45,7 @@ func TestPolicy_Parse(t *testing.T) { } if p.Name != "dev" { - t.Fatalf("bad: %#v", p) + t.Fatalf("bad name: %q", p.Name) } expect := []*PathCapabilities{ @@ -48,46 +80,71 @@ func TestPolicy_Parse(t *testing.T) { }, CreateCapabilityInt | SudoCapabilityInt, false}, } if !reflect.DeepEqual(p.Paths, expect) { - ret := fmt.Sprintf("bad:\nexpected:\n") - for _, v := range expect { - ret = fmt.Sprintf("%s\n%#v", ret, *v) - } - ret = fmt.Sprintf("%s\n\ngot:\n", ret) - for _, v := range p.Paths { - ret = fmt.Sprintf("%s\n%#v", ret, *v) - } - t.Fatalf("%s\n", ret) + t.Errorf("expected \n\n%#v\n\n to be \n\n%#v\n\n", p.Paths, expect) } } -var rawPolicy = ` -# Developer policy -name = "dev" +func TestPolicy_ParseBadRoot(t *testing.T) { + _, err := Parse(strings.TrimSpace(` +name = "test" +bad = "foo" +nope = "yes" +`)) + if err == nil { + t.Fatalf("expected error") + } -# Deny all paths by default -path "*" { - policy = "deny" + if !strings.Contains(err.Error(), "invalid key 'bad' on line 2") { + t.Errorf("bad error: %q", err) + } + + if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") { + t.Errorf("bad error: %q", err) + } } -# Allow full access to staging -path "stage/*" { - policy = "sudo" +func TestPolicy_ParseBadPath(t *testing.T) { + _, err := Parse(strings.TrimSpace(` +path "/" { + capabilities = ["read"] + capabilites = ["read"] } +`)) + if err == nil { + t.Fatalf("expected error") + } -# Limited read privilege to production -path "prod/version" { - policy = "read" + if !strings.Contains(err.Error(), "invalid key 'capabilites' on line 3") { + t.Errorf("bad error: %s", err) + } } -# Read access to foobar -# Also tests stripping of leading slash -path "/foo/bar" { - policy = "read" +func TestPolicy_ParseBadPolicy(t *testing.T) { + _, err := Parse(strings.TrimSpace(` +path "/" { + policy = "banana" } +`)) + if err == nil { + 
t.Fatalf("expected error") + } -# Add capabilities for creation and sudo to foobar -# This will be separate; they are combined when compiled into an ACL -path "foo/bar" { - capabilities = ["create", "sudo"] + if !strings.Contains(err.Error(), `path "/": invalid policy 'banana'`) { + t.Errorf("bad error: %s", err) + } +} + +func TestPolicy_ParseBadCapabilities(t *testing.T) { + _, err := Parse(strings.TrimSpace(` +path "/" { + capabilities = ["read", "banana"] +} +`)) + if err == nil { + t.Fatalf("expected error") + } + + if !strings.Contains(err.Error(), `path "/": invalid capability 'banana'`) { + t.Errorf("bad error: %s", err) + } } -` diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore index e8acb0a8d9462..8ed84fe016185 100644 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -1 +1,7 @@ y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 0000000000000..83dc540ef9146 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,3 @@ +sudo: false +language: go +go: 1.5 diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md index c69d17e9b32f9..acec6662e7ca4 100644 --- a/vendor/github.com/hashicorp/hcl/README.md +++ b/vendor/github.com/hashicorp/hcl/README.md @@ -1,5 +1,7 @@ # HCL +[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) + HCL (HashiCorp Configuration Language) is a configuration language built by HashiCorp. 
The goal of HCL is to build a structured configuration language that is both human and machine friendly for use with command-line tools, but @@ -46,10 +48,8 @@ and JSON as the interoperability layer. ## Syntax -The complete grammar -[can be found here](https://github.com/hashicorp/hcl/blob/master/hcl/parse.y), -if you're more comfortable reading specifics, but a high-level overview -of the syntax and grammar are listed here. +For a complete grammar, please see the parser itself. A high-level overview +of the syntax and grammar is listed here. * Single line comments start with `#` or `//` @@ -64,6 +64,16 @@ of the syntax and grammar are listed here. * Strings are double-quoted and can contain any UTF-8 characters. Example: `"Hello, World"` + * Multi-line strings start with `< 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } - // Decode! - if err := d.decode(fieldName, o, val); err != nil { - return err - } + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) - // Set the value on the map - resultMap.SetMapIndex(key, val) + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) } // Set the final map if we can @@ -256,13 +369,13 @@ func (d *decoder) decodeMap(name string, o *hcl.Object, result reflect.Value) er return nil } -func (d *decoder) decodePtr(name string, o *hcl.Object, result reflect.Value) error { +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. 
resultType := result.Type() resultElemType := resultType.Elem() val := reflect.New(resultElemType) - if err := d.decode(name, o, reflect.Indirect(val)); err != nil { + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { return err } @@ -270,7 +383,7 @@ func (d *decoder) decodePtr(name string, o *hcl.Object, result reflect.Value) er return nil } -func (d *decoder) decodeSlice(name string, o *hcl.Object, result reflect.Value) error { +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { // If we have an interface, then we can address the interface, // but not the slice itself, so get the element but set the interface set := result @@ -287,52 +400,86 @@ func (d *decoder) decodeSlice(name string, o *hcl.Object, result reflect.Value) resultSliceType, 0, 0) } - // Determine how we're doing this - expand := true - switch o.Type { - case hcl.ValueTypeObject: - expand = false + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List default: - // Array or anything else: we expand values and take it all + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } } - i := 0 - for _, o := range o.Elem(expand) { + for i, item := range items { fieldName := fmt.Sprintf("%s[%d]", name, i) // Decode val := reflect.Indirect(reflect.New(resultElemType)) - if err := d.decode(fieldName, o, val); err != nil { + if err := d.decode(fieldName, item, val); err != nil { return err } // Append it onto the slice result = reflect.Append(result, val) - - i += 1 } set.Set(result) return nil } -func (d *decoder) decodeString(name string, o *hcl.Object, result reflect.Value) error { - switch o.Type { - case hcl.ValueTypeInt: - result.Set(reflect.ValueOf( 
- strconv.FormatInt(int64(o.Value.(int)), 10)).Convert(result.Type())) - case hcl.ValueTypeString: - result.Set(reflect.ValueOf(o.Value.(string)).Convert(result.Type())) - default: - return fmt.Errorf("%s: unknown type to string: %v", name, o.Type) +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } } - return nil + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } } -func (d *decoder) decodeStruct(name string, o *hcl.Object, result reflect.Value) error { - if o.Type != hcl.ValueTypeObject { - return fmt.Errorf("%s: not an object type for struct (%v)", name, o.Type) +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } } // This slice will keep track of all the structs we'll be decoding. 
@@ -355,9 +502,11 @@ func (d *decoder) decodeStruct(name string, o *hcl.Object, result reflect.Value) if fieldType.Anonymous { fieldKind := fieldType.Type.Kind() if fieldKind != reflect.Struct { - return fmt.Errorf( - "%s: unsupported type to struct: %s", - fieldType.Name, fieldKind) + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } } // We have an embedded field. We "squash" the fields down @@ -401,21 +550,23 @@ func (d *decoder) decodeStruct(name string, o *hcl.Object, result reflect.Value) fieldName := fieldType.Name - // This is whether or not we expand the object into its children - // later. - expand := false - tagValue := fieldType.Tag.Get(tagName) tagParts := strings.SplitN(tagValue, ",", 2) if len(tagParts) >= 2 { switch tagParts[1] { - case "expand": - expand = true case "decodedFields": decodedFieldsVal = append(decodedFieldsVal, field) continue case "key": - field.SetString(o.Key) + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + field.SetString(item.Keys[0].Token.Value().(string)) continue case "unusedKeys": unusedKeysVal = append(unusedKeysVal, field) @@ -427,9 +578,13 @@ func (d *decoder) decodeStruct(name string, o *hcl.Object, result reflect.Value) fieldName = tagParts[0] } - // Find the element matching this name - obj := o.Get(fieldName, true) - if obj == nil { + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { continue } @@ -439,8 +594,18 @@ func (d *decoder) decodeStruct(name string, o *hcl.Object, result reflect.Value) // Create the field name and decode. 
We range over the elements // because we actually want the value. fieldName = fmt.Sprintf("%s.%s", name, fieldName) - for _, obj := range obj.Elem(expand) { - if err := d.decode(fieldName, obj, field); err != nil { + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, field); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, field); err != nil { return err } } @@ -457,27 +622,14 @@ func (d *decoder) decodeStruct(name string, o *hcl.Object, result reflect.Value) } } - // If we want to know what keys are unused, compile that - if len(unusedKeysVal) > 0 { - /* - unusedKeys := make([]string, 0, int(obj.Len())-len(usedKeys)) - - for _, elem := range obj.Elem { - k := elem.Key() - if _, ok := usedKeys[k]; !ok { - unusedKeys = append(unusedKeys, k) - } - } - - if len(unusedKeys) == 0 { - unusedKeys = nil - } + return nil +} - for _, v := range unusedKeysVal { - v.Set(reflect.ValueOf(unusedKeys)) - } - */ +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node } - - return nil + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() } diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go new file mode 100644 index 0000000000000..8ab85964d207c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder_test.go @@ -0,0 +1,701 @@ +package hcl + +import ( + "io/ioutil" + "path/filepath" + "reflect" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" +) + +func TestDecode_interface(t *testing.T) { + cases := []struct { + File string + Err bool + Out interface{} + }{ + { + "basic.hcl", + false, + map[string]interface{}{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}", + }, + }, + { 
+ "basic_squish.hcl", + false, + map[string]interface{}{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}", + "foo-bar": "baz", + }, + }, + { + "empty.hcl", + false, + map[string]interface{}{ + "resource": []map[string]interface{}{ + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{}, + }, + }, + }, + }, + }, + { + "tfvars.hcl", + false, + map[string]interface{}{ + "regularvar": "Should work", + "map.key1": "Value", + "map.key2": "Other value", + }, + }, + { + "escape.hcl", + false, + map[string]interface{}{ + "foo": "bar\"baz\\n", + }, + }, + { + "interpolate_escape.hcl", + false, + map[string]interface{}{ + "foo": "${file(\"bing/bong.txt\")}", + }, + }, + { + "float.hcl", + false, + map[string]interface{}{ + "a": 1.02, + }, + }, + { + "multiline_bad.hcl", + true, + nil, + }, + { + "multiline_no_marker.hcl", + true, + nil, + }, + { + "multiline.hcl", + false, + map[string]interface{}{"foo": "bar\nbaz\n"}, + }, + { + "multiline_no_eof.hcl", + false, + map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"}, + }, + { + "multiline.json", + false, + map[string]interface{}{"foo": "bar\nbaz"}, + }, + { + "scientific.json", + false, + map[string]interface{}{ + "a": 1e-10, + "b": 1e+10, + "c": 1e10, + "d": 1.2e-10, + "e": 1.2e+10, + "f": 1.2e10, + }, + }, + { + "scientific.hcl", + false, + map[string]interface{}{ + "a": 1e-10, + "b": 1e+10, + "c": 1e10, + "d": 1.2e-10, + "e": 1.2e+10, + "f": 1.2e10, + }, + }, + { + "terraform_heroku.hcl", + false, + map[string]interface{}{ + "name": "terraform-test-app", + "config_vars": []map[string]interface{}{ + map[string]interface{}{ + "FOO": "bar", + }, + }, + }, + }, + { + "structure_multi.hcl", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "baz": []map[string]interface{}{ + map[string]interface{}{"key": 7}, + }, + }, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{"key": 12}, + }, + }, + }, + }, + }, 
+ { + "structure_multi.json", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "baz": []map[string]interface{}{ + map[string]interface{}{"key": 7}, + }, + }, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{"key": 12}, + }, + }, + }, + }, + }, + { + "structure_list.hcl", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "key": 7, + }, + map[string]interface{}{ + "key": 12, + }, + }, + }, + }, + { + "structure_list.json", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "key": 7, + }, + map[string]interface{}{ + "key": 12, + }, + }, + }, + }, + { + "structure_list_deep.json", + false, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "name": "terraform_example", + "ingress": []map[string]interface{}{ + map[string]interface{}{ + "from_port": 22, + }, + map[string]interface{}{ + "from_port": 80, + }, + }, + }, + }, + }, + }, + }, + }, + + { + "nested_block_comment.hcl", + false, + map[string]interface{}{ + "bar": "value", + }, + }, + + { + "unterminated_block_comment.hcl", + true, + nil, + }, + + { + "unterminated_brace.hcl", + true, + nil, + }, + + { + "object_list.json", + false, + map[string]interface{}{ + "resource": []map[string]interface{}{ + map[string]interface{}{ + "aws_instance": []map[string]interface{}{ + map[string]interface{}{ + "db": []map[string]interface{}{ + map[string]interface{}{ + "vpc": "foo", + "provisioner": []map[string]interface{}{ + map[string]interface{}{ + "file": []map[string]interface{}{ + map[string]interface{}{ + "source": "foo", + "destination": "bar", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range cases { + t.Logf("Testing: %s", tc.File) + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File)) + if err != nil 
{ + t.Fatalf("err: %s", err) + } + + var out interface{} + err = Decode(&out, string(d)) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.File, err) + } + + if !reflect.DeepEqual(out, tc.Out) { + t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) + } + } +} + +func TestDecode_equal(t *testing.T) { + cases := []struct { + One, Two string + }{ + { + "basic.hcl", + "basic.json", + }, + { + "float.hcl", + "float.json", + }, + /* + { + "structure.hcl", + "structure.json", + }, + */ + { + "structure.hcl", + "structure_flat.json", + }, + { + "terraform_heroku.hcl", + "terraform_heroku.json", + }, + } + + for _, tc := range cases { + p1 := filepath.Join(fixtureDir, tc.One) + p2 := filepath.Join(fixtureDir, tc.Two) + + d1, err := ioutil.ReadFile(p1) + if err != nil { + t.Fatalf("err: %s", err) + } + + d2, err := ioutil.ReadFile(p2) + if err != nil { + t.Fatalf("err: %s", err) + } + + var i1, i2 interface{} + err = Decode(&i1, string(d1)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = Decode(&i2, string(d2)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(i1, i2) { + t.Fatalf( + "%s != %s\n\n%#v\n\n%#v", + tc.One, tc.Two, + i1, i2) + } + } +} + +func TestDecode_flatMap(t *testing.T) { + var val map[string]map[string]string + + err := Decode(&val, testReadFile(t, "structure_flatmap.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]map[string]string{ + "foo": map[string]string{ + "foo": "bar", + "key": "7", + }, + } + + if !reflect.DeepEqual(val, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected) + } +} + +func TestDecode_structure(t *testing.T) { + type V struct { + Key int + Foo string + } + + var actual V + + err := Decode(&actual, testReadFile(t, "flat.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := V{ + Key: 7, + Foo: "bar", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Actual: %#v\n\nExpected: 
%#v", actual, expected) + } +} + +func TestDecode_structurePtr(t *testing.T) { + type V struct { + Key int + Foo string + } + + var actual *V + + err := Decode(&actual, testReadFile(t, "flat.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &V{ + Key: 7, + Foo: "bar", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected) + } +} + +func TestDecode_structureArray(t *testing.T) { + // This test is extracted from a failure in Consul (consul.io), + // hence the interesting structure naming. + + type KeyPolicyType string + + type KeyPolicy struct { + Prefix string `hcl:",key"` + Policy KeyPolicyType + } + + type Policy struct { + Keys []KeyPolicy `hcl:"key,expand"` + } + + expected := Policy{ + Keys: []KeyPolicy{ + KeyPolicy{ + Prefix: "", + Policy: "read", + }, + KeyPolicy{ + Prefix: "foo/", + Policy: "write", + }, + KeyPolicy{ + Prefix: "foo/bar/", + Policy: "read", + }, + KeyPolicy{ + Prefix: "foo/bar/baz", + Policy: "deny", + }, + }, + } + + files := []string{ + "decode_policy.hcl", + "decode_policy.json", + } + + for _, f := range files { + var actual Policy + + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_sliceExpand(t *testing.T) { + type testInner struct { + Name string `hcl:",key"` + Key string + } + + type testStruct struct { + Services []testInner `hcl:"service,expand"` + } + + expected := testStruct{ + Services: []testInner{ + testInner{ + Name: "my-service-0", + Key: "value", + }, + testInner{ + Name: "my-service-1", + Key: "value", + }, + }, + } + + files := []string{ + "slice_expand.hcl", + } + + for _, f := range files { + t.Logf("Testing: %s", f) + + var actual testStruct + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: 
%s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_structureMap(t *testing.T) { + // This test is extracted from a failure in Terraform (terraform.io), + // hence the interesting structure naming. + + type hclVariable struct { + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + type rawConfig struct { + Variable map[string]hclVariable + } + + expected := rawConfig{ + Variable: map[string]hclVariable{ + "foo": hclVariable{ + Default: "bar", + Description: "bar", + Fields: []string{"Default", "Description"}, + }, + + "amis": hclVariable{ + Default: []map[string]interface{}{ + map[string]interface{}{ + "east": "foo", + }, + }, + Fields: []string{"Default"}, + }, + }, + } + + files := []string{ + "decode_tf_variable.hcl", + "decode_tf_variable.json", + } + + for _, f := range files { + t.Logf("Testing: %s", f) + + var actual rawConfig + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_interfaceNonPointer(t *testing.T) { + var value interface{} + err := Decode(value, testReadFile(t, "basic_int_string.hcl")) + if err == nil { + t.Fatal("should error") + } +} + +func TestDecode_intString(t *testing.T) { + var value struct { + Count int + } + + err := Decode(&value, testReadFile(t, "basic_int_string.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if value.Count != 3 { + t.Fatalf("bad: %#v", value.Count) + } +} + +func TestDecode_Node(t *testing.T) { + // given + var value struct { + Content ast.Node + Nested struct { + Content ast.Node + } + } + + content := ` +content { + hello = "world" +} +` + + // when + err := Decode(&value, content) + + // then + if err != nil { + t.Errorf("unable to decode 
content, %v", err) + return + } + + // verify ast.Node can be decoded later + var v map[string]interface{} + err = DecodeObject(&v, value.Content) + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + if v["hello"] != "world" { + t.Errorf("expected mapping to be returned") + } +} + +func TestDecode_NestedNode(t *testing.T) { + // given + var value struct { + Nested struct { + Content ast.Node + } + } + + content := ` +nested "content" { + hello = "world" +} +` + + // when + err := Decode(&value, content) + + // then + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + // verify ast.Node can be decoded later + var v map[string]interface{} + err = DecodeObject(&v, value.Nested.Content) + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + if v["hello"] != "world" { + t.Errorf("expected mapping to be returned") + } +} + +// https://github.com/hashicorp/hcl/issues/60 +func TestDecode_topLevelKeys(t *testing.T) { + type Template struct { + Source string + } + + templates := struct { + Templates []*Template `hcl:"template"` + }{} + + err := Decode(&templates, ` + template { + source = "blah" + } + + template { + source = "blahblah" + }`) + + if err != nil { + t.Fatal(err) + } + + if templates.Templates[0].Source != "blah" { + t.Errorf("bad source: %s", templates.Templates[0].Source) + } + + if templates.Templates[1].Source != "blahblah" { + t.Errorf("bad source: %s", templates.Templates[1].Source) + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go index 14bd9ba68c5e7..575a20b50b5c1 100644 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -1,4 +1,4 @@ -// hcl is a package for decoding HCL into usable Go structures. +// Package hcl decodes HCL into usable Go structures. // // hcl input can come in either pure HCL format or JSON format. 
// It can be parsed into an AST, and then decoded into a structure, diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 0000000000000..f8bb71a047062 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,211 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. 
+func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. 
+ Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. +type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // associated line comment, only when used in a list + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. 
+type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go new file mode 100644 index 0000000000000..942256cadcf45 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go @@ -0,0 +1,200 @@ +package ast + +import ( + "reflect" + "strings" + "testing" + + "github.com/hashicorp/hcl/hcl/token" +) + +func TestObjectListFilter(t *testing.T) { + var cases = []struct { + Filter []string + Input []*ObjectItem + Output []*ObjectItem + }{ + { + []string{"foo"}, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{ + Token: token.Token{Type: token.STRING, Text: `"foo"`}, + }, + }, + }, + }, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{}, + }, + }, + }, + + { + []string{"foo"}, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + }, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + }, + }, + } + + for _, tc := range cases { + input := &ObjectList{Items: tc.Input} + expected := &ObjectList{Items: tc.Output} + if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) { + t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual) + } + } +} + +func TestWalk(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: 
[]*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}}, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + order := []string{ + "*ast.ObjectList", + "*ast.ObjectItem", + "*ast.ObjectKey", + "*ast.ObjectKey", + "*ast.LiteralType", + "*ast.ObjectItem", + "*ast.ObjectKey", + } + count := 0 + + Walk(node, func(n Node) (Node, bool) { + if n == nil { + return n, false + } + + typeName := reflect.TypeOf(n).String() + if order[count] != typeName { + t.Errorf("expected '%s' got: '%s'", order[count], typeName) + } + count++ + return n, true + }) +} + +func TestWalkEquality(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + rewritten := Walk(node, func(n Node) (Node, bool) { return n, true }) + + newNode, ok := rewritten.(*ObjectList) + if !ok { + t.Fatalf("expected Objectlist, got %T", rewritten) + } + + if !reflect.DeepEqual(node, newNode) { + t.Fatal("rewritten node is not equal to the given node") + } + + if len(newNode.Items) != 2 { + t.Error("expected newNode length 2, got: %d", len(newNode.Items)) + } + + expected := []string{ + `"foo"`, + `"bar"`, + } + + for i, item := range newNode.Items { + if len(item.Keys) != 1 { + t.Error("expected keys newNode length 1, got: %d", len(item.Keys)) + } + + if item.Keys[0].Token.Text != expected[i] { + t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text) + } + + if item.Val != nil { + t.Errorf("expected item value should be nil") + } + } +} + +func 
TestWalkRewrite(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + suffix := "_example" + node = Walk(node, func(n Node) (Node, bool) { + switch i := n.(type) { + case *ObjectKey: + i.Token.Text = i.Token.Text + suffix + n = i + } + return n, true + }).(*ObjectList) + + Walk(node, func(n Node) (Node, bool) { + switch i := n.(type) { + case *ObjectKey: + if !strings.HasSuffix(i.Token.Text, suffix) { + t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix) + } + } + return n, true + }) + +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 0000000000000..ba07ad42b022e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. 
+func Walk(node Node, fn WalkFunc) Node {
+	rewritten, ok := fn(node)
+	if !ok {
+		return rewritten
+	}
+
+	switch n := node.(type) {
+	case *File:
+		n.Node = Walk(n.Node, fn)
+	case *ObjectList:
+		for i, item := range n.Items {
+			n.Items[i] = Walk(item, fn).(*ObjectItem)
+		}
+	case *ObjectKey:
+		// nothing to do
+	case *ObjectItem:
+		for i, k := range n.Keys {
+			n.Keys[i] = Walk(k, fn).(*ObjectKey)
+		}
+
+		if n.Val != nil {
+			n.Val = Walk(n.Val, fn)
+		}
+	case *LiteralType:
+		// nothing to do
+	case *ListType:
+		for i, l := range n.List {
+			n.List[i] = Walk(l, fn)
+		}
+	case *ObjectType:
+		n.List = Walk(n.List, fn).(*ObjectList)
+	default:
+		// should we panic here?
+		// NOTE(review): an unknown node kind is only reported to stdout and the
+		// walk continues — it is not surfaced as an error. Confirm upstream intent.
+		fmt.Printf("unknown type: %T\n", n)
+	}
+
+	// Per the Walk contract above, fn is invoked once more with nil after all
+	// of node's children have been visited.
+	fn(nil)
+	return rewritten
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
new file mode 100644
index 0000000000000..afc1e4eb12a24
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
@@ -0,0 +1,164 @@
+// Derivative work from:
+// - https://golang.org/src/cmd/gofmt/gofmt.go
+// - https://github.com/fatih/hclfmt
+
+package fmtcmd
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/printer"
+)
+
+var (
+	// ErrWriteStdin is returned by Run when the write option is combined with
+	// reading from standard input (there is no file to write back to).
+	ErrWriteStdin = errors.New("cannot use write option with standard input")
+)
+
+type Options struct {
+	List  bool // list files whose formatting differs
+	Write bool // write result to (source) file instead of stdout
+	Diff  bool // display diffs instead of rewriting files
+}
+
+// isValidFile reports whether f is a non-directory, non-hidden (no leading
+// dot) file whose name ends in "." followed by one of the given extensions.
+func isValidFile(f os.FileInfo, extensions []string) bool {
+	if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
+		for _, ext := range extensions {
+			if strings.HasSuffix(f.Name(), "."+ext) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// If in == nil, the source is the contents of the file with the given filename.
+func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error { + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + res, err := printer.Format(src) + if err != nil { + return err + } + // Files should end with newlines + res = append(res, []byte("\n")...) + + if !bytes.Equal(src, res) { + // formatting has changed + if opts.List { + fmt.Fprintln(out, filename) + } + if opts.Write { + err = ioutil.WriteFile(filename, res, 0644) + if err != nil { + return err + } + } + if opts.Diff { + data, err := diff(src, res) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename) + out.Write(data) + } + } + + if !opts.List && !opts.Write && !opts.Diff { + _, err = out.Write(res) + } + + return err +} + +func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error { + visitFile := func(path string, f os.FileInfo, err error) error { + if err == nil && isValidFile(f, extensions) { + err = processFile(path, nil, stdout, false, opts) + } + return err + } + + return filepath.Walk(path, visitFile) +} + +func Run( + paths, extensions []string, + stdin io.Reader, + stdout io.Writer, + opts Options, +) error { + if len(paths) == 0 { + if opts.Write { + return ErrWriteStdin + } + if err := processFile("", stdin, stdout, true, opts); err != nil { + return err + } + return nil + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + return err + case dir.IsDir(): + if err := walkDir(path, extensions, stdout, opts); err != nil { + return err + } + default: + if err := processFile(path, nil, stdout, false, opts); err != nil { + return err + } + } + } + + return nil +} + +func diff(b1, b2 []byte) (data []byte, err error) { + f1, err := ioutil.TempFile("", "") + if err != nil { + return + } + 
defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go new file mode 100644 index 0000000000000..4467b3eb6459b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go @@ -0,0 +1,431 @@ +package fmtcmd + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "syscall" + "testing" +) + +var fixtureExtensions = []string{"hcl"} + +func init() { + sort.Sort(ByFilename(fixtures)) +} + +func TestIsValidFile(t *testing.T) { + const fixtureDir = "./test-fixtures" + + cases := []struct { + Path string + Expected bool + }{ + {"good.hcl", true}, + {".hidden.ignore", false}, + {"file.ignore", false}, + {"dir.ignore", false}, + } + + for _, tc := range cases { + file, err := os.Stat(filepath.Join(fixtureDir, tc.Path)) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if res := isValidFile(file, fixtureExtensions); res != tc.Expected { + t.Errorf("want: %b, got: %b", tc.Expected, res) + } + } +} + +func TestRunMultiplePaths(t *testing.T) { + path1, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path1) + path2, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path2) + + var expectedOut bytes.Buffer + for _, path := range []string{path1, path2} { + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + 
expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") + } + } + } + + _, stdout := mockIO() + err = Run( + []string{path1, path2}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunSubDirectories(t *testing.T) { + pathParent, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(pathParent) + + path1, err := renderFixtures(pathParent) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + path2, err := renderFixtures(pathParent) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + paths := []string{path1, path2} + sort.Strings(paths) + + var expectedOut bytes.Buffer + for _, path := range paths { + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") + } + } + } + + _, stdout := mockIO() + err = Run( + []string{pathParent}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunStdin(t *testing.T) { + var expectedOut bytes.Buffer + for i, fixture := range fixtures { + if i != 0 { + expectedOut.WriteString("\n") + } + expectedOut.Write(fixture.golden) + } + + stdin, stdout := mockIO() + for _, fixture := range fixtures { + stdin.Write(fixture.input) + } + + err := Run( + []string{}, + fixtureExtensions, + stdin, stdout, + Options{}, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func 
TestRunStdinAndWrite(t *testing.T) { + var expectedOut = []byte{} + + stdin, stdout := mockIO() + stdin.WriteString("") + err := Run( + []string{}, []string{}, + stdin, stdout, + Options{ + Write: true, + }, + ) + + if err != ErrWriteStdin { + t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err) + } + if !bytes.Equal(stdout.Bytes(), expectedOut) { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunFileError(t *testing.T) { + path, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + filename := filepath.Join(path, "unreadable.hcl") + + var expectedError = &os.PathError{ + Op: "open", + Path: filename, + Err: syscall.EACCES, + } + + err = ioutil.WriteFile(filename, []byte{}, 0000) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{}, + ) + + if !reflect.DeepEqual(err, expectedError) { + t.Errorf("error want: %#v, got: %#v", expectedError, err) + } +} + +func TestRunNoOptions(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + expectedOut.Write(fixture.golden) + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{}, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunList(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, 
fixture.filename))) + } + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunWrite(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + Write: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + for _, fixture := range fixtures { + res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename)) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !bytes.Equal(res, fixture.golden) { + t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res) + } + } +} + +func TestRunDiff(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + if len(fixture.diff) > 0 { + expectedOut.WriteString( + regexp.QuoteMeta( + fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename), + ), + ) + // Need to use regex to ignore datetimes in diff. 
+ expectedOut.WriteString(`--- .+?\n`) + expectedOut.WriteString(`\+\+\+ .+?\n`) + expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff))) + } + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + Diff: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !regexp.MustCompile(expectedOut.String()).Match(stdout.Bytes()) { + t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOut, stdout) + } +} + +func mockIO() (stdin, stdout *bytes.Buffer) { + return new(bytes.Buffer), new(bytes.Buffer) +} + +type fixture struct { + filename string + input, golden, diff []byte +} + +type ByFilename []fixture + +func (s ByFilename) Len() int { return len(s) } +func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) } + +var fixtures = []fixture{ + { + "noop.hcl", + []byte(`resource "aws_security_group" "firewall" { + count = 5 +} +`), + []byte(`resource "aws_security_group" "firewall" { + count = 5 +} +`), + []byte(``), + }, { + "align_equals.hcl", + []byte(`variable "foo" { + default = "bar" + description = "bar" +} +`), + []byte(`variable "foo" { + default = "bar" + description = "bar" +} +`), + []byte(`@@ -1,4 +1,4 @@ + variable "foo" { +- default = "bar" ++ default = "bar" + description = "bar" + } +`), + }, { + "indentation.hcl", + []byte(`provider "aws" { + access_key = "foo" + secret_key = "bar" +} +`), + []byte(`provider "aws" { + access_key = "foo" + secret_key = "bar" +} +`), + []byte(`@@ -1,4 +1,4 @@ + provider "aws" { +- access_key = "foo" +- secret_key = "bar" ++ access_key = "foo" ++ secret_key = "bar" + } +`), + }, +} + +// parent can be an empty string, in which case the system's default +// temporary directory will be used. 
+func renderFixtures(parent string) (path string, err error) {
+	path, err = ioutil.TempDir(parent, "")
+	if err != nil {
+		return "", err
+	}
+
+	for _, fixture := range fixtures {
+		err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644)
+		if err != nil {
+			// Remove the partially-populated temp dir so a failed render
+			// leaves nothing behind for the caller to clean up.
+			os.RemoveAll(path)
+			return "", err
+		}
+	}
+
+	return path, nil
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
new file mode 100644
index 0000000000000..9977a2836c1a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
new file mode 100644
index 0000000000000..9977a2836c1a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/lex.go b/vendor/github.com/hashicorp/hcl/hcl/lex.go
deleted file mode 100644
index 54eb1c8abbfb5..0000000000000
--- a/vendor/github.com/hashicorp/hcl/hcl/lex.go
+++ /dev/null
@@ -1,445 +0,0 @@
-package hcl
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-	"unicode"
-	"unicode/utf8"
-)
-
-//go:generate go tool yacc -p "hcl" parse.y
-
-// The parser expects the lexer to return 0 on EOF.
-const lexEOF = 0
-
-// The parser uses the type Lex as a lexer. It must provide
-// the methods Lex(*SymType) int and Error(string).
-type hclLex struct {
-	Input string
-
-	lastNumber        bool
-	pos               int
-	width             int
-	col, line         int
-	lastCol, lastLine int
-	err               error
-}
-
-// The parser calls this method to get each new token.
-func (x *hclLex) Lex(yylval *hclSymType) int { - for { - c := x.next() - if c == lexEOF { - return lexEOF - } - - // Ignore all whitespace except a newline which we handle - // specially later. - if unicode.IsSpace(c) { - x.lastNumber = false - continue - } - - // Consume all comments - switch c { - case '#': - fallthrough - case '/': - // Starting comment - if !x.consumeComment(c) { - return lexEOF - } - continue - } - - // If it is a number, lex the number - if c >= '0' && c <= '9' { - x.lastNumber = true - x.backup() - return x.lexNumber(yylval) - } - - // This is a hacky way to find 'e' and lex it, but it works. - if x.lastNumber { - switch c { - case 'e': - fallthrough - case 'E': - switch x.next() { - case '+': - return EPLUS - case '-': - return EMINUS - default: - x.backup() - return EPLUS - } - } - } - x.lastNumber = false - - switch c { - case '.': - return PERIOD - case '-': - return MINUS - case ',': - return COMMA - case '=': - return EQUAL - case '[': - return LEFTBRACKET - case ']': - return RIGHTBRACKET - case '{': - return LEFTBRACE - case '}': - return RIGHTBRACE - case '"': - return x.lexString(yylval) - case '<': - return x.lexHeredoc(yylval) - default: - x.backup() - return x.lexId(yylval) - } - } -} - -func (x *hclLex) consumeComment(c rune) bool { - single := c == '#' - if !single { - c = x.next() - if c != '/' && c != '*' { - x.backup() - x.createErr(fmt.Sprintf("comment expected, got '%c'", c)) - return false - } - - single = c == '/' - } - - nested := 1 - for { - c = x.next() - if c == lexEOF { - x.backup() - if single { - // Single line comments can end with an EOF - return true - } - - // Multi-line comments must end with a */ - x.createErr(fmt.Sprintf("end of multi-line comment expected, got EOF")) - return false - } - - // Single line comments continue until a '\n' - if single { - if c == '\n' { - return true - } - - continue - } - - // Multi-line comments continue until a '*/' - switch c { - case '/': - c = x.next() - if c == '*' { - 
nested++ - } else { - x.backup() - } - case '*': - c = x.next() - if c == '/' { - return true - } else { - x.backup() - } - default: - // Continue - } - } -} - -// lexId lexes an identifier -func (x *hclLex) lexId(yylval *hclSymType) int { - var b bytes.Buffer - first := true - for { - c := x.next() - if c == lexEOF { - break - } - - if !unicode.IsDigit(c) && !unicode.IsLetter(c) && - c != '_' && c != '-' && c != '.' { - x.backup() - - if first { - x.createErr("Invalid identifier") - return lexEOF - } - - break - } - - first = false - if _, err := b.WriteRune(c); err != nil { - return lexEOF - } - } - - yylval.str = b.String() - - switch yylval.str { - case "true": - yylval.b = true - return BOOL - case "false": - yylval.b = false - return BOOL - } - - return IDENTIFIER -} - -// lexHeredoc extracts a string from the input in heredoc format -func (x *hclLex) lexHeredoc(yylval *hclSymType) int { - if x.next() != '<' { - x.createErr("Heredoc must start with <<") - return lexEOF - } - - // Now determine the marker - var buf bytes.Buffer - for { - c := x.next() - if c == lexEOF { - return lexEOF - } - - // Newline signals the end of the marker - if c == '\n' { - break - } - - if _, err := buf.WriteRune(c); err != nil { - return lexEOF - } - } - - marker := buf.String() - if marker == "" { - x.createErr("Heredoc must have a marker, e.g. < 0 { - for _, c := range cs { - if _, err := buf.WriteRune(c); err != nil { - return lexEOF - } - } - } - } - - if c == lexEOF { - return lexEOF - } - - // If we hit a newline, then reset to check - if c == '\n' { - check = true - } - - if _, err := buf.WriteRune(c); err != nil { - return lexEOF - } - } - - yylval.str = buf.String() - return STRING -} - -// lexNumber lexes out a number -func (x *hclLex) lexNumber(yylval *hclSymType) int { - var b bytes.Buffer - gotPeriod := false - for { - c := x.next() - if c == lexEOF { - break - } - - if c == '.' 
{ - if gotPeriod { - x.backup() - break - } - - gotPeriod = true - } else if c < '0' || c > '9' { - x.backup() - break - } - - if _, err := b.WriteRune(c); err != nil { - x.createErr(fmt.Sprintf("Internal error: %s", err)) - return lexEOF - } - } - - if !gotPeriod { - v, err := strconv.ParseInt(b.String(), 0, 0) - if err != nil { - x.createErr(fmt.Sprintf("Expected number: %s", err)) - return lexEOF - } - - yylval.num = int(v) - return NUMBER - } - - f, err := strconv.ParseFloat(b.String(), 64) - if err != nil { - x.createErr(fmt.Sprintf("Expected float: %s", err)) - return lexEOF - } - - yylval.f = float64(f) - return FLOAT -} - -// lexString extracts a string from the input -func (x *hclLex) lexString(yylval *hclSymType) int { - braces := 0 - - var b bytes.Buffer - for { - c := x.next() - if c == lexEOF { - break - } - - // String end - if c == '"' && braces == 0 { - break - } - - // If we hit a newline, then its an error - if c == '\n' { - x.createErr(fmt.Sprintf("Newline before string closed")) - return lexEOF - } - - // If we're escaping a quote, then escape the quote - if c == '\\' { - n := x.next() - switch n { - case '"': - c = n - case 'n': - c = '\n' - case '\\': - c = n - default: - x.backup() - } - } - - // If we're starting into variable, mark it - if braces == 0 && c == '$' && x.peek() == '{' { - braces += 1 - - if _, err := b.WriteRune(c); err != nil { - return lexEOF - } - c = x.next() - } else if braces > 0 && c == '{' { - braces += 1 - } - if braces > 0 && c == '}' { - braces -= 1 - } - - if _, err := b.WriteRune(c); err != nil { - return lexEOF - } - } - - yylval.str = b.String() - return STRING -} - -// Return the next rune for the lexer. 
-func (x *hclLex) next() rune { - if int(x.pos) >= len(x.Input) { - x.width = 0 - return lexEOF - } - - r, w := utf8.DecodeRuneInString(x.Input[x.pos:]) - x.width = w - x.pos += x.width - - x.col += 1 - if x.line == 0 { - x.line = 1 - } - if r == '\n' { - x.line += 1 - x.col = 0 - } - - return r -} - -// peek returns but does not consume the next rune in the input -func (x *hclLex) peek() rune { - r := x.next() - x.backup() - return r -} - -// backup steps back one rune. Can only be called once per next. -func (x *hclLex) backup() { - x.col -= 1 - x.pos -= x.width -} - -// createErr records the given error -func (x *hclLex) createErr(msg string) { - x.err = fmt.Errorf("Line %d, column %d: %s", x.line, x.col, msg) -} - -// The parser calls this method on a parse error. -func (x *hclLex) Error(s string) { - x.createErr(s) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/object.go b/vendor/github.com/hashicorp/hcl/hcl/object.go deleted file mode 100644 index e7b493a504fc5..0000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/object.go +++ /dev/null @@ -1,128 +0,0 @@ -package hcl - -import ( - "fmt" - "strings" -) - -//go:generate stringer -type=ValueType - -// ValueType is an enum represnting the type of a value in -// a LiteralNode. -type ValueType byte - -const ( - ValueTypeUnknown ValueType = iota - ValueTypeFloat - ValueTypeInt - ValueTypeString - ValueTypeBool - ValueTypeNil - ValueTypeList - ValueTypeObject -) - -// Object represents any element of HCL: an object itself, a list, -// a literal, etc. -type Object struct { - Key string - Type ValueType - Value interface{} - Next *Object -} - -// GoString is an implementation of the GoStringer interface. -func (o *Object) GoString() string { - return fmt.Sprintf("*%#v", *o) -} - -// Get gets all the objects that match the given key. -// -// It returns the resulting objects as a single Object structure with -// the linked list populated. 
-func (o *Object) Get(k string, insensitive bool) *Object { - if o.Type != ValueTypeObject { - return nil - } - - for _, o := range o.Elem(true) { - if o.Key != k { - if !insensitive || !strings.EqualFold(o.Key, k) { - continue - } - } - - return o - } - - return nil -} - -// Elem returns all the elements that are part of this object. -func (o *Object) Elem(expand bool) []*Object { - if !expand { - result := make([]*Object, 0, 1) - current := o - for current != nil { - obj := *current - obj.Next = nil - result = append(result, &obj) - - current = current.Next - } - - return result - } - - if o.Value == nil { - return nil - } - - switch o.Type { - case ValueTypeList: - return o.Value.([]*Object) - case ValueTypeObject: - result := make([]*Object, 0, 5) - for _, obj := range o.Elem(false) { - result = append(result, obj.Value.([]*Object)...) - } - return result - default: - return []*Object{o} - } -} - -// Len returns the number of objects in this object structure. -func (o *Object) Len() (i int) { - current := o - for current != nil { - i += 1 - current = current.Next - } - - return -} - -// ObjectList is a list of objects. -type ObjectList []*Object - -// Flat returns a flattened list structure of the objects. -func (l ObjectList) Flat() []*Object { - m := make(map[string]*Object) - result := make([]*Object, 0, len(l)) - for _, obj := range l { - prev, ok := m[obj.Key] - if !ok { - m[obj.Key] = obj - result = append(result, obj) - continue - } - - for prev.Next != nil { - prev = prev.Next - } - prev.Next = obj - } - - return result -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parse.go b/vendor/github.com/hashicorp/hcl/hcl/parse.go deleted file mode 100644 index 21bd2a4c3b6c9..0000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "sync" - - "github.com/hashicorp/go-multierror" -) - -// hclErrors are the errors built up from parsing. These should not -// be accessed directly. 
-var hclErrors []error -var hclLock sync.Mutex -var hclResult *Object - -// Parse parses the given string and returns the result. -func Parse(v string) (*Object, error) { - hclLock.Lock() - defer hclLock.Unlock() - hclErrors = nil - hclResult = nil - - // Parse - lex := &hclLex{Input: v} - hclParse(lex) - - // If we have an error in the lexer itself, return it - if lex.err != nil { - return nil, lex.err - } - - // Build up the errors - var err error - if len(hclErrors) > 0 { - err = &multierror.Error{Errors: hclErrors} - hclResult = nil - } - - return hclResult, err -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parse.y b/vendor/github.com/hashicorp/hcl/hcl/parse.y deleted file mode 100644 index 6144a87ab37c8..0000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parse.y +++ /dev/null @@ -1,259 +0,0 @@ -// This is the yacc input for creating the parser for HCL. - -%{ -package hcl - -import ( - "fmt" - "strconv" -) - -%} - -%union { - b bool - f float64 - num int - str string - obj *Object - objlist []*Object -} - -%type float -%type int -%type list listitems objectlist -%type block number object objectitem -%type listitem -%type blockId exp objectkey - -%token BOOL -%token FLOAT -%token NUMBER -%token COMMA IDENTIFIER EQUAL NEWLINE STRING MINUS -%token LEFTBRACE RIGHTBRACE LEFTBRACKET RIGHTBRACKET PERIOD -%token EPLUS EMINUS - -%% - -top: - { - hclResult = &Object{Type: ValueTypeObject} - } -| objectlist - { - hclResult = &Object{ - Type: ValueTypeObject, - Value: ObjectList($1).Flat(), - } - } - -objectlist: - objectitem - { - $$ = []*Object{$1} - } -| objectlist objectitem - { - $$ = append($1, $2) - } - -object: - LEFTBRACE objectlist RIGHTBRACE - { - $$ = &Object{ - Type: ValueTypeObject, - Value: ObjectList($2).Flat(), - } - } -| LEFTBRACE RIGHTBRACE - { - $$ = &Object{ - Type: ValueTypeObject, - } - } - -objectkey: - IDENTIFIER - { - $$ = $1 - } -| STRING - { - $$ = $1 - } - -objectitem: - objectkey EQUAL number - { - $$ = $3 - $$.Key = $1 - } -| 
objectkey EQUAL BOOL - { - $$ = &Object{ - Key: $1, - Type: ValueTypeBool, - Value: $3, - } - } -| objectkey EQUAL STRING - { - $$ = &Object{ - Key: $1, - Type: ValueTypeString, - Value: $3, - } - } -| objectkey EQUAL object - { - $3.Key = $1 - $$ = $3 - } -| objectkey EQUAL list - { - $$ = &Object{ - Key: $1, - Type: ValueTypeList, - Value: $3, - } - } -| block - { - $$ = $1 - } - -block: - blockId object - { - $2.Key = $1 - $$ = $2 - } -| blockId block - { - $$ = &Object{ - Key: $1, - Type: ValueTypeObject, - Value: []*Object{$2}, - } - } - -blockId: - IDENTIFIER - { - $$ = $1 - } -| STRING - { - $$ = $1 - } - -list: - LEFTBRACKET listitems RIGHTBRACKET - { - $$ = $2 - } -| LEFTBRACKET listitems COMMA RIGHTBRACKET - { - $$ = $2 - } -| LEFTBRACKET RIGHTBRACKET - { - $$ = nil - } - -listitems: - listitem - { - $$ = []*Object{$1} - } -| listitems COMMA listitem - { - $$ = append($1, $3) - } - -listitem: - number - { - $$ = $1 - } -| STRING - { - $$ = &Object{ - Type: ValueTypeString, - Value: $1, - } - } - -number: - int - { - $$ = &Object{ - Type: ValueTypeInt, - Value: $1, - } - } -| float - { - $$ = &Object{ - Type: ValueTypeFloat, - Value: $1, - } - } -| int exp - { - fs := fmt.Sprintf("%d%s", $1, $2) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - $$ = &Object{ - Type: ValueTypeFloat, - Value: f, - } - } -| float exp - { - fs := fmt.Sprintf("%f%s", $1, $2) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - $$ = &Object{ - Type: ValueTypeFloat, - Value: f, - } - } - -int: - MINUS int - { - $$ = $2 * -1 - } -| NUMBER - { - $$ = $1 - } - -float: - MINUS float - { - $$ = $2 * -1 - } -| FLOAT - { - $$ = $1 - } - -exp: - EPLUS NUMBER - { - $$ = "e" + strconv.FormatInt(int64($2), 10) - } -| EMINUS NUMBER - { - $$ = "e-" + strconv.FormatInt(int64($2), 10) - } - -%% diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 
0000000000000..5c99381dfbf13 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go new file mode 100644 index 0000000000000..32399fec5d0ae --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go @@ -0,0 +1,9 @@ +package parser + +import ( + "testing" +) + +func TestPosError_impl(t *testing.T) { + var _ error = new(PosError) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 0000000000000..086c08769e629 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,422 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
+func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() 
(*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + // object + return keys, nil + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + fmt.Println("illegal") + default: + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
+func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // If there is no error, we should be at a RBRACE to end the object + if p.tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type) + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + if needComma { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token: %s. 
Expecting %s", tok.Type, token.COMMA), + } + } + + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. 
+ prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) 
+} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go new file mode 100644 index 0000000000000..ffe6eddd5f86d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go @@ -0,0 +1,334 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +func TestType(t *testing.T) { + var literals = []struct { + typ token.Type + src string + }{ + {token.STRING, `foo = "foo"`}, + {token.NUMBER, `foo = 123`}, + {token.NUMBER, `foo = -29`}, + {token.FLOAT, `foo = 123.12`}, + {token.FLOAT, `foo = -123.12`}, + {token.BOOL, `foo = true`}, + {token.HEREDOC, "foo = < 0 { + commented = true + buf.WriteByte(newline) + } + + buf.Write(p.indent([]byte(comment.Text))) + buf.WriteByte(newline) + if index != len(o.List.Items) { + buf.WriteByte(newline) // do not print on the end + } + } + } + } + + if index == len(o.List.Items) { + p.prev = o.Rbrace + break + } + + // check if we have adjacent one liner items. If yes we'll going to align + // the comments. 
+ var aligned []*ast.ObjectItem + for _, item := range o.List.Items[index:] { + // we don't group one line lists + if len(o.List.Items) == 1 { + break + } + + // one means a oneliner with out any lead comment + // two means a oneliner with lead comment + // anything else might be something else + cur := lines(string(p.objectItem(item))) + if cur > 2 { + break + } + + curPos := item.Pos() + + nextPos := token.Pos{} + if index != len(o.List.Items)-1 { + nextPos = o.List.Items[index+1].Pos() + } + + prevPos := token.Pos{} + if index != 0 { + prevPos = o.List.Items[index-1].Pos() + } + + // fmt.Println("DEBUG ----------------") + // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) + // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) + // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) + + if curPos.Line+1 == nextPos.Line { + aligned = append(aligned, item) + index++ + continue + } + + if curPos.Line-1 == prevPos.Line { + aligned = append(aligned, item) + index++ + + // finish if we have a new line or comment next. This happens + // if the next item is not adjacent + if curPos.Line+1 != nextPos.Line { + break + } + continue + } + + break + } + + // put newlines if the items are between other non aligned items. 
+ // newlines are also added if there is a standalone comment already, so + // check it too + if !commented && index != len(aligned) { + buf.WriteByte(newline) + } + + if len(aligned) >= 1 { + p.prev = aligned[len(aligned)-1].Pos() + + items := p.alignedItems(aligned) + buf.Write(p.indent(items)) + } else { + p.prev = o.List.Items[index].Pos() + + buf.Write(p.indent(p.objectItem(o.List.Items[index]))) + index++ + } + + buf.WriteByte(newline) + } + + buf.WriteString("}") + return buf.Bytes() +} + +func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { + var buf bytes.Buffer + + // find the longest key and value length, needed for alignment + var longestKeyLen int // longest key length + var longestValLen int // longest value length + for _, item := range items { + key := len(item.Keys[0].Token.Text) + val := len(p.output(item.Val)) + + if key > longestKeyLen { + longestKeyLen = key + } + + if val > longestValLen { + longestValLen = val + } + } + + for i, item := range items { + if item.LeadComment != nil { + for _, comment := range item.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range item.Keys { + keyLen := len(k.Token.Text) + buf.WriteString(k.Token.Text) + for i := 0; i < longestKeyLen-keyLen+1; i++ { + buf.WriteByte(blank) + } + + // reach end of key + if i == len(item.Keys)-1 && len(item.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + val := p.output(item.Val) + valLen := len(val) + buf.Write(val) + + if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { + for i := 0; i < longestValLen-valLen+1; i++ { + buf.WriteByte(blank) + } + + for _, comment := range item.LineComment.List { + buf.WriteString(comment.Text) + } + } + + // do not print for the last item + if i != len(items)-1 { + buf.WriteByte(newline) + } + } + + return buf.Bytes() +} + +// list returns the printable HCL form of an list type. 
+func (p *printer) list(l *ast.ListType) []byte { + var buf bytes.Buffer + buf.WriteString("[") + + var longestLine int + for _, item := range l.List { + // for now we assume that the list only contains literal types + if lit, ok := item.(*ast.LiteralType); ok { + lineLen := len(lit.Token.Text) + if lineLen > longestLine { + longestLine = lineLen + } + } + } + + insertSpaceBeforeItem := false + for i, item := range l.List { + if item.Pos().Line != l.Lbrack.Line { + // multiline list, add newline before we add each item + buf.WriteByte(newline) + insertSpaceBeforeItem = false + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(p.indent(val)) + buf.WriteString(",") + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + + if i == len(l.List)-1 { + buf.WriteByte(newline) + } + } else { + if insertSpaceBeforeItem { + buf.WriteByte(blank) + insertSpaceBeforeItem = false + } + buf.Write(p.output(item)) + if i != len(l.List)-1 { + buf.WriteString(",") + insertSpaceBeforeItem = true + } + } + + } + + buf.WriteString("]") + return buf.Bytes() +} + +// indent indents the lines of the given buffer for each non-empty line +func (p *printer) indent(buf []byte) []byte { + var prefix []byte + if p.cfg.SpacesWidth != 0 { + for i := 0; i < p.cfg.SpacesWidth; i++ { + prefix = append(prefix, blank) + } + } else { + prefix = []byte{tab} + } + + var res []byte + bol := true + for _, c := range buf { + if bol && c != '\n' { + res = append(res, prefix...) 
+ } + + res = append(res, c) + bol = c == '\n' + } + return res +} + +// unindent removes all the indentation from the tombstoned lines +func (p *printer) unindent(buf []byte) []byte { + var res []byte + for i := 0; i < len(buf); i++ { + skip := len(buf)-i <= len(unindent) + if !skip { + skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) + } + if skip { + res = append(res, buf[i]) + continue + } + + // We have a marker. we have to backtrace here and clean out + // any whitespace ahead of our tombstone up to a \n + for j := len(res) - 1; j >= 0; j-- { + if res[j] == '\n' { + break + } + + res = res[:j] + } + + // Skip the entire unindent marker + i += len(unindent) - 1 + } + + return res +} + +// heredocIndent marks all the 2nd and further lines as unindentable +func (p *printer) heredocIndent(buf []byte) []byte { + var res []byte + bol := false + for _, c := range buf { + if bol && c != '\n' { + res = append(res, unindent...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +func lines(txt string) int { + endline := 1 + for i := 0; i < len(txt); i++ { + if txt[i] == '\n' { + endline++ + } + } + return endline +} + +// ---------------------------------------------------------------------------- +// Tracing support + +func (p *printer) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + i := 2 * p.indentTrace + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) 
+} + +func trace(p *printer, msg string) *printer { + p.printTrace(msg, "(") + p.indentTrace++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *printer) { + p.indentTrace-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go new file mode 100644 index 0000000000000..fb9df58d4bfe3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -0,0 +1,64 @@ +// Package printer implements printing of AST nodes to HCL format. +package printer + +import ( + "bytes" + "io" + "text/tabwriter" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" +) + +var DefaultConfig = Config{ + SpacesWidth: 2, +} + +// A Config node controls the output of Fprint. +type Config struct { + SpacesWidth int // if set, it will use spaces instead of tabs for alignment +} + +func (c *Config) Fprint(output io.Writer, node ast.Node) error { + p := &printer{ + cfg: *c, + comments: make([]*ast.CommentGroup, 0), + standaloneComments: make([]*ast.CommentGroup, 0), + // enableTrace: true, + } + + p.collectComments(node) + + if _, err := output.Write(p.unindent(p.output(node))); err != nil { + return err + } + + // flush tabwriter, if any + var err error + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return err +} + +// Fprint "pretty-prints" an HCL node to output +// It calls Config.Fprint with default settings. +func Fprint(output io.Writer, node ast.Node) error { + return DefaultConfig.Fprint(output, node) +} + +// Format formats src HCL and returns the result. 
+func Format(src []byte) ([]byte, error) { + node, err := parser.Parse(src) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + if err := DefaultConfig.Fprint(&buf, node); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go new file mode 100644 index 0000000000000..86aa946b2923a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go @@ -0,0 +1,143 @@ +package printer + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/hashicorp/hcl/hcl/parser" +) + +var update = flag.Bool("update", false, "update golden files") + +const ( + dataDir = "testdata" +) + +type entry struct { + source, golden string +} + +// Use go test -update to create/update the respective golden files. +var data = []entry{ + {"complexhcl.input", "complexhcl.golden"}, + {"list.input", "list.golden"}, + {"comment.input", "comment.golden"}, + {"comment_aligned.input", "comment_aligned.golden"}, + {"comment_standalone.input", "comment_standalone.golden"}, +} + +func TestFiles(t *testing.T) { + for _, e := range data { + source := filepath.Join(dataDir, e.source) + golden := filepath.Join(dataDir, e.golden) + check(t, source, golden) + } +} + +func check(t *testing.T, source, golden string) { + src, err := ioutil.ReadFile(source) + if err != nil { + t.Error(err) + return + } + + res, err := format(src) + if err != nil { + t.Error(err) + return + } + + // update golden files if necessary + if *update { + if err := ioutil.WriteFile(golden, res, 0644); err != nil { + t.Error(err) + } + return + } + + // get golden + gld, err := ioutil.ReadFile(golden) + if err != nil { + t.Error(err) + return + } + + // formatted source and golden must be the same + if err := diff(source, golden, res, gld); err != nil { + t.Error(err) + return + } +} + +// diff compares a and b. 
+func diff(aname, bname string, a, b []byte) error { + var buf bytes.Buffer // holding long error message + + // compare lengths + if len(a) != len(b) { + fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b)) + } + + // compare contents + line := 1 + offs := 1 + for i := 0; i < len(a) && i < len(b); i++ { + ch := a[i] + if ch != b[i] { + fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs)) + fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs)) + fmt.Fprintf(&buf, "\n\n") + break + } + if ch == '\n' { + line++ + offs = i + 1 + } + } + + if buf.Len() > 0 { + return errors.New(buf.String()) + } + return nil +} + +// format parses src, prints the corresponding AST, verifies the resulting +// src is syntactically correct, and returns the resulting src or an error +// if any. +func format(src []byte) ([]byte, error) { + // parse src + node, err := parser.Parse(src) + if err != nil { + return nil, fmt.Errorf("parse: %s\n%s", err, src) + } + + var buf bytes.Buffer + + cfg := &Config{} + if err := cfg.Fprint(&buf, node); err != nil { + return nil, fmt.Errorf("print: %s", err) + } + + // make sure formatted output is syntactically correct + res := buf.Bytes() + + if _, err := parser.Parse(src); err != nil { + return nil, fmt.Errorf("parse: %s\n%s", err, src) + } + + return res, nil +} + +// lineAt returns the line in text starting at offset offs. 
+func lineAt(text []byte, offs int) []byte { + i := offs + for i < len(text) && text[i] != '\n' { + i++ + } + return text[offs:i] +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden new file mode 100644 index 0000000000000..e86215f53f442 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden @@ -0,0 +1,36 @@ +// A standalone comment is a comment which is not attached to any kind of node + +// This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = ["fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1, 2] // another line here + +# Another comment +variable = { + description = "bar" # another yooo + + foo { + # Nested standalone + + bar = "fatih" + } +} + +// lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 \ No newline at end of file diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input new file mode 100644 index 0000000000000..57c37ac1de308 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input @@ -0,0 +1,37 @@ +// A standalone comment is a comment which is not attached to any kind of node + + // This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = [ "fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1,2] // another line here + + # Another comment +variable = { + description = 
"bar" # another yooo + foo { + # Nested standalone + + bar = "fatih" + } +} + + // lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 + diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden new file mode 100644 index 0000000000000..e8469e5c4f9f3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden @@ -0,0 +1,32 @@ +aligned { + # We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + + default = { + bar = "example" + } + + #deneme arslan + fatih = ["fatih"] # yoo4 + + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + + default = { + bar = "example" + } + + security_groups = [ + "foo", # kenya 1 + "${aws_security_group.firewall.foo}", # kenya 2 + ] + + security_groups2 = [ + "foo", # kenya 1 + "bar", # kenya 1.5 + "${aws_security_group.firewall.foo}", # kenya 2 + "foobar", # kenya 3 + ] +} \ No newline at end of file diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input new file mode 100644 index 0000000000000..bd43ab1adca58 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input @@ -0,0 +1,28 @@ +aligned { +# We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + default = { + bar = "example" + } + #deneme arslan + fatih = ["fatih"] # yoo4 + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + default = { + bar = "example" + } + +security_groups = [ + "foo", # kenya 1 + "${aws_security_group.firewall.foo}", # kenya 2 +] + +security_groups2 = [ + "foo", # kenya 1 + "bar", # kenya 1.5 + "${aws_security_group.firewall.foo}", # kenya 2 + "foobar", # kenya 3 +] +} diff --git 
a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden new file mode 100644 index 0000000000000..962dbf2b36db1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden @@ -0,0 +1,16 @@ +// A standalone comment + +aligned { + # Standalone 1 + + a = "bar" # yoo1 + default = "bar" # yoo2 + + # Standalone 2 +} + +# Standalone 3 + +numbers = [1, 2] // another line here + +# Standalone 4 diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input new file mode 100644 index 0000000000000..4436cb16c0141 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input @@ -0,0 +1,16 @@ +// A standalone comment + +aligned { + # Standalone 1 + + a = "bar" # yoo1 + default = "bar" # yoo2 + + # Standalone 2 +} + + # Standalone 3 + +numbers = [1,2] // another line here + + # Standalone 4 diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden new file mode 100644 index 0000000000000..b733a27e461e1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden @@ -0,0 +1,54 @@ +variable "foo" { + default = "bar" + description = "bar" +} + +developer = ["fatih", "arslan"] + +provider "aws" { + access_key = "foo" + secret_key = "bar" +} + +provider "do" { + api_key = "${var.foo}" +} + +resource "aws_security_group" "firewall" { + count = 5 +} + +resource aws_instance "web" { + ami = "${var.foo}" + + security_groups = [ + "foo", + "${aws_security_group.firewall.foo}", + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } + + network_interface = { + device_index = 1 + + description = < 0 { + // common case: last character was not a 
'\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + 
// look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. 
+func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string. +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '< 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. 
If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go new file mode 100644 index 0000000000000..8abcbf340c6df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go @@ -0,0 +1,536 @@ +package scanner + +import ( + "bytes" + "fmt" + "testing" + + "github.com/hashicorp/hcl/hcl/token" + "strings" +) + +var f100 = 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + +type tokenPair struct { + tok token.Type + text string +} + +var tokenLists = map[string][]tokenPair{ + "comment": []tokenPair{ + {token.COMMENT, "//"}, + {token.COMMENT, "////"}, + {token.COMMENT, "// comment"}, + {token.COMMENT, "// /* comment */"}, + {token.COMMENT, "// // comment //"}, + {token.COMMENT, "//" + f100}, + {token.COMMENT, "#"}, + {token.COMMENT, "##"}, + {token.COMMENT, "# comment"}, + {token.COMMENT, "# /* comment */"}, + {token.COMMENT, "# # comment #"}, + {token.COMMENT, "#" + f100}, + {token.COMMENT, "/**/"}, + {token.COMMENT, "/***/"}, + {token.COMMENT, "/* comment */"}, + {token.COMMENT, "/* // comment */"}, + {token.COMMENT, "/* /* comment */"}, + {token.COMMENT, "/*\n comment\n*/"}, + {token.COMMENT, "/*" + f100 + "*/"}, + }, + "operator": []tokenPair{ + {token.LBRACK, "["}, + {token.LBRACE, "{"}, + {token.COMMA, ","}, + {token.PERIOD, "."}, + {token.RBRACK, "]"}, + {token.RBRACE, "}"}, + {token.ASSIGN, "="}, + {token.ADD, "+"}, + {token.SUB, "-"}, + }, + "bool": []tokenPair{ + {token.BOOL, "true"}, + {token.BOOL, "false"}, + }, + "ident": []tokenPair{ + {token.IDENT, "a"}, + {token.IDENT, "a0"}, + {token.IDENT, "foobar"}, + {token.IDENT, "foo-bar"}, + {token.IDENT, "abc123"}, + {token.IDENT, "LGTM"}, + {token.IDENT, "_"}, + {token.IDENT, "_abc123"}, + {token.IDENT, "abc123_"}, + {token.IDENT, "_abc_123_"}, + {token.IDENT, "_äöü"}, + {token.IDENT, "_本"}, + {token.IDENT, "äöü"}, + {token.IDENT, "本"}, + {token.IDENT, "a۰۱۸"}, + {token.IDENT, "foo६४"}, + {token.IDENT, "bar9876"}, + }, + "heredoc": []tokenPair{ + {token.HEREDOC, "< 0 for %q", s.ErrorCount, src) + } +} + +func testTokenList(t *testing.T, tokenList []tokenPair) { + // create artifical source code + buf := new(bytes.Buffer) + for _, ident := range tokenList { + fmt.Fprintf(buf, "%s\n", ident.text) + } + + s := New(buf.Bytes()) + for _, ident := range tokenList { + tok := 
s.Scan() + if tok.Type != ident.tok { + t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) + } + + if tok.Text != ident.text { + t.Errorf("text = %q want %q", tok.String(), ident.text) + } + + } +} + +func countNewlines(s string) int { + n := 0 + for _, ch := range s { + if ch == '\n' { + n++ + } + } + return n +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 0000000000000..e87ac63563552 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,245 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section, except for escaped quotes, which we handle specifically. 
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + // We special case escaped double quotes in interpolations, converting + // them to straight double quotes. + if r == '\\' { + if q, _ := utf8.DecodeRuneInString(s); q == '"' { + continue + } + } + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. 
+func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + 
err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go new file mode 100644 index 0000000000000..4a810aa38ae00 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go @@ -0,0 +1,93 @@ +package strconv + +import "testing" + +type quoteTest struct { + in string + out string + ascii string +} + +var quotetests = []quoteTest{ + {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`}, + {"\\", `"\\"`, `"\\"`}, + {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`}, + {"\u263a", `"☺"`, `"\u263a"`}, + {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`}, + {"\x04", `"\x04"`, `"\x04"`}, +} + +type unQuoteTest struct { + in string + out string +} + +var unquotetests = []unQuoteTest{ + {`""`, ""}, + {`"a"`, "a"}, + {`"abc"`, "abc"}, + {`"☺"`, "☺"}, + {`"hello world"`, "hello world"}, + {`"\xFF"`, "\xFF"}, + {`"\377"`, "\377"}, + {`"\u1234"`, "\u1234"}, + {`"\U00010111"`, "\U00010111"}, + {`"\U0001011111"`, "\U0001011111"}, + {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""}, + {`"'"`, "'"}, + {`"${file("foo")}"`, `${file("foo")}`}, + {`"${file(\"foo\")}"`, `${file("foo")}`}, + {`"echo ${var.region}${element(split(",",var.zones),0)}"`, + `echo ${var.region}${element(split(",",var.zones),0)}`}, +} + +var misquoted = []string{ + ``, + `"`, + `"a`, + `"'`, + `b"`, + `"\"`, + `"\9"`, + `"\19"`, + `"\129"`, + `'\'`, + `'\9'`, + `'\19'`, + `'\129'`, + `'ab'`, + `"\x1!"`, + `"\U12345678"`, + `"\z"`, + "`", + "`xxx", + "`\"", + `"\'"`, + `'\"'`, + "\"\n\"", + "\"\\n\n\"", + "'\n'", + `"${"`, + `"${foo{}"`, +} + +func TestUnquote(t *testing.T) { + for _, tt := range unquotetests { + if out, err := Unquote(tt.in); err != nil || out != tt.out { + t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out) + } + } + + // run the quote tests too, backward + for _, tt := range quotetests { + if in, err := Unquote(tt.out); in != tt.in { + 
t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in) + } + } + + for _, s := range misquoted { + if out, err := Unquote(s); out != "" || err != ErrSyntax { + t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 0000000000000..59c1bb72d4a4e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 0000000000000..696ee8da4c837 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,170 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // <= ValueType(len(_ValueType_index)-1) { - return fmt.Sprintf("ValueType(%d)", i) - } - return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/y.go b/vendor/github.com/hashicorp/hcl/hcl/y.go deleted file mode 100644 index f3ae8f29fc96b..0000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/y.go +++ /dev/null @@ -1,762 +0,0 @@ -//line parse.y:4 -package hcl - -import __yyfmt__ "fmt" - -//line parse.y:4 -import ( - "fmt" - "strconv" -) - -//line parse.y:13 -type hclSymType struct { - yys int - b bool - f float64 - num int - str string - obj *Object - objlist []*Object -} - -const BOOL = 57346 -const FLOAT = 57347 -const NUMBER = 57348 -const COMMA = 57349 -const IDENTIFIER = 57350 -const EQUAL = 57351 -const NEWLINE = 57352 -const STRING = 57353 -const MINUS = 57354 -const LEFTBRACE = 57355 -const RIGHTBRACE = 57356 -const LEFTBRACKET = 57357 -const RIGHTBRACKET = 57358 -const PERIOD = 
57359 -const EPLUS = 57360 -const EMINUS = 57361 - -var hclToknames = [...]string{ - "$end", - "error", - "$unk", - "BOOL", - "FLOAT", - "NUMBER", - "COMMA", - "IDENTIFIER", - "EQUAL", - "NEWLINE", - "STRING", - "MINUS", - "LEFTBRACE", - "RIGHTBRACE", - "LEFTBRACKET", - "RIGHTBRACKET", - "PERIOD", - "EPLUS", - "EMINUS", -} -var hclStatenames = [...]string{} - -const hclEofCode = 1 -const hclErrCode = 2 -const hclMaxDepth = 200 - -//line parse.y:259 - -//line yacctab:1 -var hclExca = [...]int{ - -1, 1, - 1, -1, - -2, 0, - -1, 6, - 9, 7, - -2, 17, - -1, 7, - 9, 8, - -2, 18, -} - -const hclNprod = 36 -const hclPrivate = 57344 - -var hclTokenNames []string -var hclStates []string - -const hclLast = 64 - -var hclAct = [...]int{ - - 35, 3, 21, 22, 9, 30, 31, 29, 17, 26, - 25, 11, 44, 26, 25, 18, 24, 13, 10, 23, - 24, 43, 19, 2, 42, 26, 25, 38, 39, 9, - 32, 37, 24, 26, 25, 6, 45, 27, 7, 37, - 24, 40, 36, 6, 34, 46, 7, 14, 6, 28, - 15, 7, 13, 16, 5, 41, 1, 4, 8, 33, - 20, 0, 0, 12, -} -var hclPact = [...]int{ - - 40, -1000, 40, -1000, 9, -1000, -1000, -1000, 39, -1000, - 4, -1000, -1000, 35, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -13, -13, 28, 8, -1000, -1000, 27, -1000, -1000, - 49, 18, -1000, 5, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 20, -1000, -1000, -} -var hclPgo = [...]int{ - - 0, 3, 2, 60, 59, 23, 54, 42, 11, 1, - 0, 58, 7, 57, 56, -} -var hclR1 = [...]int{ - - 0, 14, 14, 5, 5, 8, 8, 13, 13, 9, - 9, 9, 9, 9, 9, 6, 6, 11, 11, 3, - 3, 3, 4, 4, 10, 10, 7, 7, 7, 7, - 2, 2, 1, 1, 12, 12, -} -var hclR2 = [...]int{ - - 0, 0, 1, 1, 2, 3, 2, 1, 1, 3, - 3, 3, 3, 3, 1, 2, 2, 1, 1, 3, - 4, 2, 1, 3, 1, 1, 1, 1, 2, 2, - 2, 1, 2, 1, 2, 2, -} -var hclChk = [...]int{ - - -1000, -14, -5, -9, -13, -6, 8, 11, -11, -9, - 9, -8, -6, 13, 8, 11, -7, 4, 11, -8, - -3, -2, -1, 15, 12, 6, 5, -5, 14, -12, - 18, 19, -12, -4, 16, -10, -7, 11, -2, -1, - 14, 6, 6, 16, 7, 16, -10, -} -var hclDef = [...]int{ - - 1, -2, 2, 3, 0, 14, -2, -2, 0, 4, - 0, 15, 
16, 0, 17, 18, 9, 10, 11, 12, - 13, 26, 27, 0, 0, 31, 33, 0, 6, 28, - 0, 0, 29, 0, 21, 22, 24, 25, 30, 32, - 5, 34, 35, 19, 0, 20, 23, -} -var hclTok1 = [...]int{ - - 1, -} -var hclTok2 = [...]int{ - - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, -} -var hclTok3 = [...]int{ - 0, -} - -var hclErrorMessages = [...]struct { - state int - token int - msg string -}{} - -//line yaccpar:1 - -/* parser for yacc output */ - -var ( - hclDebug = 0 - hclErrorVerbose = false -) - -type hclLexer interface { - Lex(lval *hclSymType) int - Error(s string) -} - -type hclParser interface { - Parse(hclLexer) int - Lookahead() int -} - -type hclParserImpl struct { - lookahead func() int -} - -func (p *hclParserImpl) Lookahead() int { - return p.lookahead() -} - -func hclNewParser() hclParser { - p := &hclParserImpl{ - lookahead: func() int { return -1 }, - } - return p -} - -const hclFlag = -1000 - -func hclTokname(c int) string { - if c >= 1 && c-1 < len(hclToknames) { - if hclToknames[c-1] != "" { - return hclToknames[c-1] - } - } - return __yyfmt__.Sprintf("tok-%v", c) -} - -func hclStatname(s int) string { - if s >= 0 && s < len(hclStatenames) { - if hclStatenames[s] != "" { - return hclStatenames[s] - } - } - return __yyfmt__.Sprintf("state-%v", s) -} - -func hclErrorMessage(state, lookAhead int) string { - const TOKSTART = 4 - - if !hclErrorVerbose { - return "syntax error" - } - - for _, e := range hclErrorMessages { - if e.state == state && e.token == lookAhead { - return "syntax error: " + e.msg - } - } - - res := "syntax error: unexpected " + hclTokname(lookAhead) - - // To match Bison, suggest at most four expected tokens. - expected := make([]int, 0, 4) - - // Look for shiftable tokens. 
- base := hclPact[state] - for tok := TOKSTART; tok-1 < len(hclToknames); tok++ { - if n := base + tok; n >= 0 && n < hclLast && hclChk[hclAct[n]] == tok { - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - } - - if hclDef[state] == -2 { - i := 0 - for hclExca[i] != -1 || hclExca[i+1] != state { - i += 2 - } - - // Look for tokens that we accept or reduce. - for i += 2; hclExca[i] >= 0; i += 2 { - tok := hclExca[i] - if tok < TOKSTART || hclExca[i+1] == 0 { - continue - } - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - - // If the default action is to accept or reduce, give up. - if hclExca[i+1] != 0 { - return res - } - } - - for i, tok := range expected { - if i == 0 { - res += ", expecting " - } else { - res += " or " - } - res += hclTokname(tok) - } - return res -} - -func hcllex1(lex hclLexer, lval *hclSymType) (char, token int) { - token = 0 - char = lex.Lex(lval) - if char <= 0 { - token = hclTok1[0] - goto out - } - if char < len(hclTok1) { - token = hclTok1[char] - goto out - } - if char >= hclPrivate { - if char < hclPrivate+len(hclTok2) { - token = hclTok2[char-hclPrivate] - goto out - } - } - for i := 0; i < len(hclTok3); i += 2 { - token = hclTok3[i+0] - if token == char { - token = hclTok3[i+1] - goto out - } - } - -out: - if token == 0 { - token = hclTok2[1] /* unknown char */ - } - if hclDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", hclTokname(token), uint(char)) - } - return char, token -} - -func hclParse(hcllex hclLexer) int { - return hclNewParser().Parse(hcllex) -} - -func (hclrcvr *hclParserImpl) Parse(hcllex hclLexer) int { - var hcln int - var hcllval hclSymType - var hclVAL hclSymType - var hclDollar []hclSymType - _ = hclDollar // silence set and not used - hclS := make([]hclSymType, hclMaxDepth) - - Nerrs := 0 /* number of errors */ - Errflag := 0 /* error recovery flag */ - hclstate := 0 - hclchar := -1 - hcltoken := -1 // hclchar translated 
into internal numbering - hclrcvr.lookahead = func() int { return hclchar } - defer func() { - // Make sure we report no lookahead when not parsing. - hclstate = -1 - hclchar = -1 - hcltoken = -1 - }() - hclp := -1 - goto hclstack - -ret0: - return 0 - -ret1: - return 1 - -hclstack: - /* put a state and value onto the stack */ - if hclDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", hclTokname(hcltoken), hclStatname(hclstate)) - } - - hclp++ - if hclp >= len(hclS) { - nyys := make([]hclSymType, len(hclS)*2) - copy(nyys, hclS) - hclS = nyys - } - hclS[hclp] = hclVAL - hclS[hclp].yys = hclstate - -hclnewstate: - hcln = hclPact[hclstate] - if hcln <= hclFlag { - goto hcldefault /* simple state */ - } - if hclchar < 0 { - hclchar, hcltoken = hcllex1(hcllex, &hcllval) - } - hcln += hcltoken - if hcln < 0 || hcln >= hclLast { - goto hcldefault - } - hcln = hclAct[hcln] - if hclChk[hcln] == hcltoken { /* valid shift */ - hclchar = -1 - hcltoken = -1 - hclVAL = hcllval - hclstate = hcln - if Errflag > 0 { - Errflag-- - } - goto hclstack - } - -hcldefault: - /* default state action */ - hcln = hclDef[hclstate] - if hcln == -2 { - if hclchar < 0 { - hclchar, hcltoken = hcllex1(hcllex, &hcllval) - } - - /* look through exception table */ - xi := 0 - for { - if hclExca[xi+0] == -1 && hclExca[xi+1] == hclstate { - break - } - xi += 2 - } - for xi += 2; ; xi += 2 { - hcln = hclExca[xi+0] - if hcln < 0 || hcln == hcltoken { - break - } - } - hcln = hclExca[xi+1] - if hcln < 0 { - goto ret0 - } - } - if hcln == 0 { - /* error ... attempt to resume parsing */ - switch Errflag { - case 0: /* brand new error */ - hcllex.Error(hclErrorMessage(hclstate, hcltoken)) - Nerrs++ - if hclDebug >= 1 { - __yyfmt__.Printf("%s", hclStatname(hclstate)) - __yyfmt__.Printf(" saw %s\n", hclTokname(hcltoken)) - } - fallthrough - - case 1, 2: /* incompletely recovered error ... 
try again */ - Errflag = 3 - - /* find a state where "error" is a legal shift action */ - for hclp >= 0 { - hcln = hclPact[hclS[hclp].yys] + hclErrCode - if hcln >= 0 && hcln < hclLast { - hclstate = hclAct[hcln] /* simulate a shift of "error" */ - if hclChk[hclstate] == hclErrCode { - goto hclstack - } - } - - /* the current p has no shift on "error", pop stack */ - if hclDebug >= 2 { - __yyfmt__.Printf("error recovery pops state %d\n", hclS[hclp].yys) - } - hclp-- - } - /* there is no state on the stack with an error shift ... abort */ - goto ret1 - - case 3: /* no shift yet; clobber input char */ - if hclDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", hclTokname(hcltoken)) - } - if hcltoken == hclEofCode { - goto ret1 - } - hclchar = -1 - hcltoken = -1 - goto hclnewstate /* try again in the same state */ - } - } - - /* reduction by production hcln */ - if hclDebug >= 2 { - __yyfmt__.Printf("reduce %v in:\n\t%v\n", hcln, hclStatname(hclstate)) - } - - hclnt := hcln - hclpt := hclp - _ = hclpt // guard against "declared and not used" - - hclp -= hclR2[hcln] - // hclp is now the index of $0. Perform the default action. Iff the - // reduced production is ε, $1 is possibly out of range. 
- if hclp+1 >= len(hclS) { - nyys := make([]hclSymType, len(hclS)*2) - copy(nyys, hclS) - hclS = nyys - } - hclVAL = hclS[hclp+1] - - /* consult goto table to find next state */ - hcln = hclR1[hcln] - hclg := hclPgo[hcln] - hclj := hclg + hclS[hclp].yys + 1 - - if hclj >= hclLast { - hclstate = hclAct[hclg] - } else { - hclstate = hclAct[hclj] - if hclChk[hclstate] != -hcln { - hclstate = hclAct[hclg] - } - } - // dummy call; replaced with literal code - switch hclnt { - - case 1: - hclDollar = hclS[hclpt-0 : hclpt+1] - //line parse.y:39 - { - hclResult = &Object{Type: ValueTypeObject} - } - case 2: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:43 - { - hclResult = &Object{ - Type: ValueTypeObject, - Value: ObjectList(hclDollar[1].objlist).Flat(), - } - } - case 3: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:52 - { - hclVAL.objlist = []*Object{hclDollar[1].obj} - } - case 4: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:56 - { - hclVAL.objlist = append(hclDollar[1].objlist, hclDollar[2].obj) - } - case 5: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:62 - { - hclVAL.obj = &Object{ - Type: ValueTypeObject, - Value: ObjectList(hclDollar[2].objlist).Flat(), - } - } - case 6: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:69 - { - hclVAL.obj = &Object{ - Type: ValueTypeObject, - } - } - case 7: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:77 - { - hclVAL.str = hclDollar[1].str - } - case 8: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:81 - { - hclVAL.str = hclDollar[1].str - } - case 9: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:87 - { - hclVAL.obj = hclDollar[3].obj - hclVAL.obj.Key = hclDollar[1].str - } - case 10: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:92 - { - hclVAL.obj = &Object{ - Key: hclDollar[1].str, - Type: ValueTypeBool, - Value: hclDollar[3].b, - } - } - case 11: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:100 - { - hclVAL.obj = &Object{ - Key: 
hclDollar[1].str, - Type: ValueTypeString, - Value: hclDollar[3].str, - } - } - case 12: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:108 - { - hclDollar[3].obj.Key = hclDollar[1].str - hclVAL.obj = hclDollar[3].obj - } - case 13: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:113 - { - hclVAL.obj = &Object{ - Key: hclDollar[1].str, - Type: ValueTypeList, - Value: hclDollar[3].objlist, - } - } - case 14: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:121 - { - hclVAL.obj = hclDollar[1].obj - } - case 15: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:127 - { - hclDollar[2].obj.Key = hclDollar[1].str - hclVAL.obj = hclDollar[2].obj - } - case 16: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:132 - { - hclVAL.obj = &Object{ - Key: hclDollar[1].str, - Type: ValueTypeObject, - Value: []*Object{hclDollar[2].obj}, - } - } - case 17: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:142 - { - hclVAL.str = hclDollar[1].str - } - case 18: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:146 - { - hclVAL.str = hclDollar[1].str - } - case 19: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:152 - { - hclVAL.objlist = hclDollar[2].objlist - } - case 20: - hclDollar = hclS[hclpt-4 : hclpt+1] - //line parse.y:156 - { - hclVAL.objlist = hclDollar[2].objlist - } - case 21: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:160 - { - hclVAL.objlist = nil - } - case 22: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:166 - { - hclVAL.objlist = []*Object{hclDollar[1].obj} - } - case 23: - hclDollar = hclS[hclpt-3 : hclpt+1] - //line parse.y:170 - { - hclVAL.objlist = append(hclDollar[1].objlist, hclDollar[3].obj) - } - case 24: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:176 - { - hclVAL.obj = hclDollar[1].obj - } - case 25: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:180 - { - hclVAL.obj = &Object{ - Type: ValueTypeString, - Value: hclDollar[1].str, - } - } - case 26: - hclDollar = 
hclS[hclpt-1 : hclpt+1] - //line parse.y:189 - { - hclVAL.obj = &Object{ - Type: ValueTypeInt, - Value: hclDollar[1].num, - } - } - case 27: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:196 - { - hclVAL.obj = &Object{ - Type: ValueTypeFloat, - Value: hclDollar[1].f, - } - } - case 28: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:203 - { - fs := fmt.Sprintf("%d%s", hclDollar[1].num, hclDollar[2].str) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - hclVAL.obj = &Object{ - Type: ValueTypeFloat, - Value: f, - } - } - case 29: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:216 - { - fs := fmt.Sprintf("%f%s", hclDollar[1].f, hclDollar[2].str) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - hclVAL.obj = &Object{ - Type: ValueTypeFloat, - Value: f, - } - } - case 30: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:231 - { - hclVAL.num = hclDollar[2].num * -1 - } - case 31: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:235 - { - hclVAL.num = hclDollar[1].num - } - case 32: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:241 - { - hclVAL.f = hclDollar[2].f * -1 - } - case 33: - hclDollar = hclS[hclpt-1 : hclpt+1] - //line parse.y:245 - { - hclVAL.f = hclDollar[1].f - } - case 34: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:251 - { - hclVAL.str = "e" + strconv.FormatInt(int64(hclDollar[2].num), 10) - } - case 35: - hclDollar = hclS[hclpt-2 : hclpt+1] - //line parse.y:255 - { - hclVAL.str = "e-" + strconv.FormatInt(int64(hclDollar[2].num), 10) - } - } - goto hclstack /* stack new state and value */ -} diff --git a/vendor/github.com/hashicorp/hcl/hcl_test.go b/vendor/github.com/hashicorp/hcl/hcl_test.go new file mode 100644 index 0000000000000..31dff7c9e5980 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl_test.go @@ -0,0 +1,19 @@ +package hcl + +import ( + "io/ioutil" + "path/filepath" + "testing" +) + +// This is the directory where our test 
fixtures are. +const fixtureDir = "./test-fixtures" + +func testReadFile(t *testing.T, n string) string { + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, n)) + if err != nil { + t.Fatalf("err: %s", err) + } + + return string(d) +} diff --git a/vendor/github.com/hashicorp/hcl/json/lex.go b/vendor/github.com/hashicorp/hcl/json/lex.go deleted file mode 100644 index 0b07e3621c680..0000000000000 --- a/vendor/github.com/hashicorp/hcl/json/lex.go +++ /dev/null @@ -1,256 +0,0 @@ -package json - -import ( - "bytes" - "fmt" - "strconv" - "unicode" - "unicode/utf8" -) - -//go:generate go tool yacc -p "json" parse.y - -// This marks the end of the lexer -const lexEOF = 0 - -// The parser uses the type Lex as a lexer. It must provide -// the methods Lex(*SymType) int and Error(string). -type jsonLex struct { - Input string - - pos int - width int - col, line int - err error -} - -// The parser calls this method to get each new token. -func (x *jsonLex) Lex(yylval *jsonSymType) int { - for { - c := x.next() - if c == lexEOF { - return lexEOF - } - - // Ignore all whitespace except a newline which we handle - // specially later. 
- if unicode.IsSpace(c) { - continue - } - - // If it is a number, lex the number - if c >= '0' && c <= '9' { - x.backup() - return x.lexNumber(yylval) - } - - switch c { - case 'e': - fallthrough - case 'E': - switch x.next() { - case '+': - return EPLUS - case '-': - return EMINUS - default: - x.backup() - return EPLUS - } - case '.': - return PERIOD - case '-': - return MINUS - case ':': - return COLON - case ',': - return COMMA - case '[': - return LEFTBRACKET - case ']': - return RIGHTBRACKET - case '{': - return LEFTBRACE - case '}': - return RIGHTBRACE - case '"': - return x.lexString(yylval) - default: - x.backup() - return x.lexId(yylval) - } - } -} - -// lexId lexes an identifier -func (x *jsonLex) lexId(yylval *jsonSymType) int { - var b bytes.Buffer - first := true - for { - c := x.next() - if c == lexEOF { - break - } - - if !unicode.IsDigit(c) && !unicode.IsLetter(c) && c != '_' && c != '-' { - x.backup() - - if first { - x.createErr("Invalid identifier") - return lexEOF - } - - break - } - - first = false - if _, err := b.WriteRune(c); err != nil { - return lexEOF - } - } - - switch v := b.String(); v { - case "true": - return TRUE - case "false": - return FALSE - case "null": - return NULL - default: - x.createErr(fmt.Sprintf("Invalid identifier: %s", v)) - return lexEOF - } -} - -// lexNumber lexes out a number -func (x *jsonLex) lexNumber(yylval *jsonSymType) int { - var b bytes.Buffer - gotPeriod := false - for { - c := x.next() - if c == lexEOF { - break - } - - if c == '.' 
{ - if gotPeriod { - x.backup() - break - } - - gotPeriod = true - } else if c < '0' || c > '9' { - x.backup() - break - } - - if _, err := b.WriteRune(c); err != nil { - x.createErr(fmt.Sprintf("Internal error: %s", err)) - return lexEOF - } - } - - if !gotPeriod { - v, err := strconv.ParseInt(b.String(), 0, 0) - if err != nil { - x.createErr(fmt.Sprintf("Expected number: %s", err)) - return lexEOF - } - - yylval.num = int(v) - return NUMBER - } - - f, err := strconv.ParseFloat(b.String(), 64) - if err != nil { - x.createErr(fmt.Sprintf("Expected float: %s", err)) - return lexEOF - } - - yylval.f = float64(f) - return FLOAT -} - -// lexString extracts a string from the input -func (x *jsonLex) lexString(yylval *jsonSymType) int { - var b bytes.Buffer - for { - c := x.next() - if c == lexEOF { - break - } - - // String end - if c == '"' { - break - } - - // If we're escaping a quote, then escape the quote - if c == '\\' { - n := x.next() - switch n { - case '"': - c = n - case 'n': - c = '\n' - case '\\': - c = n - default: - x.backup() - } - } - - if _, err := b.WriteRune(c); err != nil { - return lexEOF - } - } - - yylval.str = b.String() - return STRING -} - -// Return the next rune for the lexer. -func (x *jsonLex) next() rune { - if int(x.pos) >= len(x.Input) { - x.width = 0 - return lexEOF - } - - r, w := utf8.DecodeRuneInString(x.Input[x.pos:]) - x.width = w - x.pos += x.width - - x.col += 1 - if x.line == 0 { - x.line = 1 - } - if r == '\n' { - x.line += 1 - x.col = 0 - } - - return r -} - -// peek returns but does not consume the next rune in the input -func (x *jsonLex) peek() rune { - r := x.next() - x.backup() - return r -} - -// backup steps back one rune. Can only be called once per next. 
-func (x *jsonLex) backup() { - x.col -= 1 - x.pos -= x.width -} - -// createErr records the given error -func (x *jsonLex) createErr(msg string) { - x.err = fmt.Errorf("Line %d, column %d: %s", x.line, x.col, msg) -} - -// The parser calls this method on a parse error. -func (x *jsonLex) Error(s string) { - x.createErr(s) -} diff --git a/vendor/github.com/hashicorp/hcl/json/parse.go b/vendor/github.com/hashicorp/hcl/json/parse.go deleted file mode 100644 index 9ab454a44b48d..0000000000000 --- a/vendor/github.com/hashicorp/hcl/json/parse.go +++ /dev/null @@ -1,40 +0,0 @@ -package json - -import ( - "sync" - - "github.com/hashicorp/hcl/hcl" - "github.com/hashicorp/go-multierror" -) - -// jsonErrors are the errors built up from parsing. These should not -// be accessed directly. -var jsonErrors []error -var jsonLock sync.Mutex -var jsonResult *hcl.Object - -// Parse parses the given string and returns the result. -func Parse(v string) (*hcl.Object, error) { - jsonLock.Lock() - defer jsonLock.Unlock() - jsonErrors = nil - jsonResult = nil - - // Parse - lex := &jsonLex{Input: v} - jsonParse(lex) - - // If we have an error in the lexer itself, return it - if lex.err != nil { - return nil, lex.err - } - - // Build up the errors - var err error - if len(jsonErrors) > 0 { - err = &multierror.Error{Errors: jsonErrors} - jsonResult = nil - } - - return jsonResult, err -} diff --git a/vendor/github.com/hashicorp/hcl/json/parse.y b/vendor/github.com/hashicorp/hcl/json/parse.y deleted file mode 100644 index 237e4ae59071a..0000000000000 --- a/vendor/github.com/hashicorp/hcl/json/parse.y +++ /dev/null @@ -1,210 +0,0 @@ -// This is the yacc input for creating the parser for HCL JSON. 
- -%{ -package json - -import ( - "fmt" - "strconv" - - "github.com/hashicorp/hcl/hcl" -) - -%} - -%union { - f float64 - num int - str string - obj *hcl.Object - objlist []*hcl.Object -} - -%type float -%type int -%type number object pair value -%type array elements members -%type exp - -%token FLOAT -%token NUMBER -%token COLON COMMA IDENTIFIER EQUAL NEWLINE STRING -%token LEFTBRACE RIGHTBRACE LEFTBRACKET RIGHTBRACKET -%token TRUE FALSE NULL MINUS PERIOD EPLUS EMINUS - -%% - -top: - object - { - jsonResult = $1 - } - -object: - LEFTBRACE members RIGHTBRACE - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeObject, - Value: hcl.ObjectList($2).Flat(), - } - } -| LEFTBRACE RIGHTBRACE - { - $$ = &hcl.Object{Type: hcl.ValueTypeObject} - } - -members: - pair - { - $$ = []*hcl.Object{$1} - } -| members COMMA pair - { - $$ = append($1, $3) - } - -pair: - STRING COLON value - { - $3.Key = $1 - $$ = $3 - } - -value: - STRING - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeString, - Value: $1, - } - } -| number - { - $$ = $1 - } -| object - { - $$ = $1 - } -| array - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeList, - Value: $1, - } - } -| TRUE - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeBool, - Value: true, - } - } -| FALSE - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeBool, - Value: false, - } - } -| NULL - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeNil, - Value: nil, - } - } - -array: - LEFTBRACKET RIGHTBRACKET - { - $$ = nil - } -| LEFTBRACKET elements RIGHTBRACKET - { - $$ = $2 - } - -elements: - value - { - $$ = []*hcl.Object{$1} - } -| elements COMMA value - { - $$ = append($1, $3) - } - -number: - int - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeInt, - Value: $1, - } - } -| float - { - $$ = &hcl.Object{ - Type: hcl.ValueTypeFloat, - Value: $1, - } - } -| int exp - { - fs := fmt.Sprintf("%d%s", $1, $2) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - $$ = &hcl.Object{ - Type: hcl.ValueTypeFloat, - Value: f, - } - } -| float exp - { - fs := 
fmt.Sprintf("%f%s", $1, $2) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - $$ = &hcl.Object{ - Type: hcl.ValueTypeFloat, - Value: f, - } - } - -int: - MINUS int - { - $$ = $2 * -1 - } -| NUMBER - { - $$ = $1 - } - -float: - MINUS float - { - $$ = $2 * -1 - } -| FLOAT - { - $$ = $1 - } - -exp: - EPLUS NUMBER - { - $$ = "e" + strconv.FormatInt(int64($2), 10) - } -| EMINUS NUMBER - { - $$ = "e-" + strconv.FormatInt(int64($2), 10) - } - -%% diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go new file mode 100644 index 0000000000000..6eb14a253928f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go @@ -0,0 +1,111 @@ +package parser + +import "github.com/hashicorp/hcl/hcl/ast" + +// flattenObjects takes an AST node, walks it, and flattens +func flattenObjects(node ast.Node) { + ast.Walk(node, func(n ast.Node) (ast.Node, bool) { + // We only care about lists, because this is what we modify + list, ok := n.(*ast.ObjectList) + if !ok { + return n, true + } + + // Rebuild the item list + items := make([]*ast.ObjectItem, 0, len(list.Items)) + frontier := make([]*ast.ObjectItem, len(list.Items)) + copy(frontier, list.Items) + for len(frontier) > 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! 
Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 0000000000000..65d56c9b85bdf --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,297 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
+func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // Done + return keys, nil + case token.ILLEGAL: + fmt.Println("illegal") + default: + return nil, 
fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
" + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go new file mode 100644 index 0000000000000..8c66fb9ca5cb9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go @@ -0,0 +1,338 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +func TestType(t *testing.T) { + var literals = []struct { + typ token.Type + src string + }{ + {token.STRING, `"foo": "bar"`}, + {token.NUMBER, `"foo": 123`}, + {token.FLOAT, `"foo": 123.12`}, + {token.FLOAT, `"foo": -123.12`}, + {token.BOOL, `"foo": true`}, + {token.STRING, `"foo": null`}, + } + + for _, l := range literals { + t.Logf("Testing: %s", l.src) + + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + lit, ok := item.Val.(*ast.LiteralType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + if lit.Token.Type != l.typ { + t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type) + } + } +} + +func TestListType(t *testing.T) { + var literals = []struct { + src string + tokens []token.Type + }{ + { + `"foo": ["123", 123]`, + []token.Type{token.STRING, token.NUMBER}, + }, + { + `"foo": [123, "123",]`, + []token.Type{token.NUMBER, token.STRING}, + }, + { + `"foo": []`, + []token.Type{}, + }, + { + `"foo": ["123", 123]`, + []token.Type{token.STRING, token.NUMBER}, + }, + { + `"foo": ["123", {}]`, + 
[]token.Type{token.STRING, token.LBRACE}, + }, + } + + for _, l := range literals { + t.Logf("Testing: %s", l.src) + + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + list, ok := item.Val.(*ast.ListType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + tokens := []token.Type{} + for _, li := range list.List { + switch v := li.(type) { + case *ast.LiteralType: + tokens = append(tokens, v.Token.Type) + case *ast.ObjectType: + tokens = append(tokens, token.LBRACE) + } + } + + equals(t, l.tokens, tokens) + } +} + +func TestObjectType(t *testing.T) { + var literals = []struct { + src string + nodeType []ast.Node + itemLen int + }{ + { + `"foo": {}`, + nil, + 0, + }, + { + `"foo": { + "bar": "fatih" + }`, + []ast.Node{&ast.LiteralType{}}, + 1, + }, + { + `"foo": { + "bar": "fatih", + "baz": ["arslan"] + }`, + []ast.Node{ + &ast.LiteralType{}, + &ast.ListType{}, + }, + 2, + }, + { + `"foo": { + "bar": {} + }`, + []ast.Node{ + &ast.ObjectType{}, + }, + 1, + }, + { + `"foo": { + "bar": {}, + "foo": true + }`, + []ast.Node{ + &ast.ObjectType{}, + &ast.LiteralType{}, + }, + 2, + }, + } + + for _, l := range literals { + t.Logf("Testing:\n%s\n", l.src) + + p := newParser([]byte(l.src)) + // p.enableTrace = true + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + // we know that the ObjectKey name is foo for all cases, what matters + // is the object + obj, ok := item.Val.(*ast.ObjectType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + // check if the total length of items are correct + equals(t, l.itemLen, len(obj.List.Items)) + + // check if the types are correct + for i, item := range obj.List.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + } + } +} + +func TestFlattenObjects(t *testing.T) { + var literals = []struct { + src string + nodeType []ast.Node + itemLen int + }{ + { + `{ + "foo": [ + { + 
"foo": "svh", + "bar": "fatih" + } + ] + }`, + []ast.Node{ + &ast.ObjectType{}, + &ast.LiteralType{}, + &ast.LiteralType{}, + }, + 3, + }, + { + `{ + "variable": { + "foo": {} + } + }`, + []ast.Node{ + &ast.ObjectType{}, + }, + 1, + }, + } + + for _, l := range literals { + t.Logf("Testing:\n%s\n", l.src) + + f, err := Parse([]byte(l.src)) + if err != nil { + t.Error(err) + } + + // the first object is always an ObjectList so just assert that one + // so we can use it as such + obj, ok := f.Node.(*ast.ObjectList) + if !ok { + t.Errorf("node should be *ast.ObjectList, got: %T", f.Node) + } + + // check if the types are correct + var i int + for _, item := range obj.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + i++ + + if obj, ok := item.Val.(*ast.ObjectType); ok { + for _, item := range obj.List.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + i++ + } + } + } + + // check if the number of items is correct + equals(t, l.itemLen, i) + + } +} + +func TestObjectKey(t *testing.T) { + keys := []struct { + exp []token.Type + src string + }{ + {[]token.Type{token.STRING}, `"foo": {}`}, + } + + for _, k := range keys { + p := newParser([]byte(k.src)) + keys, err := p.objectKey() + if err != nil { + t.Fatal(err) + } + + tokens := []token.Type{} + for _, o := range keys { + tokens = append(tokens, o.Token.Type) + } + + equals(t, k.exp, tokens) + } + + errKeys := []struct { + src string + }{ + {`foo 12 {}`}, + {`foo bar = {}`}, + {`foo []`}, + {`12 {}`}, + } + + for _, k := range errKeys { + p := newParser([]byte(k.src)) + _, err := p.objectKey() + if err == nil { + t.Errorf("case '%s' should give an error", k.src) + } + } +} + +// Official HCL tests +func TestParse(t *testing.T) { + cases := []struct { + Name string + Err bool + }{ + { + "array.json", + false, + }, + { + "basic.json", + false, + }, + { + "object.json", + false, + }, + { + "types.json", + false, + }, + } + + const fixtureDir = "./test-fixtures" + + 
for _, tc := range cases { + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name)) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = Parse(d) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.Name, err) + } + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json new file mode 100644 index 0000000000000..e320f17ab2508 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json @@ -0,0 +1,4 @@ +{ + "foo": [1, 2, "bar"], + "bar": "baz" +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json new file mode 100644 index 0000000000000..b54bde96c1bc7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json @@ -0,0 +1,3 @@ +{ + "foo": "bar" +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json new file mode 100644 index 0000000000000..72168a3ccb497 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json @@ -0,0 +1,5 @@ +{ + "foo": { + "bar": [1,2] + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json new file mode 100644 index 0000000000000..9a142a6ca6445 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json @@ -0,0 +1,10 @@ +{ + "foo": "bar", + "bar": 7, + "baz": [1,2,3], + "foo": -12, + 
"bar": 3.14159, + "foo": true, + "bar": false, + "foo": null +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 0000000000000..477f71ff3db86 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber 
scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. 
If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isHexadecimal returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isHexadecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go new file mode 100644 index 0000000000000..fe2d75524d515 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go @@ -0,0 +1,363 @@ +package scanner + +import ( + "bytes" + "fmt" + "testing" + + "github.com/hashicorp/hcl/json/token" +) + +var f100 = 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + +type tokenPair struct { + tok token.Type + text string +} + +var tokenLists = map[string][]tokenPair{ + "operator": []tokenPair{ + {token.LBRACK, "["}, + {token.LBRACE, "{"}, + {token.COMMA, ","}, + {token.PERIOD, "."}, + {token.RBRACK, "]"}, + {token.RBRACE, "}"}, + }, + "bool": []tokenPair{ + {token.BOOL, "true"}, + {token.BOOL, "false"}, + }, + "string": []tokenPair{ + {token.STRING, `" "`}, + {token.STRING, `"a"`}, + {token.STRING, `"本"`}, + {token.STRING, `"${file("foo")}"`}, + {token.STRING, `"${file(\"foo\")}"`}, + {token.STRING, `"\a"`}, + {token.STRING, `"\b"`}, + {token.STRING, `"\f"`}, + {token.STRING, `"\n"`}, + {token.STRING, `"\r"`}, + {token.STRING, `"\t"`}, + {token.STRING, `"\v"`}, + {token.STRING, `"\""`}, + {token.STRING, `"\000"`}, + {token.STRING, `"\777"`}, + {token.STRING, `"\x00"`}, + {token.STRING, `"\xff"`}, + {token.STRING, `"\u0000"`}, + {token.STRING, `"\ufA16"`}, + {token.STRING, `"\U00000000"`}, + {token.STRING, `"\U0000ffAB"`}, + {token.STRING, `"` + f100 + `"`}, + }, + "number": []tokenPair{ + {token.NUMBER, "0"}, + {token.NUMBER, "1"}, + {token.NUMBER, "9"}, + {token.NUMBER, "42"}, + {token.NUMBER, "1234567890"}, + {token.NUMBER, "-0"}, + {token.NUMBER, "-1"}, + {token.NUMBER, "-9"}, + {token.NUMBER, "-42"}, + {token.NUMBER, "-1234567890"}, + }, + "float": []tokenPair{ + {token.FLOAT, "0."}, + {token.FLOAT, "1."}, + {token.FLOAT, "42."}, + {token.FLOAT, "01234567890."}, + {token.FLOAT, ".0"}, + {token.FLOAT, ".1"}, + {token.FLOAT, ".42"}, + {token.FLOAT, ".0123456789"}, + {token.FLOAT, "0.0"}, + {token.FLOAT, "1.0"}, + {token.FLOAT, "42.0"}, + {token.FLOAT, "01234567890.0"}, + {token.FLOAT, "0e0"}, + {token.FLOAT, "1e0"}, + {token.FLOAT, "42e0"}, + {token.FLOAT, "01234567890e0"}, + {token.FLOAT, "0E0"}, + {token.FLOAT, "1E0"}, + {token.FLOAT, "42E0"}, + {token.FLOAT, "01234567890E0"}, + {token.FLOAT, "0e+10"}, + {token.FLOAT, 
"1e-10"}, + {token.FLOAT, "42e+10"}, + {token.FLOAT, "01234567890e-10"}, + {token.FLOAT, "0E+10"}, + {token.FLOAT, "1E-10"}, + {token.FLOAT, "42E+10"}, + {token.FLOAT, "01234567890E-10"}, + {token.FLOAT, "01.8e0"}, + {token.FLOAT, "1.4e0"}, + {token.FLOAT, "42.2e0"}, + {token.FLOAT, "01234567890.12e0"}, + {token.FLOAT, "0.E0"}, + {token.FLOAT, "1.12E0"}, + {token.FLOAT, "42.123E0"}, + {token.FLOAT, "01234567890.213E0"}, + {token.FLOAT, "0.2e+10"}, + {token.FLOAT, "1.2e-10"}, + {token.FLOAT, "42.54e+10"}, + {token.FLOAT, "01234567890.98e-10"}, + {token.FLOAT, "0.1E+10"}, + {token.FLOAT, "1.1E-10"}, + {token.FLOAT, "42.1E+10"}, + {token.FLOAT, "01234567890.1E-10"}, + {token.FLOAT, "-0.0"}, + {token.FLOAT, "-1.0"}, + {token.FLOAT, "-42.0"}, + {token.FLOAT, "-01234567890.0"}, + {token.FLOAT, "-0e0"}, + {token.FLOAT, "-1e0"}, + {token.FLOAT, "-42e0"}, + {token.FLOAT, "-01234567890e0"}, + {token.FLOAT, "-0E0"}, + {token.FLOAT, "-1E0"}, + {token.FLOAT, "-42E0"}, + {token.FLOAT, "-01234567890E0"}, + {token.FLOAT, "-0e+10"}, + {token.FLOAT, "-1e-10"}, + {token.FLOAT, "-42e+10"}, + {token.FLOAT, "-01234567890e-10"}, + {token.FLOAT, "-0E+10"}, + {token.FLOAT, "-1E-10"}, + {token.FLOAT, "-42E+10"}, + {token.FLOAT, "-01234567890E-10"}, + {token.FLOAT, "-01.8e0"}, + {token.FLOAT, "-1.4e0"}, + {token.FLOAT, "-42.2e0"}, + {token.FLOAT, "-01234567890.12e0"}, + {token.FLOAT, "-0.E0"}, + {token.FLOAT, "-1.12E0"}, + {token.FLOAT, "-42.123E0"}, + {token.FLOAT, "-01234567890.213E0"}, + {token.FLOAT, "-0.2e+10"}, + {token.FLOAT, "-1.2e-10"}, + {token.FLOAT, "-42.54e+10"}, + {token.FLOAT, "-01234567890.98e-10"}, + {token.FLOAT, "-0.1E+10"}, + {token.FLOAT, "-1.1E-10"}, + {token.FLOAT, "-42.1E+10"}, + {token.FLOAT, "-01234567890.1E-10"}, + }, +} + +var orderedTokenLists = []string{ + "comment", + "operator", + "bool", + "string", + "number", + "float", +} + +func TestPosition(t *testing.T) { + // create artifical source code + buf := new(bytes.Buffer) + + for _, listName := range 
orderedTokenLists { + for _, ident := range tokenLists[listName] { + fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text) + } + } + + s := New(buf.Bytes()) + + pos := token.Pos{"", 4, 1, 5} + s.Scan() + for _, listName := range orderedTokenLists { + + for _, k := range tokenLists[listName] { + curPos := s.tokPos + // fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column) + + if curPos.Offset != pos.Offset { + t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text) + } + if curPos.Line != pos.Line { + t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text) + } + if curPos.Column != pos.Column { + t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text) + } + pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline + pos.Line += countNewlines(k.text) + 1 // each token is on a new line + + s.Error = func(pos token.Pos, msg string) { + t.Errorf("error %q for %q", msg, k.text) + } + + s.Scan() + } + } + // make sure there were no token-internal errors reported by scanner + if s.ErrorCount != 0 { + t.Errorf("%d errors", s.ErrorCount) + } +} + +func TestComment(t *testing.T) { + testTokenList(t, tokenLists["comment"]) +} + +func TestOperator(t *testing.T) { + testTokenList(t, tokenLists["operator"]) +} + +func TestBool(t *testing.T) { + testTokenList(t, tokenLists["bool"]) +} + +func TestIdent(t *testing.T) { + testTokenList(t, tokenLists["ident"]) +} + +func TestString(t *testing.T) { + testTokenList(t, tokenLists["string"]) +} + +func TestNumber(t *testing.T) { + testTokenList(t, tokenLists["number"]) +} + +func TestFloat(t *testing.T) { + testTokenList(t, tokenLists["float"]) +} + +func TestRealExample(t *testing.T) { + complexReal := ` +{ + "variable": { + "foo": { + "default": "bar", + "description": "bar", + "depends_on": ["something"] + } + } +}` + + literals := []struct { + tokenType token.Type + literal string + }{ + {token.LBRACE, `{`}, + {token.STRING, `"variable"`}, + {token.COLON, `:`}, + 
{token.LBRACE, `{`}, + {token.STRING, `"foo"`}, + {token.COLON, `:`}, + {token.LBRACE, `{`}, + {token.STRING, `"default"`}, + {token.COLON, `:`}, + {token.STRING, `"bar"`}, + {token.COMMA, `,`}, + {token.STRING, `"description"`}, + {token.COLON, `:`}, + {token.STRING, `"bar"`}, + {token.COMMA, `,`}, + {token.STRING, `"depends_on"`}, + {token.COLON, `:`}, + {token.LBRACK, `[`}, + {token.STRING, `"something"`}, + {token.RBRACK, `]`}, + {token.RBRACE, `}`}, + {token.RBRACE, `}`}, + {token.RBRACE, `}`}, + {token.EOF, ``}, + } + + s := New([]byte(complexReal)) + for _, l := range literals { + tok := s.Scan() + if l.tokenType != tok.Type { + t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String()) + } + + if l.literal != tok.Text { + t.Errorf("got: %s want %s\n", tok, l.literal) + } + } + +} + +func TestError(t *testing.T) { + testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) + testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) + + testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING) + testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING) + + testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER) + testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER) + testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL) + + testError(t, `"`, "1:2", "literal not terminated", token.STRING) + testError(t, `"abc`, "1:5", "literal not terminated", token.STRING) + testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING) +} + +func testError(t *testing.T, src, pos, msg string, tok token.Type) { + s := New([]byte(src)) + + errorCalled := false + s.Error = func(p token.Pos, m string) { + if !errorCalled { + if pos != p.String() { + t.Errorf("pos = %q, want %q for %q", p, pos, src) + } + + if m != msg { + t.Errorf("msg = %q, want %q for %q", m, msg, src) + } + errorCalled = true + } + } + + tk := s.Scan() + if tk.Type != tok { + 
t.Errorf("tok = %s, want %s for %q", tk, tok, src) + } + if !errorCalled { + t.Errorf("error handler not called for %q", src) + } + if s.ErrorCount == 0 { + t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src) + } +} + +func testTokenList(t *testing.T, tokenList []tokenPair) { + // create artifical source code + buf := new(bytes.Buffer) + for _, ident := range tokenList { + fmt.Fprintf(buf, "%s\n", ident.text) + } + + s := New(buf.Bytes()) + for _, ident := range tokenList { + tok := s.Scan() + if tok.Type != ident.tok { + t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) + } + + if tok.Text != ident.text { + t.Errorf("text = %q want %q", tok.String(), ident.text) + } + + } +} + +func countNewlines(s string) int { + n := 0 + for _, ch := range s { + if ch == '\n' { + n++ + } + } + return n +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 0000000000000..59c1bb72d4a4e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. 
+func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 0000000000000..95a0c3eee653a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . 
+ COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
+func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token_test.go b/vendor/github.com/hashicorp/hcl/json/token/token_test.go new file mode 100644 index 0000000000000..a83fdd55bb4f6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token_test.go @@ -0,0 +1,34 @@ +package token + +import ( + "testing" +) + +func TestTypeString(t *testing.T) { + var tokens = []struct { + tt Type + str string + }{ + {ILLEGAL, "ILLEGAL"}, + {EOF, "EOF"}, + {NUMBER, "NUMBER"}, + {FLOAT, "FLOAT"}, + {BOOL, "BOOL"}, + {STRING, "STRING"}, + {NULL, "NULL"}, + {LBRACK, "LBRACK"}, + {LBRACE, "LBRACE"}, + {COMMA, "COMMA"}, + {PERIOD, "PERIOD"}, + {RBRACK, "RBRACK"}, + {RBRACE, "RBRACE"}, + } + + for _, token := range tokens { + if token.tt.String() != token.str { + t.Errorf("want: %q got:%q\n", token.str, token.tt) + + } + } + +} diff --git a/vendor/github.com/hashicorp/hcl/json/y.go b/vendor/github.com/hashicorp/hcl/json/y.go deleted file mode 100644 index a57649e526ca9..0000000000000 --- a/vendor/github.com/hashicorp/hcl/json/y.go +++ /dev/null @@ -1,699 +0,0 @@ -//line parse.y:3 -package json - -import __yyfmt__ "fmt" - -//line parse.y:5 -import ( - "fmt" - "strconv" - - "github.com/hashicorp/hcl/hcl" -) - -//line parse.y:15 -type jsonSymType struct { - yys int - f float64 - num int - str string - obj *hcl.Object - objlist []*hcl.Object -} - -const FLOAT = 57346 -const NUMBER = 57347 -const COLON = 57348 -const COMMA = 57349 -const IDENTIFIER = 57350 -const 
EQUAL = 57351 -const NEWLINE = 57352 -const STRING = 57353 -const LEFTBRACE = 57354 -const RIGHTBRACE = 57355 -const LEFTBRACKET = 57356 -const RIGHTBRACKET = 57357 -const TRUE = 57358 -const FALSE = 57359 -const NULL = 57360 -const MINUS = 57361 -const PERIOD = 57362 -const EPLUS = 57363 -const EMINUS = 57364 - -var jsonToknames = [...]string{ - "$end", - "error", - "$unk", - "FLOAT", - "NUMBER", - "COLON", - "COMMA", - "IDENTIFIER", - "EQUAL", - "NEWLINE", - "STRING", - "LEFTBRACE", - "RIGHTBRACE", - "LEFTBRACKET", - "RIGHTBRACKET", - "TRUE", - "FALSE", - "NULL", - "MINUS", - "PERIOD", - "EPLUS", - "EMINUS", -} -var jsonStatenames = [...]string{} - -const jsonEofCode = 1 -const jsonErrCode = 2 -const jsonMaxDepth = 200 - -//line parse.y:210 - -//line yacctab:1 -var jsonExca = [...]int{ - -1, 1, - 1, -1, - -2, 0, -} - -const jsonNprod = 28 -const jsonPrivate = 57344 - -var jsonTokenNames []string -var jsonStates []string - -const jsonLast = 53 - -var jsonAct = [...]int{ - - 12, 25, 24, 3, 20, 27, 28, 7, 13, 3, - 21, 22, 30, 17, 18, 19, 23, 25, 24, 26, - 25, 24, 36, 32, 13, 3, 10, 22, 33, 17, - 18, 19, 23, 35, 34, 23, 38, 9, 7, 39, - 5, 29, 6, 8, 37, 15, 2, 1, 4, 31, - 16, 14, 11, -} -var jsonPact = [...]int{ - - -9, -1000, -1000, 27, 30, -1000, -1000, 20, -1000, -4, - 13, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -16, -16, -3, 16, -1000, -1000, -1000, 28, 17, -1000, - -1000, 29, -1000, -1000, -1000, -1000, -1000, -1000, 13, -1000, -} -var jsonPgo = [...]int{ - - 0, 10, 4, 51, 45, 42, 0, 50, 49, 48, - 19, 47, -} -var jsonR1 = [...]int{ - - 0, 11, 4, 4, 9, 9, 5, 6, 6, 6, - 6, 6, 6, 6, 7, 7, 8, 8, 3, 3, - 3, 3, 2, 2, 1, 1, 10, 10, -} -var jsonR2 = [...]int{ - - 0, 1, 3, 2, 1, 3, 3, 1, 1, 1, - 1, 1, 1, 1, 2, 3, 1, 3, 1, 1, - 2, 2, 2, 1, 2, 1, 2, 2, -} -var jsonChk = [...]int{ - - -1000, -11, -4, 12, -9, 13, -5, 11, 13, 7, - 6, -5, -6, 11, -3, -4, -7, 16, 17, 18, - -2, -1, 14, 19, 5, 4, -10, 21, 22, -10, - 15, -8, -6, -2, -1, 5, 5, 15, 7, -6, -} 
-var jsonDef = [...]int{ - - 0, -2, 1, 0, 0, 3, 4, 0, 2, 0, - 0, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 18, 19, 0, 0, 23, 25, 20, 0, 0, 21, - 14, 0, 16, 22, 24, 26, 27, 15, 0, 17, -} -var jsonTok1 = [...]int{ - - 1, -} -var jsonTok2 = [...]int{ - - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, -} -var jsonTok3 = [...]int{ - 0, -} - -var jsonErrorMessages = [...]struct { - state int - token int - msg string -}{} - -//line yaccpar:1 - -/* parser for yacc output */ - -var ( - jsonDebug = 0 - jsonErrorVerbose = false -) - -type jsonLexer interface { - Lex(lval *jsonSymType) int - Error(s string) -} - -type jsonParser interface { - Parse(jsonLexer) int - Lookahead() int -} - -type jsonParserImpl struct { - lookahead func() int -} - -func (p *jsonParserImpl) Lookahead() int { - return p.lookahead() -} - -func jsonNewParser() jsonParser { - p := &jsonParserImpl{ - lookahead: func() int { return -1 }, - } - return p -} - -const jsonFlag = -1000 - -func jsonTokname(c int) string { - if c >= 1 && c-1 < len(jsonToknames) { - if jsonToknames[c-1] != "" { - return jsonToknames[c-1] - } - } - return __yyfmt__.Sprintf("tok-%v", c) -} - -func jsonStatname(s int) string { - if s >= 0 && s < len(jsonStatenames) { - if jsonStatenames[s] != "" { - return jsonStatenames[s] - } - } - return __yyfmt__.Sprintf("state-%v", s) -} - -func jsonErrorMessage(state, lookAhead int) string { - const TOKSTART = 4 - - if !jsonErrorVerbose { - return "syntax error" - } - - for _, e := range jsonErrorMessages { - if e.state == state && e.token == lookAhead { - return "syntax error: " + e.msg - } - } - - res := "syntax error: unexpected " + jsonTokname(lookAhead) - - // To match Bison, suggest at most four expected tokens. - expected := make([]int, 0, 4) - - // Look for shiftable tokens. 
- base := jsonPact[state] - for tok := TOKSTART; tok-1 < len(jsonToknames); tok++ { - if n := base + tok; n >= 0 && n < jsonLast && jsonChk[jsonAct[n]] == tok { - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - } - - if jsonDef[state] == -2 { - i := 0 - for jsonExca[i] != -1 || jsonExca[i+1] != state { - i += 2 - } - - // Look for tokens that we accept or reduce. - for i += 2; jsonExca[i] >= 0; i += 2 { - tok := jsonExca[i] - if tok < TOKSTART || jsonExca[i+1] == 0 { - continue - } - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - - // If the default action is to accept or reduce, give up. - if jsonExca[i+1] != 0 { - return res - } - } - - for i, tok := range expected { - if i == 0 { - res += ", expecting " - } else { - res += " or " - } - res += jsonTokname(tok) - } - return res -} - -func jsonlex1(lex jsonLexer, lval *jsonSymType) (char, token int) { - token = 0 - char = lex.Lex(lval) - if char <= 0 { - token = jsonTok1[0] - goto out - } - if char < len(jsonTok1) { - token = jsonTok1[char] - goto out - } - if char >= jsonPrivate { - if char < jsonPrivate+len(jsonTok2) { - token = jsonTok2[char-jsonPrivate] - goto out - } - } - for i := 0; i < len(jsonTok3); i += 2 { - token = jsonTok3[i+0] - if token == char { - token = jsonTok3[i+1] - goto out - } - } - -out: - if token == 0 { - token = jsonTok2[1] /* unknown char */ - } - if jsonDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", jsonTokname(token), uint(char)) - } - return char, token -} - -func jsonParse(jsonlex jsonLexer) int { - return jsonNewParser().Parse(jsonlex) -} - -func (jsonrcvr *jsonParserImpl) Parse(jsonlex jsonLexer) int { - var jsonn int - var jsonlval jsonSymType - var jsonVAL jsonSymType - var jsonDollar []jsonSymType - _ = jsonDollar // silence set and not used - jsonS := make([]jsonSymType, jsonMaxDepth) - - Nerrs := 0 /* number of errors */ - Errflag := 0 /* error recovery flag */ - jsonstate := 0 - 
jsonchar := -1 - jsontoken := -1 // jsonchar translated into internal numbering - jsonrcvr.lookahead = func() int { return jsonchar } - defer func() { - // Make sure we report no lookahead when not parsing. - jsonstate = -1 - jsonchar = -1 - jsontoken = -1 - }() - jsonp := -1 - goto jsonstack - -ret0: - return 0 - -ret1: - return 1 - -jsonstack: - /* put a state and value onto the stack */ - if jsonDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", jsonTokname(jsontoken), jsonStatname(jsonstate)) - } - - jsonp++ - if jsonp >= len(jsonS) { - nyys := make([]jsonSymType, len(jsonS)*2) - copy(nyys, jsonS) - jsonS = nyys - } - jsonS[jsonp] = jsonVAL - jsonS[jsonp].yys = jsonstate - -jsonnewstate: - jsonn = jsonPact[jsonstate] - if jsonn <= jsonFlag { - goto jsondefault /* simple state */ - } - if jsonchar < 0 { - jsonchar, jsontoken = jsonlex1(jsonlex, &jsonlval) - } - jsonn += jsontoken - if jsonn < 0 || jsonn >= jsonLast { - goto jsondefault - } - jsonn = jsonAct[jsonn] - if jsonChk[jsonn] == jsontoken { /* valid shift */ - jsonchar = -1 - jsontoken = -1 - jsonVAL = jsonlval - jsonstate = jsonn - if Errflag > 0 { - Errflag-- - } - goto jsonstack - } - -jsondefault: - /* default state action */ - jsonn = jsonDef[jsonstate] - if jsonn == -2 { - if jsonchar < 0 { - jsonchar, jsontoken = jsonlex1(jsonlex, &jsonlval) - } - - /* look through exception table */ - xi := 0 - for { - if jsonExca[xi+0] == -1 && jsonExca[xi+1] == jsonstate { - break - } - xi += 2 - } - for xi += 2; ; xi += 2 { - jsonn = jsonExca[xi+0] - if jsonn < 0 || jsonn == jsontoken { - break - } - } - jsonn = jsonExca[xi+1] - if jsonn < 0 { - goto ret0 - } - } - if jsonn == 0 { - /* error ... 
attempt to resume parsing */ - switch Errflag { - case 0: /* brand new error */ - jsonlex.Error(jsonErrorMessage(jsonstate, jsontoken)) - Nerrs++ - if jsonDebug >= 1 { - __yyfmt__.Printf("%s", jsonStatname(jsonstate)) - __yyfmt__.Printf(" saw %s\n", jsonTokname(jsontoken)) - } - fallthrough - - case 1, 2: /* incompletely recovered error ... try again */ - Errflag = 3 - - /* find a state where "error" is a legal shift action */ - for jsonp >= 0 { - jsonn = jsonPact[jsonS[jsonp].yys] + jsonErrCode - if jsonn >= 0 && jsonn < jsonLast { - jsonstate = jsonAct[jsonn] /* simulate a shift of "error" */ - if jsonChk[jsonstate] == jsonErrCode { - goto jsonstack - } - } - - /* the current p has no shift on "error", pop stack */ - if jsonDebug >= 2 { - __yyfmt__.Printf("error recovery pops state %d\n", jsonS[jsonp].yys) - } - jsonp-- - } - /* there is no state on the stack with an error shift ... abort */ - goto ret1 - - case 3: /* no shift yet; clobber input char */ - if jsonDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", jsonTokname(jsontoken)) - } - if jsontoken == jsonEofCode { - goto ret1 - } - jsonchar = -1 - jsontoken = -1 - goto jsonnewstate /* try again in the same state */ - } - } - - /* reduction by production jsonn */ - if jsonDebug >= 2 { - __yyfmt__.Printf("reduce %v in:\n\t%v\n", jsonn, jsonStatname(jsonstate)) - } - - jsonnt := jsonn - jsonpt := jsonp - _ = jsonpt // guard against "declared and not used" - - jsonp -= jsonR2[jsonn] - // jsonp is now the index of $0. Perform the default action. Iff the - // reduced production is ε, $1 is possibly out of range. 
- if jsonp+1 >= len(jsonS) { - nyys := make([]jsonSymType, len(jsonS)*2) - copy(nyys, jsonS) - jsonS = nyys - } - jsonVAL = jsonS[jsonp+1] - - /* consult goto table to find next state */ - jsonn = jsonR1[jsonn] - jsong := jsonPgo[jsonn] - jsonj := jsong + jsonS[jsonp].yys + 1 - - if jsonj >= jsonLast { - jsonstate = jsonAct[jsong] - } else { - jsonstate = jsonAct[jsonj] - if jsonChk[jsonstate] != -jsonn { - jsonstate = jsonAct[jsong] - } - } - // dummy call; replaced with literal code - switch jsonnt { - - case 1: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:39 - { - jsonResult = jsonDollar[1].obj - } - case 2: - jsonDollar = jsonS[jsonpt-3 : jsonpt+1] - //line parse.y:45 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeObject, - Value: hcl.ObjectList(jsonDollar[2].objlist).Flat(), - } - } - case 3: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:52 - { - jsonVAL.obj = &hcl.Object{Type: hcl.ValueTypeObject} - } - case 4: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:58 - { - jsonVAL.objlist = []*hcl.Object{jsonDollar[1].obj} - } - case 5: - jsonDollar = jsonS[jsonpt-3 : jsonpt+1] - //line parse.y:62 - { - jsonVAL.objlist = append(jsonDollar[1].objlist, jsonDollar[3].obj) - } - case 6: - jsonDollar = jsonS[jsonpt-3 : jsonpt+1] - //line parse.y:68 - { - jsonDollar[3].obj.Key = jsonDollar[1].str - jsonVAL.obj = jsonDollar[3].obj - } - case 7: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:75 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeString, - Value: jsonDollar[1].str, - } - } - case 8: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:82 - { - jsonVAL.obj = jsonDollar[1].obj - } - case 9: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:86 - { - jsonVAL.obj = jsonDollar[1].obj - } - case 10: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:90 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeList, - Value: jsonDollar[1].objlist, - } - } - case 11: - jsonDollar = 
jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:97 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeBool, - Value: true, - } - } - case 12: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:104 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeBool, - Value: false, - } - } - case 13: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:111 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeNil, - Value: nil, - } - } - case 14: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:120 - { - jsonVAL.objlist = nil - } - case 15: - jsonDollar = jsonS[jsonpt-3 : jsonpt+1] - //line parse.y:124 - { - jsonVAL.objlist = jsonDollar[2].objlist - } - case 16: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:130 - { - jsonVAL.objlist = []*hcl.Object{jsonDollar[1].obj} - } - case 17: - jsonDollar = jsonS[jsonpt-3 : jsonpt+1] - //line parse.y:134 - { - jsonVAL.objlist = append(jsonDollar[1].objlist, jsonDollar[3].obj) - } - case 18: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:140 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeInt, - Value: jsonDollar[1].num, - } - } - case 19: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:147 - { - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeFloat, - Value: jsonDollar[1].f, - } - } - case 20: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:154 - { - fs := fmt.Sprintf("%d%s", jsonDollar[1].num, jsonDollar[2].str) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeFloat, - Value: f, - } - } - case 21: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:167 - { - fs := fmt.Sprintf("%f%s", jsonDollar[1].f, jsonDollar[2].str) - f, err := strconv.ParseFloat(fs, 64) - if err != nil { - panic(err) - } - - jsonVAL.obj = &hcl.Object{ - Type: hcl.ValueTypeFloat, - Value: f, - } - } - case 22: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:182 - { - jsonVAL.num = 
jsonDollar[2].num * -1 - } - case 23: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:186 - { - jsonVAL.num = jsonDollar[1].num - } - case 24: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:192 - { - jsonVAL.f = jsonDollar[2].f * -1 - } - case 25: - jsonDollar = jsonS[jsonpt-1 : jsonpt+1] - //line parse.y:196 - { - jsonVAL.f = jsonDollar[1].f - } - case 26: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:202 - { - jsonVAL.str = "e" + strconv.FormatInt(int64(jsonDollar[2].num), 10) - } - case 27: - jsonDollar = jsonS[jsonpt-2 : jsonpt+1] - //line parse.y:206 - { - jsonVAL.str = "e-" + strconv.FormatInt(int64(jsonDollar[2].num), 10) - } - } - goto jsonstack /* stack new state and value */ -} diff --git a/vendor/github.com/hashicorp/hcl/lex_test.go b/vendor/github.com/hashicorp/hcl/lex_test.go new file mode 100644 index 0000000000000..f7ee37886b24f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex_test.go @@ -0,0 +1,37 @@ +package hcl + +import ( + "testing" +) + +func TestLexMode(t *testing.T) { + cases := []struct { + Input string + Mode lexModeValue + }{ + { + "", + lexModeHcl, + }, + { + "foo", + lexModeHcl, + }, + { + "{}", + lexModeJson, + }, + { + " {}", + lexModeJson, + }, + } + + for i, tc := range cases { + actual := lexMode(tc.Input) + + if actual != tc.Mode { + t.Fatalf("%d: %#v", i, actual) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go index 5237d54bb70aa..d0719c2ab12b7 100644 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -3,19 +3,20 @@ package hcl import ( "fmt" - "github.com/hashicorp/hcl/hcl" - "github.com/hashicorp/hcl/json" + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" ) // Parse parses the given input and returns the root object. // // The input format can be either HCL or JSON. 
-func Parse(input string) (*hcl.Object, error) { +func Parse(input string) (*ast.File, error) { switch lexMode(input) { case lexModeHcl: - return hcl.Parse(input) + return hclParser.Parse([]byte(input)) case lexModeJson: - return json.Parse(input) + return jsonParser.Parse([]byte(input)) } return nil, fmt.Errorf("unknown config format") diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/object_list.json b/vendor/github.com/hashicorp/hcl/test-fixtures/object_list.json new file mode 100644 index 0000000000000..73f367438d2c3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/object_list.json @@ -0,0 +1,15 @@ +{ + "resource": { + "aws_instance": { + "db": { + "vpc": "foo", + "provisioner": [{ + "file": { + "source": "foo", + "destination": "bar" + } + }] + } + } + } +}