From 69d26a21adddbe4c5e3658a87b70c4c3f8952db9 Mon Sep 17 00:00:00 2001 From: Oliver Steele Date: Sun, 9 Jul 2017 11:18:35 -0400 Subject: [PATCH] Rename chunk -> token --- liquid.go | 4 ---- parser/ast.go | 12 +++++----- parser/chunktype_string.go | 16 ------------- parser/parser.go | 42 +++++++++++++++++------------------ parser/scanner.go | 22 +++++++++--------- parser/scanner_test.go | 12 +++++----- parser/{chunk.go => token.go} | 34 ++++++++++++++-------------- parser/tokentype_string.go | 16 +++++++++++++ render/compiler.go | 8 +++---- render/context.go | 8 +++---- render/node_context.go | 2 +- render/nodes.go | 8 +++---- 12 files changed, 90 insertions(+), 94 deletions(-) delete mode 100644 parser/chunktype_string.go rename parser/{chunk.go => token.go} (60%) create mode 100644 parser/tokentype_string.go diff --git a/liquid.go b/liquid.go index 0c12ddc..86916b9 100644 --- a/liquid.go +++ b/liquid.go @@ -18,10 +18,6 @@ import ( // Bindings is a map of variable names to values. type Bindings map[string]interface{} -// TagParser parses the argument string "args" from a tag "{% tagname args %}", -// and returns a renderer. -// type TagParser func(chunks.RenderContext) (string, error) - // A Renderer returns the rendered string for a block. type Renderer func(render.Context) (string, error) diff --git a/parser/ast.go b/parser/ast.go index 512a076..6f22bb8 100644 --- a/parser/ast.go +++ b/parser/ast.go @@ -9,7 +9,7 @@ type ASTNode interface{} // ASTBlock represents a {% tag %}…{% endtag %}. type ASTBlock struct { - Chunk + Token syntax BlockSyntax Body []ASTNode // Body is the nodes before the first branch Branches []*ASTBlock // E.g. else and elseif w/in an if @@ -20,19 +20,19 @@ type ASTRaw struct { Slices []string } -// ASTTag is a tag. +// ASTTag is a tag {% tag %} that is not a block start or end. type ASTTag struct { - Chunk + Token } -// ASTText is a text chunk, that is rendered verbatim. +// ASTText is a text span, that is rendered verbatim. 
type ASTText struct { - Chunk + Token } // ASTObject is an {{ object }} object. type ASTObject struct { - Chunk + Token Expr expression.Expression } diff --git a/parser/chunktype_string.go b/parser/chunktype_string.go deleted file mode 100644 index a1d1165..0000000 --- a/parser/chunktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=ChunkType"; DO NOT EDIT. - -package parser - -import "fmt" - -const _ChunkType_name = "TextChunkTypeTagChunkTypeObjChunkType" - -var _ChunkType_index = [...]uint8{0, 13, 25, 37} - -func (i ChunkType) String() string { - if i < 0 || i >= ChunkType(len(_ChunkType_index)-1) { - return fmt.Sprintf("ChunkType(%d)", i) - } - return _ChunkType_name[_ChunkType_index[i]:_ChunkType_index[i+1]] -} diff --git a/parser/parser.go b/parser/parser.go index 31bb195..4a3555a 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -19,11 +19,11 @@ func parseErrorf(format string, a ...interface{}) ParseError { // Parse parses a source template. It returns an AST root, that can be compiled and evaluated. func (c Config) Parse(source string) (ASTNode, error) { tokens := Scan(source, "") - return c.parseChunks(tokens) + return c.parseTokens(tokens) } -// Parse creates an AST from a sequence of Chunks. -func (c Config) parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocyclo +// Parse creates an AST from a sequence of tokens. +func (c Config) parseTokens(tokens []Token) (ASTNode, error) { // nolint: gocyclo // a stack of control tag state, for matching nested {%if}{%endif%} etc. type frame struct { syntax BlockSyntax @@ -41,35 +41,35 @@ func (c Config) parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocycl inComment = false inRaw = false ) - for _, ch := range chunks { + for _, tok := range tokens { switch { // The parser needs to know about comment and raw, because tags inside // needn't match each other e.g. {%comment%}{%if%}{%endcomment%} // TODO is this true? 
case inComment: - if ch.Type == TagChunkType && ch.Name == "endcomment" { + if tok.Type == TagTokenType && tok.Name == "endcomment" { inComment = false } case inRaw: - if ch.Type == TagChunkType && ch.Name == "endraw" { + if tok.Type == TagTokenType && tok.Name == "endraw" { inRaw = false } else { - rawTag.Slices = append(rawTag.Slices, ch.Source) + rawTag.Slices = append(rawTag.Slices, tok.Source) } - case ch.Type == ObjChunkType: - expr, err := expression.Parse(ch.Args) + case tok.Type == ObjTokenType: + expr, err := expression.Parse(tok.Args) if err != nil { return nil, err } - *ap = append(*ap, &ASTObject{ch, expr}) - case ch.Type == TextChunkType: - *ap = append(*ap, &ASTText{Chunk: ch}) - case ch.Type == TagChunkType: - if cs, ok := g.BlockSyntax(ch.Name); ok { + *ap = append(*ap, &ASTObject{tok, expr}) + case tok.Type == TextTokenType: + *ap = append(*ap, &ASTText{Token: tok}) + case tok.Type == TagTokenType: + if cs, ok := g.BlockSyntax(tok.Name); ok { switch { - case ch.Name == "comment": + case tok.Name == "comment": inComment = true - case ch.Name == "raw": + case tok.Name == "raw": inRaw = true rawTag = &ASTRaw{} *ap = append(*ap, rawTag) @@ -78,17 +78,17 @@ func (c Config) parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocycl if sd != nil { suffix = "; immediate parent is " + sd.TagName() } - return nil, parseErrorf("%s not inside %s%s", ch.Name, strings.Join(cs.ParentTags(), " or "), suffix) + return nil, parseErrorf("%s not inside %s%s", tok.Name, strings.Join(cs.ParentTags(), " or "), suffix) case cs.IsBlockStart(): push := func() { stack = append(stack, frame{syntax: sd, node: bn, ap: ap}) - sd, bn = cs, &ASTBlock{Chunk: ch, syntax: cs} + sd, bn = cs, &ASTBlock{Token: tok, syntax: cs} *ap = append(*ap, bn) } push() ap = &bn.Body case cs.IsBranch(): - n := &ASTBlock{Chunk: ch, syntax: cs} + n := &ASTBlock{Token: tok, syntax: cs} bn.Branches = append(bn.Branches, n) ap = &n.Body case cs.IsBlockEnd(): @@ -99,10 +99,10 @@ func (c Config) 
parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocycl } pop() default: - panic(fmt.Errorf("block type %q", ch.Name)) + panic(fmt.Errorf("block type %q", tok.Name)) } } else { - *ap = append(*ap, &ASTTag{ch}) + *ap = append(*ap, &ASTTag{tok}) } } } diff --git a/parser/scanner.go b/parser/scanner.go index f5a6b2a..23b9808 100644 --- a/parser/scanner.go +++ b/parser/scanner.go @@ -5,35 +5,35 @@ import ( "strings" ) -var chunkMatcher = regexp.MustCompile(`{{\s*(.+?)\s*}}|{%\s*(\w+)(?:\s+((?:[^%]|%[^}])+?))?\s*%}`) +var tokenMatcher = regexp.MustCompile(`{{\s*(.+?)\s*}}|{%\s*(\w+)(?:\s+((?:[^%]|%[^}])+?))?\s*%}`) -// Scan breaks a string into a sequence of Chunks. -func Scan(data string, pathname string) []Chunk { +// Scan breaks a string into a sequence of Tokens. +func Scan(data string, pathname string) []Token { // TODO error on unterminated {{ and {% // TODO probably an error when a tag contains a {{ or {%, at least outside of a string var ( p, pe = 0, len(data) si = SourceInfo{pathname, 1} - out = make([]Chunk, 0) + out = make([]Token, 0) ) - for _, m := range chunkMatcher.FindAllStringSubmatchIndex(data, -1) { + for _, m := range tokenMatcher.FindAllStringSubmatchIndex(data, -1) { ts, te := m[0], m[1] if p < ts { - out = append(out, Chunk{Type: TextChunkType, SourceInfo: si, Source: data[p:ts]}) + out = append(out, Token{Type: TextTokenType, SourceInfo: si, Source: data[p:ts]}) si.lineNo += strings.Count(data[p:ts], "\n") } source := data[ts:te] switch data[ts+1] { case '{': - out = append(out, Chunk{ - Type: ObjChunkType, + out = append(out, Token{ + Type: ObjTokenType, SourceInfo: si, Source: source, Args: data[m[2]:m[3]], }) case '%': - c := Chunk{ - Type: TagChunkType, + c := Token{ + Type: TagTokenType, SourceInfo: si, Source: source, Name: data[m[4]:m[5]], @@ -47,7 +47,7 @@ func Scan(data string, pathname string) []Chunk { p = te } if p < pe { - out = append(out, Chunk{Type: TextChunkType, SourceInfo: si, Source: data[p:]}) + out = append(out, 
Token{Type: TextTokenType, SourceInfo: si, Source: data[p:]}) } return out } diff --git a/parser/scanner_test.go b/parser/scanner_test.go index c9e8b1e..c39cdab 100644 --- a/parser/scanner_test.go +++ b/parser/scanner_test.go @@ -26,37 +26,37 @@ func TestChunkScanner(t *testing.T) { tokens := Scan("12", "") require.NotNil(t, tokens) require.Len(t, tokens, 1) - require.Equal(t, TextChunkType, tokens[0].Type) + require.Equal(t, TextTokenType, tokens[0].Type) require.Equal(t, "12", tokens[0].Source) tokens = Scan("{{obj}}", "") require.NotNil(t, tokens) require.Len(t, tokens, 1) - require.Equal(t, ObjChunkType, tokens[0].Type) + require.Equal(t, ObjTokenType, tokens[0].Type) require.Equal(t, "obj", tokens[0].Args) tokens = Scan("{{ obj }}", "") require.NotNil(t, tokens) require.Len(t, tokens, 1) - require.Equal(t, ObjChunkType, tokens[0].Type) + require.Equal(t, ObjTokenType, tokens[0].Type) require.Equal(t, "obj", tokens[0].Args) tokens = Scan("{%tag args%}", "") require.NotNil(t, tokens) require.Len(t, tokens, 1) - require.Equal(t, TagChunkType, tokens[0].Type) + require.Equal(t, TagTokenType, tokens[0].Type) require.Equal(t, "tag", tokens[0].Name) require.Equal(t, "args", tokens[0].Args) tokens = Scan("{% tag args %}", "") require.NotNil(t, tokens) require.Len(t, tokens, 1) - require.Equal(t, TagChunkType, tokens[0].Type) + require.Equal(t, TagTokenType, tokens[0].Type) require.Equal(t, "tag", tokens[0].Name) require.Equal(t, "args", tokens[0].Args) tokens = Scan("pre{% tag args %}mid{{ object }}post", "") - require.Equal(t, `[TextChunkType{"pre"} TagChunkType{Tag:"tag", Args:"args"} TextChunkType{"mid"} ObjChunkType{"object"} TextChunkType{"post"}]`, fmt.Sprint(tokens)) + require.Equal(t, `[TextTokenType{"pre"} TagTokenType{Tag:"tag", Args:"args"} TextTokenType{"mid"} ObjTokenType{"object"} TextTokenType{"post"}]`, fmt.Sprint(tokens)) for i, test := range scannerCountTests { t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { diff --git a/parser/chunk.go 
b/parser/token.go similarity index 60% rename from parser/chunk.go rename to parser/token.go index 566ff32..14e138d 100644 --- a/parser/chunk.go +++ b/parser/token.go @@ -2,27 +2,27 @@ package parser import "fmt" -// A Chunk is either an object {{a.b}}, a tag {%if a>b%}, or a text chunk (anything outside of {{}} and {%%}.) -type Chunk struct { - Type ChunkType +// A Token is either an object {{a.b}}, a tag {%if a>b%}, or a text chunk (anything outside of {{}} and {%%}.) +type Token struct { + Type TokenType SourceInfo SourceInfo Name string // Name is the tag name of a tag Chunk. E.g. the tag name of "{% if 1 %}" is "if". Args string // Parameters is the tag arguments of a tag Chunk. E.g. the tag arguments of "{% if 1 %}" is "1". - Source string // Source is the entirety of the chunk, including the "{{", "{%", etc. markers. + Source string // Source is the entirety of the token, including the "{{", "{%", etc. markers. } -// ChunkType is the type of a Chunk -type ChunkType int +// TokenType is the type of a Token +type TokenType int -//go:generate stringer -type=ChunkType +//go:generate stringer -type=TokenType const ( - // TextChunkType is the type of a text Chunk - TextChunkType ChunkType = iota - // TagChunkType is the type of a tag Chunk "{%…%}" - TagChunkType - // ObjChunkType is the type of an object Chunk "{{…}}" - ObjChunkType + // TextTokenType is the type of a text Token + TextTokenType TokenType = iota + // TagTokenType is the type of a tag Token "{%…%}" + TagTokenType + // ObjTokenType is the type of an object Token "{{…}}" + ObjTokenType ) // SourceInfo contains a Chunk's source information @@ -31,13 +31,13 @@ type SourceInfo struct { lineNo int } -func (c Chunk) String() string { +func (c Token) String() string { switch c.Type { - case TextChunkType: + case TextTokenType: return fmt.Sprintf("%v{%#v}", c.Type, c.Source) - case TagChunkType: + case TagTokenType: return fmt.Sprintf("%v{Tag:%#v, Args:%#v}", c.Type, c.Name, c.Args) - case ObjChunkType: + 
case ObjTokenType: return fmt.Sprintf("%v{%#v}", c.Type, c.Args) default: return fmt.Sprintf("%v{%#v}", c.Type, c.Source) diff --git a/parser/tokentype_string.go b/parser/tokentype_string.go new file mode 100644 index 0000000..6230c80 --- /dev/null +++ b/parser/tokentype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=TokenType"; DO NOT EDIT. + +package parser + +import "fmt" + +const _TokenType_name = "TextTokenTypeTagTokenTypeObjTokenType" + +var _TokenType_index = [...]uint8{0, 13, 25, 37} + +func (i TokenType) String() string { + if i < 0 || i >= TokenType(len(_TokenType_index)-1) { + return fmt.Sprintf("TokenType(%d)", i) + } + return _TokenType_name[_TokenType_index[i]:_TokenType_index[i+1]] +} diff --git a/render/compiler.go b/render/compiler.go index adfb46d..396497a 100644 --- a/render/compiler.go +++ b/render/compiler.go @@ -42,7 +42,7 @@ func (c Config) compileNode(n parser.ASTNode) (Node, error) { return nil, compilationErrorf("undefined tag %q", n.Name) } node := BlockNode{ - Chunk: n.Chunk, + Token: n.Token, Body: body, Branches: branches, } @@ -68,13 +68,13 @@ func (c Config) compileNode(n parser.ASTNode) (Node, error) { if err != nil { return nil, err } - return &TagNode{n.Chunk, f}, nil + return &TagNode{n.Token, f}, nil } return nil, compilationErrorf("unknown tag: %s", n.Name) case *parser.ASTText: - return &TextNode{n.Chunk}, nil + return &TextNode{n.Token}, nil case *parser.ASTObject: - return &ObjectNode{n.Chunk, n.Expr}, nil + return &ObjectNode{n.Token, n.Expr}, nil default: panic(fmt.Errorf("un-compilable node type %T", n)) } diff --git a/render/context.go b/render/context.go index a002354..83ee5d6 100644 --- a/render/context.go +++ b/render/context.go @@ -134,9 +134,9 @@ func (c rendererContext) SourceFile() string { func (c rendererContext) TagArgs() string { switch { case c.node != nil: - return c.node.Chunk.Args + return c.node.Token.Args case c.cn != nil: - return c.cn.Chunk.Args + return c.cn.Token.Args default: 
return "" } @@ -145,9 +145,9 @@ func (c rendererContext) TagArgs() string { func (c rendererContext) TagName() string { switch { case c.node != nil: - return c.node.Chunk.Name + return c.node.Token.Name case c.cn != nil: - return c.cn.Chunk.Name + return c.cn.Token.Name default: return "" } diff --git a/render/node_context.go b/render/node_context.go index d888a59..abd381d 100644 --- a/render/node_context.go +++ b/render/node_context.go @@ -4,7 +4,7 @@ import ( "github.com/osteele/liquid/expression" ) -// nodeContext is the evaluation context for chunk AST rendering. +// nodeContext provides the evaluation context for rendering the AST. type nodeContext struct { bindings map[string]interface{} config Config diff --git a/render/nodes.go b/render/nodes.go index b3b8ae2..5a04b5d 100644 --- a/render/nodes.go +++ b/render/nodes.go @@ -13,7 +13,7 @@ type Node interface { // BlockNode represents a {% tag %}…{% endtag %}. type BlockNode struct { - parser.Chunk + parser.Token renderer func(io.Writer, Context) error Body []Node Branches []*BlockNode @@ -26,18 +26,18 @@ type RawNode struct { // TagNode renders itself via a render function that is created during parsing. type TagNode struct { - parser.Chunk + parser.Token renderer func(io.Writer, Context) error } // TextNode is a text chunk, that is rendered verbatim. type TextNode struct { - parser.Chunk + parser.Token } // ObjectNode is an {{ object }} object. type ObjectNode struct { - parser.Chunk + parser.Token expr expression.Expression }