1
0
mirror of https://github.com/danog/liquid.git synced 2024-11-30 07:28:56 +01:00

Rename chunk -> token

This commit is contained in:
Oliver Steele 2017-07-09 11:18:35 -04:00
parent 6f7b67f4d7
commit 69d26a21ad
12 changed files with 90 additions and 94 deletions

View File

@ -18,10 +18,6 @@ import (
// Bindings is a map of variable names to values.
type Bindings map[string]interface{}
// TagParser parses the argument string "args" from a tag "{% tagname args %}",
// and returns a renderer.
// type TagParser func(chunks.RenderContext) (string, error)
// A Renderer returns the rendered string for a block.
type Renderer func(render.Context) (string, error)

View File

@ -9,7 +9,7 @@ type ASTNode interface{}
// ASTBlock represents a {% tag %}…{% endtag %}.
type ASTBlock struct {
Chunk
Token
syntax BlockSyntax
Body []ASTNode // Body is the nodes before the first branch
Branches []*ASTBlock // E.g. else and elseif w/in an if
@ -20,19 +20,19 @@ type ASTRaw struct {
Slices []string
}
// ASTTag is a tag.
// ASTTag is a tag {% tag %} that is not a block start or end.
type ASTTag struct {
Chunk
Token
}
// ASTText is a text chunk, that is rendered verbatim.
// ASTText is a text span that is rendered verbatim.
type ASTText struct {
Chunk
Token
}
// ASTObject is an {{ object }} object.
type ASTObject struct {
Chunk
Token
Expr expression.Expression
}

View File

@ -1,16 +0,0 @@
// Code generated by "stringer -type=ChunkType"; DO NOT EDIT.
package parser
import "fmt"
const _ChunkType_name = "TextChunkTypeTagChunkTypeObjChunkType"
var _ChunkType_index = [...]uint8{0, 13, 25, 37}
func (i ChunkType) String() string {
if i < 0 || i >= ChunkType(len(_ChunkType_index)-1) {
return fmt.Sprintf("ChunkType(%d)", i)
}
return _ChunkType_name[_ChunkType_index[i]:_ChunkType_index[i+1]]
}

View File

@ -19,11 +19,11 @@ func parseErrorf(format string, a ...interface{}) ParseError {
// Parse parses a source template. It returns an AST root, that can be compiled and evaluated.
func (c Config) Parse(source string) (ASTNode, error) {
tokens := Scan(source, "")
return c.parseChunks(tokens)
return c.parseTokens(tokens)
}
// Parse creates an AST from a sequence of Chunks.
func (c Config) parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocyclo
// Parse creates an AST from a sequence of tokens.
func (c Config) parseTokens(tokens []Token) (ASTNode, error) { // nolint: gocyclo
// a stack of control tag state, for matching nested {%if}{%endif%} etc.
type frame struct {
syntax BlockSyntax
@ -41,35 +41,35 @@ func (c Config) parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocycl
inComment = false
inRaw = false
)
for _, ch := range chunks {
for _, tok := range tokens {
switch {
// The parser needs to know about comment and raw, because tags inside
// needn't match each other e.g. {%comment%}{%if%}{%endcomment%}
// TODO is this true?
case inComment:
if ch.Type == TagChunkType && ch.Name == "endcomment" {
if tok.Type == TagTokenType && tok.Name == "endcomment" {
inComment = false
}
case inRaw:
if ch.Type == TagChunkType && ch.Name == "endraw" {
if tok.Type == TagTokenType && tok.Name == "endraw" {
inRaw = false
} else {
rawTag.Slices = append(rawTag.Slices, ch.Source)
rawTag.Slices = append(rawTag.Slices, tok.Source)
}
case ch.Type == ObjChunkType:
expr, err := expression.Parse(ch.Args)
case tok.Type == ObjTokenType:
expr, err := expression.Parse(tok.Args)
if err != nil {
return nil, err
}
*ap = append(*ap, &ASTObject{ch, expr})
case ch.Type == TextChunkType:
*ap = append(*ap, &ASTText{Chunk: ch})
case ch.Type == TagChunkType:
if cs, ok := g.BlockSyntax(ch.Name); ok {
*ap = append(*ap, &ASTObject{tok, expr})
case tok.Type == TextTokenType:
*ap = append(*ap, &ASTText{Token: tok})
case tok.Type == TagTokenType:
if cs, ok := g.BlockSyntax(tok.Name); ok {
switch {
case ch.Name == "comment":
case tok.Name == "comment":
inComment = true
case ch.Name == "raw":
case tok.Name == "raw":
inRaw = true
rawTag = &ASTRaw{}
*ap = append(*ap, rawTag)
@ -78,17 +78,17 @@ func (c Config) parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocycl
if sd != nil {
suffix = "; immediate parent is " + sd.TagName()
}
return nil, parseErrorf("%s not inside %s%s", ch.Name, strings.Join(cs.ParentTags(), " or "), suffix)
return nil, parseErrorf("%s not inside %s%s", tok.Name, strings.Join(cs.ParentTags(), " or "), suffix)
case cs.IsBlockStart():
push := func() {
stack = append(stack, frame{syntax: sd, node: bn, ap: ap})
sd, bn = cs, &ASTBlock{Chunk: ch, syntax: cs}
sd, bn = cs, &ASTBlock{Token: tok, syntax: cs}
*ap = append(*ap, bn)
}
push()
ap = &bn.Body
case cs.IsBranch():
n := &ASTBlock{Chunk: ch, syntax: cs}
n := &ASTBlock{Token: tok, syntax: cs}
bn.Branches = append(bn.Branches, n)
ap = &n.Body
case cs.IsBlockEnd():
@ -99,10 +99,10 @@ func (c Config) parseChunks(chunks []Chunk) (ASTNode, error) { // nolint: gocycl
}
pop()
default:
panic(fmt.Errorf("block type %q", ch.Name))
panic(fmt.Errorf("block type %q", tok.Name))
}
} else {
*ap = append(*ap, &ASTTag{ch})
*ap = append(*ap, &ASTTag{tok})
}
}
}

View File

@ -5,35 +5,35 @@ import (
"strings"
)
var chunkMatcher = regexp.MustCompile(`{{\s*(.+?)\s*}}|{%\s*(\w+)(?:\s+((?:[^%]|%[^}])+?))?\s*%}`)
var tokenMatcher = regexp.MustCompile(`{{\s*(.+?)\s*}}|{%\s*(\w+)(?:\s+((?:[^%]|%[^}])+?))?\s*%}`)
// Scan breaks a string into a sequence of Chunks.
func Scan(data string, pathname string) []Chunk {
// Scan breaks a string into a sequence of Tokens.
func Scan(data string, pathname string) []Token {
// TODO error on unterminated {{ and {%
// TODO probably an error when a tag contains a {{ or {%, at least outside of a string
var (
p, pe = 0, len(data)
si = SourceInfo{pathname, 1}
out = make([]Chunk, 0)
out = make([]Token, 0)
)
for _, m := range chunkMatcher.FindAllStringSubmatchIndex(data, -1) {
for _, m := range tokenMatcher.FindAllStringSubmatchIndex(data, -1) {
ts, te := m[0], m[1]
if p < ts {
out = append(out, Chunk{Type: TextChunkType, SourceInfo: si, Source: data[p:ts]})
out = append(out, Token{Type: TextTokenType, SourceInfo: si, Source: data[p:ts]})
si.lineNo += strings.Count(data[p:ts], "\n")
}
source := data[ts:te]
switch data[ts+1] {
case '{':
out = append(out, Chunk{
Type: ObjChunkType,
out = append(out, Token{
Type: ObjTokenType,
SourceInfo: si,
Source: source,
Args: data[m[2]:m[3]],
})
case '%':
c := Chunk{
Type: TagChunkType,
c := Token{
Type: TagTokenType,
SourceInfo: si,
Source: source,
Name: data[m[4]:m[5]],
@ -47,7 +47,7 @@ func Scan(data string, pathname string) []Chunk {
p = te
}
if p < pe {
out = append(out, Chunk{Type: TextChunkType, SourceInfo: si, Source: data[p:]})
out = append(out, Token{Type: TextTokenType, SourceInfo: si, Source: data[p:]})
}
return out
}

View File

@ -26,37 +26,37 @@ func TestChunkScanner(t *testing.T) {
tokens := Scan("12", "")
require.NotNil(t, tokens)
require.Len(t, tokens, 1)
require.Equal(t, TextChunkType, tokens[0].Type)
require.Equal(t, TextTokenType, tokens[0].Type)
require.Equal(t, "12", tokens[0].Source)
tokens = Scan("{{obj}}", "")
require.NotNil(t, tokens)
require.Len(t, tokens, 1)
require.Equal(t, ObjChunkType, tokens[0].Type)
require.Equal(t, ObjTokenType, tokens[0].Type)
require.Equal(t, "obj", tokens[0].Args)
tokens = Scan("{{ obj }}", "")
require.NotNil(t, tokens)
require.Len(t, tokens, 1)
require.Equal(t, ObjChunkType, tokens[0].Type)
require.Equal(t, ObjTokenType, tokens[0].Type)
require.Equal(t, "obj", tokens[0].Args)
tokens = Scan("{%tag args%}", "")
require.NotNil(t, tokens)
require.Len(t, tokens, 1)
require.Equal(t, TagChunkType, tokens[0].Type)
require.Equal(t, TagTokenType, tokens[0].Type)
require.Equal(t, "tag", tokens[0].Name)
require.Equal(t, "args", tokens[0].Args)
tokens = Scan("{% tag args %}", "")
require.NotNil(t, tokens)
require.Len(t, tokens, 1)
require.Equal(t, TagChunkType, tokens[0].Type)
require.Equal(t, TagTokenType, tokens[0].Type)
require.Equal(t, "tag", tokens[0].Name)
require.Equal(t, "args", tokens[0].Args)
tokens = Scan("pre{% tag args %}mid{{ object }}post", "")
require.Equal(t, `[TextChunkType{"pre"} TagChunkType{Tag:"tag", Args:"args"} TextChunkType{"mid"} ObjChunkType{"object"} TextChunkType{"post"}]`, fmt.Sprint(tokens))
require.Equal(t, `[TextTokenType{"pre"} TagTokenType{Tag:"tag", Args:"args"} TextTokenType{"mid"} ObjTokenType{"object"} TextTokenType{"post"}]`, fmt.Sprint(tokens))
for i, test := range scannerCountTests {
t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {

View File

@ -2,27 +2,27 @@ package parser
import "fmt"
// A Chunk is either an object {{a.b}}, a tag {%if a>b%}, or a text chunk (anything outside of {{}} and {%%}.)
type Chunk struct {
Type ChunkType
// A Token is either an object {{a.b}}, a tag {%if a>b%}, or a text span (anything outside of {{}} and {%%}.)
type Token struct {
Type TokenType
SourceInfo SourceInfo
Name string // Name is the tag name of a tag Token. E.g. the tag name of "{% if 1 %}" is "if".
Args string // Args is the tag arguments of a tag Token. E.g. the tag arguments of "{% if 1 %}" is "1".
Source string // Source is the entirety of the chunk, including the "{{", "{%", etc. markers.
Source string // Source is the entirety of the token, including the "{{", "{%", etc. markers.
}
// ChunkType is the type of a Chunk
type ChunkType int
// TokenType is the type of a Token
type TokenType int
//go:generate stringer -type=ChunkType
//go:generate stringer -type=TokenType
const (
// TextChunkType is the type of a text Chunk
TextChunkType ChunkType = iota
// TagChunkType is the type of a tag Chunk "{%…%}"
TagChunkType
// ObjChunkType is the type of an object Chunk "{{…}}"
ObjChunkType
// TextTokenType is the type of a text Token
TextTokenType TokenType = iota
// TagTokenType is the type of a tag Token "{%…%}"
TagTokenType
// ObjTokenType is the type of an object Token "{{…}}"
ObjTokenType
)
// SourceInfo contains a Token's source information
@ -31,13 +31,13 @@ type SourceInfo struct {
lineNo int
}
func (c Chunk) String() string {
func (c Token) String() string {
switch c.Type {
case TextChunkType:
case TextTokenType:
return fmt.Sprintf("%v{%#v}", c.Type, c.Source)
case TagChunkType:
case TagTokenType:
return fmt.Sprintf("%v{Tag:%#v, Args:%#v}", c.Type, c.Name, c.Args)
case ObjChunkType:
case ObjTokenType:
return fmt.Sprintf("%v{%#v}", c.Type, c.Args)
default:
return fmt.Sprintf("%v{%#v}", c.Type, c.Source)

View File

@ -0,0 +1,16 @@
// Code generated by "stringer -type=TokenType"; DO NOT EDIT.
package parser
import "fmt"
const _TokenType_name = "TextTokenTypeTagTokenTypeObjTokenType"
var _TokenType_index = [...]uint8{0, 13, 25, 37}
func (i TokenType) String() string {
if i < 0 || i >= TokenType(len(_TokenType_index)-1) {
return fmt.Sprintf("TokenType(%d)", i)
}
return _TokenType_name[_TokenType_index[i]:_TokenType_index[i+1]]
}

View File

@ -42,7 +42,7 @@ func (c Config) compileNode(n parser.ASTNode) (Node, error) {
return nil, compilationErrorf("undefined tag %q", n.Name)
}
node := BlockNode{
Chunk: n.Chunk,
Token: n.Token,
Body: body,
Branches: branches,
}
@ -68,13 +68,13 @@ func (c Config) compileNode(n parser.ASTNode) (Node, error) {
if err != nil {
return nil, err
}
return &TagNode{n.Chunk, f}, nil
return &TagNode{n.Token, f}, nil
}
return nil, compilationErrorf("unknown tag: %s", n.Name)
case *parser.ASTText:
return &TextNode{n.Chunk}, nil
return &TextNode{n.Token}, nil
case *parser.ASTObject:
return &ObjectNode{n.Chunk, n.Expr}, nil
return &ObjectNode{n.Token, n.Expr}, nil
default:
panic(fmt.Errorf("un-compilable node type %T", n))
}

View File

@ -134,9 +134,9 @@ func (c rendererContext) SourceFile() string {
func (c rendererContext) TagArgs() string {
switch {
case c.node != nil:
return c.node.Chunk.Args
return c.node.Token.Args
case c.cn != nil:
return c.cn.Chunk.Args
return c.cn.Token.Args
default:
return ""
}
@ -145,9 +145,9 @@ func (c rendererContext) TagArgs() string {
func (c rendererContext) TagName() string {
switch {
case c.node != nil:
return c.node.Chunk.Name
return c.node.Token.Name
case c.cn != nil:
return c.cn.Chunk.Name
return c.cn.Token.Name
default:
return ""
}

View File

@ -4,7 +4,7 @@ import (
"github.com/osteele/liquid/expression"
)
// nodeContext is the evaluation context for chunk AST rendering.
// nodeContext provides the evaluation context for rendering the AST.
type nodeContext struct {
bindings map[string]interface{}
config Config

View File

@ -13,7 +13,7 @@ type Node interface {
// BlockNode represents a {% tag %}…{% endtag %}.
type BlockNode struct {
parser.Chunk
parser.Token
renderer func(io.Writer, Context) error
Body []Node
Branches []*BlockNode
@ -26,18 +26,18 @@ type RawNode struct {
// TagNode renders itself via a render function that is created during parsing.
type TagNode struct {
parser.Chunk
parser.Token
renderer func(io.Writer, Context) error
}
// TextNode is a text span that is rendered verbatim.
type TextNode struct {
parser.Chunk
parser.Token
}
// ObjectNode is an {{ object }} object.
type ObjectNode struct {
parser.Chunk
parser.Token
expr expression.Expression
}