mirror of https://github.com/danog/blackfriday.git synced 2024-11-26 20:14:43 +01:00

Several fixes after code review

Vytautas Šaltenis 2017-02-02 11:52:49 +02:00
parent 120bb2fae1
commit ea57e93666
4 changed files with 44 additions and 37 deletions


@@ -764,7 +764,7 @@ func (p *parser) table(data []byte) int {
         }
         // include the newline in data sent to tableRow
-        if i < len(data)-1 && data[i] == '\n' {
+        if i < len(data) && data[i] == '\n' {
             i++
         }
         p.tableRow(data[rowStart:i], columns, false)
@@ -945,7 +945,7 @@ func (p *parser) quotePrefix(data []byte) int {
         i++
     }
     if i < len(data) && data[i] == '>' {
-        if i < len(data)-1 && data[i+1] == ' ' {
+        if i+1 < len(data) && data[i+1] == ' ' {
             return i + 2
         }
         return i + 1
@@ -1005,7 +1005,7 @@ func (p *parser) quote(data []byte) int {
 // returns prefix length for block code
 func (p *parser) codePrefix(data []byte) int {
-    if data[0] == '\t' {
+    if len(data) >= 1 && data[0] == '\t' {
         return 1
     }
     if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
@@ -1073,7 +1073,7 @@ func (p *parser) uliPrefix(data []byte) int {
     if i >= len(data)-1 {
         return 0
     }
-    // need a *, +, or - followed by a space
+    // need one of {'*', '+', '-'} followed by a space or a tab
     if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
         (data[i+1] != ' ' && data[i+1] != '\t') {
         return 0
@@ -1099,7 +1099,7 @@ func (p *parser) oliPrefix(data []byte) int {
         return 0
     }
-    // we need >= 1 digits followed by a dot and a space
+    // we need >= 1 digits followed by a dot and a space or a tab
     if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
         return 0
     }
@@ -1112,7 +1112,7 @@ func (p *parser) dliPrefix(data []byte) int {
         return 0
     }
     i := 0
-    // need a : followed by a spaces
+    // need a ':' followed by a space or a tab
     if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
         return 0
     }
@@ -1194,11 +1194,12 @@ func finalizeList(block *Node) {
 func (p *parser) listItem(data []byte, flags *ListType) int {
     // keep track of the indentation of the first line
     itemIndent := 0
-    if data[itemIndent] == '\t' {
+    if data[0] == '\t' {
         itemIndent += 4
-    }
+    } else {
         for itemIndent < 3 && data[itemIndent] == ' ' {
             itemIndent++
+        }
     }

     var bulletChar byte = '*'
@@ -1436,8 +1437,7 @@ func (p *parser) paragraph(data []byte) int {
     // did this blank line followed by a definition list item?
     if p.flags&DefinitionLists != 0 {
         if i < len(data)-1 && data[i+1] == ':' {
-            ret := p.list(data[prev:], ListTypeDefinition)
-            return ret
+            return p.list(data[prev:], ListTypeDefinition)
         }
     }


@@ -38,11 +38,11 @@ func BenchmarkEscapeHTML(b *testing.B) {
         []byte("[1]: http://example.com/?foo=1&bar=2"),
         []byte("[2]: http://att.com/ \"AT&T\""),
     }
-    var buff bytes.Buffer
+    var buf bytes.Buffer
     for n := 0; n < b.N; n++ {
         for _, t := range tests {
-            escapeHTML(&buff, t)
-            buff.Reset()
+            escapeHTML(&buf, t)
+            buf.Reset()
         }
     }
 }

html.go

@@ -559,11 +559,11 @@ func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
     } else {
         if entering {
             dest = r.addAbsPrefix(dest)
-            var hrefBuff bytes.Buffer
-            hrefBuff.WriteString("href=\"")
-            escLink(&hrefBuff, dest)
-            hrefBuff.WriteByte('"')
-            attrs = append(attrs, hrefBuff.String())
+            var hrefBuf bytes.Buffer
+            hrefBuf.WriteString("href=\"")
+            escLink(&hrefBuf, dest)
+            hrefBuf.WriteByte('"')
+            attrs = append(attrs, hrefBuf.String())
             if node.NoteID != 0 {
                 r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
                 break
@@ -939,17 +939,17 @@ func (r *HTMLRenderer) writeDocumentFooter(w *bytes.Buffer) {
 func (r *HTMLRenderer) Render(ast *Node) []byte {
     //println("render_Blackfriday")
     //dump(ast)
-    var buff bytes.Buffer
-    r.writeDocumentHeader(&buff)
+    var buf bytes.Buffer
+    r.writeDocumentHeader(&buf)
     if r.Extensions&TOC != 0 || r.Extensions&OmitContents != 0 {
-        r.writeTOC(&buff, ast)
+        r.writeTOC(&buf, ast)
         if r.Extensions&OmitContents != 0 {
-            return buff.Bytes()
+            return buf.Bytes()
         }
     }
     ast.Walk(func(node *Node, entering bool) WalkStatus {
-        return r.RenderNode(&buff, node, entering)
+        return r.RenderNode(&buf, node, entering)
     })
-    r.writeDocumentFooter(&buff)
-    return buff.Bytes()
+    r.writeDocumentFooter(&buf)
+    return buf.Bytes()
 }


@@ -715,14 +715,11 @@ func linkEndsWithEntity(data []byte, linkEnd int) bool {
     return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
 }
-var prefixes = [][]byte{
-    []byte("http://"),
-    []byte("https://"),
-    []byte("ftp://"),
-    []byte("file://"),
-    []byte("mailto:"),
-}
+// hasPrefixCaseInsensitive is a custom implementation of
+// strings.HasPrefix(strings.ToLower(s), prefix)
+// we rolled our own because ToLower pulls in a huge machinery of lowercasing
+// anything from Unicode and that's very slow. Since this func will only be
+// used on ASCII protocol prefixes, we can take shortcuts.
 func hasPrefixCaseInsensitive(s, prefix []byte) bool {
     if len(s) < len(prefix) {
         return false
@@ -736,12 +733,22 @@ func hasPrefixCaseInsensitive(s, prefix []byte) bool {
     return true
 }
+var protocolPrefixes = [][]byte{
+    []byte("http://"),
+    []byte("https://"),
+    []byte("ftp://"),
+    []byte("file://"),
+    []byte("mailto:"),
+}
+
+const shortestPrefix = 6 // len("ftp://"), the shortest of the above
+
 func maybeAutoLink(p *parser, data []byte, offset int) (int, *Node) {
     // quick check to rule out most false hits
-    if p.insideLink || len(data) < offset+6 { // 6 is the len() of the shortest of the prefixes
+    if p.insideLink || len(data) < offset+shortestPrefix {
         return 0, nil
     }
-    for _, prefix := range prefixes {
+    for _, prefix := range protocolPrefixes {
         endOfHead := offset + 8 // 8 is the len() of the longest prefix
         if endOfHead > len(data) {
             endOfHead = len(data)
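
The comment introduced above describes hasPrefixCaseInsensitive as an ASCII-only shortcut around strings.HasPrefix(strings.ToLower(s), prefix), and the new protocolPrefixes / shortestPrefix declarations feed the cheap length test in maybeAutoLink. A minimal self-contained sketch of that pattern follows; it is illustrative only, and asciiHasPrefixFold and looksLikeAutoLink are hypothetical names, not the function bodies from this commit.

package main

import "fmt"

// asciiHasPrefixFold reports whether s starts with prefix, ignoring ASCII
// case only. prefix is assumed to be lower-case, which holds for protocol
// prefixes such as "http://" or "mailto:".
func asciiHasPrefixFold(s, prefix []byte) bool {
    if len(s) < len(prefix) {
        return false
    }
    for i, b := range prefix {
        c := s[i]
        // fold ASCII upper-case to lower-case before comparing
        if c >= 'A' && c <= 'Z' {
            c += 'a' - 'A'
        }
        if c != b {
            return false
        }
    }
    return true
}

// protocolPrefixes and shortestPrefix mirror the declarations added in the
// diff above; looksLikeAutoLink is a stand-in showing how the length test
// rules out most positions before any prefix comparison is attempted.
var protocolPrefixes = [][]byte{
    []byte("http://"),
    []byte("https://"),
    []byte("ftp://"),
    []byte("file://"),
    []byte("mailto:"),
}

const shortestPrefix = 6 // len("ftp://"), the shortest of the above

func looksLikeAutoLink(data []byte, offset int) bool {
    if len(data) < offset+shortestPrefix {
        return false // too short to hold even the shortest prefix
    }
    for _, prefix := range protocolPrefixes {
        if asciiHasPrefixFold(data[offset:], prefix) {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(looksLikeAutoLink([]byte("see HTTPS://example.com"), 4))  // true
    fmt.Println(looksLikeAutoLink([]byte("plain text, no link here"), 0)) // false
}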