2011-05-30 01:00:31 +02:00
|
|
|
//
|
2011-06-28 04:11:32 +02:00
|
|
|
// Blackfriday Markdown Processor
|
|
|
|
// Available at http://github.com/russross/blackfriday
|
|
|
|
//
|
|
|
|
// Copyright © 2011 Russ Ross <russ@russross.com>.
|
2011-06-28 19:30:10 +02:00
|
|
|
// Distributed under the Simplified BSD License.
|
2011-06-28 04:11:32 +02:00
|
|
|
// See README.md for details.
|
2011-05-30 01:00:31 +02:00
|
|
|
//
|
|
|
|
|
|
|
|
//
|
|
|
|
// Functions to parse inline elements.
|
|
|
|
//
|
|
|
|
|
|
|
|
package blackfriday
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2014-01-25 20:42:34 +01:00
|
|
|
"regexp"
|
2013-07-01 03:37:52 +02:00
|
|
|
"strconv"
|
2011-05-30 01:00:31 +02:00
|
|
|
)
|
|
|
|
|
2014-01-25 20:42:34 +01:00
|
|
|
var (
	// urlRe matches an absolute http/https/ftp URL, or an absolute path
	// starting with '/'.
	urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
	// anchorRe matches a complete, already-rendered <a href="URL">URL</a>
	// element at the start of input; used by autoLink to avoid
	// re-processing anchors that are already HTML.
	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
)
|
|
|
|
|
2011-05-30 01:00:31 +02:00
|
|
|
// Functions to parse text within a block
|
|
|
|
// Each function returns the number of chars taken care of
|
|
|
|
// data is the complete block being rendered
|
|
|
|
// offset is the number of valid chars before the current cursor
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
// inline parses the inline elements of a single block, appending the
// resulting nodes to p.currBlock. It walks data byte by byte, batching
// runs of inactive characters into text nodes and dispatching active
// characters to the registered p.inlineCallback handlers. Recursion
// depth is bounded by p.maxNesting.
func (p *parser) inline(data []byte) {
	// this is called recursively: enforce a maximum depth
	if p.nesting >= p.maxNesting {
		return
	}
	p.nesting++

	// i marks the start of the current run of inactive characters,
	// end is the scanning cursor.
	i, end := 0, 0
	for i < len(data) {
		// Stop at EOL
		if data[i] == '\n' && i+1 == len(data) {
			break
		}
		// Copy inactive chars into the output, but first check for one quirk:
		// 'h', 'm' and 'f' all might trigger a check for autolink processing
		// and end this run of inactive characters. However, there's one nasty
		// case where breaking this run would be bad: in smartypants fraction
		// detection, we expect things like "1/2th" to be in a single run. So
		// we check here if an 'h' is followed by 't' (from 'http') and if it's
		// not, we short circuit the 'h' into the run of inactive characters.
		//
		// Also, in a similar fashion maybeLineBreak breaks this run of chars,
		// but smartDash processor relies on seeing context around the dashes.
		// Fix this somehow.
		for end < len(data) {
			if data[end] == ' ' {
				consumed, br := maybeLineBreak(p, data, end)
				if consumed > 0 {
					// flush the inactive run, then the break itself
					p.currBlock.appendChild(text(data[i:end]))
					if br {
						p.currBlock.appendChild(NewNode(Hardbreak))
					}
					i = end
					i += consumed
					end = i
				} else {
					end++
				}
				continue
			}
			if p.inlineCallback[data[end]] != nil {
				// 'h' not followed by 't' cannot start "http"; keep it inactive
				if end+1 < len(data) && data[end] == 'h' && data[end+1] != 't' {
					end++
				} else {
					break
				}
			} else {
				end++
			}
		}

		// flush the run of inactive characters collected so far
		p.currBlock.appendChild(text(data[i:end]))

		if end >= len(data) {
			break
		}
		i = end

		// call the trigger
		handler := p.inlineCallback[data[end]]
		if consumed := handler(p, data, i); consumed == 0 {
			// no action from the callback; buffer the byte for later
			end = i + 1
		} else {
			// skip past whatever the callback used
			i += consumed
			end = i
		}
	}

	p.nesting--
}
|
|
|
|
|
|
|
|
// single and double emphasis parsing
|
2015-11-01 20:35:43 +01:00
|
|
|
func emphasis(p *parser, data []byte, offset int) int {
|
2011-05-30 01:00:31 +02:00
|
|
|
data = data[offset:]
|
|
|
|
c := data[0]
|
|
|
|
ret := 0
|
|
|
|
|
|
|
|
if len(data) > 2 && data[1] != c {
|
|
|
|
// whitespace cannot follow an opening emphasis;
|
|
|
|
// strikethrough only takes two characters '~~'
|
|
|
|
if c == '~' || isspace(data[1]) {
|
|
|
|
return 0
|
|
|
|
}
|
2015-11-01 20:57:30 +01:00
|
|
|
if ret = helperEmphasis(p, data[1:], c); ret == 0 {
|
2011-05-30 01:00:31 +02:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret + 1
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(data) > 3 && data[1] == c && data[2] != c {
|
|
|
|
if isspace(data[2]) {
|
|
|
|
return 0
|
|
|
|
}
|
2015-11-01 20:57:30 +01:00
|
|
|
if ret = helperDoubleEmphasis(p, data[2:], c); ret == 0 {
|
2011-05-30 01:00:31 +02:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret + 2
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
|
|
|
|
if c == '~' || isspace(data[3]) {
|
|
|
|
return 0
|
|
|
|
}
|
2015-11-01 20:57:30 +01:00
|
|
|
if ret = helperTripleEmphasis(p, data, 3, c); ret == 0 {
|
2011-05-30 01:00:31 +02:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret + 3
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
func codeSpan(p *parser, data []byte, offset int) int {
|
2011-05-30 01:00:31 +02:00
|
|
|
data = data[offset:]
|
|
|
|
|
|
|
|
nb := 0
|
|
|
|
|
|
|
|
// count the number of backticks in the delimiter
|
|
|
|
for nb < len(data) && data[nb] == '`' {
|
|
|
|
nb++
|
|
|
|
}
|
|
|
|
|
|
|
|
// find the next delimiter
|
|
|
|
i, end := 0, 0
|
|
|
|
for end = nb; end < len(data) && i < nb; end++ {
|
|
|
|
if data[end] == '`' {
|
|
|
|
i++
|
|
|
|
} else {
|
|
|
|
i = 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-27 22:35:11 +02:00
|
|
|
// no matching delimiter?
|
2011-05-30 01:00:31 +02:00
|
|
|
if i < nb && end >= len(data) {
|
2011-06-27 22:35:11 +02:00
|
|
|
return 0
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// trim outside whitespace
|
2011-06-29 00:02:12 +02:00
|
|
|
fBegin := nb
|
2011-07-01 18:03:03 +02:00
|
|
|
for fBegin < end && data[fBegin] == ' ' {
|
2011-06-29 00:02:12 +02:00
|
|
|
fBegin++
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
|
|
|
|
2011-06-29 00:02:12 +02:00
|
|
|
fEnd := end - nb
|
2011-07-01 18:03:03 +02:00
|
|
|
for fEnd > fBegin && data[fEnd-1] == ' ' {
|
2011-06-29 00:02:12 +02:00
|
|
|
fEnd--
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
|
|
|
|
2011-06-27 22:35:11 +02:00
|
|
|
// render the code span
|
2011-06-29 23:38:35 +02:00
|
|
|
if fBegin != fEnd {
|
2016-03-30 13:38:19 +02:00
|
|
|
code := NewNode(Code)
|
|
|
|
code.Literal = data[fBegin:fEnd]
|
|
|
|
p.currBlock.appendChild(code)
|
2011-06-29 23:38:35 +02:00
|
|
|
}
|
2011-05-30 01:00:31 +02:00
|
|
|
|
|
|
|
return end
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2011-06-25 23:45:51 +02:00
|
|
|
// newline preceded by two spaces becomes <br>
|
2015-11-01 20:35:43 +01:00
|
|
|
func maybeLineBreak(p *parser, data []byte, offset int) (int, bool) {
|
2015-10-27 20:33:43 +01:00
|
|
|
origOffset := offset
|
|
|
|
for offset < len(data) && data[offset] == ' ' {
|
|
|
|
offset++
|
2011-06-28 00:06:16 +02:00
|
|
|
}
|
2015-10-27 20:33:43 +01:00
|
|
|
if offset < len(data) && data[offset] == '\n' {
|
|
|
|
if offset-origOffset >= 2 {
|
2015-10-31 13:52:46 +01:00
|
|
|
return offset - origOffset + 1, true
|
2015-10-27 20:33:43 +01:00
|
|
|
}
|
2015-10-31 13:52:46 +01:00
|
|
|
return offset - origOffset, false
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
2015-10-31 13:52:46 +01:00
|
|
|
return 0, false
|
2015-10-27 20:33:43 +01:00
|
|
|
}
|
2011-05-30 01:00:31 +02:00
|
|
|
|
2015-10-27 20:33:43 +01:00
|
|
|
// newline without two spaces works when HardLineBreak is enabled
|
2015-11-01 20:35:43 +01:00
|
|
|
func lineBreak(p *parser, data []byte, offset int) int {
|
2015-10-27 20:33:43 +01:00
|
|
|
if p.flags&HardLineBreak != 0 {
|
2016-03-30 13:38:19 +02:00
|
|
|
p.currBlock.appendChild(NewNode(Hardbreak))
|
2015-10-27 20:33:43 +01:00
|
|
|
return 1
|
2015-04-22 14:04:08 +02:00
|
|
|
}
|
2015-10-27 20:33:43 +01:00
|
|
|
return 0
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
|
|
|
|
2013-06-25 03:18:47 +02:00
|
|
|
// linkType classifies what a '[' (possibly preceded by '!' or '^')
// introduces, so link() can branch on the flavor being parsed.
type linkType int

const (
	linkNormal           linkType = iota // [text](url) or [text][ref]
	linkImg                              // ![alt](url)
	linkDeferredFootnote                 // [^refId], body defined elsewhere
	linkInlineFootnote                   // ^[text], body defined in place
)
|
|
|
|
|
2015-10-12 20:18:33 +02:00
|
|
|
func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
|
|
|
|
if t == linkDeferredFootnote {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
|
|
|
|
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
func maybeImage(p *parser, data []byte, offset int) int {
|
2015-10-26 19:47:20 +01:00
|
|
|
if offset < len(data)-1 && data[offset+1] == '[' {
|
2015-11-01 20:57:30 +01:00
|
|
|
return link(p, data, offset)
|
2015-10-26 19:47:20 +01:00
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
func maybeInlineFootnote(p *parser, data []byte, offset int) int {
|
2015-10-26 19:47:20 +01:00
|
|
|
if offset < len(data)-1 && data[offset+1] == '[' {
|
2015-11-01 20:57:30 +01:00
|
|
|
return link(p, data, offset)
|
2015-10-26 19:47:20 +01:00
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2013-06-25 03:18:47 +02:00
|
|
|
// '[': parse a link or an image or a footnote
|
2015-11-01 20:35:43 +01:00
|
|
|
// link parses a '[' construct: an inline/reference link, an image, or an
// inline/deferred footnote. It appends the resulting node (with its
// children) to p.currBlock and returns the number of bytes consumed, or
// 0 when no well-formed construct starts at offset.
func link(p *parser, data []byte, offset int) int {
	// no links allowed inside regular links, footnote, and deferred footnotes
	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
		return 0
	}

	// classify the construct and advance offset past any '!'/'^' prefix
	var t linkType
	switch {
	// special case: ![^text] == deferred footnote (that follows something with
	// an exclamation point)
	case p.flags&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
		t = linkDeferredFootnote
	// ![alt] == image
	case offset >= 0 && data[offset] == '!':
		t = linkImg
		offset += 1
	// ^[text] == inline footnote
	// [^refId] == deferred footnote
	case p.flags&Footnotes != 0:
		if offset >= 0 && data[offset] == '^' {
			t = linkInlineFootnote
			offset += 1
		} else if len(data)-1 > offset && data[offset+1] == '^' {
			t = linkDeferredFootnote
		}
	// [text] == regular link
	default:
		t = linkNormal
	}

	data = data[offset:]

	var (
		i                       = 1
		noteId                  int
		title, link, altContent []byte
		textHasNl               = false
	)

	if t == linkDeferredFootnote {
		i++
	}

	// look for the matching closing bracket
	for level := 1; level > 0 && i < len(data); i++ {
		switch {
		case data[i] == '\n':
			textHasNl = true

		case data[i-1] == '\\':
			continue

		case data[i] == '[':
			level++

		case data[i] == ']':
			level--
			if level <= 0 {
				i-- // compensate for extra i++ in for loop
			}
		}
	}

	if i >= len(data) {
		return 0
	}

	// txtE is the index just past the link text
	txtE := i
	i++

	// skip any amount of whitespace or newline
	// (this is much more lax than original markdown syntax)
	for i < len(data) && isspace(data[i]) {
		i++
	}

	// inline style link
	switch {
	case i < len(data) && data[i] == '(':
		// skip initial whitespace
		i++

		for i < len(data) && isspace(data[i]) {
			i++
		}

		linkB := i

		// look for link end: ' " )
	findlinkend:
		for i < len(data) {
			switch {
			case data[i] == '\\':
				i += 2

			case data[i] == ')' || data[i] == '\'' || data[i] == '"':
				break findlinkend

			default:
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		linkE := i

		// look for title end if present
		titleB, titleE := 0, 0
		if data[i] == '\'' || data[i] == '"' {
			i++
			titleB = i

		findtitleend:
			for i < len(data) {
				switch {
				case data[i] == '\\':
					i += 2

				case data[i] == ')':
					break findtitleend

				default:
					i++
				}
			}

			if i >= len(data) {
				return 0
			}

			// skip whitespace after title
			titleE = i - 1
			for titleE > titleB && isspace(data[titleE]) {
				titleE--
			}

			// check for closing quote presence
			if data[titleE] != '\'' && data[titleE] != '"' {
				titleB, titleE = 0, 0
				linkE = i
			}
		}

		// remove whitespace at the end of the link
		for linkE > linkB && isspace(data[linkE-1]) {
			linkE--
		}

		// remove optional angle brackets around the link
		if data[linkB] == '<' {
			linkB++
		}
		if data[linkE-1] == '>' {
			linkE--
		}

		// build escaped link and title
		if linkE > linkB {
			link = data[linkB:linkE]
		}

		if titleE > titleB {
			title = data[titleB:titleE]
		}

		i++

	// reference style link
	case isReferenceStyleLink(data, i, t):
		var id []byte
		altContentConsidered := false

		// look for the id
		i++
		linkB := i
		for i < len(data) && data[i] != ']' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		linkE := i

		// find the reference
		if linkB == linkE {
			// empty second bracket pair: the link text doubles as the id;
			// collapse newlines in it to single spaces
			if textHasNl {
				var b bytes.Buffer

				for j := 1; j < txtE; j++ {
					switch {
					case data[j] != '\n':
						b.WriteByte(data[j])
					case data[j-1] != ' ':
						b.WriteByte(' ')
					}
				}

				id = b.Bytes()
			} else {
				id = data[1:txtE]
				altContentConsidered = true
			}
		} else {
			id = data[linkB:linkE]
		}

		// find the reference with matching id
		lr, ok := p.getRef(string(id))
		if !ok {
			return 0
		}

		// keep link and title from reference
		link = lr.link
		title = lr.title
		if altContentConsidered {
			altContent = lr.text
		}
		i++

	// shortcut reference style link or reference or inline footnote
	default:
		var id []byte

		// craft the id
		if textHasNl {
			var b bytes.Buffer

			for j := 1; j < txtE; j++ {
				switch {
				case data[j] != '\n':
					b.WriteByte(data[j])
				case data[j-1] != ' ':
					b.WriteByte(' ')
				}
			}

			id = b.Bytes()
		} else {
			if t == linkDeferredFootnote {
				id = data[2:txtE] // get rid of the ^
			} else {
				id = data[1:txtE]
			}
		}

		if t == linkInlineFootnote {
			// create a new reference
			noteId = len(p.notes) + 1

			// fragment is the anchor target: a (truncated) slug of the id,
			// or a generated "footnote-N" name when the id is empty
			var fragment []byte
			if len(id) > 0 {
				if len(id) < 16 {
					fragment = make([]byte, len(id))
				} else {
					fragment = make([]byte, 16)
				}
				copy(fragment, slugify(id))
			} else {
				fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteId))...)
			}

			ref := &reference{
				noteId:   noteId,
				hasBlock: false,
				link:     fragment,
				title:    id,
			}

			p.notes = append(p.notes, ref)

			link = ref.link
			title = ref.title
		} else {
			// find the reference with matching id
			lr, ok := p.getRef(string(id))
			if !ok {
				return 0
			}

			if t == linkDeferredFootnote {
				lr.noteId = len(p.notes) + 1
				p.notes = append(p.notes, lr)
			}

			// keep link and title from reference
			link = lr.link
			// if inline footnote, title == footnote contents
			title = lr.title
			noteId = lr.noteId
		}

		// rewind the whitespace
		i = txtE + 1
	}

	// unescape the destination for regular links and images
	var uLink []byte
	if t == linkNormal || t == linkImg {
		if len(link) > 0 {
			var uLinkBuf bytes.Buffer
			unescapeText(&uLinkBuf, link)
			uLink = uLinkBuf.Bytes()
		}

		// links need something to click on and somewhere to go
		if len(uLink) == 0 || (t == linkNormal && txtE <= 1) {
			return 0
		}
	}

	// call the relevant rendering function
	switch t {
	case linkNormal:
		linkNode := NewNode(Link)
		linkNode.Destination = normalizeURI(uLink)
		linkNode.Title = title
		p.currBlock.appendChild(linkNode)
		if len(altContent) > 0 {
			linkNode.appendChild(text(altContent))
		} else {
			// links cannot contain other links, so turn off link parsing
			// temporarily and recurse
			insideLink := p.insideLink
			p.insideLink = true
			tmpNode := p.currBlock
			p.currBlock = linkNode
			p.inline(data[1:txtE])
			p.currBlock = tmpNode
			p.insideLink = insideLink
		}

	case linkImg:
		linkNode := NewNode(Image)
		linkNode.Destination = uLink
		linkNode.Title = title
		p.currBlock.appendChild(linkNode)
		linkNode.appendChild(text(data[1:txtE]))
		i += 1

	case linkInlineFootnote, linkDeferredFootnote:
		linkNode := NewNode(Link)
		linkNode.Destination = link
		linkNode.Title = title
		linkNode.NoteID = noteId
		p.currBlock.appendChild(linkNode)
		if t == linkInlineFootnote {
			i++
		}

	default:
		return 0
	}

	return i
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
func (p *parser) inlineHtmlComment(data []byte) int {
|
2015-10-11 10:01:48 +02:00
|
|
|
if len(data) < 5 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
i := 5
|
|
|
|
// scan for an end-of-comment marker, across lines if necessary
|
|
|
|
for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
// no end-of-comment marker
|
|
|
|
if i >= len(data) {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return i + 1
|
|
|
|
}
|
|
|
|
|
2016-03-30 13:38:19 +02:00
|
|
|
// stripMailto removes a leading "mailto:" or "mailto://" scheme from
// link, returning the bare address; other links pass through untouched.
func stripMailto(link []byte) []byte {
	switch {
	case bytes.HasPrefix(link, []byte("mailto://")):
		return link[len("mailto://"):]
	case bytes.HasPrefix(link, []byte("mailto:")):
		return link[len("mailto:"):]
	}
	return link
}
|
|
|
|
|
2011-05-30 01:00:31 +02:00
|
|
|
// '<' when tags or autolinks are allowed
|
2015-11-01 20:35:43 +01:00
|
|
|
// leftAngle handles '<' when tags or autolinks are allowed: an autolink
// (<http://...> or <addr@host>) becomes a Link node, an HTML comment or
// raw tag becomes an HtmlSpan node. Returns the bytes consumed, or 0.
func leftAngle(p *parser, data []byte, offset int) int {
	data = data[offset:]
	altype := LinkTypeNotAutolink
	end := tagLength(data, &altype)
	// an HTML comment overrides the tag length (and is never an autolink)
	if size := p.inlineHtmlComment(data); size > 0 {
		end = size
	}
	if end > 2 {
		if altype != LinkTypeNotAutolink {
			var uLink bytes.Buffer
			// strip the surrounding '<' and '>' before unescaping
			unescapeText(&uLink, data[1:end+1-2])
			if uLink.Len() > 0 {
				link := uLink.Bytes()
				node := NewNode(Link)
				node.Destination = link
				if altype == LinkTypeEmail {
					node.Destination = append([]byte("mailto:"), link...)
				}
				p.currBlock.appendChild(node)
				node.appendChild(text(stripMailto(link)))
			}
		} else {
			// raw HTML tag or comment: pass through verbatim
			htmlTag := NewNode(HtmlSpan)
			htmlTag.Literal = data[:end]
			p.currBlock.appendChild(htmlTag)
		}
	}

	return end
}
|
|
|
|
|
|
|
|
// '\\' backslash escape
// escapeChars is the set of punctuation characters that a backslash may
// escape; a backslash before any other byte is left alone by escape().
var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
|
2011-05-30 01:00:31 +02:00
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
func escape(p *parser, data []byte, offset int) int {
|
2011-05-30 01:00:31 +02:00
|
|
|
data = data[offset:]
|
|
|
|
|
|
|
|
if len(data) > 1 {
|
2015-10-27 20:33:43 +01:00
|
|
|
if p.flags&BackslashLineBreak != 0 && data[1] == '\n' {
|
2016-03-30 13:38:19 +02:00
|
|
|
p.currBlock.appendChild(NewNode(Hardbreak))
|
2015-10-27 20:33:43 +01:00
|
|
|
return 2
|
|
|
|
}
|
2011-05-30 01:00:31 +02:00
|
|
|
if bytes.IndexByte(escapeChars, data[1]) < 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2016-03-30 13:38:19 +02:00
|
|
|
p.currBlock.appendChild(text(data[1:2]))
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return 2
|
|
|
|
}
|
|
|
|
|
|
|
|
func unescapeText(ob *bytes.Buffer, src []byte) {
|
|
|
|
i := 0
|
|
|
|
for i < len(src) {
|
|
|
|
org := i
|
|
|
|
for i < len(src) && src[i] != '\\' {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
if i > org {
|
|
|
|
ob.Write(src[org:i])
|
|
|
|
}
|
|
|
|
|
|
|
|
if i+1 >= len(src) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
ob.WriteByte(src[i+1])
|
|
|
|
i += 2
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// '&' escaped when it doesn't belong to an entity
|
|
|
|
// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
|
2015-11-01 20:35:43 +01:00
|
|
|
func entity(p *parser, data []byte, offset int) int {
|
2011-05-30 01:00:31 +02:00
|
|
|
data = data[offset:]
|
|
|
|
|
|
|
|
end := 1
|
|
|
|
|
|
|
|
if end < len(data) && data[end] == '#' {
|
|
|
|
end++
|
|
|
|
}
|
|
|
|
|
|
|
|
for end < len(data) && isalnum(data[end]) {
|
|
|
|
end++
|
|
|
|
}
|
|
|
|
|
|
|
|
if end < len(data) && data[end] == ';' {
|
|
|
|
end++ // real entity
|
|
|
|
} else {
|
|
|
|
return 0 // lone '&'
|
|
|
|
}
|
|
|
|
|
2016-03-30 13:38:19 +02:00
|
|
|
ent := data[:end]
|
|
|
|
// undo & escaping or it will be converted to &amp; by another
|
|
|
|
// escaper in the renderer
|
|
|
|
if bytes.Equal(ent, []byte("&")) {
|
|
|
|
ent = []byte{'&'}
|
|
|
|
}
|
|
|
|
p.currBlock.appendChild(text(ent))
|
2011-05-30 01:00:31 +02:00
|
|
|
|
|
|
|
return end
|
|
|
|
}
|
|
|
|
|
2014-01-26 22:40:26 +01:00
|
|
|
func linkEndsWithEntity(data []byte, linkEnd int) bool {
|
|
|
|
entityRanges := htmlEntity.FindAllIndex(data[:linkEnd], -1)
|
2015-10-28 20:21:51 +01:00
|
|
|
return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
|
2014-01-26 22:40:26 +01:00
|
|
|
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
func maybeAutoLink(p *parser, data []byte, offset int) int {
|
2015-10-27 20:56:16 +01:00
|
|
|
// quick check to rule out most false hits
|
|
|
|
if p.insideLink || len(data) < offset+6 { // 6 is the len() of the shortest prefix below
|
2011-05-31 19:49:49 +02:00
|
|
|
return 0
|
|
|
|
}
|
2015-10-27 20:56:16 +01:00
|
|
|
prefixes := []string{
|
|
|
|
"http://",
|
|
|
|
"https://",
|
|
|
|
"ftp://",
|
|
|
|
"file://",
|
|
|
|
"mailto:",
|
|
|
|
}
|
|
|
|
for _, prefix := range prefixes {
|
|
|
|
endOfHead := offset + 8 // 8 is the len() of the longest prefix
|
|
|
|
if endOfHead > len(data) {
|
|
|
|
endOfHead = len(data)
|
|
|
|
}
|
|
|
|
head := bytes.ToLower(data[offset:endOfHead])
|
|
|
|
if bytes.HasPrefix(head, []byte(prefix)) {
|
2015-11-01 20:57:30 +01:00
|
|
|
return autoLink(p, data, offset)
|
2015-10-27 20:56:16 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
2011-05-31 19:49:49 +02:00
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
// autoLink parses a bare URL starting near offset into a Link node,
// trimming trailing punctuation and balancing a trailing close-delimiter
// against the surrounding line. Returns the number of bytes consumed
// past offset, or 0 when the text is not a safe/valid autolink.
func autoLink(p *parser, data []byte, offset int) int {
	// Now a more expensive check to see if we're not inside an anchor element
	anchorStart := offset
	offsetFromAnchor := 0
	for anchorStart > 0 && data[anchorStart] != '<' {
		anchorStart--
		offsetFromAnchor++
	}

	anchorStr := anchorRe.Find(data[anchorStart:])
	if anchorStr != nil {
		// already inside a rendered <a> element: emit the remainder of the
		// anchor verbatim instead of re-linking it
		anchorClose := NewNode(HtmlSpan)
		anchorClose.Literal = anchorStr[offsetFromAnchor:]
		p.currBlock.appendChild(anchorClose)
		return len(anchorStr) - offsetFromAnchor
	}

	// scan backward for a word boundary
	rewind := 0
	for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
		rewind++
	}
	if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
		return 0
	}

	origData := data
	data = data[offset-rewind:]

	if !isSafeLink(data) {
		return 0
	}

	// find the end of the URL: first whitespace or '<'
	linkEnd := 0
	for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
		linkEnd++
	}

	// Skip punctuation at the end of the link
	if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
		linkEnd--
	}

	// But don't skip semicolon if it's a part of escaped entity:
	if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
		linkEnd--
	}

	// See if the link finishes with a punctuation sign that can be closed.
	var copen byte
	switch data[linkEnd-1] {
	case '"':
		copen = '"'
	case '\'':
		copen = '\''
	case ')':
		copen = '('
	case ']':
		copen = '['
	case '}':
		copen = '{'
	default:
		copen = 0
	}

	if copen != 0 {
		bufEnd := offset - rewind + linkEnd - 2

		openDelim := 1

		/* Try to close the final punctuation sign in this same line;
		 * if we managed to close it outside of the URL, that means that it's
		 * not part of the URL. If it closes inside the URL, that means it
		 * is part of the URL.
		 *
		 * Examples:
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo (http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric))
		 *
		 *      (foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => foo http://www.pokemon.com/Pikachu_(Electric)
		 */

		for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
			if origData[bufEnd] == data[linkEnd-1] {
				openDelim++
			}

			if origData[bufEnd] == copen {
				openDelim--
			}

			bufEnd--
		}

		if openDelim == 0 {
			linkEnd--
		}
	}

	var uLink bytes.Buffer
	unescapeText(&uLink, data[:linkEnd])

	if uLink.Len() > 0 {
		node := NewNode(Link)
		node.Destination = uLink.Bytes()
		p.currBlock.appendChild(node)
		node.appendChild(text(uLink.Bytes()))
	}

	return linkEnd
}
|
|
|
|
|
2014-01-25 20:59:38 +01:00
|
|
|
func isEndOfLink(char byte) bool {
|
|
|
|
return isspace(char) || char == '<'
|
|
|
|
}
|
|
|
|
|
2015-02-25 14:27:13 +01:00
|
|
|
// validUris lists the lowercase URI scheme prefixes that isSafeLink
// accepts for autolinking.
// NOTE(review): "mailto://" is unusual — mailto URIs normally carry no
// "//" — confirm this matches the inputs actually seen.
var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}

// validPaths lists absolute/relative path prefixes that isSafeLink also
// accepts as safe link targets.
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
|
2011-05-30 01:00:31 +02:00
|
|
|
|
|
|
|
func isSafeLink(link []byte) bool {
|
2015-02-25 14:27:13 +01:00
|
|
|
for _, path := range validPaths {
|
|
|
|
if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
|
|
|
|
if len(link) == len(path) {
|
|
|
|
return true
|
|
|
|
} else if isalnum(link[len(path)]) {
|
2015-02-20 10:06:55 +01:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
2015-02-25 14:27:13 +01:00
|
|
|
}
|
|
|
|
|
2011-05-30 01:00:31 +02:00
|
|
|
for _, prefix := range validUris {
|
|
|
|
// TODO: handle unicode here
|
|
|
|
// case-insensitive prefix test
|
2011-05-30 22:42:38 +02:00
|
|
|
if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
|
2011-05-30 01:00:31 +02:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// tagLength returns the length of the HTML tag beginning at data[0],
// or 0 if data does not start with a valid tag. When the tag is an
// autolink (<http://...> or <user@host>) *autolink is set to the
// matching LinkType; otherwise it ends up as LinkTypeNotAutolink.
func tagLength(data []byte, autolink *LinkType) int {
	var i, j int

	// a valid tag can't be shorter than 3 chars
	if len(data) < 3 {
		return 0
	}

	// begins with a '<' optionally followed by '/', followed by letter or number
	if data[0] != '<' {
		return 0
	}
	if data[1] == '/' {
		i = 2
	} else {
		i = 1
	}

	if !isalnum(data[i]) {
		return 0
	}

	// scheme test
	*autolink = LinkTypeNotAutolink

	// try to find the beginning of an URI
	for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
		i++
	}

	// "<name@..." may be an e-mail autolink
	if i > 1 && i < len(data) && data[i] == '@' {
		if j = isMailtoAutoLink(data[i:]); j != 0 {
			*autolink = LinkTypeEmail
			return i + j
		}
	}

	// "<scheme:..." may be a URL autolink (scheme must be >= 2 chars)
	if i > 2 && i < len(data) && data[i] == ':' {
		*autolink = LinkTypeNormal
		i++
	}

	// complete autolink test: no whitespace or ' or "
	switch {
	case i >= len(data):
		// ran off the end before any closing '>'
		*autolink = LinkTypeNotAutolink
	// NOTE(review): assumes LinkTypeNotAutolink == 0 — confirm in the
	// LinkType declaration.
	case *autolink != 0:
		// scan the candidate autolink body up to the closing '>'
		j = i

		for i < len(data) {
			if data[i] == '\\' {
				i += 2 // skip an escaped character
			} else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
				break
			} else {
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		// non-empty body terminated by '>': a complete autolink
		if i > j && data[i] == '>' {
			return i + 1
		}

		// one of the forbidden chars has been found
		*autolink = LinkTypeNotAutolink
	}

	// look for something looking like a tag end
	for i < len(data) && data[i] != '>' {
		i++
	}
	if i >= len(data) {
		return 0
	}
	return i + 1
}
|
|
|
|
|
|
|
|
// look for the address part of a mail autolink and '>'
|
|
|
|
// this is less strict than the original markdown e-mail address matching
|
2011-05-31 05:44:52 +02:00
|
|
|
func isMailtoAutoLink(data []byte) int {
|
2011-05-30 01:00:31 +02:00
|
|
|
nb := 0
|
|
|
|
|
|
|
|
// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
|
|
|
|
for i := 0; i < len(data); i++ {
|
|
|
|
if isalnum(data[i]) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
switch data[i] {
|
|
|
|
case '@':
|
|
|
|
nb++
|
|
|
|
|
|
|
|
case '-', '.', '_':
|
|
|
|
break
|
|
|
|
|
|
|
|
case '>':
|
|
|
|
if nb == 1 {
|
|
|
|
return i + 1
|
|
|
|
} else {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// look for the next emph char, skipping other constructs
//
// helperFindEmphChar returns the offset of the next usable occurrence
// of the emphasis character c in data, skipping over backslash-escaped
// characters, inline code spans and links so that a c inside them is
// not treated as a delimiter. Returns 0 when none is found.
func helperFindEmphChar(data []byte, c byte) int {
	i := 0

	for i < len(data) {
		// advance to the next character of interest: c itself, a code
		// span opener, or a link opener
		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		// do not count escaped chars
		if i != 0 && data[i-1] == '\\' {
			i++
			continue
		}
		if data[i] == c {
			return i
		}

		if data[i] == '`' {
			// skip a code span
			tmpI := 0 // first c seen inside, used if the span never closes
			i++
			for i < len(data) && data[i] != '`' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			if i >= len(data) {
				// unterminated span: fall back to the c found inside it
				return tmpI
			}
			i++
		} else if data[i] == '[' {
			// skip a link
			tmpI := 0 // first c inside the link text, fallback as above
			i++
			for i < len(data) && data[i] != ']' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			i++
			// whitespace may separate the link text from its target
			for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			if data[i] != '[' && data[i] != '(' { // not a link
				if tmpI > 0 {
					return tmpI
				} else {
					continue
				}
			}
			// cc is the target's opening delimiter: '[' or '('
			cc := data[i]
			i++
			for i < len(data) && data[i] != cc {
				// a c inside the target portion is usable directly
				if tmpI == 0 && data[i] == c {
					return i
				}
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			i++
		}
	}
	return 0
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
// helperEmphasis parses single-delimiter emphasis (*foo* / _foo_).
// data begins just after the opening delimiter c; on success an Emph
// node is appended to p.currBlock, its contents are parsed recursively,
// and the number of bytes consumed (including the closing delimiter) is
// returned. Returns 0 when no valid closer exists.
func helperEmphasis(p *parser, data []byte, c byte) int {
	i := 0

	// skip one symbol if coming from emph3
	if len(data) > 1 && data[0] == c && data[1] == c {
		i = 1
	}

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length
		if i >= len(data) {
			return 0
		}

		// a doubled delimiter belongs to strong emphasis; step past it
		if i+1 < len(data) && data[i+1] == c {
			i++
			continue
		}

		// closer must not be preceded by whitespace (i >= 1 here since
		// helperFindEmphChar returned a positive length)
		if data[i] == c && !isspace(data[i-1]) {

			if p.flags&NoIntraEmphasis != 0 {
				// intra-word emphasis disabled: the closer must sit at
				// the end of data or before whitespace/punctuation
				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
					continue
				}
			}

			// emit the node and parse its contents with currBlock
			// temporarily redirected into it
			emph := NewNode(Emph)
			p.currBlock.appendChild(emph)
			tmp := p.currBlock
			p.currBlock = emph
			p.inline(data[:i])
			p.currBlock = tmp
			return i + 1
		}
	}

	return 0
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
func helperDoubleEmphasis(p *parser, data []byte, c byte) int {
|
2011-05-30 01:00:31 +02:00
|
|
|
i := 0
|
|
|
|
|
|
|
|
for i < len(data) {
|
2011-07-05 22:22:21 +02:00
|
|
|
length := helperFindEmphChar(data[i:], c)
|
2011-05-30 01:00:31 +02:00
|
|
|
if length == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
i += length
|
|
|
|
|
|
|
|
if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
|
2016-03-30 13:38:19 +02:00
|
|
|
nodeType := Strong
|
|
|
|
if c == '~' {
|
|
|
|
nodeType = Del
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
2016-03-30 13:38:19 +02:00
|
|
|
node := NewNode(nodeType)
|
|
|
|
p.currBlock.appendChild(node)
|
|
|
|
tmp := p.currBlock
|
|
|
|
p.currBlock = node
|
|
|
|
p.inline(data[:i])
|
|
|
|
p.currBlock = tmp
|
2011-06-29 21:00:54 +02:00
|
|
|
return i + 2
|
2011-05-30 01:00:31 +02:00
|
|
|
}
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-11-01 20:35:43 +01:00
|
|
|
// helperTripleEmphasis parses ***foo***-style emphasis. data[offset:]
// begins just after the three opening delimiters; offset records how
// far back the delimiters start within data, so origData[offset-2:] /
// origData[offset-1:] can be re-dispatched to the single/double helpers
// when fewer than three closing delimiters are found. Returns bytes
// consumed, 0 on failure.
// NOTE(review): assumes offset >= 2 — confirm at the call site.
func helperTripleEmphasis(p *parser, data []byte, offset int, c byte) int {
	i := 0
	origData := data
	data = data[offset:]

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		// skip whitespace preceded symbols
		if data[i] != c || isspace(data[i-1]) {
			continue
		}

		switch {
		case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
			// triple symbol found: Strong wrapping Emph, with the
			// contents parsed into the inner Emph node
			strong := NewNode(Strong)
			em := NewNode(Emph)
			strong.appendChild(em)
			p.currBlock.appendChild(strong)
			tmp := p.currBlock
			p.currBlock = em
			p.inline(data[:i])
			p.currBlock = tmp
			return i + 3
		case (i+1 < len(data) && data[i+1] == c):
			// double symbol found, hand over to emph1
			length = helperEmphasis(p, origData[offset-2:], c)
			if length == 0 {
				return 0
			} else {
				// subtract the two delimiters already consumed by caller
				return length - 2
			}
		default:
			// single symbol found, hand over to emph2
			length = helperDoubleEmphasis(p, origData[offset-1:], c)
			if length == 0 {
				return 0
			} else {
				// subtract the one delimiter already consumed by caller
				return length - 1
			}
		}
	}
	return 0
}
|
2016-03-30 13:38:19 +02:00
|
|
|
|
|
|
|
func text(s []byte) *Node {
|
|
|
|
node := NewNode(Text)
|
|
|
|
node.Literal = s
|
|
|
|
return node
|
|
|
|
}
|
|
|
|
|
|
|
|
// normalizeURI is a placeholder that currently returns its input
// untouched; real URI normalization remains to be implemented.
func normalizeURI(s []byte) []byte {
	// TODO: implement
	return s
}
|