2011-05-28 00:12:21 +02:00
|
|
|
//
|
|
|
|
// Black Friday Markdown Processor
|
2011-05-29 01:37:18 +02:00
|
|
|
// Originally based on http://github.com/tanoku/upskirt
|
2011-05-28 00:12:21 +02:00
|
|
|
// by Russ Ross <russ@russross.com>
|
|
|
|
//
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
//
|
|
|
|
//
|
|
|
|
// Markdown parsing and processing
|
|
|
|
//
|
|
|
|
//
|
|
|
|
|
|
|
|
package blackfriday
|
2011-05-25 00:14:35 +02:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2011-05-26 04:46:16 +02:00
|
|
|
"sort"
|
2011-05-25 21:59:30 +02:00
|
|
|
"unicode"
|
2011-05-25 00:14:35 +02:00
|
|
|
)
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
// Each value is a distinct bit (1 << iota), so they combine cleanly.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota
	EXTENSION_TABLES
	EXTENSION_FENCED_CODE
	EXTENSION_AUTOLINK      // checked in Markdown to activate 'h'/'f'/'m' autolink triggers
	EXTENSION_STRIKETHROUGH // checked in Markdown to activate '~' as an emphasis character
	EXTENSION_LAX_HTML_BLOCKS
	EXTENSION_SPACE_HEADERS
)
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
// Note: LINK_TYPE_NOT_AUTOLINK is 0 (plain iota), and some code relies on
// being able to test it as a zero value.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// These are the possible flag values for the listitem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_END_OF_LIST
)
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
// CENTER is deliberately defined as LEFT|RIGHT so a mask test for either
// side also matches centered cells.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// The size of a tab stop, in character columns, used when expanding tabs.
const TAB_SIZE = 4
|
|
|
|
|
|
|
|
// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
// The map is used as a set; every value is true.
var block_tags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// This struct defines the rendering interface.
// A series of callback functions are registered to form a complete renderer.
// A single interface{} value field is provided, and that value is handed to
// each callback. Leaving a field blank suppresses rendering that type of
// output except where noted.
//
// This is mostly of interest if you are implementing a new rendering format.
// Most users will use the convenience functions to fill in this structure.
type Renderer struct {
	// block-level callbacks---nil skips the block
	blockcode  func(ob *bytes.Buffer, text []byte, lang string, opaque interface{})
	blockquote func(ob *bytes.Buffer, text []byte, opaque interface{})
	blockhtml  func(ob *bytes.Buffer, text []byte, opaque interface{})
	header     func(ob *bytes.Buffer, text []byte, level int, opaque interface{})
	hrule      func(ob *bytes.Buffer, opaque interface{})
	list       func(ob *bytes.Buffer, text []byte, flags int, opaque interface{})
	listitem   func(ob *bytes.Buffer, text []byte, flags int, opaque interface{})
	paragraph  func(ob *bytes.Buffer, text []byte, opaque interface{})
	table      func(ob *bytes.Buffer, header []byte, body []byte, opaque interface{})
	table_row  func(ob *bytes.Buffer, text []byte, opaque interface{})
	table_cell func(ob *bytes.Buffer, text []byte, flags int, opaque interface{})

	// span-level callbacks---nil or return 0 prints the span verbatim
	autolink        func(ob *bytes.Buffer, link []byte, kind int, opaque interface{}) int
	codespan        func(ob *bytes.Buffer, text []byte, opaque interface{}) int
	double_emphasis func(ob *bytes.Buffer, text []byte, opaque interface{}) int
	emphasis        func(ob *bytes.Buffer, text []byte, opaque interface{}) int
	image           func(ob *bytes.Buffer, link []byte, title []byte, alt []byte, opaque interface{}) int
	linebreak       func(ob *bytes.Buffer, opaque interface{}) int
	link            func(ob *bytes.Buffer, link []byte, title []byte, content []byte, opaque interface{}) int
	raw_html_tag    func(ob *bytes.Buffer, tag []byte, opaque interface{}) int
	triple_emphasis func(ob *bytes.Buffer, text []byte, opaque interface{}) int
	strikethrough   func(ob *bytes.Buffer, text []byte, opaque interface{}) int

	// low-level callbacks---nil copies input directly into the output
	entity      func(ob *bytes.Buffer, entity []byte, opaque interface{})
	normal_text func(ob *bytes.Buffer, text []byte, opaque interface{})

	// header and footer, called once at the start and end of a document
	doc_header func(ob *bytes.Buffer, opaque interface{})
	doc_footer func(ob *bytes.Buffer, opaque interface{})

	// user data---passed back to every callback
	opaque interface{}
}
|
|
|
|
|
2011-05-28 21:00:47 +02:00
|
|
|
// render holds the per-document parsing state for a single Markdown() call.
type render struct {
	mk          *Renderer      // user-supplied callback table
	refs        link_ref_array // link references collected in the first pass; sorted so char_link can binary-search
	active_char [256]int       // byte -> MD_CHAR_* action; 0 (MD_CHAR_NONE) means the byte is inactive
	ext_flags   uint32         // OR of the EXTENSION_* values in effect
	nesting     int            // current inline-parsing recursion depth
	max_nesting int            // recursion cap that stops runaway nesting
}
|
|
|
|
|
2011-05-26 04:46:16 +02:00
|
|
|
// Indices into markdown_char_ptrs, identifying the span-level parser for an
// active character. MD_CHAR_NONE must remain 0: parse_inline treats an
// active_char entry of 0 as "no trigger for this byte".
const (
	MD_CHAR_NONE = iota
	MD_CHAR_EMPHASIS
	MD_CHAR_CODESPAN
	MD_CHAR_LINEBREAK
	MD_CHAR_LINK
	MD_CHAR_LANGLE
	MD_CHAR_ESCAPE
	MD_CHAR_ENTITITY // (sic) misspelling of "ENTITY"; kept as-is since other code references this name
	MD_CHAR_AUTOLINK
)
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// Public interface
|
|
|
|
//
|
2011-05-26 17:47:41 +02:00
|
|
|
//
|
2011-05-25 00:14:35 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Parse and render a block of markdown-encoded text.
// The renderer is used to format the output, and extensions dictates which
// non-standard extensions are enabled. Returns nil if renderer is nil.
//
// Processing is two-pass: the first pass extracts link references and
// normalizes line endings/tabs into a scratch buffer, the second pass
// renders that buffer block by block.
func Markdown(input []byte, renderer *Renderer, extensions uint32) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the character-level parsers
	// (done here rather than in the var declaration to avoid an
	// initialization loop between the table and the functions)
	markdown_char_ptrs[MD_CHAR_NONE] = nil
	markdown_char_ptrs[MD_CHAR_EMPHASIS] = char_emphasis
	markdown_char_ptrs[MD_CHAR_CODESPAN] = char_codespan
	markdown_char_ptrs[MD_CHAR_LINEBREAK] = char_linebreak
	markdown_char_ptrs[MD_CHAR_LINK] = char_link
	markdown_char_ptrs[MD_CHAR_LANGLE] = char_langle_tag
	markdown_char_ptrs[MD_CHAR_ESCAPE] = char_escape
	markdown_char_ptrs[MD_CHAR_ENTITITY] = char_entity
	markdown_char_ptrs[MD_CHAR_AUTOLINK] = char_autolink

	// fill in the render structure
	rndr := new(render)
	rndr.mk = renderer
	rndr.ext_flags = extensions
	rndr.max_nesting = 16

	// activate trigger characters only for callbacks that are present,
	// so absent callbacks cost nothing during the inline scan
	if rndr.mk.emphasis != nil || rndr.mk.double_emphasis != nil || rndr.mk.triple_emphasis != nil {
		rndr.active_char['*'] = MD_CHAR_EMPHASIS
		rndr.active_char['_'] = MD_CHAR_EMPHASIS
		if extensions&EXTENSION_STRIKETHROUGH != 0 {
			rndr.active_char['~'] = MD_CHAR_EMPHASIS
		}
	}
	if rndr.mk.codespan != nil {
		rndr.active_char['`'] = MD_CHAR_CODESPAN
	}
	if rndr.mk.linebreak != nil {
		rndr.active_char['\n'] = MD_CHAR_LINEBREAK
	}
	if rndr.mk.image != nil || rndr.mk.link != nil {
		rndr.active_char['['] = MD_CHAR_LINK
	}
	rndr.active_char['<'] = MD_CHAR_LANGLE
	rndr.active_char['\\'] = MD_CHAR_ESCAPE
	rndr.active_char['&'] = MD_CHAR_ENTITITY

	if extensions&EXTENSION_AUTOLINK != 0 {
		rndr.active_char['h'] = MD_CHAR_AUTOLINK // http, https
		rndr.active_char['H'] = MD_CHAR_AUTOLINK

		rndr.active_char['f'] = MD_CHAR_AUTOLINK // ftp
		rndr.active_char['F'] = MD_CHAR_AUTOLINK

		rndr.active_char['m'] = MD_CHAR_AUTOLINK // mailto
		rndr.active_char['M'] = MD_CHAR_AUTOLINK
	}

	// first pass: look for references, copy everything else
	text := bytes.NewBuffer(nil)
	beg, end := 0, 0
	for beg < len(input) { // iterate over lines
		if end = is_ref(rndr, input[beg:]); end > 0 {
			// a reference definition: consumed, not copied to output
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			// add the line body if present
			if end > beg {
				expand_tabs(text, input[beg:end])
			}

			for end < len(input) && (input[end] == '\n' || input[end] == '\r') {
				// add one \n per newline; a '\r' immediately followed
				// by '\n' is treated as a single line ending
				if input[end] == '\n' || (end+1 < len(input) && input[end+1] != '\n') {
					text.WriteByte('\n')
				}
				end++
			}

			beg = end
		}
	}

	// sort the reference array so lookups can binary-search it
	if len(rndr.refs) > 1 {
		sort.Sort(rndr.refs)
	}

	// second pass: actual rendering
	output := bytes.NewBuffer(nil)
	if rndr.mk.doc_header != nil {
		rndr.mk.doc_header(output, rndr.mk.opaque)
	}

	if text.Len() > 0 {
		// add a final newline if not already present
		finalchar := text.Bytes()[text.Len()-1]
		if finalchar != '\n' && finalchar != '\r' {
			text.WriteByte('\n')
		}
		parse_block(output, rndr, text.Bytes())
	}

	if rndr.mk.doc_footer != nil {
		rndr.mk.doc_footer(output, rndr.mk.opaque)
	}

	// internal invariant: every parse_inline/parse_block entry must have
	// decremented the counter on exit
	if rndr.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}
|
|
|
|
|
|
|
|
|
|
|
|
//
// Inline parsing
// Functions to parse text within a block.
//

// Table of functions to render active chars. Each function:
//   - returns the number of chars taken care of (0 means "no action taken")
//   - receives data, the complete block being rendered
//   - receives offset, the number of valid chars before the trigger char
//
// Note: this is filled in inside Markdown to prevent an initialization loop.
var markdown_char_ptrs [9]func(ob *bytes.Buffer, rndr *render, data []byte, offset int) int
|
|
|
|
|
|
|
|
// parse_inline renders the span-level elements of data into ob.
// Inactive runs of bytes are emitted via normal_text (or copied verbatim);
// each active byte dispatches to its markdown_char_ptrs handler.
// Recursion depth is bounded by rndr.max_nesting.
func parse_inline(ob *bytes.Buffer, rndr *render, data []byte) {
	if rndr.nesting >= rndr.max_nesting {
		return
	}
	rndr.nesting++

	// i marks the start of the pending inactive run; end scans ahead
	i, end := 0, 0
	for i < len(data) {
		// copy inactive chars into the output
		for end < len(data) && rndr.active_char[data[end]] == 0 {
			end++
		}

		if rndr.mk.normal_text != nil {
			rndr.mk.normal_text(ob, data[i:end], rndr.mk.opaque)
		} else {
			ob.Write(data[i:end])
		}

		if end >= len(data) {
			break
		}
		i = end

		// call the trigger
		action := rndr.active_char[data[end]]
		end = markdown_char_ptrs[action](ob, rndr, data, i)

		if end == 0 { // no action from the callback
			// treat the trigger byte as ordinary text on the next pass
			end = i + 1
		} else {
			// the callback consumed `end` bytes starting at i
			i += end
			end = i
		}
	}

	rndr.nesting--
}
|
|
|
|
|
2011-05-26 17:47:41 +02:00
|
|
|
// single and double emphasis parsing: dispatch on the length of the run of
// the trigger character c ('*', '_', or '~') at data[offset].
// Returns the number of bytes consumed, or 0 if no emphasis was rendered.
func char_emphasis(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
	data = data[offset:]
	c := data[0]
	ret := 0

	// single delimiter, e.g. *text*
	if len(data) > 2 && data[1] != c {
		// whitespace cannot follow an opening emphasis;
		// strikethrough only takes two characters '~~'
		if c == '~' || isspace(data[1]) {
			return 0
		}
		if ret = parse_emph1(ob, rndr, data[1:], c); ret == 0 {
			return 0
		}

		return ret + 1
	}

	// double delimiter, e.g. **text** or ~~text~~
	if len(data) > 3 && data[1] == c && data[2] != c {
		if isspace(data[2]) {
			return 0
		}
		if ret = parse_emph2(ob, rndr, data[2:], c); ret == 0 {
			return 0
		}

		return ret + 2
	}

	// triple delimiter, e.g. ***text*** (no triple strikethrough)
	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
		if c == '~' || isspace(data[3]) {
			return 0
		}
		if ret = parse_emph3(ob, rndr, data, 3, c); ret == 0 {
			return 0
		}

		return ret + 3
	}

	return 0
}
|
|
|
|
|
|
|
|
// '`': parse a code span delimited by matching runs of backticks.
// Returns the number of bytes consumed (through the closing delimiter),
// or 0 if no matching delimiter was found or the callback declined.
func char_codespan(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
	data = data[offset:]

	nb := 0

	// count the number of backticks in the delimiter
	for nb < len(data) && data[nb] == '`' {
		nb++
	}

	// find the next delimiter: a run of exactly-counted backticks
	// (i counts consecutive backticks seen; reset on any other byte)
	i, end := 0, 0
	for end = nb; end < len(data) && i < nb; end++ {
		if data[end] == '`' {
			i++
		} else {
			i = 0
		}
	}

	if i < nb && end >= len(data) {
		return 0 // no matching delimiter
	}

	// trim outside whitespace
	f_begin := nb
	for f_begin < end && (data[f_begin] == ' ' || data[f_begin] == '\t') {
		f_begin++
	}

	f_end := end - nb
	for f_end > nb && (data[f_end-1] == ' ' || data[f_end-1] == '\t') {
		f_end--
	}

	// real code span
	if rndr.mk.codespan == nil {
		return 0
	}
	if f_begin < f_end {
		if rndr.mk.codespan(ob, data[f_begin:f_end], rndr.mk.opaque) == 0 {
			end = 0
		}
	} else {
		// empty span (nothing but delimiters/whitespace)
		if rndr.mk.codespan(ob, nil, rndr.mk.opaque) == 0 {
			end = 0
		}
	}

	return end
}
|
|
|
|
|
2011-05-26 20:10:16 +02:00
|
|
|
// '\n' preceded by two spaces
|
2011-05-26 04:46:16 +02:00
|
|
|
func char_linebreak(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
|
2011-05-26 20:10:16 +02:00
|
|
|
if offset < 2 || data[offset-1] != ' ' || data[offset-2] != ' ' {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// remove trailing spaces from ob and render
|
|
|
|
ob_bytes := ob.Bytes()
|
|
|
|
end := len(ob_bytes)
|
|
|
|
for end > 0 && ob_bytes[end-1] == ' ' {
|
|
|
|
end--
|
|
|
|
}
|
|
|
|
ob.Truncate(end)
|
|
|
|
|
|
|
|
if rndr.mk.linebreak == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
if rndr.mk.linebreak(ob, rndr.mk.opaque) > 0 {
|
|
|
|
return 1
|
|
|
|
} else {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-05-26 04:46:16 +02:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-05-28 17:49:21 +02:00
|
|
|
// '[': parse a link or an image.
// An image is signalled by a '!' immediately before the '[' (already in ob).
// Handles three syntaxes: inline `[text](url "title")`, reference
// `[text][id]`, and shortcut reference `[id]`.
// Returns the number of bytes consumed starting at the '[', or 0 on failure.
func char_link(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
	is_img := offset > 0 && data[offset-1] == '!'

	data = data[offset:]

	i := 1
	var title, link []byte
	text_has_nl := false

	// check whether the correct renderer exists
	if (is_img && rndr.mk.image == nil) || (!is_img && rndr.mk.link == nil) {
		return 0
	}

	// look for the matching closing bracket, honoring nesting and escapes
	for level := 1; level > 0 && i < len(data); i++ {
		switch {
		case data[i] == '\n':
			text_has_nl = true

		case data[i-1] == '\\':
			continue

		case data[i] == '[':
			level++

		case data[i] == ']':
			level--
			if level <= 0 {
				i-- // compensate for extra i++ in for loop
			}
		}
	}

	if i >= len(data) {
		return 0
	}

	txt_e := i // end of the bracketed text
	i++

	// skip any amount of whitespace or newline
	// (this is much more lax than original markdown syntax)
	for i < len(data) && isspace(data[i]) {
		i++
	}

	// inline style link
	switch {
	case i < len(data) && data[i] == '(':
		// skip initial whitespace
		i++

		for i < len(data) && isspace(data[i]) {
			i++
		}

		link_b := i

		// look for link end: ' " )
		for i < len(data) {
			if data[i] == '\\' {
				i += 2
			} else {
				if data[i] == ')' || data[i] == '\'' || data[i] == '"' {
					break
				}
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		link_e := i

		// look for title end if present
		title_b, title_e := 0, 0
		if data[i] == '\'' || data[i] == '"' {
			i++
			title_b = i

			for i < len(data) {
				if data[i] == '\\' {
					i += 2
				} else {
					if data[i] == ')' {
						break
					}
					i++
				}
			}

			if i >= len(data) {
				return 0
			}

			// skip whitespace after title
			title_e = i - 1
			for title_e > title_b && isspace(data[title_e]) {
				title_e--
			}

			// check for closing quote presence; without one, treat the
			// whole span as the link target
			if data[title_e] != '\'' && data[title_e] != '"' {
				title_b, title_e = 0, 0
				link_e = i
			}
		}

		// remove whitespace at the end of the link
		for link_e > link_b && isspace(data[link_e-1]) {
			link_e--
		}

		// remove optional angle brackets around the link
		if data[link_b] == '<' {
			link_b++
		}
		if data[link_e-1] == '>' {
			link_e--
		}

		// build escaped link and title
		if link_e > link_b {
			link = data[link_b:link_e]
		}

		if title_e > title_b {
			title = data[title_b:title_e]
		}

		i++

	// reference style link
	case i < len(data) && data[i] == '[':
		var id []byte

		// look for the id
		i++
		link_b := i
		for i < len(data) && data[i] != ']' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		link_e := i

		// find the link_ref; an empty id means "use the link text",
		// with embedded newlines collapsed to single spaces
		if link_b == link_e {
			if text_has_nl {
				b := bytes.NewBuffer(nil)

				for j := 1; j < txt_e; j++ {
					switch {
					case data[j] != '\n':
						b.WriteByte(data[j])
					case data[j-1] != ' ':
						b.WriteByte(' ')
					}
				}

				id = b.Bytes()
			} else {
				id = data[1:txt_e]
			}
		} else {
			id = data[link_b:link_e]
		}

		// find the link_ref with matching id (refs is sorted)
		index := sortDotSearch(len(rndr.refs), func(i int) bool {
			return !byteslice_less(rndr.refs[i].id, id)
		})
		if index >= len(rndr.refs) || !bytes.Equal(rndr.refs[index].id, id) {
			return 0
		}
		lr := rndr.refs[index]

		// keep link and title from link_ref
		link = lr.link
		title = lr.title
		i++

	// shortcut reference style link
	default:
		var id []byte

		// craft the id from the link text, collapsing newlines to spaces
		if text_has_nl {
			b := bytes.NewBuffer(nil)

			for j := 1; j < txt_e; j++ {
				switch {
				case data[j] != '\n':
					b.WriteByte(data[j])
				case data[j-1] != ' ':
					b.WriteByte(' ')
				}
			}

			id = b.Bytes()
		} else {
			id = data[1:txt_e]
		}

		// find the link_ref with matching id (refs is sorted)
		index := sortDotSearch(len(rndr.refs), func(i int) bool {
			return !byteslice_less(rndr.refs[i].id, id)
		})
		if index >= len(rndr.refs) || !bytes.Equal(rndr.refs[index].id, id) {
			return 0
		}
		lr := rndr.refs[index]

		// keep link and title from link_ref
		link = lr.link
		title = lr.title

		// rewind the whitespace that was skipped after the ']'
		i = txt_e + 1
	}

	// build content: img alt is escaped, link content is parsed
	content := bytes.NewBuffer(nil)
	if txt_e > 1 {
		if is_img {
			content.Write(data[1:txt_e])
		} else {
			parse_inline(content, rndr, data[1:txt_e])
		}
	}

	var u_link []byte
	if len(link) > 0 {
		u_link_buf := bytes.NewBuffer(nil)
		unescape_text(u_link_buf, link)
		u_link = u_link_buf.Bytes()
	}

	// call the relevant rendering function
	ret := 0
	if is_img {
		// drop the '!' that was already copied into ob
		ob_size := ob.Len()
		ob_bytes := ob.Bytes()
		if ob_size > 0 && ob_bytes[ob_size-1] == '!' {
			ob.Truncate(ob_size - 1)
		}

		ret = rndr.mk.image(ob, u_link, title, content.Bytes(), rndr.mk.opaque)
	} else {
		ret = rndr.mk.link(ob, u_link, title, content.Bytes(), rndr.mk.opaque)
	}

	if ret > 0 {
		return i
	}
	return 0
}
|
|
|
|
|
2011-05-26 20:10:16 +02:00
|
|
|
// '<' when tags or autolinks are allowed
|
2011-05-26 04:46:16 +02:00
|
|
|
func char_langle_tag(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
|
2011-05-26 17:47:41 +02:00
|
|
|
data = data[offset:]
|
2011-05-29 05:17:53 +02:00
|
|
|
altype := LINK_TYPE_NOT_AUTOLINK
|
2011-05-26 20:10:16 +02:00
|
|
|
end := tag_length(data, &altype)
|
|
|
|
ret := 0
|
|
|
|
|
|
|
|
if end > 2 {
|
|
|
|
switch {
|
2011-05-29 05:17:53 +02:00
|
|
|
case rndr.mk.autolink != nil && altype != LINK_TYPE_NOT_AUTOLINK:
|
2011-05-26 20:10:16 +02:00
|
|
|
u_link := bytes.NewBuffer(nil)
|
2011-05-28 17:49:21 +02:00
|
|
|
unescape_text(u_link, data[1:end+1-2])
|
2011-05-26 20:10:16 +02:00
|
|
|
ret = rndr.mk.autolink(ob, u_link.Bytes(), altype, rndr.mk.opaque)
|
|
|
|
case rndr.mk.raw_html_tag != nil:
|
|
|
|
ret = rndr.mk.raw_html_tag(ob, data[:end], rndr.mk.opaque)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ret == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return end
|
2011-05-26 04:46:16 +02:00
|
|
|
}
|
|
|
|
|
2011-05-26 20:10:16 +02:00
|
|
|
// '\\' backslash escape
|
|
|
|
var escape_chars = []byte("\\`*_{}[]()#+-.!:|&<>")
|
|
|
|
|
2011-05-26 04:46:16 +02:00
|
|
|
func char_escape(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
|
2011-05-26 17:47:41 +02:00
|
|
|
data = data[offset:]
|
2011-05-26 20:10:16 +02:00
|
|
|
|
|
|
|
if len(data) > 1 {
|
|
|
|
if bytes.IndexByte(escape_chars, data[1]) < 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
if rndr.mk.normal_text != nil {
|
|
|
|
rndr.mk.normal_text(ob, data[1:2], rndr.mk.opaque)
|
|
|
|
} else {
|
|
|
|
ob.WriteByte(data[1])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 2
|
2011-05-26 04:46:16 +02:00
|
|
|
}
|
|
|
|
|
2011-05-26 20:10:16 +02:00
|
|
|
// '&' escaped when it doesn't belong to an entity
|
|
|
|
// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
|
2011-05-26 04:46:16 +02:00
|
|
|
func char_entity(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
|
2011-05-26 17:47:41 +02:00
|
|
|
data = data[offset:]
|
2011-05-26 20:10:16 +02:00
|
|
|
|
|
|
|
end := 1
|
|
|
|
|
|
|
|
if end < len(data) && data[end] == '#' {
|
|
|
|
end++
|
|
|
|
}
|
|
|
|
|
2011-05-27 21:38:10 +02:00
|
|
|
for end < len(data) && isalnum(data[end]) {
|
2011-05-26 20:10:16 +02:00
|
|
|
end++
|
|
|
|
}
|
|
|
|
|
|
|
|
if end < len(data) && data[end] == ';' {
|
|
|
|
end++ // real entity
|
|
|
|
} else {
|
|
|
|
return 0 // lone '&'
|
|
|
|
}
|
|
|
|
|
|
|
|
if rndr.mk.entity != nil {
|
|
|
|
rndr.mk.entity(ob, data[:end], rndr.mk.opaque)
|
|
|
|
} else {
|
|
|
|
ob.Write(data[:end])
|
|
|
|
}
|
|
|
|
|
|
|
|
return end
|
2011-05-26 04:46:16 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// char_autolink detects a bare URL starting at data[offset] (triggered on
// h/H/f/F/m/M when EXTENSION_AUTOLINK is on) and renders it via the
// autolink callback. Returns the link length consumed, or 0.
func char_autolink(ob *bytes.Buffer, rndr *render, data []byte, offset int) int {
	orig_data := data
	data = data[offset:]

	// a URL must begin a word: the preceding byte must be whitespace
	// or punctuation
	if offset > 0 {
		if !isspace(orig_data[offset-1]) && !ispunct(orig_data[offset-1]) {
			return 0
		}
	}

	if !is_safe_link(data) {
		return 0
	}

	// the link runs to the first whitespace byte
	// NOTE(review): the index arithmetic below (link_end-1, link_end-2)
	// assumes link_end >= 2; this appears guaranteed because is_safe_link
	// requires a multi-byte scheme prefix of non-whitespace bytes — confirm.
	link_end := 0
	for link_end < len(data) && !isspace(data[link_end]) {
		link_end++
	}

	// Skip punctuation at the end of the link
	if (data[link_end-1] == '.' || data[link_end-1] == ',' || data[link_end-1] == ';') && data[link_end-2] != '\\' {
		link_end--
	}

	// See if the link finishes with a punctuation sign that can be closed.
	var copen byte
	switch data[link_end-1] {
	case '"':
		copen = '"'
	case '\'':
		copen = '\''
	case ')':
		copen = '('
	case ']':
		copen = '['
	case '}':
		copen = '{'
	default:
		copen = 0
	}

	if copen != 0 {
		buf_end := offset + link_end - 2

		open_delim := 1

		/* Try to close the final punctuation sign in this same line;
		 * if we managed to close it outside of the URL, that means that it's
		 * not part of the URL. If it closes inside the URL, that means it
		 * is part of the URL.
		 *
		 * Examples:
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo (http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric))
		 *
		 *      (foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => foo http://www.pokemon.com/Pikachu_(Electric)
		 */

		// scan backwards from just before the closer to the start of the
		// line, balancing open/close delimiters
		for buf_end >= 0 && orig_data[buf_end] != '\n' && open_delim != 0 {
			if orig_data[buf_end] == data[link_end-1] {
				open_delim++
			}

			if orig_data[buf_end] == copen {
				open_delim--
			}

			buf_end--
		}

		// balanced outside the URL: the closer is not part of the link
		if open_delim == 0 {
			link_end--
		}
	}

	if rndr.mk.autolink != nil {
		u_link := bytes.NewBuffer(nil)
		unescape_text(u_link, data[:link_end])

		rndr.mk.autolink(ob, u_link.Bytes(), LINK_TYPE_NORMAL, rndr.mk.opaque)
	}

	return link_end
}
|
|
|
|
|
|
|
|
var valid_uris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
|
|
|
|
|
|
|
|
func is_safe_link(link []byte) bool {
|
|
|
|
for _, prefix := range valid_uris {
|
2011-05-27 21:38:10 +02:00
|
|
|
if len(link) > len(prefix) && !byteslice_less(link[:len(prefix)], prefix) && !byteslice_less(prefix, link[:len(prefix)]) && isalnum(link[len(prefix)]) {
|
2011-05-26 22:22:59 +02:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
2011-05-26 17:47:41 +02:00
|
|
|
}
|
|
|
|
|
2011-05-26 20:10:16 +02:00
|
|
|
// return the length of the given tag, or 0 is it's not valid
|
|
|
|
func tag_length(data []byte, autolink *int) int {
|
|
|
|
var i, j int
|
|
|
|
|
|
|
|
// a valid tag can't be shorter than 3 chars
|
|
|
|
if len(data) < 3 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// begins with a '<' optionally followed by '/', followed by letter or number
|
|
|
|
if data[0] != '<' {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
if data[1] == '/' {
|
|
|
|
i = 2
|
|
|
|
} else {
|
|
|
|
i = 1
|
|
|
|
}
|
|
|
|
|
2011-05-27 21:38:10 +02:00
|
|
|
if !isalnum(data[i]) {
|
2011-05-26 20:10:16 +02:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// scheme test
|
2011-05-29 05:17:53 +02:00
|
|
|
*autolink = LINK_TYPE_NOT_AUTOLINK
|
2011-05-26 20:10:16 +02:00
|
|
|
|
|
|
|
// try to find the beggining of an URI
|
2011-05-27 21:38:10 +02:00
|
|
|
for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
|
2011-05-26 20:10:16 +02:00
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
if i > 1 && data[i] == '@' {
|
|
|
|
if j = is_mail_autolink(data[i:]); j != 0 {
|
2011-05-29 05:17:53 +02:00
|
|
|
*autolink = LINK_TYPE_EMAIL
|
2011-05-26 20:10:16 +02:00
|
|
|
return i + j
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if i > 2 && data[i] == ':' {
|
2011-05-29 05:17:53 +02:00
|
|
|
*autolink = LINK_TYPE_NORMAL
|
2011-05-26 20:10:16 +02:00
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
// complete autolink test: no whitespace or ' or "
|
|
|
|
switch {
|
|
|
|
case i >= len(data):
|
2011-05-29 05:17:53 +02:00
|
|
|
*autolink = LINK_TYPE_NOT_AUTOLINK
|
2011-05-26 20:10:16 +02:00
|
|
|
case *autolink != 0:
|
|
|
|
j = i
|
|
|
|
|
|
|
|
for i < len(data) {
|
|
|
|
if data[i] == '\\' {
|
|
|
|
i += 2
|
|
|
|
} else {
|
2011-05-27 21:38:10 +02:00
|
|
|
if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
|
2011-05-26 20:10:16 +02:00
|
|
|
break
|
|
|
|
} else {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
if i >= len(data) {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
if i > j && data[i] == '>' {
|
|
|
|
return i + 1
|
|
|
|
}
|
|
|
|
|
|
|
|
// one of the forbidden chars has been found
|
2011-05-29 05:17:53 +02:00
|
|
|
*autolink = LINK_TYPE_NOT_AUTOLINK
|
2011-05-26 20:10:16 +02:00
|
|
|
}
|
|
|
|
|
2011-05-28 17:49:21 +02:00
|
|
|
// look for something looking like a tag end
|
2011-05-26 20:10:16 +02:00
|
|
|
for i < len(data) && data[i] != '>' {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
if i >= len(data) {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return i + 1
|
|
|
|
}
|
|
|
|
|
|
|
|
// look for the address part of a mail autolink and '>'
|
|
|
|
// this is less strict than the original markdown e-mail address matching
|
|
|
|
func is_mail_autolink(data []byte) int {
|
|
|
|
nb := 0
|
|
|
|
|
|
|
|
// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
|
|
|
|
for i := 0; i < len(data); i++ {
|
2011-05-27 21:38:10 +02:00
|
|
|
if isalnum(data[i]) {
|
2011-05-26 20:10:16 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
switch data[i] {
|
|
|
|
case '@':
|
|
|
|
nb++
|
|
|
|
|
|
|
|
case '-', '.', '_':
|
|
|
|
break
|
|
|
|
|
|
|
|
case '>':
|
|
|
|
if nb == 1 {
|
|
|
|
return i + 1
|
|
|
|
} else {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-05-26 17:47:41 +02:00
|
|
|
// look for the next emph char, skipping other constructs
// (inline code spans and links); returns the index of the next occurrence
// of c, or 0 when none is found before the end of data.
func find_emph_char(data []byte, c byte) int {
	i := 1

	for i < len(data) {
		// advance to the next char of interest: c itself, a code-span
		// opener or a link opener
		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		if data[i] == c {
			return i
		}

		// do not count escaped chars
		// (i starts at 1 and only grows, so the i != 0 guard is always true)
		if i != 0 && data[i-1] == '\\' {
			i++
			continue
		}

		if data[i] == '`' {
			// skip a code span; tmp_i remembers the first c seen inside,
			// used as a fallback if the span never closes
			tmp_i := 0
			i++
			for i < len(data) && data[i] != '`' {
				if tmp_i == 0 && data[i] == c {
					tmp_i = i
				}
				i++
			}
			if i >= len(data) {
				return tmp_i
			}
			i++
		} else {
			if data[i] == '[' {
				// skip a link; same fallback mechanism as the code span
				tmp_i := 0
				i++
				for i < len(data) && data[i] != ']' {
					if tmp_i == 0 && data[i] == c {
						tmp_i = i
					}
					i++
				}
				i++
				// skip whitespace between the "[...]" part and its
				// "(...)" or "[...]" target
				for i < len(data) && (data[i] == ' ' || data[i] == '\t' || data[i] == '\n') {
					i++
				}
				if i >= len(data) {
					return tmp_i
				}
				if data[i] != '[' && data[i] != '(' { // not a link
					if tmp_i > 0 {
						return tmp_i
					} else {
						continue
					}
				}
				// skip over the link target delimited by cc ('[' or '(')
				// NOTE(review): closes on the same char cc, not on the
				// matching ']'/')' — presumably intentional; confirm
				cc := data[i]
				i++
				for i < len(data) && data[i] != cc {
					if tmp_i == 0 && data[i] == c {
						tmp_i = i
					}
					i++
				}
				if i >= len(data) {
					return tmp_i
				}
				i++
			}
		}
	}
	return 0
}
|
|
|
|
|
2011-05-26 17:47:41 +02:00
|
|
|
// parse_emph1 parses single-delimiter emphasis (e.g. *text* or _text_)
// starting just after the opening c; returns the number of bytes consumed
// (including the closing delimiter), or 0 when no valid emphasis is found.
func parse_emph1(ob *bytes.Buffer, rndr *render, data []byte, c byte) int {
	i := 0

	if rndr.mk.emphasis == nil {
		return 0
	}

	// skip one symbol if coming from emph3
	if len(data) > 1 && data[0] == c && data[1] == c {
		i = 1
	}

	for i < len(data) {
		length := find_emph_char(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length
		if i >= len(data) {
			return 0
		}

		// a doubled delimiter here is not a closer; step past it
		if i+1 < len(data) && data[i+1] == c {
			i++
			continue
		}

		// closing delimiter must not be preceded by whitespace
		if data[i] == c && !isspace(data[i-1]) {
			if rndr.ext_flags&EXTENSION_NO_INTRA_EMPHASIS != 0 {
				// with intra-word emphasis disabled, the closer must be
				// followed by end-of-data, whitespace or punctuation
				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
					continue
				}
			}

			work := bytes.NewBuffer(nil)
			parse_inline(work, rndr, data[:i])
			r := rndr.mk.emphasis(ob, work.Bytes(), rndr.mk.opaque)
			if r > 0 {
				return i + 1
			} else {
				return 0
			}
		}
	}

	return 0
}
|
|
|
|
|
|
|
|
// parse_emph2 parses double-delimiter emphasis (**text**, __text__) or,
// for c == '~', strikethrough (~~text~~); returns bytes consumed or 0.
func parse_emph2(ob *bytes.Buffer, rndr *render, data []byte, c byte) int {
	// '~' doubles as the strikethrough delimiter
	render_method := rndr.mk.double_emphasis
	if c == '~' {
		render_method = rndr.mk.strikethrough
	}

	if render_method == nil {
		return 0
	}

	i := 0

	for i < len(data) {
		length := find_emph_char(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		// closer is a doubled delimiter not preceded by whitespace
		if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
			work := bytes.NewBuffer(nil)
			parse_inline(work, rndr, data[:i])
			r := render_method(ob, work.Bytes(), rndr.mk.opaque)
			if r > 0 {
				return i + 2
			} else {
				return 0
			}
		}
		i++
	}
	return 0
}
|
|
|
|
|
|
|
|
// parse_emph3 parses triple-delimiter emphasis (***text***), falling back
// to parse_emph1/parse_emph2 when only two or one closing symbols are found.
// offset is the position within orig_data where the scan starts; the caller
// must have consumed at least two delimiters before offset (the
// orig_data[offset-2:]/[offset-1:] re-dispatch below relies on it).
func parse_emph3(ob *bytes.Buffer, rndr *render, data []byte, offset int, c byte) int {
	i := 0
	orig_data := data
	data = data[offset:]

	for i < len(data) {
		length := find_emph_char(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		// skip whitespace preceded symbols
		if data[i] != c || isspace(data[i-1]) {
			continue
		}

		switch {
		case (i+2 < len(data) && data[i+1] == c && data[i+2] == c && rndr.mk.triple_emphasis != nil):
			// triple symbol found
			work := bytes.NewBuffer(nil)

			parse_inline(work, rndr, data[:i])
			r := rndr.mk.triple_emphasis(ob, work.Bytes(), rndr.mk.opaque)
			if r > 0 {
				return i + 3
			} else {
				return 0
			}
		case (i+1 < len(data) && data[i+1] == c):
			// double symbol found, hand over to emph1
			length = parse_emph1(ob, rndr, orig_data[offset-2:], c)
			if length == 0 {
				return 0
			} else {
				return length - 2
			}
		default:
			// single symbol found, hand over to emph2
			length = parse_emph2(ob, rndr, orig_data[offset-1:], c)
			if length == 0 {
				return 0
			} else {
				return length - 1
			}
		}
	}
	return 0
}
|
2011-05-26 04:46:16 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
|
|
|
|
//
|
|
|
|
// Block parsing
|
|
|
|
// Functions to parse block-level elements.
|
|
|
|
//
|
|
|
|
|
2011-05-25 21:59:30 +02:00
|
|
|
// parse block-level data
// Dispatches each block in turn to the specific parsers; the order of the
// checks below is significant (e.g. HTML and empty lines are tested before
// hrules, hrules before lists). Anything unrecognized falls through to
// parse_paragraph. Recursion depth is bounded by rndr.max_nesting.
func parse_block(ob *bytes.Buffer, rndr *render, data []byte) {
	if rndr.nesting >= rndr.max_nesting {
		return
	}
	rndr.nesting++

	for len(data) > 0 {
		if is_atxheader(rndr, data) {
			data = data[parse_atxheader(ob, rndr, data):]
			continue
		}
		if data[0] == '<' && rndr.mk.blockhtml != nil {
			if i := parse_htmlblock(ob, rndr, data, true); i > 0 {
				data = data[i:]
				continue
			}
		}
		if i := is_empty(data); i > 0 {
			data = data[i:]
			continue
		}
		if is_hrule(data) {
			if rndr.mk.hrule != nil {
				rndr.mk.hrule(ob, rndr.mk.opaque)
			}
			// consume the rest of the hrule line
			var i int
			for i = 0; i < len(data) && data[i] != '\n'; i++ {
			}
			data = data[i:]
			continue
		}
		if rndr.ext_flags&EXTENSION_FENCED_CODE != 0 {
			if i := parse_fencedcode(ob, rndr, data); i > 0 {
				data = data[i:]
				continue
			}
		}
		if rndr.ext_flags&EXTENSION_TABLES != 0 {
			if i := parse_table(ob, rndr, data); i > 0 {
				data = data[i:]
				continue
			}
		}
		if prefix_quote(data) > 0 {
			data = data[parse_blockquote(ob, rndr, data):]
			continue
		}
		if prefix_code(data) > 0 {
			data = data[parse_blockcode(ob, rndr, data):]
			continue
		}
		if prefix_uli(data) > 0 {
			data = data[parse_list(ob, rndr, data, 0):]
			continue
		}
		if prefix_oli(data) > 0 {
			data = data[parse_list(ob, rndr, data, LIST_TYPE_ORDERED):]
			continue
		}

		data = data[parse_paragraph(ob, rndr, data):]
	}

	rndr.nesting--
}
|
|
|
|
|
2011-05-25 00:14:35 +02:00
|
|
|
// is_atxheader reports whether data starts with an ATX-style header ("# ...").
// With EXTENSION_SPACE_HEADERS enabled, the '#' run must be followed by a
// space or tab. NOTE(review): data[0] is read unguarded — assumes the caller
// never passes an empty slice (parse_block's loop condition guarantees it).
func is_atxheader(rndr *render, data []byte) bool {
	if data[0] != '#' {
		return false
	}

	if rndr.ext_flags&EXTENSION_SPACE_HEADERS != 0 {
		level := 0
		for level < len(data) && level < 6 && data[level] == '#' {
			level++
		}
		// require whitespace after the '#' run
		if level < len(data) && data[level] != ' ' && data[level] != '\t' {
			return false
		}
	}
	return true
}
|
|
|
|
|
|
|
|
// parse_atxheader renders an ATX header ("## text ##"); returns the number
// of bytes consumed (up to and excluding the terminating newline).
func parse_atxheader(ob *bytes.Buffer, rndr *render, data []byte) int {
	// count leading '#' (max level 6)
	level := 0
	for level < len(data) && level < 6 && data[level] == '#' {
		level++
	}
	// skip whitespace after the '#' run
	i, end := 0, 0
	for i = level; i < len(data) && (data[i] == ' ' || data[i] == '\t'); i++ {
	}
	// find end of line
	for end = i; end < len(data) && data[end] != '\n'; end++ {
	}
	skip := end
	// strip trailing '#' and whitespace from the header text
	for end > 0 && data[end-1] == '#' {
		end--
	}
	for end > 0 && (data[end-1] == ' ' || data[end-1] == '\t') {
		end--
	}
	if end > i {
		work := bytes.NewBuffer(nil)
		parse_inline(work, rndr, data[i:end])
		if rndr.mk.header != nil {
			rndr.mk.header(ob, work.Bytes(), level, rndr.mk.opaque)
		}
	}
	return skip
}
|
|
|
|
|
2011-05-25 23:41:25 +02:00
|
|
|
// is_headerline checks whether data starts with a setext header underline:
// a run of '=' (level 1) or '-' (level 2) followed only by optional spaces
// or tabs up to the end of the line. Returns the header level, or 0.
// Fixes: guards against empty input (the original indexed data[0]
// unconditionally) and folds the two duplicated branches into one scan.
func is_headerline(data []byte) int {
	if len(data) == 0 {
		return 0
	}

	// pick the underline char and the level it denotes
	var level int
	var c byte
	switch data[0] {
	case '=':
		level, c = 1, '='
	case '-':
		level, c = 2, '-'
	default:
		return 0
	}

	// consume the run of underline chars, then trailing blanks
	i := 1
	for i < len(data) && data[i] == c {
		i++
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}

	// valid only if nothing else remains on the line
	if i >= len(data) || data[i] == '\n' {
		return level
	}
	return 0
}
|
|
|
|
|
2011-05-25 00:14:35 +02:00
|
|
|
// parse_htmlblock parses a raw HTML block starting at data[0]; returns the
// number of bytes consumed, or 0 when data does not begin a valid block.
// When do_render is false the block is only measured, not emitted.
func parse_htmlblock(ob *bytes.Buffer, rndr *render, data []byte, do_render bool) int {
	var i, j int

	// identify the opening tag
	if len(data) < 2 || data[0] != '<' {
		return 0
	}
	curtag, tagfound := find_block_tag(data[1:])

	// handle special cases
	if !tagfound {

		// HTML comment, laxist form
		if len(data) > 5 && data[1] == '!' && data[2] == '-' && data[3] == '-' {
			i = 5

			// scan to the "-->" terminator
			for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
				i++
			}
			i++

			// the comment must be followed by a blank line
			if i < len(data) {
				j = is_empty(data[i:])
			}

			if j > 0 {
				size := i + j
				if do_render && rndr.mk.blockhtml != nil {
					rndr.mk.blockhtml(ob, data[:size], rndr.mk.opaque)
				}
				return size
			}
		}

		// HR, which is the only self-closing block tag considered
		if len(data) > 4 && (data[1] == 'h' || data[1] == 'H') && (data[2] == 'r' || data[2] == 'R') {
			i = 3
			for i < len(data) && data[i] != '>' {
				i++
			}

			if i+1 < len(data) {
				i++
				j = is_empty(data[i:])
				if j > 0 {
					size := i + j
					if do_render && rndr.mk.blockhtml != nil {
						rndr.mk.blockhtml(ob, data[:size], rndr.mk.opaque)
					}
					return size
				}
			}
		}

		// no special case recognized
		return 0
	}

	// look for an unindented matching closing tag
	// followed by a blank line
	i = 1
	found := false

	// if not found, try a second pass looking for indented match
	// but not if tag is "ins" or "del" (following original Markdown.pl)
	if curtag != "ins" && curtag != "del" {
		i = 1
		for i < len(data) {
			i++
			// advance to the next "</" sequence
			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
				i++
			}

			if i+2+len(curtag) >= len(data) {
				break
			}

			j = htmlblock_end(curtag, rndr, data[i-1:])

			if j > 0 {
				i += j - 1
				found = true
				break
			}
		}
	}

	if !found {
		return 0
	}

	// the end of the block has been found
	if do_render && rndr.mk.blockhtml != nil {
		rndr.mk.blockhtml(ob, data[:i], rndr.mk.opaque)
	}

	return i
}
|
|
|
|
|
2011-05-25 21:59:30 +02:00
|
|
|
func find_block_tag(data []byte) (string, bool) {
|
|
|
|
i := 0
|
|
|
|
for i < len(data) && ((data[i] >= '0' && data[i] <= '9') || (data[i] >= 'A' && data[i] <= 'Z') || (data[i] >= 'a' && data[i] <= 'z')) {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
if i >= len(data) {
|
|
|
|
return "", false
|
|
|
|
}
|
|
|
|
key := string(data[:i])
|
2011-05-28 17:49:21 +02:00
|
|
|
if block_tags[key] {
|
2011-05-25 21:59:30 +02:00
|
|
|
return key, true
|
|
|
|
}
|
|
|
|
return "", false
|
|
|
|
}
|
2011-05-25 00:14:35 +02:00
|
|
|
|
2011-05-25 21:59:30 +02:00
|
|
|
// htmlblock_end checks whether data starts with the closing tag "</tag>"
// followed by blank line(s); returns the total length matched, or 0.
func htmlblock_end(tag string, rndr *render, data []byte) int {
	// assume data[0] == '<' && data[1] == '/' already tested

	// check if tag is a match
	if len(tag)+3 >= len(data) || bytes.Compare(data[2:2+len(tag)], []byte(tag)) != 0 || data[len(tag)+2] != '>' {
		return 0
	}

	// check white lines
	i := len(tag) + 3
	w := 0
	if i < len(data) {
		if w = is_empty(data[i:]); w == 0 {
			return 0 // non-blank after tag
		}
	}
	i += w
	w = 0

	// with lax HTML blocks a second blank line is optional; otherwise it
	// is required
	if rndr.ext_flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
		if i < len(data) {
			w = is_empty(data[i:])
		}
	} else {
		if i < len(data) {
			if w = is_empty(data[i:]); w == 0 {
				return 0 // non-blank line after tag line
			}
		}
	}

	return i + w
}
|
|
|
|
|
2011-05-25 21:59:30 +02:00
|
|
|
// is_empty returns the length of the leading line (including its newline,
// or one past the end when unterminated) if that line contains only spaces
// and tabs; it returns 0 for a non-blank line.
func is_empty(data []byte) int {
	pos := 0
	for ; pos < len(data); pos++ {
		switch data[pos] {
		case '\n':
			return pos + 1
		case ' ', '\t':
			// still blank, keep scanning
		default:
			return 0
		}
	}
	// ran off the end without a newline; count an implicit terminator
	return pos + 1
}
|
2011-05-25 00:14:35 +02:00
|
|
|
|
2011-05-25 21:59:30 +02:00
|
|
|
// is_hrule reports whether data begins with a horizontal rule: up to three
// leading spaces, then at least three '*', '-' or '_' characters, with
// nothing but that character, spaces and tabs before the end of the line.
func is_hrule(data []byte) bool {
	if len(data) < 3 {
		return false
	}

	// skip up to three leading spaces (safe: len(data) >= 3)
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	// identify the rule character
	if i+2 >= len(data) {
		return false
	}
	c := data[i]
	if c != '*' && c != '-' && c != '_' {
		return false
	}

	// the rest of the line may hold only c, spaces and tabs, and must
	// contain at least three occurrences of c
	count := 0
	for ; i < len(data) && data[i] != '\n'; i++ {
		if data[i] == c {
			count++
		} else if data[i] != ' ' && data[i] != '\t' {
			return false
		}
	}

	return count >= 3
}
|
|
|
|
|
2011-05-25 21:59:30 +02:00
|
|
|
// is_codefence checks whether data begins with a code fence: up to three
// spaces, then at least three '~' or '`'. Returns the length of the fence
// line (including its newline), or 0. When syntax is non-nil, *syntax is
// set to point at the language string given after the fence, either bare
// ("```go") or in braces ("```{go}").
func is_codefence(data []byte, syntax **string) int {
	i, n := 0, 0

	// skip initial spaces
	if len(data) < 3 {
		return 0
	}
	if data[0] == ' ' {
		i++
		if data[1] == ' ' {
			i++
			if data[2] == ' ' {
				i++
			}
		}
	}

	// look at the hrule char
	if i+2 >= len(data) || !(data[i] == '~' || data[i] == '`') {
		return 0
	}

	c := data[i]

	// the whole line must be the char or whitespace
	for i < len(data) && data[i] == c {
		n++
		i++
	}

	if n < 3 {
		return 0
	}

	if syntax != nil {
		// syn counts the bytes of the language name
		syn := 0

		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}

		syntax_start := i

		if i < len(data) && data[i] == '{' {
			// brace-delimited form: everything up to '}' on this line
			i++
			syntax_start++

			for i < len(data) && data[i] != '}' && data[i] != '\n' {
				syn++
				i++
			}

			if i == len(data) || data[i] != '}' {
				return 0
			}

			// string all whitespace at the beginning and the end
			// of the {} block
			for syn > 0 && isspace(data[syntax_start]) {
				syntax_start++
				syn--
			}

			for syn > 0 && isspace(data[syntax_start+syn-1]) {
				syn--
			}

			i++
		} else {
			// bare form: language runs to the first whitespace
			for i < len(data) && !isspace(data[i]) {
				syn++
				i++
			}
		}

		language := string(data[syntax_start : syntax_start+syn])
		*syntax = &language
	}

	// only whitespace may follow on the fence line
	for i < len(data) && data[i] != '\n' {
		if !isspace(data[i]) {
			return 0
		}
		i++
	}

	return i + 1
}
|
|
|
|
|
|
|
|
// parse_fencedcode parses a fenced code block (``` / ~~~ fences); returns
// the number of bytes consumed, or 0 when data does not start with a fence.
func parse_fencedcode(ob *bytes.Buffer, rndr *render, data []byte) int {
	var lang *string
	beg := is_codefence(data, &lang)
	if beg == 0 {
		return 0
	}

	work := bytes.NewBuffer(nil)

	for beg < len(data) {
		// a second fence line terminates the block
		fence_end := is_codefence(data[beg:], nil)
		if fence_end != 0 {
			beg += fence_end
			break
		}

		// find the end of the current line
		var end int
		for end = beg + 1; end < len(data) && data[end-1] != '\n'; end++ {
		}

		if beg < end {
			// verbatim copy to the working buffer, escaping entities
			if is_empty(data[beg:]) > 0 {
				work.WriteByte('\n')
			} else {
				work.Write(data[beg:end])
			}
		}
		beg = end
	}

	// ensure the block ends with a newline
	if work.Len() > 0 && work.Bytes()[work.Len()-1] != '\n' {
		work.WriteByte('\n')
	}

	if rndr.mk.blockcode != nil {
		syntax := ""
		if lang != nil {
			syntax = *lang
		}

		rndr.mk.blockcode(ob, work.Bytes(), syntax, rndr.mk.opaque)
	}

	return beg
}
|
|
|
|
|
|
|
|
// parse_table parses a table (header row, underline, body rows); returns
// the number of bytes consumed, or 0 when data does not start a table.
func parse_table(ob *bytes.Buffer, rndr *render, data []byte) int {
	header_work := bytes.NewBuffer(nil)
	i, columns, col_data := parse_table_header(header_work, rndr, data)
	if i > 0 {
		body_work := bytes.NewBuffer(nil)

		// consume body rows while they contain at least one pipe
		for i < len(data) {
			pipes, row_start := 0, i
			for ; i < len(data) && data[i] != '\n'; i++ {
				if data[i] == '|' {
					pipes++
				}
			}

			if pipes == 0 || i == len(data) {
				i = row_start
				break
			}

			parse_table_row(body_work, rndr, data[row_start:i], columns, col_data)
			i++
		}

		if rndr.mk.table != nil {
			rndr.mk.table(ob, header_work.Bytes(), body_work.Bytes(), rndr.mk.opaque)
		}
	}

	return i
}
|
|
|
|
|
|
|
|
func parse_table_header(ob *bytes.Buffer, rndr *render, data []byte) (size int, columns int, column_data []int) {
|
|
|
|
i, pipes := 0, 0
|
|
|
|
column_data = []int{}
|
|
|
|
for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
|
|
|
if data[i] == '|' {
|
|
|
|
pipes++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if i == len(data) || pipes == 0 {
|
|
|
|
return 0, 0, column_data
|
|
|
|
}
|
|
|
|
|
|
|
|
header_end := i
|
|
|
|
|
|
|
|
if data[0] == '|' {
|
|
|
|
pipes--
|
|
|
|
}
|
|
|
|
|
|
|
|
if i > 2 && data[i-1] == '|' {
|
|
|
|
pipes--
|
|
|
|
}
|
|
|
|
|
|
|
|
columns = pipes + 1
|
|
|
|
column_data = make([]int, columns)
|
|
|
|
|
|
|
|
// parse the header underline
|
|
|
|
i++
|
|
|
|
if i < len(data) && data[i] == '|' {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
under_end := i
|
|
|
|
for under_end < len(data) && data[under_end] != '\n' {
|
|
|
|
under_end++
|
|
|
|
}
|
|
|
|
|
|
|
|
col := 0
|
|
|
|
for ; col < columns && i < under_end; col++ {
|
|
|
|
dashes := 0
|
|
|
|
|
|
|
|
for i < under_end && (data[i] == ' ' || data[i] == '\t') {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
if data[i] == ':' {
|
|
|
|
i++
|
2011-05-29 05:17:53 +02:00
|
|
|
column_data[col] |= TABLE_ALIGNMENT_LEFT
|
2011-05-25 21:59:30 +02:00
|
|
|
dashes++
|
|
|
|
}
|
|
|
|
|
|
|
|
for i < under_end && data[i] == '-' {
|
|
|
|
i++
|
|
|
|
dashes++
|
|
|
|
}
|
|
|
|
|
|
|
|
if i < under_end && data[i] == ':' {
|
|
|
|
i++
|
2011-05-29 05:17:53 +02:00
|
|
|
column_data[col] |= TABLE_ALIGNMENT_RIGHT
|
2011-05-25 21:59:30 +02:00
|
|
|
dashes++
|
|
|
|
}
|
|
|
|
|
|
|
|
for i < under_end && (data[i] == ' ' || data[i] == '\t') {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
if i < under_end && data[i] != '|' {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if dashes < 3 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
if col < columns {
|
|
|
|
return 0, 0, column_data
|
|
|
|
}
|
|
|
|
|
|
|
|
parse_table_row(ob, rndr, data[:header_end], columns, column_data)
|
|
|
|
size = under_end + 1
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// parse_table_row renders a single pipe-separated table row, emitting one
// cell per column and padding short rows with empty cells.
func parse_table_row(ob *bytes.Buffer, rndr *render, data []byte, columns int, col_data []int) {
	i, col := 0, 0
	row_work := bytes.NewBuffer(nil)

	// skip an optional leading pipe
	if i < len(data) && data[i] == '|' {
		i++
	}

	for col = 0; col < columns && i < len(data); col++ {
		// strip leading whitespace of the cell
		for i < len(data) && isspace(data[i]) {
			i++
		}

		cell_start := i

		for i < len(data) && data[i] != '|' {
			i++
		}

		// strip trailing whitespace of the cell
		cell_end := i - 1

		for cell_end > cell_start && isspace(data[cell_end]) {
			cell_end--
		}

		cell_work := bytes.NewBuffer(nil)
		parse_inline(cell_work, rndr, data[cell_start:cell_end+1])

		if rndr.mk.table_cell != nil {
			cdata := 0
			if col < len(col_data) {
				cdata = col_data[col]
			}
			rndr.mk.table_cell(row_work, cell_work.Bytes(), cdata, rndr.mk.opaque)
		}

		i++
	}

	// pad the row with empty cells up to the column count
	for ; col < columns; col++ {
		empty_cell := []byte{}
		if rndr.mk.table_cell != nil {
			cdata := 0
			if col < len(col_data) {
				cdata = col_data[col]
			}
			rndr.mk.table_cell(row_work, empty_cell, cdata, rndr.mk.opaque)
		}
	}

	if rndr.mk.table_row != nil {
		rndr.mk.table_row(ob, row_work.Bytes(), rndr.mk.opaque)
	}
}
|
|
|
|
|
|
|
|
// returns blockquote prefix length: up to three leading spaces, a '>' and
// an optional single following space or tab; 0 when data is not a quote.
func prefix_quote(data []byte) int {
	// tolerate up to three spaces of indentation
	i := 0
	for i < len(data) && i < 3 && data[i] == ' ' {
		i++
	}

	if i >= len(data) || data[i] != '>' {
		return 0
	}

	// one space or tab after the '>' belongs to the prefix
	if i+1 < len(data) && (data[i+1] == ' ' || data[i+1] == '\t') {
		return i + 2
	}
	return i + 1
}
|
|
|
|
|
2011-05-28 17:49:21 +02:00
|
|
|
// parse a blockquote fragment
// Collects the quoted lines (with their '>' prefixes stripped), recursively
// parses them as block-level content and emits a blockquote; returns the
// number of bytes consumed.
func parse_blockquote(ob *bytes.Buffer, rndr *render, data []byte) int {
	out := bytes.NewBuffer(nil)
	work := bytes.NewBuffer(nil)
	beg, end := 0, 0
	for beg < len(data) {
		// find the end of the current line
		for end = beg + 1; end < len(data) && data[end-1] != '\n'; end++ {
		}

		if pre := prefix_quote(data[beg:]); pre > 0 {
			beg += pre // skip prefix
		} else {
			// empty line followed by non-quote line
			if is_empty(data[beg:]) > 0 && (end >= len(data) || (prefix_quote(data[end:]) == 0 && is_empty(data[end:]) == 0)) {
				break
			}
		}

		if beg < end { // copy into the in-place working buffer
			work.Write(data[beg:end])
		}
		beg = end
	}

	parse_block(out, rndr, work.Bytes())
	if rndr.mk.blockquote != nil {
		rndr.mk.blockquote(ob, out.Bytes(), rndr.mk.opaque)
	}
	return end
}
|
|
|
|
|
|
|
|
// returns prefix length for block code: 1 for a leading tab, 4 for four
// leading spaces, 0 otherwise.
func prefix_code(data []byte) int {
	switch {
	case len(data) > 0 && data[0] == '\t':
		return 1
	case len(data) > 3 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ':
		return 4
	default:
		return 0
	}
}
|
|
|
|
|
|
|
|
// parse_blockcode parses an indented code block (tab or 4-space prefix per
// line); returns the number of bytes consumed.
func parse_blockcode(ob *bytes.Buffer, rndr *render, data []byte) int {
	work := bytes.NewBuffer(nil)

	beg, end := 0, 0
	for beg < len(data) {
		// find the end of the current line
		for end = beg + 1; end < len(data) && data[end-1] != '\n'; end++ {
		}

		if pre := prefix_code(data[beg:end]); pre > 0 {
			beg += pre
		} else {
			if is_empty(data[beg:end]) == 0 {
				// non-empty non-prefixed line breaks the pre
				break
			}
		}

		if beg < end {
			// verbatim copy to the working buffer, escaping entities
			if is_empty(data[beg:end]) > 0 {
				work.WriteByte('\n')
			} else {
				work.Write(data[beg:end])
			}
		}
		beg = end
	}

	// trim all the \n off the end of work
	workbytes := work.Bytes()
	n := 0
	for len(workbytes) > n && workbytes[len(workbytes)-n-1] == '\n' {
		n++
	}
	if n > 0 {
		work = bytes.NewBuffer(workbytes[:len(workbytes)-n])
	}

	// keep exactly one trailing newline
	work.WriteByte('\n')

	if rndr.mk.blockcode != nil {
		rndr.mk.blockcode(ob, work.Bytes(), "", rndr.mk.opaque)
	}

	return beg
}
|
|
|
|
|
|
|
|
// returns unordered list item prefix length: up to three leading spaces,
// a '*', '+' or '-' bullet and a following space or tab; 0 otherwise.
func prefix_uli(data []byte) int {
	i := 0
	for i < len(data) && i < 3 && data[i] == ' ' {
		i++
	}

	// a bullet char plus one whitespace char must both be present
	if i+1 >= len(data) {
		return 0
	}
	bullet := data[i]
	if bullet != '*' && bullet != '+' && bullet != '-' {
		return 0
	}
	if data[i+1] != ' ' && data[i+1] != '\t' {
		return 0
	}
	return i + 2
}
|
|
|
|
|
|
|
|
// returns ordered list item prefix length: up to three leading spaces, one
// or more digits, a '.' and a following space or tab; 0 otherwise.
func prefix_oli(data []byte) int {
	i := 0
	for i < len(data) && i < 3 && data[i] == ' ' {
		i++
	}

	// at least one digit is required
	start := i
	for i < len(data) && data[i] >= '0' && data[i] <= '9' {
		i++
	}
	if i == start {
		return 0
	}

	// the digits must be followed by ". " or ".\t"
	if i+1 >= len(data) || data[i] != '.' {
		return 0
	}
	if data[i+1] != ' ' && data[i+1] != '\t' {
		return 0
	}
	return i + 2
}
|
|
|
|
|
2011-05-28 17:49:21 +02:00
|
|
|
// parse ordered or unordered list block
// flags carries LIST_TYPE_ORDERED on entry and accumulates item flags
// (parse_listitem receives it by pointer); returns bytes consumed.
func parse_list(ob *bytes.Buffer, rndr *render, data []byte, flags int) int {
	work := bytes.NewBuffer(nil)

	i, j := 0, 0
	for i < len(data) {
		j = parse_listitem(work, rndr, data[i:], &flags)
		i += j

		// stop on a failed item or when the item marked the list's end
		if j == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
			break
		}
	}

	if rndr.mk.list != nil {
		rndr.mk.list(ob, work.Bytes(), flags, rndr.mk.opaque)
	}
	return i
}
|
|
|
|
|
2011-05-26 04:46:16 +02:00
|
|
|
// parse a single list item
//
// Parses the unordered or ordered item prefix itself (via prefix_uli /
// prefix_oli), accumulates the item's lines with leading indentation
// stripped, renders the contents inline or as blocks depending on whether
// the item contains blank lines, and emits the result through
// rndr.mk.listitem. flags is updated in place with
// LIST_ITEM_CONTAINS_BLOCK and/or LIST_ITEM_END_OF_LIST.
// Returns the number of bytes consumed from data, or 0 if data does not
// start with a list item.
func parse_listitem(ob *bytes.Buffer, rndr *render, data []byte, flags *int) int {
	// keep track of the first indentation prefix
	beg, end, pre, sublist, orgpre, i := 0, 0, 0, 0, 0, 0
	// measure the item's own indentation (up to 3 spaces)
	for orgpre < 3 && orgpre < len(data) && data[orgpre] == ' ' {
		orgpre++
	}
	// find the item prefix: try unordered first, then ordered
	beg = prefix_uli(data)
	if beg == 0 {
		beg = prefix_oli(data)
	}
	if beg == 0 {
		// not a list item at all
		return 0
	}
	// skip leading whitespace on first line
	for beg < len(data) && data[beg] == ' ' {
		beg++
	}
	// skip to the beginning of the following line
	end = beg
	for end < len(data) && data[end-1] != '\n' {
		end++
	}
	// get working buffers:
	// work holds the raw item text with prefixes stripped,
	// inter holds the intermediate rendering of the item body
	work := bytes.NewBuffer(nil)
	inter := bytes.NewBuffer(nil)
	// put the first line into the working buffer
	work.Write(data[beg:end])
	beg = end
	// process the following lines
	in_empty, has_inside_empty := false, false
	for beg < len(data) {
		// advance end past the next line
		end++
		for end < len(data) && data[end-1] != '\n' {
			end++
		}
		// process an empty line: remember it and move on
		if is_empty(data[beg:end]) > 0 {
			in_empty = true
			beg = end
			continue
		}
		// calculate the indentation (up to 4 leading spaces)
		i = 0
		for i < 4 && beg+i < end && data[beg+i] == ' ' {
			i++
		}
		pre = i
		if data[beg] == '\t' {
			// a leading tab counts as a full 8-column indent
			i = 1
			pre = 8
		}
		// check for a new item
		chunk := data[beg+i : end]
		if (prefix_uli(chunk) > 0 && !is_hrule(chunk)) || prefix_oli(chunk) > 0 {
			if in_empty {
				has_inside_empty = true
			}
			if pre == orgpre { // the following item must have the same indentation
				break
			}
			// a more deeply indented item starts a sublist; remember
			// where it begins in the working buffer
			if sublist == 0 {
				sublist = work.Len()
			}
		} else {
			// only join indented stuff after empty lines
			if in_empty && i < 4 && data[beg] != '\t' {
				// an unindented line after a blank line ends the whole list
				*flags |= LIST_ITEM_END_OF_LIST
				break
			} else {
				if in_empty {
					// preserve the blank line inside the item
					work.WriteByte('\n')
					has_inside_empty = true
				}
			}
		}
		in_empty = false
		// add the line into the working buffer without prefix
		work.Write(data[beg+i : end])
		beg = end
	}
	// render li contents
	if has_inside_empty {
		// a blank line inside the item means it holds block-level content
		*flags |= LIST_ITEM_CONTAINS_BLOCK
	}
	workbytes := work.Bytes()
	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 {
		// intermediate render of block li;
		// render the item text and any sublist separately
		if sublist > 0 && sublist < len(workbytes) {
			parse_block(inter, rndr, workbytes[:sublist])
			parse_block(inter, rndr, workbytes[sublist:])
		} else {
			parse_block(inter, rndr, workbytes)
		}
	} else {
		// intermediate render of inline li;
		// a sublist is still rendered as a block
		if sublist > 0 && sublist < len(workbytes) {
			parse_inline(inter, rndr, workbytes[:sublist])
			parse_block(inter, rndr, workbytes[sublist:])
		} else {
			parse_inline(inter, rndr, workbytes)
		}
	}
	// render li itself
	if rndr.mk.listitem != nil {
		rndr.mk.listitem(ob, inter.Bytes(), *flags, rndr.mk.opaque)
	}
	// number of bytes consumed
	return beg
}
|
|
|
|
|
2011-05-25 23:41:25 +02:00
|
|
|
// parse a paragraph block
//
// Scans line by line until the paragraph is terminated by a blank line,
// a setext header underline, an interrupting HTML block (when
// EXTENSION_LAX_HTML_BLOCKS is on), an ATX header, or a horizontal rule.
// Emits the text via rndr.mk.paragraph, or — when a setext underline was
// found — via rndr.mk.header (with any preceding lines emitted as a
// separate paragraph first). Returns the number of bytes consumed.
func parse_paragraph(ob *bytes.Buffer, rndr *render, data []byte) int {
	i, end, level := 0, 0, 0

	for i < len(data) {
		// find the end of the current line (end is one past the '\n')
		for end = i + 1; end < len(data) && data[end-1] != '\n'; end++ {
		}

		// a blank line ends the paragraph
		if is_empty(data[i:]) > 0 {
			break
		}
		// a setext underline turns the preceding line into a header
		if level = is_headerline(data[i:]); level > 0 {
			break
		}

		// with lax HTML blocks, an HTML block may interrupt a paragraph
		if rndr.ext_flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
			if data[i] == '<' && rndr.mk.blockhtml != nil && parse_htmlblock(ob, rndr, data[i:], false) > 0 {
				end = i
				break
			}
		}

		// an ATX header or a horizontal rule also interrupts a paragraph
		if is_atxheader(rndr, data[i:]) || is_hrule(data[i:]) {
			end = i
			break
		}

		i = end
	}

	// trim trailing newlines from the paragraph text
	work := data
	size := i
	for size > 0 && work[size-1] == '\n' {
		size--
	}

	if level == 0 {
		// plain paragraph: render the contents inline
		tmp := bytes.NewBuffer(nil)
		parse_inline(tmp, rndr, work[:size])
		if rndr.mk.paragraph != nil {
			rndr.mk.paragraph(ob, tmp.Bytes(), rndr.mk.opaque)
		}
	} else {
		// setext header: the last line of the text is the header
		if size > 0 {
			beg := 0
			i = size
			size--

			// back up to the start of the last line
			for size > 0 && work[size] != '\n' {
				size--
			}

			beg = size + 1
			// trim newlines off the text preceding the header line
			for size > 0 && work[size-1] == '\n' {
				size--
			}

			// if anything precedes the header line, emit it as a paragraph
			if size > 0 {
				tmp := bytes.NewBuffer(nil)
				parse_inline(tmp, rndr, work[:size])
				if rndr.mk.paragraph != nil {
					rndr.mk.paragraph(ob, tmp.Bytes(), rndr.mk.opaque)
				}

				// the remainder is the header text
				work = work[beg:]
				size = i - beg
			} else {
				size = i
			}
		}

		header_work := bytes.NewBuffer(nil)
		parse_inline(header_work, rndr, work[:size])

		if rndr.mk.header != nil {
			rndr.mk.header(ob, header_work.Bytes(), level, rndr.mk.opaque)
		}
	}

	return end
}
|
|
|
|
|
2011-05-25 21:59:30 +02:00
|
|
|
|
|
|
|
//
|
2011-05-29 05:17:53 +02:00
|
|
|
// Link references
|
2011-05-25 21:59:30 +02:00
|
|
|
//
|
2011-05-29 05:17:53 +02:00
|
|
|
// This section implements support for references that (usually) appear
|
|
|
|
// as footnotes in a document, and can be referenced anywhere in the document.
|
|
|
|
// The basic format is:
|
2011-05-25 21:59:30 +02:00
|
|
|
//
|
2011-05-29 05:17:53 +02:00
|
|
|
// [1]: http://www.google.com/ "Google"
|
|
|
|
// [2]: http://www.github.com/ "Github"
|
2011-05-25 21:59:30 +02:00
|
|
|
//
|
2011-05-29 05:17:53 +02:00
|
|
|
// Anywhere in the document, the reference can be linked by referring to its
|
|
|
|
// label, i.e., 1 and 2 in this example, as in:
|
|
|
|
//
|
|
|
|
// This library is hosted on [Github][2], a git hosting site.
|
2011-05-25 21:59:30 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// References are parsed and stored in this struct.
type link_ref struct {
	id    []byte // the label used to refer to this reference
	link  []byte // the link target
	title []byte // the optional title; empty if none was given
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// The list of all reference links is stored in this type.
// It implements the sorting interface via Len, Less, and Swap below,
// ordering references by id (case-insensitive).
type link_ref_array []*link_ref
|
2011-05-28 00:12:21 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Find the length of a list of references.
// This implements an interface needed for sorting (sort.Interface).
func (elt link_ref_array) Len() int {
	return len(elt)
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Test if one reference is less than another (case-insensitive).
// The comparison is delegated to byteslice_less on the reference ids.
// This implements an interface needed for sorting.
func (elt link_ref_array) Less(i, j int) bool {
	return byteslice_less(elt[i].id, elt[j].id)
}
|
2011-05-26 20:10:16 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Compare two []byte values (case-insensitive), returning
// true if a is less than b.
//
// Each byte is treated as a code point and lowered with unicode.ToLower
// before comparison; if the common prefix is equal, the shorter slice
// is considered less.
func byteslice_less(a []byte, b []byte) bool {
	// adapted from bytes.Compare in stdlib
	m := len(a)
	if m > len(b) {
		m = len(b)
	}
	for i, ac := range a[0:m] {
		// do a case-insensitive comparison
		// (unicode.ToLower takes a rune; the old int conversion
		// does not compile under Go 1)
		ai, bi := unicode.ToLower(rune(ac)), unicode.ToLower(rune(b[i]))
		switch {
		case ai > bi:
			return false
		case ai < bi:
			return true
		}
	}
	// the common prefix is equal ignoring case:
	// the shorter slice sorts first
	return len(a) < len(b)
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Swap two references.
|
|
|
|
// This implements an interface needed for sorting.
|
|
|
|
func (elt link_ref_array) Swap(i, j int) {
|
|
|
|
elt[i], elt[j] = elt[j], elt[i]
|
|
|
|
}
|
2011-05-25 00:14:35 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Check whether or not data starts with a reference link.
|
|
|
|
// If so, it is parsed and stored in the list of references
|
|
|
|
// (in the render struct).
|
|
|
|
// Returns the number of bytes to skip to move past it, or zero
|
|
|
|
// if there is the first line is not a reference.
|
|
|
|
func is_ref(rndr *render, data []byte) int {
|
|
|
|
// up to 3 optional leading spaces
|
|
|
|
if len(data) < 4 {
|
|
|
|
return 0
|
2011-05-25 00:14:35 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
i := 0
|
|
|
|
for i < 3 && data[i] == ' ' {
|
|
|
|
i++
|
2011-05-25 00:14:35 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if data[i] == ' ' {
|
|
|
|
return 0
|
2011-05-25 00:14:35 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
|
|
|
|
// id part: anything but a newline between brackets
|
|
|
|
if data[i] != '[' {
|
|
|
|
return 0
|
2011-05-25 00:14:35 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
i++
|
|
|
|
id_offset := i
|
|
|
|
for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
|
|
|
|
i++
|
2011-05-25 00:14:35 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if i >= len(data) || data[i] != ']' {
|
|
|
|
return 0
|
2011-05-25 00:14:35 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
id_end := i
|
2011-05-25 00:14:35 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// spacer: colon (space | tab)* newline? (space | tab)*
|
|
|
|
i++
|
|
|
|
if i >= len(data) || data[i] != ':' {
|
|
|
|
return 0
|
2011-05-25 21:59:30 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
i++
|
|
|
|
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
|
|
i++
|
2011-05-25 21:59:30 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
|
|
|
|
i++
|
|
|
|
if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
|
|
|
|
i++
|
2011-05-25 21:59:30 +02:00
|
|
|
}
|
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
if i >= len(data) {
|
|
|
|
return 0
|
2011-05-25 21:59:30 +02:00
|
|
|
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// link: whitespace-free sequence, optionally between angle brackets
|
|
|
|
if data[i] == '<' {
|
2011-05-26 00:00:01 +02:00
|
|
|
i++
|
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
link_offset := i
|
|
|
|
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
|
|
|
|
i++
|
2011-05-27 21:38:10 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
link_end := i
|
|
|
|
if data[link_offset] == '<' && data[link_end-1] == '>' {
|
|
|
|
link_offset++
|
|
|
|
link_end--
|
2011-05-27 21:38:10 +02:00
|
|
|
}
|
2011-05-27 06:27:33 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
|
|
|
|
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
|
|
i++
|
2011-05-27 06:27:33 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
|
2011-05-27 06:27:33 +02:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// compute end-of-line
|
|
|
|
line_end := 0
|
|
|
|
if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
|
|
|
|
line_end = i
|
2011-05-27 06:27:33 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
|
|
|
|
line_end++
|
2011-05-29 01:37:18 +02:00
|
|
|
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// optional (space|tab)* spacer after a newline
|
|
|
|
if line_end > 0 {
|
|
|
|
i = line_end + 1
|
|
|
|
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
|
|
i++
|
2011-05-29 01:37:18 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// optional title: any non-newline sequence enclosed in '"() alone on its line
|
|
|
|
title_offset, title_end := 0, 0
|
|
|
|
if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
|
2011-05-27 06:27:33 +02:00
|
|
|
i++
|
2011-05-29 05:17:53 +02:00
|
|
|
title_offset = i
|
2011-05-27 06:27:33 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// look for EOL
|
|
|
|
for i < len(data) && data[i] != '\n' && data[i] != '\r' {
|
|
|
|
i++
|
2011-05-27 06:27:33 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
|
|
|
|
title_end = i + 1
|
|
|
|
} else {
|
|
|
|
title_end = i
|
2011-05-29 01:37:18 +02:00
|
|
|
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// step back
|
|
|
|
i--
|
|
|
|
for i > title_offset && (data[i] == ' ' || data[i] == '\t') {
|
|
|
|
i--
|
2011-05-29 01:37:18 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if i > title_offset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
|
|
|
|
line_end = title_end
|
|
|
|
title_end = i
|
2011-05-29 01:37:18 +02:00
|
|
|
}
|
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
if line_end == 0 { // garbage after the link
|
2011-05-29 01:37:18 +02:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// a valid ref has been found
|
|
|
|
if rndr == nil {
|
|
|
|
return line_end
|
2011-05-29 01:37:18 +02:00
|
|
|
}
|
2011-05-29 05:17:53 +02:00
|
|
|
item := &link_ref{id: data[id_offset:id_end], link: data[link_offset:link_end], title: data[title_offset:title_end]}
|
|
|
|
rndr.refs = append(rndr.refs, item)
|
2011-05-29 01:37:18 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
return line_end
|
2011-05-29 01:37:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
//
|
|
|
|
//
|
|
|
|
// Miscellaneous helper functions
|
|
|
|
//
|
|
|
|
//
|
2011-05-29 01:37:18 +02:00
|
|
|
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	const punctuation = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
	return bytes.IndexByte([]byte(punctuation), c) != -1
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// this is sort.Search, reproduced here because an older
// version of the library had a bug
//
// Returns the smallest index in [0, n] at which f is true, assuming
// f is false for some (possibly empty) prefix and true afterwards;
// returns n if f is never true.
func sortDotSearch(n int, f func(int) bool) int {
	// binary search invariant: f(lo-1) == false, f(hi) == true
	lo, hi := 0, n
	for lo < hi {
		mid := lo + (hi-lo)/2 // midpoint without overflow
		if f(mid) {
			hi = mid // keeps f(hi) == true
		} else {
			lo = mid + 1 // keeps f(lo-1) == false
		}
	}
	// lo == hi: first index where f is true (or n if none)
	return lo
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Test if a character is a whitespace character.
func isspace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r', '\f', '\v':
		return true
	}
	return false
}
|
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Test if a character is an ASCII letter or a digit.
func isalnum(c byte) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'z':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	}
	return false
}
|
2011-05-27 06:27:33 +02:00
|
|
|
|
2011-05-29 05:17:53 +02:00
|
|
|
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
|
2011-05-26 04:46:16 +02:00
|
|
|
func expand_tabs(ob *bytes.Buffer, line []byte) {
|
|
|
|
i, tab := 0, 0
|
|
|
|
|
|
|
|
for i < len(line) {
|
|
|
|
org := i
|
|
|
|
for i < len(line) && line[i] != '\t' {
|
|
|
|
i++
|
|
|
|
tab++
|
|
|
|
}
|
|
|
|
|
|
|
|
if i > org {
|
|
|
|
ob.Write(line[org:i])
|
|
|
|
}
|
|
|
|
|
|
|
|
if i >= len(line) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
ob.WriteByte(' ')
|
|
|
|
tab++
|
2011-05-29 05:17:53 +02:00
|
|
|
if tab%TAB_SIZE == 0 {
|
2011-05-26 04:46:16 +02:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
}
|