commit fd6412d6f2 (2024-11-03 17:16:20 +01:00)
8090 changed files with 778406 additions and 0 deletions

node_modules/markdown-it/lib/rules_inline/autolink.mjs generated vendored Normal file

@@ -0,0 +1,72 @@
// Process autolinks '<protocol:...>'
/* eslint max-len:0 */
const EMAIL_RE = /^([a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$/
/* eslint-disable-next-line no-control-regex */
const AUTOLINK_RE = /^([a-zA-Z][a-zA-Z0-9+.-]{1,31}):([^<>\x00-\x20]*)$/
export default function autolink (state, silent) {
let pos = state.pos
if (state.src.charCodeAt(pos) !== 0x3C/* < */) { return false }
const start = state.pos
const max = state.posMax
for (;;) {
if (++pos >= max) return false
const ch = state.src.charCodeAt(pos)
if (ch === 0x3C /* < */) return false
if (ch === 0x3E /* > */) break
}
const url = state.src.slice(start + 1, pos)
if (AUTOLINK_RE.test(url)) {
const fullUrl = state.md.normalizeLink(url)
if (!state.md.validateLink(fullUrl)) { return false }
if (!silent) {
const token_o = state.push('link_open', 'a', 1)
token_o.attrs = [['href', fullUrl]]
token_o.markup = 'autolink'
token_o.info = 'auto'
const token_t = state.push('text', '', 0)
token_t.content = state.md.normalizeLinkText(url)
const token_c = state.push('link_close', 'a', -1)
token_c.markup = 'autolink'
token_c.info = 'auto'
}
state.pos += url.length + 2
return true
}
if (EMAIL_RE.test(url)) {
const fullUrl = state.md.normalizeLink('mailto:' + url)
if (!state.md.validateLink(fullUrl)) { return false }
if (!silent) {
const token_o = state.push('link_open', 'a', 1)
token_o.attrs = [['href', fullUrl]]
token_o.markup = 'autolink'
token_o.info = 'auto'
const token_t = state.push('text', '', 0)
token_t.content = state.md.normalizeLinkText(url)
const token_c = state.push('link_close', 'a', -1)
token_c.markup = 'autolink'
token_c.info = 'auto'
}
state.pos += url.length + 2
return true
}
return false
}
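
A quick sketch of this rule in action through the public API, assuming markdown-it is installed (outputs are the CommonMark-expected results):

// Exercising the autolink rule via renderInline.
import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// URL form, matched by AUTOLINK_RE:
console.log(md.renderInline('<https://example.com/a?b=c>'))
// -> <a href="https://example.com/a?b=c">https://example.com/a?b=c</a>

// E-mail form, matched by EMAIL_RE and prefixed with mailto::
console.log(md.renderInline('<user@example.com>'))
// -> <a href="mailto:user@example.com">user@example.com</a>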

node_modules/markdown-it/lib/rules_inline/backticks.mjs generated vendored Normal file

@@ -0,0 +1,60 @@
// Parse backticks
export default function backtick (state, silent) {
let pos = state.pos
const ch = state.src.charCodeAt(pos)
if (ch !== 0x60/* ` */) { return false }
const start = pos
pos++
const max = state.posMax
// scan marker length
while (pos < max && state.src.charCodeAt(pos) === 0x60/* ` */) { pos++ }
const marker = state.src.slice(start, pos)
const openerLength = marker.length
if (state.backticksScanned && (state.backticks[openerLength] || 0) <= start) {
if (!silent) state.pending += marker
state.pos += openerLength
return true
}
let matchEnd = pos
let matchStart
// Nothing found in the cache, scan until the end of the line (or until marker is found)
while ((matchStart = state.src.indexOf('`', matchEnd)) !== -1) {
matchEnd = matchStart + 1
// scan marker length
while (matchEnd < max && state.src.charCodeAt(matchEnd) === 0x60/* ` */) { matchEnd++ }
const closerLength = matchEnd - matchStart
if (closerLength === openerLength) {
// Found matching closer length.
if (!silent) {
const token = state.push('code_inline', 'code', 0)
token.markup = marker
token.content = state.src.slice(pos, matchStart)
.replace(/\n/g, ' ')
.replace(/^ (.+) $/, '$1')
}
state.pos = matchEnd
return true
}
// Some different length found, put it in cache as upper limit of where closer can be found
state.backticks[closerLength] = matchStart
}
// Scanned through the end, didn't find anything
state.backticksScanned = true
if (!silent) state.pending += marker
state.pos += openerLength
return true
}
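
A short sketch of the opener/closer length matching, assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// A double-backtick opener only pairs with a double-backtick closer,
// so a lone backtick can appear inside; the final .replace() strips
// one leading and one trailing space:
console.log(md.renderInline('`a` and `` b `c` ``'))
// -> <code>a</code> and <code>b `c`</code>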

node_modules/markdown-it/lib/rules_inline/balance_pairs.mjs generated vendored Normal file

@@ -0,0 +1,124 @@
// For each opening emphasis-like marker find a matching closing one
//
function processDelimiters (delimiters) {
const openersBottom = {}
const max = delimiters.length
if (!max) return
// headerIdx is the index of the first delimiter of the current run (the run containing the closer)
let headerIdx = 0
let lastTokenIdx = -2 // needs any value lower than -1
const jumps = []
for (let closerIdx = 0; closerIdx < max; closerIdx++) {
const closer = delimiters[closerIdx]
jumps.push(0)
// markers belong to same delimiter run if:
// - they have adjacent tokens
// - AND markers are the same
//
if (delimiters[headerIdx].marker !== closer.marker || lastTokenIdx !== closer.token - 1) {
headerIdx = closerIdx
}
lastTokenIdx = closer.token
// Length is only used for emphasis-specific "rule of 3",
// if it's not defined (in strikethrough or 3rd party plugins),
// we can default it to 0 to disable those checks.
//
closer.length = closer.length || 0
if (!closer.close) continue
// Previously calculated lower bounds (previous failures)
// for each marker, for each delimiter length modulo 3,
// and for whether this closer can be an opener;
// https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460
/* eslint-disable-next-line no-prototype-builtins */
if (!openersBottom.hasOwnProperty(closer.marker)) {
openersBottom[closer.marker] = [-1, -1, -1, -1, -1, -1]
}
const minOpenerIdx = openersBottom[closer.marker][(closer.open ? 3 : 0) + (closer.length % 3)]
let openerIdx = headerIdx - jumps[headerIdx] - 1
let newMinOpenerIdx = openerIdx
for (; openerIdx > minOpenerIdx; openerIdx -= jumps[openerIdx] + 1) {
const opener = delimiters[openerIdx]
if (opener.marker !== closer.marker) continue
if (opener.open && opener.end < 0) {
let isOddMatch = false
// from spec:
//
// If one of the delimiters can both open and close emphasis, then the
// sum of the lengths of the delimiter runs containing the opening and
// closing delimiters must not be a multiple of 3 unless both lengths
// are multiples of 3.
//
if (opener.close || closer.open) {
if ((opener.length + closer.length) % 3 === 0) {
if (opener.length % 3 !== 0 || closer.length % 3 !== 0) {
isOddMatch = true
}
}
}
if (!isOddMatch) {
// If previous delimiter cannot be an opener, we can safely skip
// the entire sequence in future checks. This is required to make
// sure algorithm has linear complexity (see *_*_*_*_*_... case).
//
const lastJump = openerIdx > 0 && !delimiters[openerIdx - 1].open
? jumps[openerIdx - 1] + 1
: 0
jumps[closerIdx] = closerIdx - openerIdx + lastJump
jumps[openerIdx] = lastJump
closer.open = false
opener.end = closerIdx
opener.close = false
newMinOpenerIdx = -1
// treat next token as start of run,
// it optimizes skips in **<...>**a**<...>** pathological case
lastTokenIdx = -2
break
}
}
}
if (newMinOpenerIdx !== -1) {
// If match for this delimiter run failed, we want to set lower bound for
// future lookups. This is required to make sure algorithm has linear
// complexity.
//
// See details here:
// https://github.com/commonmark/cmark/issues/178#issuecomment-270417442
//
openersBottom[closer.marker][(closer.open ? 3 : 0) + ((closer.length || 0) % 3)] = newMinOpenerIdx
}
}
}
export default function link_pairs (state) {
const tokens_meta = state.tokens_meta
const max = state.tokens_meta.length
processDelimiters(state.delimiters)
for (let curr = 0; curr < max; curr++) {
if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
processDelimiters(tokens_meta[curr].delimiters)
}
}
}
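
The "rule of 3" above is easiest to see from the output; a sketch assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// 2 + 1 = 3 is a multiple of 3 (and the lengths are not both multiples
// of 3), so the ** run may not pair with the outer * markers:
console.log(md.renderInline('*foo**bar*'))
// -> <em>foo**bar</em>

// With a closer for every run, pairing nests as expected:
console.log(md.renderInline('*foo**bar**baz*'))
// -> <em>foo<strong>bar</strong>baz</em>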

node_modules/markdown-it/lib/rules_inline/emphasis.mjs generated vendored Normal file

@@ -0,0 +1,123 @@
// Process *this* and _that_
//
// Insert each marker as a separate text token, and add it to delimiter list
//
function emphasis_tokenize (state, silent) {
const start = state.pos
const marker = state.src.charCodeAt(start)
if (silent) { return false }
if (marker !== 0x5F /* _ */ && marker !== 0x2A /* * */) { return false }
const scanned = state.scanDelims(state.pos, marker === 0x2A)
for (let i = 0; i < scanned.length; i++) {
const token = state.push('text', '', 0)
token.content = String.fromCharCode(marker)
state.delimiters.push({
// Char code of the starting marker (number).
//
marker,
// Total length of this series of delimiters.
//
length: scanned.length,
// A position of the token this delimiter corresponds to.
//
token: state.tokens.length - 1,
// If this delimiter is matched as a valid opener, `end` will be
// equal to the index of the matching closer, otherwise it's `-1`.
//
end: -1,
// Boolean flags that determine if this delimiter could open or close
// an emphasis.
//
open: scanned.can_open,
close: scanned.can_close
})
}
state.pos += scanned.length
return true
}
function postProcess (state, delimiters) {
const max = delimiters.length
for (let i = max - 1; i >= 0; i--) {
const startDelim = delimiters[i]
if (startDelim.marker !== 0x5F/* _ */ && startDelim.marker !== 0x2A/* * */) {
continue
}
// Process only opening markers
if (startDelim.end === -1) {
continue
}
const endDelim = delimiters[startDelim.end]
// If the previous delimiter has the same marker and is adjacent to this one,
// merge those into one strong delimiter.
//
// `<em><em>whatever</em></em>` -> `<strong>whatever</strong>`
//
const isStrong = i > 0 &&
delimiters[i - 1].end === startDelim.end + 1 &&
// check that the first two markers match and are adjacent
delimiters[i - 1].marker === startDelim.marker &&
delimiters[i - 1].token === startDelim.token - 1 &&
// check that last two markers are adjacent (we can safely assume they match)
delimiters[startDelim.end + 1].token === endDelim.token + 1
const ch = String.fromCharCode(startDelim.marker)
const token_o = state.tokens[startDelim.token]
token_o.type = isStrong ? 'strong_open' : 'em_open'
token_o.tag = isStrong ? 'strong' : 'em'
token_o.nesting = 1
token_o.markup = isStrong ? ch + ch : ch
token_o.content = ''
const token_c = state.tokens[endDelim.token]
token_c.type = isStrong ? 'strong_close' : 'em_close'
token_c.tag = isStrong ? 'strong' : 'em'
token_c.nesting = -1
token_c.markup = isStrong ? ch + ch : ch
token_c.content = ''
if (isStrong) {
state.tokens[delimiters[i - 1].token].content = ''
state.tokens[delimiters[startDelim.end + 1].token].content = ''
i--
}
}
}
// Walk through delimiter list and replace text tokens with tags
//
function emphasis_post_process (state) {
const tokens_meta = state.tokens_meta
const max = state.tokens_meta.length
postProcess(state, state.delimiters)
for (let curr = 0; curr < max; curr++) {
if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
postProcess(state, tokens_meta[curr].delimiters)
}
}
}
export default {
tokenize: emphasis_tokenize,
postProcess: emphasis_post_process
}
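
A sketch of the tokenize/postProcess pair end to end, assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

console.log(md.renderInline('*em* **strong** ***both***'))
// -> <em>em</em> <strong>strong</strong> <em><strong>both</strong></em>

// scanDelims is called with canSplitWord = false for '_',
// so underscores inside a word never open or close emphasis:
console.log(md.renderInline('snake_case_name'))
// -> snake_case_name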

node_modules/markdown-it/lib/rules_inline/entity.mjs generated vendored Normal file

@@ -0,0 +1,51 @@
// Process html entity - &#123;, &#xAF;, &quot;, ...
import { decodeHTML } from 'entities'
import { isValidEntityCode, fromCodePoint } from '../common/utils.mjs'
const DIGITAL_RE = /^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i
const NAMED_RE = /^&([a-z][a-z0-9]{1,31});/i
export default function entity (state, silent) {
const pos = state.pos
const max = state.posMax
if (state.src.charCodeAt(pos) !== 0x26/* & */) return false
if (pos + 1 >= max) return false
const ch = state.src.charCodeAt(pos + 1)
if (ch === 0x23 /* # */) {
const match = state.src.slice(pos).match(DIGITAL_RE)
if (match) {
if (!silent) {
const code = match[1][0].toLowerCase() === 'x' ? parseInt(match[1].slice(1), 16) : parseInt(match[1], 10)
const token = state.push('text_special', '', 0)
token.content = isValidEntityCode(code) ? fromCodePoint(code) : fromCodePoint(0xFFFD)
token.markup = match[0]
token.info = 'entity'
}
state.pos += match[0].length
return true
}
} else {
const match = state.src.slice(pos).match(NAMED_RE)
if (match) {
const decoded = decodeHTML(match[0])
if (decoded !== match[0]) {
if (!silent) {
const token = state.push('text_special', '', 0)
token.content = decoded
token.markup = match[0]
token.info = 'entity'
}
state.pos += match[0].length
return true
}
}
}
return false
}
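
A sketch of the decimal, hex, and named branches, plus the fallthrough for an unknown name, assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

console.log(md.renderInline('&#35; &#x41; &amp; &bogus;'))
// -> # A &amp; &amp;bogus;
// &#35; and &#x41; decode via DIGITAL_RE; &amp; decodes via decodeHTML
// (and is re-escaped on output); &bogus; is not a known entity, so the
// rule returns false and the text is left as-is.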

node_modules/markdown-it/lib/rules_inline/escape.mjs generated vendored Normal file

@@ -0,0 +1,69 @@
// Process escaped chars and hardbreaks
import { isSpace } from '../common/utils.mjs'
const ESCAPED = []
for (let i = 0; i < 256; i++) { ESCAPED.push(0) }
'\\!"#$%&\'()*+,./:;<=>?@[]^_`{|}~-'
.split('').forEach(function (ch) { ESCAPED[ch.charCodeAt(0)] = 1 })
export default function escape (state, silent) {
let pos = state.pos
const max = state.posMax
if (state.src.charCodeAt(pos) !== 0x5C/* \ */) return false
pos++
// '\' at the end of the inline block
if (pos >= max) return false
let ch1 = state.src.charCodeAt(pos)
if (ch1 === 0x0A) {
if (!silent) {
state.push('hardbreak', 'br', 0)
}
pos++
// skip leading whitespace from the next line
while (pos < max) {
ch1 = state.src.charCodeAt(pos)
if (!isSpace(ch1)) break
pos++
}
state.pos = pos
return true
}
let escapedStr = state.src[pos]
if (ch1 >= 0xD800 && ch1 <= 0xDBFF && pos + 1 < max) {
const ch2 = state.src.charCodeAt(pos + 1)
if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
escapedStr += state.src[pos + 1]
pos++
}
}
const origStr = '\\' + escapedStr
if (!silent) {
const token = state.push('text_special', '', 0)
if (ch1 < 256 && ESCAPED[ch1] !== 0) {
token.content = escapedStr
} else {
token.content = origStr
}
token.markup = origStr
token.info = 'escape'
}
state.pos = pos + 1
return true
}
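
A sketch of the escapable/non-escapable split, assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// Punctuation listed in ESCAPED drops the backslash:
console.log(md.renderInline('\\*not emphasis\\*'))
// -> *not emphasis*

// Anything else keeps it (ESCAPED[ch] === 0):
console.log(md.renderInline('\\q'))
// -> \q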

node_modules/markdown-it/lib/rules_inline/fragments_join.mjs generated vendored Normal file

@@ -0,0 +1,38 @@
// Clean up tokens after emphasis and strikethrough postprocessing:
// merge adjacent text nodes into one and re-calculate all token levels
//
// This is necessary because initially emphasis delimiter markers (*, _, ~)
// are treated as their own separate text tokens. Then emphasis rule either
// leaves them as text (needed to merge with adjacent text) or turns them
// into opening/closing tags (which messes up levels inside).
//
export default function fragments_join (state) {
let curr, last
let level = 0
const tokens = state.tokens
const max = state.tokens.length
for (curr = last = 0; curr < max; curr++) {
// re-calculate levels after emphasis/strikethrough turns some text nodes
// into opening/closing tags
if (tokens[curr].nesting < 0) level-- // closing tag
tokens[curr].level = level
if (tokens[curr].nesting > 0) level++ // opening tag
if (tokens[curr].type === 'text' &&
curr + 1 < max &&
tokens[curr + 1].type === 'text') {
// collapse two adjacent text nodes
tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content
} else {
if (curr !== last) { tokens[last] = tokens[curr] }
last++
}
}
if (curr !== last) {
tokens.length = last
}
}
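
The merge is visible in the token stream; a sketch assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// The unmatched '*' was pushed as its own text token by the emphasis
// rule; fragments_join folds it into the following text token:
const children = md.parseInline('*not closed', {})[0].children
console.log(children.length, children[0].type, children[0].content)
// -> 1 text *not closed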

node_modules/markdown-it/lib/rules_inline/html_inline.mjs generated vendored Normal file

@@ -0,0 +1,50 @@
// Process html tags
import { HTML_TAG_RE } from '../common/html_re.mjs'
function isLinkOpen (str) {
return /^<a[>\s]/i.test(str)
}
function isLinkClose (str) {
return /^<\/a\s*>/i.test(str)
}
function isLetter (ch) {
/* eslint no-bitwise:0 */
const lc = ch | 0x20 // to lower case
return (lc >= 0x61/* a */) && (lc <= 0x7a/* z */)
}
export default function html_inline (state, silent) {
if (!state.md.options.html) { return false }
// Check start
const max = state.posMax
const pos = state.pos
if (state.src.charCodeAt(pos) !== 0x3C/* < */ ||
pos + 2 >= max) {
return false
}
// Quick fail on second char
const ch = state.src.charCodeAt(pos + 1)
if (ch !== 0x21/* ! */ &&
ch !== 0x3F/* ? */ &&
ch !== 0x2F/* / */ &&
!isLetter(ch)) {
return false
}
const match = state.src.slice(pos).match(HTML_TAG_RE)
if (!match) { return false }
if (!silent) {
const token = state.push('html_inline', '', 0)
token.content = match[0]
if (isLinkOpen(token.content)) state.linkLevel++
if (isLinkClose(token.content)) state.linkLevel--
}
state.pos += match[0].length
return true
}
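
A sketch of the options.html gate, assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const strict = new MarkdownIt()              // html: false (default)
const loose = new MarkdownIt({ html: true })

console.log(strict.renderInline('a <b>tag</b>'))
// -> a &lt;b&gt;tag&lt;/b&gt;
console.log(loose.renderInline('a <b>tag</b>'))
// -> a <b>tag</b>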

node_modules/markdown-it/lib/rules_inline/image.mjs generated vendored Normal file

@@ -0,0 +1,138 @@
// Process ![image](<src> "title")
import { normalizeReference, isSpace } from '../common/utils.mjs'
export default function image (state, silent) {
let code, content, label, pos, ref, res, title, start
let href = ''
const oldPos = state.pos
const max = state.posMax
if (state.src.charCodeAt(state.pos) !== 0x21/* ! */) { return false }
if (state.src.charCodeAt(state.pos + 1) !== 0x5B/* [ */) { return false }
const labelStart = state.pos + 2
const labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, false)
// parser failed to find ']', so it's not a valid link
if (labelEnd < 0) { return false }
pos = labelEnd + 1
if (pos < max && state.src.charCodeAt(pos) === 0x28/* ( */) {
//
// Inline link
//
// [link]( <href> "title" )
// ^^ skipping these spaces
pos++
for (; pos < max; pos++) {
code = state.src.charCodeAt(pos)
if (!isSpace(code) && code !== 0x0A) { break }
}
if (pos >= max) { return false }
// [link]( <href> "title" )
// ^^^^^^ parsing link destination
start = pos
res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax)
if (res.ok) {
href = state.md.normalizeLink(res.str)
if (state.md.validateLink(href)) {
pos = res.pos
} else {
href = ''
}
}
// [link]( <href> "title" )
// ^^ skipping these spaces
start = pos
for (; pos < max; pos++) {
code = state.src.charCodeAt(pos)
if (!isSpace(code) && code !== 0x0A) { break }
}
// [link]( <href> "title" )
// ^^^^^^^ parsing link title
res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax)
if (pos < max && start !== pos && res.ok) {
title = res.str
pos = res.pos
// [link]( <href> "title" )
// ^^ skipping these spaces
for (; pos < max; pos++) {
code = state.src.charCodeAt(pos)
if (!isSpace(code) && code !== 0x0A) { break }
}
} else {
title = ''
}
if (pos >= max || state.src.charCodeAt(pos) !== 0x29/* ) */) {
state.pos = oldPos
return false
}
pos++
} else {
//
// Link reference
//
if (typeof state.env.references === 'undefined') { return false }
if (pos < max && state.src.charCodeAt(pos) === 0x5B/* [ */) {
start = pos + 1
pos = state.md.helpers.parseLinkLabel(state, pos)
if (pos >= 0) {
label = state.src.slice(start, pos++)
} else {
pos = labelEnd + 1
}
} else {
pos = labelEnd + 1
}
// covers label === '' and label === undefined
// (collapsed reference link and shortcut reference link respectively)
if (!label) { label = state.src.slice(labelStart, labelEnd) }
ref = state.env.references[normalizeReference(label)]
if (!ref) {
state.pos = oldPos
return false
}
href = ref.href
title = ref.title
}
//
// We found the end of the link, and know for a fact it's a valid link;
// so all that's left to do is to call tokenizer.
//
if (!silent) {
content = state.src.slice(labelStart, labelEnd)
const tokens = []
state.md.inline.parse(
content,
state.md,
state.env,
tokens
)
const token = state.push('image', 'img', 0)
const attrs = [['src', href], ['alt', '']]
token.attrs = attrs
token.children = tokens
token.content = content
if (title) {
attrs.push(['title', title])
}
}
state.pos = pos
state.posMax = max
return true
}
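
A sketch showing that the label is parsed as inline markdown but flattened to plain text for the alt attribute, assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

console.log(md.renderInline('![alt *text*](img.png "title")'))
// -> <img src="img.png" alt="alt text" title="title">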

node_modules/markdown-it/lib/rules_inline/link.mjs generated vendored Normal file

@@ -0,0 +1,139 @@
// Process [link](<to> "stuff")
import { normalizeReference, isSpace } from '../common/utils.mjs'
export default function link (state, silent) {
let code, label, res, ref
let href = ''
let title = ''
let start = state.pos
let parseReference = true
if (state.src.charCodeAt(state.pos) !== 0x5B/* [ */) { return false }
const oldPos = state.pos
const max = state.posMax
const labelStart = state.pos + 1
const labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, true)
// parser failed to find ']', so it's not a valid link
if (labelEnd < 0) { return false }
let pos = labelEnd + 1
if (pos < max && state.src.charCodeAt(pos) === 0x28/* ( */) {
//
// Inline link
//
// might have found a valid shortcut link, disable reference parsing
parseReference = false
// [link]( <href> "title" )
// ^^ skipping these spaces
pos++
for (; pos < max; pos++) {
code = state.src.charCodeAt(pos)
if (!isSpace(code) && code !== 0x0A) { break }
}
if (pos >= max) { return false }
// [link]( <href> "title" )
// ^^^^^^ parsing link destination
start = pos
res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax)
if (res.ok) {
href = state.md.normalizeLink(res.str)
if (state.md.validateLink(href)) {
pos = res.pos
} else {
href = ''
}
// [link]( <href> "title" )
// ^^ skipping these spaces
start = pos
for (; pos < max; pos++) {
code = state.src.charCodeAt(pos)
if (!isSpace(code) && code !== 0x0A) { break }
}
// [link]( <href> "title" )
// ^^^^^^^ parsing link title
res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax)
if (pos < max && start !== pos && res.ok) {
title = res.str
pos = res.pos
// [link]( <href> "title" )
// ^^ skipping these spaces
for (; pos < max; pos++) {
code = state.src.charCodeAt(pos)
if (!isSpace(code) && code !== 0x0A) { break }
}
}
}
if (pos >= max || state.src.charCodeAt(pos) !== 0x29/* ) */) {
// parsing a valid shortcut link failed, fallback to reference
parseReference = true
}
pos++
}
if (parseReference) {
//
// Link reference
//
if (typeof state.env.references === 'undefined') { return false }
if (pos < max && state.src.charCodeAt(pos) === 0x5B/* [ */) {
start = pos + 1
pos = state.md.helpers.parseLinkLabel(state, pos)
if (pos >= 0) {
label = state.src.slice(start, pos++)
} else {
pos = labelEnd + 1
}
} else {
pos = labelEnd + 1
}
// covers label === '' and label === undefined
// (collapsed reference link and shortcut reference link respectively)
if (!label) { label = state.src.slice(labelStart, labelEnd) }
ref = state.env.references[normalizeReference(label)]
if (!ref) {
state.pos = oldPos
return false
}
href = ref.href
title = ref.title
}
//
// We found the end of the link, and know for a fact it's a valid link;
// so all that's left to do is to call tokenizer.
//
if (!silent) {
state.pos = labelStart
state.posMax = labelEnd
const token_o = state.push('link_open', 'a', 1)
const attrs = [['href', href]]
token_o.attrs = attrs
if (title) {
attrs.push(['title', title])
}
state.linkLevel++
state.md.inline.tokenize(state)
state.linkLevel--
state.push('link_close', 'a', -1)
}
state.pos = pos
state.posMax = max
return true
}
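
A sketch of the two paths (inline destination vs. state.env.references lookup), assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// Inline form:
console.log(md.renderInline('[text](https://example.com "hi")'))
// -> <a href="https://example.com" title="hi">text</a>

// Reference form; the definition is collected into env.references
// by the block-level reference rule, then resolved here:
console.log(md.render('[text][ref]\n\n[ref]: https://example.com'))
// -> <p><a href="https://example.com">text</a></p>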

node_modules/markdown-it/lib/rules_inline/linkify.mjs generated vendored Normal file

@@ -0,0 +1,56 @@
// Process links like https://example.org/
// RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
const SCHEME_RE = /(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$/i
export default function linkify (state, silent) {
if (!state.md.options.linkify) return false
if (state.linkLevel > 0) return false
const pos = state.pos
const max = state.posMax
if (pos + 3 > max) return false
if (state.src.charCodeAt(pos) !== 0x3A/* : */) return false
if (state.src.charCodeAt(pos + 1) !== 0x2F/* / */) return false
if (state.src.charCodeAt(pos + 2) !== 0x2F/* / */) return false
const match = state.pending.match(SCHEME_RE)
if (!match) return false
const proto = match[1]
const link = state.md.linkify.matchAtStart(state.src.slice(pos - proto.length))
if (!link) return false
let url = link.url
// invalid link, but still detected by linkify somehow;
// need to check to prevent infinite loop below
if (url.length <= proto.length) return false
// disallow '*' at the end of the link (conflicts with emphasis)
url = url.replace(/\*+$/, '')
const fullUrl = state.md.normalizeLink(url)
if (!state.md.validateLink(fullUrl)) return false
if (!silent) {
state.pending = state.pending.slice(0, -proto.length)
const token_o = state.push('link_open', 'a', 1)
token_o.attrs = [['href', fullUrl]]
token_o.markup = 'linkify'
token_o.info = 'auto'
const token_t = state.push('text', '', 0)
token_t.content = state.md.normalizeLinkText(url)
const token_c = state.push('link_close', 'a', -1)
token_c.markup = 'linkify'
token_c.info = 'auto'
}
state.pos += url.length - proto.length
return true
}
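
A sketch of the linkify option in action, assuming markdown-it is installed; the rule fires on '://' and recovers the scheme from the already-consumed pending text:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt({ linkify: true })

console.log(md.renderInline('see https://example.com for details'))
// -> see <a href="https://example.com">https://example.com</a> for details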

node_modules/markdown-it/lib/rules_inline/newline.mjs generated vendored Normal file

@@ -0,0 +1,42 @@
// Process '\n'
import { isSpace } from '../common/utils.mjs'
export default function newline (state, silent) {
let pos = state.pos
if (state.src.charCodeAt(pos) !== 0x0A/* \n */) { return false }
const pmax = state.pending.length - 1
const max = state.posMax
// ' \n' -> hardbreak
// Lookup in pending chars is bad practice! Don't copy to other rules!
// The pending string is stored in concat mode; indexed lookups will cause
// conversion to flat mode.
if (!silent) {
if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) {
if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) {
// Find the whitespace tail of the pending chars.
let ws = pmax - 1
while (ws >= 1 && state.pending.charCodeAt(ws - 1) === 0x20) ws--
state.pending = state.pending.slice(0, ws)
state.push('hardbreak', 'br', 0)
} else {
state.pending = state.pending.slice(0, -1)
state.push('softbreak', 'br', 0)
}
} else {
state.push('softbreak', 'br', 0)
}
}
pos++
// skip leading spaces on the next line
while (pos < max && isSpace(state.src.charCodeAt(pos))) { pos++ }
state.pos = pos
return true
}
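
A sketch of the hardbreak/softbreak split, assuming markdown-it is installed (JSON.stringify makes the newlines visible):

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// Two trailing spaces before '\n' produce a hardbreak:
console.log(JSON.stringify(md.render('foo  \nbar')))
// -> "<p>foo<br>\nbar</p>\n"

// A bare '\n' is only a softbreak:
console.log(JSON.stringify(md.render('foo\nbar')))
// -> "<p>foo\nbar</p>\n"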

node_modules/markdown-it/lib/rules_inline/state_inline.mjs generated vendored Normal file

@@ -0,0 +1,123 @@
// Inline parser state
import Token from '../token.mjs'
import { isWhiteSpace, isPunctChar, isMdAsciiPunct } from '../common/utils.mjs'
function StateInline (src, md, env, outTokens) {
this.src = src
this.env = env
this.md = md
this.tokens = outTokens
this.tokens_meta = Array(outTokens.length)
this.pos = 0
this.posMax = this.src.length
this.level = 0
this.pending = ''
this.pendingLevel = 0
// Stores { start: end } pairs. Useful for backtrack
// optimization of pairs parse (emphasis, strikes).
this.cache = {}
// List of emphasis-like delimiters for current tag
this.delimiters = []
// Stack of delimiter lists for upper level tags
this._prev_delimiters = []
// backtick length => last seen position
this.backticks = {}
this.backticksScanned = false
// Counter used to disable inline linkify-it execution
// inside <a> and markdown links
this.linkLevel = 0
}
// Flush pending text
//
StateInline.prototype.pushPending = function () {
const token = new Token('text', '', 0)
token.content = this.pending
token.level = this.pendingLevel
this.tokens.push(token)
this.pending = ''
return token
}
// Push new token to "stream".
// If pending text exists - flush it as text token
//
StateInline.prototype.push = function (type, tag, nesting) {
if (this.pending) {
this.pushPending()
}
const token = new Token(type, tag, nesting)
let token_meta = null
if (nesting < 0) {
// closing tag
this.level--
this.delimiters = this._prev_delimiters.pop()
}
token.level = this.level
if (nesting > 0) {
// opening tag
this.level++
this._prev_delimiters.push(this.delimiters)
this.delimiters = []
token_meta = { delimiters: this.delimiters }
}
this.pendingLevel = this.level
this.tokens.push(token)
this.tokens_meta.push(token_meta)
return token
}
// Scan a sequence of emphasis-like markers, and determine whether
// it can start an emphasis sequence or end an emphasis sequence.
//
// - start - position to scan from (it should point at a valid marker);
// - canSplitWord - determine if these markers can be found inside a word
//
StateInline.prototype.scanDelims = function (start, canSplitWord) {
const max = this.posMax
const marker = this.src.charCodeAt(start)
// treat beginning of the line as a whitespace
const lastChar = start > 0 ? this.src.charCodeAt(start - 1) : 0x20
let pos = start
while (pos < max && this.src.charCodeAt(pos) === marker) { pos++ }
const count = pos - start
// treat end of the line as a whitespace
const nextChar = pos < max ? this.src.charCodeAt(pos) : 0x20
const isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar))
const isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar))
const isLastWhiteSpace = isWhiteSpace(lastChar)
const isNextWhiteSpace = isWhiteSpace(nextChar)
const left_flanking =
!isNextWhiteSpace && (!isNextPunctChar || isLastWhiteSpace || isLastPunctChar)
const right_flanking =
!isLastWhiteSpace && (!isLastPunctChar || isNextWhiteSpace || isNextPunctChar)
const can_open = left_flanking && (canSplitWord || !right_flanking || isLastPunctChar)
const can_close = right_flanking && (canSplitWord || !left_flanking || isNextPunctChar)
return { can_open, can_close, length: count }
}
// re-export Token class to use in block rules
StateInline.prototype.Token = Token
export default StateInline
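
A sketch of scanDelims, using the State constructor that ParserInline re-exports (assuming markdown-it is installed):

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()
const state = new md.inline.State('*hello*', md, {}, [])

// The leading '*' is left-flanking only:
console.log(state.scanDelims(0, true))
// -> { can_open: true, can_close: false, length: 1 }

// The trailing '*' is right-flanking only:
console.log(state.scanDelims(6, true))
// -> { can_open: false, can_close: true, length: 1 }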

node_modules/markdown-it/lib/rules_inline/strikethrough.mjs generated vendored Normal file

@@ -0,0 +1,127 @@
// ~~strike through~~
//
// Insert each marker as a separate text token, and add it to delimiter list
//
function strikethrough_tokenize (state, silent) {
const start = state.pos
const marker = state.src.charCodeAt(start)
if (silent) { return false }
if (marker !== 0x7E/* ~ */) { return false }
const scanned = state.scanDelims(state.pos, true)
let len = scanned.length
const ch = String.fromCharCode(marker)
if (len < 2) { return false }
let token
if (len % 2) {
token = state.push('text', '', 0)
token.content = ch
len--
}
for (let i = 0; i < len; i += 2) {
token = state.push('text', '', 0)
token.content = ch + ch
state.delimiters.push({
marker,
length: 0, // disable "rule of 3" length checks meant for emphasis
token: state.tokens.length - 1,
end: -1,
open: scanned.can_open,
close: scanned.can_close
})
}
state.pos += scanned.length
return true
}
function postProcess (state, delimiters) {
let token
const loneMarkers = []
const max = delimiters.length
for (let i = 0; i < max; i++) {
const startDelim = delimiters[i]
if (startDelim.marker !== 0x7E/* ~ */) {
continue
}
if (startDelim.end === -1) {
continue
}
const endDelim = delimiters[startDelim.end]
token = state.tokens[startDelim.token]
token.type = 's_open'
token.tag = 's'
token.nesting = 1
token.markup = '~~'
token.content = ''
token = state.tokens[endDelim.token]
token.type = 's_close'
token.tag = 's'
token.nesting = -1
token.markup = '~~'
token.content = ''
if (state.tokens[endDelim.token - 1].type === 'text' &&
state.tokens[endDelim.token - 1].content === '~') {
loneMarkers.push(endDelim.token - 1)
}
}
// If a marker sequence has an odd number of characters, it's split
// like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the
// start of the sequence.
//
// So, we have to move all those markers after subsequent s_close tags.
//
while (loneMarkers.length) {
const i = loneMarkers.pop()
let j = i + 1
while (j < state.tokens.length && state.tokens[j].type === 's_close') {
j++
}
j--
if (i !== j) {
token = state.tokens[j]
state.tokens[j] = state.tokens[i]
state.tokens[i] = token
}
}
}
// Walk through delimiter list and replace text tokens with tags
//
function strikethrough_postProcess (state) {
const tokens_meta = state.tokens_meta
const max = state.tokens_meta.length
postProcess(state, state.delimiters)
for (let curr = 0; curr < max; curr++) {
if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
postProcess(state, tokens_meta[curr].delimiters)
}
}
}
export default {
tokenize: strikethrough_tokenize,
postProcess: strikethrough_postProcess
}
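
A sketch, assuming markdown-it is installed; the second example shows the lone-marker relocation described above (expected output per this implementation):

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// Two tildes are required; a single pair of '~' stays literal:
console.log(md.renderInline('~~gone~~ but ~single~ stays'))
// -> <s>gone</s> but ~single~ stays

// An odd run splits as '~' + '~~'; the lone closing marker is moved
// outside the s_close tag:
console.log(md.renderInline('~~~three~~~'))
// -> ~<s>three</s>~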

node_modules/markdown-it/lib/rules_inline/text.mjs generated vendored Normal file

@@ -0,0 +1,86 @@
// Skip text characters for text token, place those to pending buffer
// and increment current pos
// Rule to skip pure text
// '{}$%@~+=:' reserved for extensions
// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~
// !!!! Don't confuse with "Markdown ASCII Punctuation" chars
// http://spec.commonmark.org/0.15/#ascii-punctuation-character
function isTerminatorChar (ch) {
switch (ch) {
case 0x0A/* \n */:
case 0x21/* ! */:
case 0x23/* # */:
case 0x24/* $ */:
case 0x25/* % */:
case 0x26/* & */:
case 0x2A/* * */:
case 0x2B/* + */:
case 0x2D/* - */:
case 0x3A/* : */:
case 0x3C/* < */:
case 0x3D/* = */:
case 0x3E/* > */:
case 0x40/* @ */:
case 0x5B/* [ */:
case 0x5C/* \ */:
case 0x5D/* ] */:
case 0x5E/* ^ */:
case 0x5F/* _ */:
case 0x60/* ` */:
case 0x7B/* { */:
case 0x7D/* } */:
case 0x7E/* ~ */:
return true
default:
return false
}
}
export default function text (state, silent) {
let pos = state.pos
while (pos < state.posMax && !isTerminatorChar(state.src.charCodeAt(pos))) {
pos++
}
if (pos === state.pos) { return false }
if (!silent) { state.pending += state.src.slice(state.pos, pos) }
state.pos = pos
return true
}
// Alternative implementation, kept for reference.
//
// It costs ~10% of performance, but allows the terminator list to be
// extended by placing it on a `ParserInline` property. We will probably
// switch to it at some point, if such flexibility is required.
/*
var TERMINATOR_RE = /[\n!#$%&*+\-:<=>@[\\\]^_`{}~]/;
module.exports = function text(state, silent) {
var pos = state.pos,
idx = state.src.slice(pos).search(TERMINATOR_RE);
// first char is terminator -> empty text
if (idx === 0) { return false; }
// no terminator -> text till end of string
if (idx < 0) {
if (!silent) { state.pending += state.src.slice(pos); }
state.pos = state.src.length;
return true;
}
if (!silent) { state.pending += state.src.slice(pos, pos + idx); }
state.pos += idx;
return true;
}; */
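
A sketch of how the fast path accumulates into state.pending, assuming markdown-it is installed:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()

// 'plain words' is consumed in one pass; '!' is a terminator char,
// but no rule claims a lone '!', so the parser appends it to the same
// pending buffer and a single text token results:
const children = md.parseInline('plain words!', {})[0].children
console.log(children.map(t => [t.type, t.content]))
// -> [ [ 'text', 'plain words!' ] ]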