initial
This commit is contained in:
13
node_modules/markdown-it/lib/rules_core/block.mjs
generated
vendored
Normal file
13
node_modules/markdown-it/lib/rules_core/block.mjs
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// Tokenize the whole input source.
//
// In normal mode, hand the source to the block parser. In inline mode,
// block parsing is skipped entirely: the raw source is wrapped in a single
// synthetic 'inline' token for the inline parser to consume later.
export default function block (state) {
  if (!state.inlineMode) {
    state.md.block.parse(state.src, state.md, state.env, state.tokens)
    return
  }

  // Inline mode: one top-level 'inline' token covering the full source.
  const token = new state.Token('inline', '', 0)
  token.content = state.src
  token.map = [0, 1]
  token.children = []
  state.tokens.push(token)
}
|
11
node_modules/markdown-it/lib/rules_core/inline.mjs
generated
vendored
Normal file
11
node_modules/markdown-it/lib/rules_core/inline.mjs
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Run the inline parser over every 'inline' block token, filling each
// token's `children` array with the resulting inline tokens.
export default function inline (state) {
  for (const tok of state.tokens) {
    if (tok.type !== 'inline') continue
    state.md.inline.parse(tok.content, state.md, state.env, tok.children)
  }
}
|
134
node_modules/markdown-it/lib/rules_core/linkify.mjs
generated
vendored
Normal file
134
node_modules/markdown-it/lib/rules_core/linkify.mjs
generated
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
// Replace link-like texts with link nodes.
|
||||
//
|
||||
// Currently restricted by `md.validateLink()` to http/https/ftp
|
||||
//
|
||||
|
||||
import { arrayReplaceAt } from '../common/utils.mjs'
|
||||
|
||||
// Whether a raw HTML fragment begins with an opening <a> tag
// (case-insensitive; tag name must be followed by '>' or whitespace).
function isLinkOpen (html) {
  return /^<a[>\s]/i.test(html)
}
|
||||
// Whether a raw HTML fragment begins with a closing </a> tag
// (case-insensitive; optional whitespace before '>').
function isLinkClose (html) {
  return /^<\/a\s*>/i.test(html)
}
|
||||
|
||||
// Scan every inline token stream and wrap bare URLs / e-mails found by
// the linkifier into link_open / text / link_close token triples.
// No-op unless the `linkify` option is enabled.
export default function linkify (state) {
  const blockTokens = state.tokens

  if (!state.md.options.linkify) { return }

  for (let j = 0, l = blockTokens.length; j < l; j++) {
    // cheap pretest first, to avoid running the full matcher on every line
    if (blockTokens[j].type !== 'inline' ||
        !state.md.linkify.pretest(blockTokens[j].content)) {
      continue
    }

    let tokens = blockTokens[j].children

    // nesting depth of raw <a> HTML tags (counted in reverse, see below)
    let htmlLinkLevel = 0

    // We scan from the end, to keep position when new tags added.
    // Use reversed logic in links start/end match
    for (let i = tokens.length - 1; i >= 0; i--) {
      const currentToken = tokens[i]

      // Skip content of markdown links
      if (currentToken.type === 'link_close') {
        i--
        // walk back to the matching link_open at the same nesting level
        while (tokens[i].level !== currentToken.level && tokens[i].type !== 'link_open') {
          i--
        }
        continue
      }

      // Skip content of html tag links
      if (currentToken.type === 'html_inline') {
        // reversed scan: a closing tag raises the depth, an opening lowers it
        if (isLinkOpen(currentToken.content) && htmlLinkLevel > 0) {
          htmlLinkLevel--
        }
        if (isLinkClose(currentToken.content)) {
          htmlLinkLevel++
        }
      }
      if (htmlLinkLevel > 0) { continue }

      if (currentToken.type === 'text' && state.md.linkify.test(currentToken.content)) {
        const text = currentToken.content
        let links = state.md.linkify.match(text)

        // Now split string to nodes
        const nodes = []
        let level = currentToken.level
        let lastPos = 0

        // forbid escape sequence at the start of the string,
        // this avoids http\://example.com/ from being linkified as
        // http:<a href="//example.com/">//example.com/</a>
        if (links.length > 0 &&
            links[0].index === 0 &&
            i > 0 &&
            tokens[i - 1].type === 'text_special') {
          links = links.slice(1)
        }

        for (let ln = 0; ln < links.length; ln++) {
          const url = links[ln].url
          const fullUrl = state.md.normalizeLink(url)
          if (!state.md.validateLink(fullUrl)) { continue }

          let urlText = links[ln].text

          // Linkifier might send raw hostnames like "example.com", where url
          // starts with domain name. So we prepend http:// in those cases,
          // and remove it afterwards.
          //
          if (!links[ln].schema) {
            urlText = state.md.normalizeLinkText('http://' + urlText).replace(/^http:\/\//, '')
          } else if (links[ln].schema === 'mailto:' && !/^mailto:/i.test(urlText)) {
            urlText = state.md.normalizeLinkText('mailto:' + urlText).replace(/^mailto:/, '')
          } else {
            urlText = state.md.normalizeLinkText(urlText)
          }

          const pos = links[ln].index

          // plain text between the previous match and this one
          if (pos > lastPos) {
            const token = new state.Token('text', '', 0)
            token.content = text.slice(lastPos, pos)
            token.level = level
            nodes.push(token)
          }

          // emit link_open / text / link_close for the matched URL itself
          const token_o = new state.Token('link_open', 'a', 1)
          token_o.attrs = [['href', fullUrl]]
          token_o.level = level++
          token_o.markup = 'linkify'
          token_o.info = 'auto'
          nodes.push(token_o)

          const token_t = new state.Token('text', '', 0)
          token_t.content = urlText
          token_t.level = level
          nodes.push(token_t)

          const token_c = new state.Token('link_close', 'a', -1)
          token_c.level = --level
          token_c.markup = 'linkify'
          token_c.info = 'auto'
          nodes.push(token_c)

          lastPos = links[ln].lastIndex
        }
        // trailing text after the last match
        if (lastPos < text.length) {
          const token = new state.Token('text', '', 0)
          token.content = text.slice(lastPos)
          token.level = level
          nodes.push(token)
        }

        // replace current node
        blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes)
      }
    }
  }
}
|
17
node_modules/markdown-it/lib/rules_core/normalize.mjs
generated
vendored
Normal file
17
node_modules/markdown-it/lib/rules_core/normalize.mjs
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
// Normalize input string

// https://spec.commonmark.org/0.29/#line-ending
const NEWLINES_RE = /\r\n?|\n/g
const NULL_RE = /\0/g

// Canonicalize line endings to '\n' and replace NUL characters with
// U+FFFD before any parsing happens.
export default function normalize (state) {
  state.src = state.src
    .replace(NEWLINES_RE, '\n')
    .replace(NULL_RE, '\uFFFD')
}
|
101
node_modules/markdown-it/lib/rules_core/replacements.mjs
generated
vendored
Normal file
101
node_modules/markdown-it/lib/rules_core/replacements.mjs
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
// Simple typographic replacements
//
// (c) (C) → ©
// (tm) (TM) → ™
// (r) (R) → ®
// +- → ±
// ... → … (also ?.... → ?.., !.... → !..)
// ???????? → ???, !!!!! → !!!, `,,` → `,`
// -- → –, --- → —
//

// TODO:
// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
// - multiplications 2 x 4 -> 2 × 4

const RARE_RE = /\+-|\.\.|\?\?\?\?|!!!!|,,|--/

// Workaround for phantomjs - need regex without /g flag,
// or root check will fail every second time
const SCOPED_ABBR_TEST_RE = /\((c|tm|r)\)/i

const SCOPED_ABBR_RE = /\((c|tm|r)\)/ig
const SCOPED_ABBR = {
  c: '©',
  r: '®',
  tm: '™'
}

function replaceFn (match, name) {
  return SCOPED_ABBR[name.toLowerCase()]
}

// Walk an inline token stream backwards and invoke `fn` on every 'text'
// token that is not inside an autolink. Because the walk is reversed,
// link_close *raises* the autolink depth and link_open lowers it.
function eachTextOutsideAutolink (inlineTokens, fn) {
  let autolinkDepth = 0

  for (let idx = inlineTokens.length - 1; idx >= 0; idx--) {
    const token = inlineTokens[idx]

    if (token.type === 'text' && autolinkDepth === 0) {
      fn(token)
    }
    if (token.type === 'link_open' && token.info === 'auto') {
      autolinkDepth--
    }
    if (token.type === 'link_close' && token.info === 'auto') {
      autolinkDepth++
    }
  }
}

// Replace (c)/(tm)/(r) abbreviations in eligible text tokens.
function replace_scoped (inlineTokens) {
  eachTextOutsideAutolink(inlineTokens, (token) => {
    token.content = token.content.replace(SCOPED_ABBR_RE, replaceFn)
  })
}

// Apply the remaining replacements: ±, ellipsis, repeated ?/!/,, dashes.
function replace_rare (inlineTokens) {
  eachTextOutsideAutolink(inlineTokens, (token) => {
    if (!RARE_RE.test(token.content)) return

    token.content = token.content
      .replace(/\+-/g, '±')
      // .., ..., ....... -> …
      // but ?..... & !..... -> ?.. & !..
      .replace(/\.{2,}/g, '…').replace(/([?!])…/g, '$1..')
      .replace(/([?!]){4,}/g, '$1$1$1').replace(/,{2,}/g, ',')
      // em-dash
      .replace(/(^|[^-])---(?=[^-]|$)/mg, '$1\u2014')
      // en-dash
      .replace(/(^|\s)--(?=\s|$)/mg, '$1\u2013')
      .replace(/(^|[^-\s])--(?=[^-\s]|$)/mg, '$1\u2013')
  })
}

// Rule entry point. No-op unless the typographer option is on; otherwise
// run both replacement passes on every inline token's children, using the
// inline token's raw content as a fast pre-filter.
export default function replace (state) {
  if (!state.md.options.typographer) { return }

  for (let blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
    const blockToken = state.tokens[blkIdx]
    if (blockToken.type !== 'inline') { continue }

    if (SCOPED_ABBR_TEST_RE.test(blockToken.content)) {
      replace_scoped(blockToken.children)
    }

    if (RARE_RE.test(blockToken.content)) {
      replace_rare(blockToken.children)
    }
  }
}
|
193
node_modules/markdown-it/lib/rules_core/smartquotes.mjs
generated
vendored
Normal file
193
node_modules/markdown-it/lib/rules_core/smartquotes.mjs
generated
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
// Convert straight quotation marks to typographic ones
|
||||
//
|
||||
|
||||
import { isWhiteSpace, isPunctChar, isMdAsciiPunct } from '../common/utils.mjs'
|
||||
|
||||
const QUOTE_TEST_RE = /['"]/
const QUOTE_RE = /['"]/g
const APOSTROPHE = '\u2019' /* ’ */

// Return a copy of `str` where the single character at `index` is
// substituted by `ch` (which may be longer than one character).
function replaceAt (str, index, ch) {
  const head = str.slice(0, index)
  const tail = str.slice(index + 1)
  return head + ch + tail
}
|
||||
|
||||
// Replace straight ' and " quotes in one inline token stream with the
// typographic quotes configured in `state.md.options.quotes`.
// `stack` holds candidate opening quotes ({ token, pos, single, level })
// still waiting for a matching closer.
function process_inlines (tokens, state) {
  let j

  const stack = []

  for (let i = 0; i < tokens.length; i++) {
    const token = tokens[i]

    const thisLevel = tokens[i].level

    // drop stack entries opened at a deeper nesting level than this token
    for (j = stack.length - 1; j >= 0; j--) {
      if (stack[j].level <= thisLevel) { break }
    }
    stack.length = j + 1

    if (token.type !== 'text') { continue }

    let text = token.content
    let pos = 0
    let max = text.length

    /* eslint no-labels:0,block-scoped-var:0 */
    OUTER:
    while (pos < max) {
      // QUOTE_RE is /g, so lastIndex controls where the scan resumes
      QUOTE_RE.lastIndex = pos
      const t = QUOTE_RE.exec(text)
      if (!t) { break }

      let canOpen = true
      let canClose = true
      pos = t.index + 1
      const isSingle = (t[0] === "'")

      // Find previous character,
      // default to space if it's the beginning of the line
      //
      let lastChar = 0x20

      if (t.index - 1 >= 0) {
        lastChar = text.charCodeAt(t.index - 1)
      } else {
        // quote is at the very start of this token: look at earlier tokens
        for (j = i - 1; j >= 0; j--) {
          if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break // lastChar defaults to 0x20
          if (!tokens[j].content) continue // should skip all tokens except 'text', 'html_inline' or 'code_inline'

          lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1)
          break
        }
      }

      // Find next character,
      // default to space if it's the end of the line
      //
      let nextChar = 0x20

      if (pos < max) {
        nextChar = text.charCodeAt(pos)
      } else {
        // quote is at the very end of this token: look at later tokens
        for (j = i + 1; j < tokens.length; j++) {
          if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break // nextChar defaults to 0x20
          if (!tokens[j].content) continue // should skip all tokens except 'text', 'html_inline' or 'code_inline'

          nextChar = tokens[j].content.charCodeAt(0)
          break
        }
      }

      const isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar))
      const isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar))

      const isLastWhiteSpace = isWhiteSpace(lastChar)
      const isNextWhiteSpace = isWhiteSpace(nextChar)

      if (isNextWhiteSpace) {
        canOpen = false
      } else if (isNextPunctChar) {
        if (!(isLastWhiteSpace || isLastPunctChar)) {
          canOpen = false
        }
      }

      if (isLastWhiteSpace) {
        canClose = false
      } else if (isLastPunctChar) {
        if (!(isNextWhiteSpace || isNextPunctChar)) {
          canClose = false
        }
      }

      if (nextChar === 0x22 /* " */ && t[0] === '"') {
        if (lastChar >= 0x30 /* 0 */ && lastChar <= 0x39 /* 9 */) {
          // special case: 1"" - count first quote as an inch
          canClose = canOpen = false
        }
      }

      if (canOpen && canClose) {
        // Replace quotes in the middle of punctuation sequence, but not
        // in the middle of the words, i.e.:
        //
        // 1. foo " bar " baz - not replaced
        // 2. foo-"-bar-"-baz - replaced
        // 3. foo"bar"baz - not replaced
        //
        canOpen = isLastPunctChar
        canClose = isNextPunctChar
      }

      if (!canOpen && !canClose) {
        // middle of word
        if (isSingle) {
          token.content = replaceAt(token.content, t.index, APOSTROPHE)
        }
        continue
      }

      if (canClose) {
        // this could be a closing quote, rewind the stack to get a match
        for (j = stack.length - 1; j >= 0; j--) {
          let item = stack[j]
          if (stack[j].level < thisLevel) { break }
          if (item.single === isSingle && stack[j].level === thisLevel) {
            item = stack[j]

            let openQuote
            let closeQuote
            if (isSingle) {
              openQuote = state.md.options.quotes[2]
              closeQuote = state.md.options.quotes[3]
            } else {
              openQuote = state.md.options.quotes[0]
              closeQuote = state.md.options.quotes[1]
            }

            // replace token.content *before* tokens[item.token].content,
            // because, if they are pointing at the same token, replaceAt
            // could mess up indices when quote length != 1
            token.content = replaceAt(token.content, t.index, closeQuote)
            tokens[item.token].content = replaceAt(
              tokens[item.token].content, item.pos, openQuote)

            // replacement quotes may be longer than 1 char; re-sync the scan
            pos += closeQuote.length - 1
            if (item.token === i) { pos += openQuote.length - 1 }

            text = token.content
            max = text.length

            stack.length = j
            continue OUTER
          }
        }
      }

      if (canOpen) {
        stack.push({
          token: i,
          pos: t.index,
          single: isSingle,
          level: thisLevel
        })
      } else if (canClose && isSingle) {
        token.content = replaceAt(token.content, t.index, APOSTROPHE)
      }
    }
  }
}
|
||||
|
||||
// Rule entry point: convert straight quotes in every inline token stream.
// No-op unless the typographer option is enabled; streams without any
// quote character are skipped via a cheap pretest.
export default function smartquotes (state) {
  /* eslint max-depth:0 */
  if (!state.md.options.typographer) { return }

  for (let blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
    const blockToken = state.tokens[blkIdx]

    if (blockToken.type !== 'inline') { continue }
    if (!QUOTE_TEST_RE.test(blockToken.content)) { continue }

    process_inlines(blockToken.children, state)
  }
}
|
17
node_modules/markdown-it/lib/rules_core/state_core.mjs
generated
vendored
Normal file
17
node_modules/markdown-it/lib/rules_core/state_core.mjs
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
// Core state object
|
||||
//
|
||||
|
||||
import Token from '../token.mjs'
|
||||
|
||||
// Core parser state: carries the source string, the environment sandbox
// and the growing token stream between core rules.
function StateCore (src, md, env) {
  this.src = src // raw markdown input
  this.env = env // sandbox object shared across rules/plugins
  this.tokens = [] // output token stream, filled by the rules
  this.inlineMode = false // when true, block-level parsing is skipped
  this.md = md // link to parser instance
}

// re-export Token class to use in core rules
StateCore.prototype.Token = Token

export default StateCore
|
43
node_modules/markdown-it/lib/rules_core/text_join.mjs
generated
vendored
Normal file
43
node_modules/markdown-it/lib/rules_core/text_join.mjs
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// Join raw text tokens with the rest of the text
//
// This is set as a separate rule to provide an opportunity for plugins
// to run text replacements after text join, but before escape join.
//
// For example, `\:)` shouldn't be replaced with an emoji.
//

export default function text_join (state) {
  for (const blockToken of state.tokens) {
    if (blockToken.type !== 'inline') continue

    const tokens = blockToken.children
    const max = tokens.length

    // Pass 1: demote every 'text_special' token to plain 'text' so the
    // merge pass below can fold it into its neighbours.
    for (let idx = 0; idx < max; idx++) {
      if (tokens[idx].type === 'text_special') {
        tokens[idx].type = 'text'
      }
    }

    // Pass 2: collapse runs of adjacent 'text' tokens in place, compacting
    // the array with a read/write two-pointer sweep.
    let write = 0
    let read
    for (read = 0; read < max; read++) {
      const mergeable = tokens[read].type === 'text' &&
        read + 1 < max &&
        tokens[read + 1].type === 'text'

      if (mergeable) {
        // fold this token's text into the next one (kept on a later step)
        tokens[read + 1].content = tokens[read].content + tokens[read + 1].content
      } else {
        if (read !== write) { tokens[write] = tokens[read] }
        write++
      }
    }

    // truncate only if anything was actually merged
    if (read !== write) {
      tokens.length = write
    }
  }
}
|
Reference in New Issue
Block a user