From 82ad1c6e1fb5effa5d7b7b1a5d5dffe68d2becdb Mon Sep 17 00:00:00 2001
From: Ben McCann <322311+benmccann@users.noreply.github.com>
Date: Wed, 6 Oct 2021 16:23:13 -0700
Subject: [PATCH] Convert source to ESM
---
lib/marked.esm.js | 463 ++++++++++++++++++--------------------------
lib/marked.js | 279 ++++++++++----------------
marked.min.js | 2 +-
src/Lexer.js | 12 +-
src/Parser.js | 16 +-
src/Renderer.js | 10 +-
src/Slugger.js | 4 +-
src/TextRenderer.js | 4 +-
src/Tokenizer.js | 10 +-
src/defaults.js | 10 +-
src/esm-entry.js | 37 ++--
src/helpers.js | 39 ++--
src/marked.js | 22 +--
src/rules.js | 13 +-
14 files changed, 364 insertions(+), 557 deletions(-)
diff --git a/lib/marked.esm.js b/lib/marked.esm.js
index 145501bfbc..f6910b5acc 100644
--- a/lib/marked.esm.js
+++ b/lib/marked.esm.js
@@ -9,10 +9,6 @@
* The code in this file is generated from files in ./src/
*/
-var esmEntry$1 = {exports: {}};
-
-var defaults$5 = {exports: {}};
-
function getDefaults$1() {
return {
baseUrl: null,
@@ -37,20 +33,15 @@ function getDefaults$1() {
};
}
-function changeDefaults$1(newDefaults) {
- defaults$5.exports.defaults = newDefaults;
+function changeDefaults(newDefaults) {
+ defaults$1 = newDefaults;
}
-defaults$5.exports = {
- defaults: getDefaults$1(),
- getDefaults: getDefaults$1,
- changeDefaults: changeDefaults$1
-};
+let defaults$1 = getDefaults$1();
/**
* Helpers
*/
-
const escapeTest = /[&<>"']/;
const escapeReplace = /[&<>"']/g;
const escapeTestNoEncode = /[<>"']|&(?!#?\w+;)/;
@@ -63,7 +54,7 @@ const escapeReplacements = {
"'": '&#39;'
};
const getEscapeReplacement = (ch) => escapeReplacements[ch];
-function escape$3(html, encode) {
+function escape(html, encode) {
if (encode) {
if (escapeTest.test(html)) {
return html.replace(escapeReplace, getEscapeReplacement);
@@ -79,7 +70,7 @@ function escape$3(html, encode) {
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;
-function unescape$1(html) {
+function unescape(html) {
// explicitly match decimal, hex, and named HTML entities
return html.replace(unescapeTest, (_, n) => {
n = n.toLowerCase();
@@ -94,7 +85,7 @@ function unescape$1(html) {
}
const caret = /(^|[^\[])\^/g;
-function edit$1(regex, opt) {
+function edit(regex, opt) {
regex = regex.source || regex;
opt = opt || '';
const obj = {
@@ -113,11 +104,11 @@ function edit$1(regex, opt) {
const nonWordAndColonTest = /[^\w:]/g;
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
-function cleanUrl$1(sanitize, base, href) {
+function cleanUrl(sanitize, base, href) {
if (sanitize) {
let prot;
try {
- prot = decodeURIComponent(unescape$1(href))
+ prot = decodeURIComponent(unescape(href))
.replace(nonWordAndColonTest, '')
.toLowerCase();
} catch (e) {
@@ -151,7 +142,7 @@ function resolveUrl(base, href) {
if (justDomain.test(base)) {
baseUrls[' ' + base] = base + '/';
} else {
- baseUrls[' ' + base] = rtrim$1(base, '/', true);
+ baseUrls[' ' + base] = rtrim(base, '/', true);
}
}
base = baseUrls[' ' + base];
@@ -172,9 +163,9 @@ function resolveUrl(base, href) {
}
}
-const noopTest$1 = { exec: function noopTest() {} };
+const noopTest = { exec: function noopTest() {} };
-function merge$2(obj) {
+function merge(obj) {
let i = 1,
target,
key;
@@ -191,7 +182,7 @@ function merge$2(obj) {
return obj;
}
-function splitCells$1(tableRow, count) {
+function splitCells(tableRow, count) {
// ensure that every cell-delimiting pipe has a space
// before it to distinguish it from an escaped pipe
const row = tableRow.replace(/\|/g, (match, offset, str) => {
@@ -230,7 +221,7 @@ function splitCells$1(tableRow, count) {
// Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').
// /c*$/ is vulnerable to REDOS.
// invert: Remove suffix of non-c chars instead. Default falsey.
-function rtrim$1(str, c, invert) {
+function rtrim(str, c, invert) {
const l = str.length;
if (l === 0) {
return '';
@@ -254,7 +245,7 @@ function rtrim$1(str, c, invert) {
return str.substr(0, l - suffLen);
}
-function findClosingBracket$1(str, b) {
+function findClosingBracket(str, b) {
if (str.indexOf(b[1]) === -1) {
return -1;
}
@@ -276,14 +267,14 @@ function findClosingBracket$1(str, b) {
return -1;
}
-function checkSanitizeDeprecation$1(opt) {
+function checkSanitizeDeprecation(opt) {
if (opt && opt.sanitize && !opt.silent) {
console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options');
}
}
// copied from https://stackoverflow.com/a/5450113/806777
-function repeatString$1(pattern, count) {
+function repeatString(pattern, count) {
if (count < 1) {
return '';
}
@@ -298,32 +289,9 @@ function repeatString$1(pattern, count) {
return result + pattern;
}
-var helpers = {
- escape: escape$3,
- unescape: unescape$1,
- edit: edit$1,
- cleanUrl: cleanUrl$1,
- resolveUrl,
- noopTest: noopTest$1,
- merge: merge$2,
- splitCells: splitCells$1,
- rtrim: rtrim$1,
- findClosingBracket: findClosingBracket$1,
- checkSanitizeDeprecation: checkSanitizeDeprecation$1,
- repeatString: repeatString$1
-};
-
-const { defaults: defaults$4 } = defaults$5.exports;
-const {
- rtrim,
- splitCells,
- escape: escape$2,
- findClosingBracket
-} = helpers;
-
function outputLink(cap, link, raw, lexer) {
const href = link.href;
- const title = link.title ? escape$2(link.title) : null;
+ const title = link.title ? escape(link.title) : null;
const text = cap[1].replace(/\\([\[\]])/g, '$1');
if (cap[0].charAt(0) !== '!') {
@@ -344,7 +312,7 @@ function outputLink(cap, link, raw, lexer) {
raw,
href,
title,
- text: escape$2(text)
+ text: escape(text)
};
}
}
@@ -380,9 +348,9 @@ function indentCodeCompensation(raw, text) {
/**
* Tokenizer
*/
-var Tokenizer_1$1 = class Tokenizer {
+class Tokenizer {
constructor(options) {
- this.options = options || defaults$4;
+ this.options = options || defaults$1;
}
space(src) {
@@ -641,7 +609,7 @@ var Tokenizer_1$1 = class Tokenizer {
};
if (this.options.sanitize) {
token.type = 'paragraph';
- token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$2(cap[0]);
+ token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]);
token.tokens = [];
this.lexer.inline(token.text, token.tokens);
}
@@ -771,7 +739,7 @@ var Tokenizer_1$1 = class Tokenizer {
return {
type: 'escape',
raw: cap[0],
- text: escape$2(cap[1])
+ text: escape(cap[1])
};
}
}
@@ -800,7 +768,7 @@ var Tokenizer_1$1 = class Tokenizer {
text: this.options.sanitize
? (this.options.sanitizer
? this.options.sanitizer(cap[0])
- : escape$2(cap[0]))
+ : escape(cap[0]))
: cap[0]
};
}
@@ -955,7 +923,7 @@ var Tokenizer_1$1 = class Tokenizer {
if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
text = text.substring(1, text.length - 1);
}
- text = escape$2(text, true);
+ text = escape(text, true);
return {
type: 'codespan',
raw: cap[0],
@@ -991,10 +959,10 @@ var Tokenizer_1$1 = class Tokenizer {
if (cap) {
let text, href;
if (cap[2] === '@') {
- text = escape$2(this.options.mangle ? mangle(cap[1]) : cap[1]);
+ text = escape(this.options.mangle ? mangle(cap[1]) : cap[1]);
href = 'mailto:' + text;
} else {
- text = escape$2(cap[1]);
+ text = escape(cap[1]);
href = text;
}
@@ -1019,7 +987,7 @@ var Tokenizer_1$1 = class Tokenizer {
if (cap = this.rules.inline.url.exec(src)) {
let text, href;
if (cap[2] === '@') {
- text = escape$2(this.options.mangle ? mangle(cap[0]) : cap[0]);
+ text = escape(this.options.mangle ? mangle(cap[0]) : cap[0]);
href = 'mailto:' + text;
} else {
// do extended autolink path validation
@@ -1028,7 +996,7 @@ var Tokenizer_1$1 = class Tokenizer {
prevCapZero = cap[0];
cap[0] = this.rules.inline._backpedal.exec(cap[0])[0];
} while (prevCapZero !== cap[0]);
- text = escape$2(cap[0]);
+ text = escape(cap[0]);
if (cap[1] === 'www.') {
href = 'http://' + text;
} else {
@@ -1056,9 +1024,9 @@ var Tokenizer_1$1 = class Tokenizer {
if (cap) {
let text;
if (this.lexer.state.inRawBlock) {
- text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$2(cap[0])) : cap[0];
+ text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0])) : cap[0];
} else {
- text = escape$2(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
+ text = escape(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
}
return {
type: 'text',
@@ -1067,18 +1035,12 @@ var Tokenizer_1$1 = class Tokenizer {
};
}
}
-};
-
-const {
- noopTest,
- edit,
- merge: merge$1
-} = helpers;
+}
/**
* Block-Level Grammar
*/
-const block$1 = {
+const block = {
newline: /^(?: *(?:\n|$))+/,
code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?=\n|$)|$)/,
@@ -1105,89 +1067,89 @@ const block$1 = {
text: /^[^\n]+/
};
-block$1._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/;
-block$1._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
-block$1.def = edit(block$1.def)
- .replace('label', block$1._label)
- .replace('title', block$1._title)
+block._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/;
+block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
+block.def = edit(block.def)
+ .replace('label', block._label)
+ .replace('title', block._title)
.getRegex();
-block$1.bullet = /(?:[*+-]|\d{1,9}[.)])/;
-block$1.listItemStart = edit(/^( *)(bull) */)
- .replace('bull', block$1.bullet)
+block.bullet = /(?:[*+-]|\d{1,9}[.)])/;
+block.listItemStart = edit(/^( *)(bull) */)
+ .replace('bull', block.bullet)
.getRegex();
-block$1.list = edit(block$1.list)
- .replace(/bull/g, block$1.bullet)
+block.list = edit(block.list)
+ .replace(/bull/g, block.bullet)
.replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))')
- .replace('def', '\\n+(?=' + block$1.def.source + ')')
+ .replace('def', '\\n+(?=' + block.def.source + ')')
.getRegex();
-block$1._tag = 'address|article|aside|base|basefont|blockquote|body|caption'
+block._tag = 'address|article|aside|base|basefont|blockquote|body|caption'
+ '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
+ '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
+ '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
+ '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
+ '|track|ul';
-block$1._comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/;
-block$1.html = edit(block$1.html, 'i')
- .replace('comment', block$1._comment)
- .replace('tag', block$1._tag)
+block._comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/;
+block.html = edit(block.html, 'i')
+ .replace('comment', block._comment)
+ .replace('tag', block._tag)
.replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/)
.getRegex();
-block$1.paragraph = edit(block$1._paragraph)
- .replace('hr', block$1.hr)
+block.paragraph = edit(block._paragraph)
+ .replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs
.replace('blockquote', ' {0,3}>')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
- .replace('tag', block$1._tag) // pars can be interrupted by type (6) html blocks
+ .replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
.getRegex();
-block$1.blockquote = edit(block$1.blockquote)
- .replace('paragraph', block$1.paragraph)
+block.blockquote = edit(block.blockquote)
+ .replace('paragraph', block.paragraph)
.getRegex();
/**
* Normal Block Grammar
*/
-block$1.normal = merge$1({}, block$1);
+block.normal = merge({}, block);
/**
* GFM Block Grammar
*/
-block$1.gfm = merge$1({}, block$1.normal, {
+block.gfm = merge({}, block.normal, {
table: '^ *([^\\n ].*\\|.*)\\n' // Header
+ ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?' // Align
+ '(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
});
-block$1.gfm.table = edit(block$1.gfm.table)
- .replace('hr', block$1.hr)
+block.gfm.table = edit(block.gfm.table)
+ .replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('blockquote', ' {0,3}>')
.replace('code', ' {4}[^\\n]')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
- .replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks
+ .replace('tag', block._tag) // tables can be interrupted by type (6) html blocks
.getRegex();
/**
* Pedantic grammar (original John Gruber's loose markdown specification)
*/
-block$1.pedantic = merge$1({}, block$1.normal, {
+block.pedantic = merge({}, block.normal, {
html: edit(
'^ *(?:comment *(?:\\n|\\s*$)'
+ '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
+ '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))'
- + (escaped ? code : escape$1(code, true))
+ + (escaped ? code : escape(code, true))
+ '</code></pre>\n';
}
return '<pre><code class="'
+ this.options.langPrefix
- + escape$1(lang, true)
+ + escape(lang, true)
+ '">'
- + (escaped ? code : escape$1(code, true))
+ + (escaped ? code : escape(code, true))
+ '</code></pre>\n';
}
@@ -1992,7 +1938,7 @@ var Renderer_1$1 = class Renderer {
if (href === null) {
return text;
}
- let out = '<a href="' + escape$1(href) + '"';
+ let out = '<a href="' + escape(href) + '"';
@@ -2509,11 +2428,11 @@ function marked$1(src, opt, callback) {
}
try {
- const tokens = Lexer$1.lex(src, opt);
+ const tokens = Lexer.lex(src, opt);
if (opt.walkTokens) {
- marked$1.walkTokens(tokens, opt.walkTokens);
+ marked.walkTokens(tokens, opt.walkTokens);
}
- return Parser$1.parse(tokens, opt);
+ return Parser.parse(tokens, opt);
} catch (e) {
e.message += '\nPlease report this to https://github.com/markedjs/marked.';
if (opt.silent) {
@@ -2529,24 +2448,24 @@ function marked$1(src, opt, callback) {
* Options
*/
-marked$1.options =
-marked$1.setOptions = function(opt) {
- merge(marked$1.defaults, opt);
- changeDefaults(marked$1.defaults);
- return marked$1;
+marked.options =
+marked.setOptions = function(opt) {
+ merge(marked.defaults, opt);
+ changeDefaults(marked.defaults);
+ return marked;
};
-marked$1.getDefaults = getDefaults;
+marked.getDefaults = getDefaults$1;
-marked$1.defaults = defaults;
+marked.defaults = defaults$1;
/**
* Use Extension
*/
-marked$1.use = function(...args) {
+marked.use = function(...args) {
const opts = merge({}, ...args);
- const extensions = marked$1.defaults.extensions || { renderers: {}, childTokens: {} };
+ const extensions = marked.defaults.extensions || { renderers: {}, childTokens: {} };
let hasExtensions;
args.forEach((pack) => {
@@ -2605,7 +2524,7 @@ marked$1.use = function(...args) {
// ==-- Parse "overwrite" extensions --== //
if (pack.renderer) {
- const renderer = marked$1.defaults.renderer || new Renderer$1();
+ const renderer = marked.defaults.renderer || new Renderer();
for (const prop in pack.renderer) {
const prevRenderer = renderer[prop];
// Replace renderer with func to run extension, but fall back if false
@@ -2620,7 +2539,7 @@ marked$1.use = function(...args) {
opts.renderer = renderer;
}
if (pack.tokenizer) {
- const tokenizer = marked$1.defaults.tokenizer || new Tokenizer$1();
+ const tokenizer = marked.defaults.tokenizer || new Tokenizer();
for (const prop in pack.tokenizer) {
const prevTokenizer = tokenizer[prop];
// Replace tokenizer with func to run extension, but fall back if false
@@ -2637,7 +2556,7 @@ marked$1.use = function(...args) {
// ==-- Parse WalkTokens extensions --== //
if (pack.walkTokens) {
- const walkTokens = marked$1.defaults.walkTokens;
+ const walkTokens = marked.defaults.walkTokens;
opts.walkTokens = (token) => {
pack.walkTokens.call(this, token);
if (walkTokens) {
@@ -2650,7 +2569,7 @@ marked$1.use = function(...args) {
opts.extensions = extensions;
}
- marked$1.setOptions(opts);
+ marked.setOptions(opts);
});
};
@@ -2658,32 +2577,32 @@ marked$1.use = function(...args) {
* Run callback for every token
*/
-marked$1.walkTokens = function(tokens, callback) {
+marked.walkTokens = function(tokens, callback) {
for (const token of tokens) {
callback(token);
switch (token.type) {
case 'table': {
for (const cell of token.header) {
- marked$1.walkTokens(cell.tokens, callback);
+ marked.walkTokens(cell.tokens, callback);
}
for (const row of token.rows) {
for (const cell of row) {
- marked$1.walkTokens(cell.tokens, callback);
+ marked.walkTokens(cell.tokens, callback);
}
}
break;
}
case 'list': {
- marked$1.walkTokens(token.items, callback);
+ marked.walkTokens(token.items, callback);
break;
}
default: {
- if (marked$1.defaults.extensions && marked$1.defaults.extensions.childTokens && marked$1.defaults.extensions.childTokens[token.type]) { // Walk any extensions
- marked$1.defaults.extensions.childTokens[token.type].forEach(function(childTokens) {
- marked$1.walkTokens(token[childTokens], callback);
+ if (marked.defaults.extensions && marked.defaults.extensions.childTokens && marked.defaults.extensions.childTokens[token.type]) { // Walk any extensions
+ marked.defaults.extensions.childTokens[token.type].forEach(function(childTokens) {
+ marked.walkTokens(token[childTokens], callback);
});
} else if (token.tokens) {
- marked$1.walkTokens(token.tokens, callback);
+ marked.walkTokens(token.tokens, callback);
}
}
}
@@ -2693,7 +2612,7 @@ marked$1.walkTokens = function(tokens, callback) {
/**
* Parse Inline
*/
-marked$1.parseInline = function(src, opt) {
+marked.parseInline = function(src, opt) {
// throw error in case of non string input
if (typeof src === 'undefined' || src === null) {
throw new Error('marked.parseInline(): input parameter is undefined or null');
@@ -2703,15 +2622,15 @@ marked$1.parseInline = function(src, opt) {
+ Object.prototype.toString.call(src) + ', string expected');
}
- opt = merge({}, marked$1.defaults, opt || {});
+ opt = merge({}, marked.defaults, opt || {});
checkSanitizeDeprecation(opt);
try {
- const tokens = Lexer$1.lexInline(src, opt);
+ const tokens = Lexer.lexInline(src, opt);
if (opt.walkTokens) {
- marked$1.walkTokens(tokens, opt.walkTokens);
+ marked.walkTokens(tokens, opt.walkTokens);
}
- return Parser$1.parseInline(tokens, opt);
+ return Parser.parseInline(tokens, opt);
} catch (e) {
e.message += '\nPlease report this to https://github.com/markedjs/marked.';
if (opt.silent) {
@@ -2727,37 +2646,25 @@ marked$1.parseInline = function(src, opt) {
* Expose
*/
-marked$1.Parser = Parser$1;
-marked$1.parser = Parser$1.parse;
-marked$1.Renderer = Renderer$1;
-marked$1.TextRenderer = TextRenderer$1;
-marked$1.Lexer = Lexer$1;
-marked$1.lexer = Lexer$1.lex;
-marked$1.Tokenizer = Tokenizer$1;
-marked$1.Slugger = Slugger$1;
-marked$1.parse = marked$1;
-
-var marked_1 = marked$1;
-
-const marked = marked_1;
-const Lexer = Lexer_1$1;
-const Parser = Parser_1$1;
-const Tokenizer = Tokenizer_1$1;
-const Renderer = Renderer_1$1;
-const TextRenderer = TextRenderer_1$1;
-const Slugger = Slugger_1$1;
-
-esmEntry$1.exports = marked;
-var parse = esmEntry$1.exports.parse = marked;
-var Parser_1 = esmEntry$1.exports.Parser = Parser;
-var parser = esmEntry$1.exports.parser = Parser.parse;
-var Renderer_1 = esmEntry$1.exports.Renderer = Renderer;
-var TextRenderer_1 = esmEntry$1.exports.TextRenderer = TextRenderer;
-var Lexer_1 = esmEntry$1.exports.Lexer = Lexer;
-var lexer = esmEntry$1.exports.lexer = Lexer.lex;
-var Tokenizer_1 = esmEntry$1.exports.Tokenizer = Tokenizer;
-var Slugger_1 = esmEntry$1.exports.Slugger = Slugger;
-
-var esmEntry = esmEntry$1.exports;
-
-export { Lexer_1 as Lexer, Parser_1 as Parser, Renderer_1 as Renderer, Slugger_1 as Slugger, TextRenderer_1 as TextRenderer, Tokenizer_1 as Tokenizer, esmEntry as default, lexer, parse, parser };
+marked.Parser = Parser;
+marked.parser = Parser.parse;
+marked.Renderer = Renderer;
+marked.TextRenderer = TextRenderer;
+marked.Lexer = Lexer;
+marked.lexer = Lexer.lex;
+marked.Tokenizer = Tokenizer;
+marked.Slugger = Slugger;
+marked.parse = marked;
+
+const options = marked.options;
+const setOptions = marked.setOptions;
+const getDefaults = marked.getDefaults;
+const defaults = marked.defaults;
+const use = marked.use;
+const walkTokens = marked.walkTokens;
+const parseInline = marked.parseInline;
+const parse = marked;
+const parser = Parser.parse;
+const lexer = Lexer.lex;
+
+export { Lexer, Parser, Renderer, Slugger, TextRenderer, Tokenizer, marked as default, defaults, getDefaults, lexer, options, parse, parseInline, parser, setOptions, use, walkTokens };
diff --git a/lib/marked.js b/lib/marked.js
index dfe27f780a..04d5ed3b09 100644
--- a/lib/marked.js
+++ b/lib/marked.js
@@ -69,9 +69,7 @@
throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
}
- var defaults$5 = {exports: {}};
-
- function getDefaults$1() {
+ function getDefaults() {
return {
baseUrl: null,
breaks: false,
@@ -94,16 +92,10 @@
xhtml: false
};
}
-
- function changeDefaults$1(newDefaults) {
- defaults$5.exports.defaults = newDefaults;
+ function changeDefaults(newDefaults) {
+ defaults = newDefaults;
}
-
- defaults$5.exports = {
- defaults: getDefaults$1(),
- getDefaults: getDefaults$1,
- changeDefaults: changeDefaults$1
- };
+ var defaults = getDefaults();
/**
* Helpers
@@ -124,7 +116,7 @@
return escapeReplacements[ch];
};
- function escape$2(html, encode) {
+ function escape(html, encode) {
if (encode) {
if (escapeTest.test(html)) {
return html.replace(escapeReplace, getEscapeReplacement);
@@ -137,10 +129,8 @@
return html;
}
-
var unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;
-
- function unescape$1(html) {
+ function unescape(html) {
// explicitly match decimal, hex, and named HTML entities
return html.replace(unescapeTest, function (_, n) {
n = n.toLowerCase();
@@ -153,10 +143,8 @@
return '';
});
}
-
var caret = /(^|[^\[])\^/g;
-
- function edit$1(regex, opt) {
+ function edit(regex, opt) {
regex = regex.source || regex;
opt = opt || '';
var obj = {
@@ -172,16 +160,14 @@
};
return obj;
}
-
var nonWordAndColonTest = /[^\w:]/g;
var originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
-
- function cleanUrl$1(sanitize, base, href) {
+ function cleanUrl(sanitize, base, href) {
if (sanitize) {
var prot;
try {
- prot = decodeURIComponent(unescape$1(href)).replace(nonWordAndColonTest, '').toLowerCase();
+ prot = decodeURIComponent(unescape(href)).replace(nonWordAndColonTest, '').toLowerCase();
} catch (e) {
return null;
}
@@ -203,12 +189,10 @@
return href;
}
-
var baseUrls = {};
var justDomain = /^[^:]+:\/*[^/]*$/;
var protocol = /^([^:]+:)[\s\S]*$/;
var domain = /^([^:]+:\/*[^/]*)[\s\S]*$/;
-
function resolveUrl(base, href) {
if (!baseUrls[' ' + base]) {
// we can ignore everything in base after the last slash of its path component,
@@ -217,7 +201,7 @@
if (justDomain.test(base)) {
baseUrls[' ' + base] = base + '/';
} else {
- baseUrls[' ' + base] = rtrim$1(base, '/', true);
+ baseUrls[' ' + base] = rtrim(base, '/', true);
}
}
@@ -240,12 +224,10 @@
return base + href;
}
}
-
- var noopTest$1 = {
+ var noopTest = {
exec: function noopTest() {}
};
-
- function merge$2(obj) {
+ function merge(obj) {
var i = 1,
target,
key;
@@ -262,8 +244,7 @@
return obj;
}
-
- function splitCells$1(tableRow, count) {
+ function splitCells(tableRow, count) {
// ensure that every cell-delimiting pipe has a space
// before it to distinguish it from an escaped pipe
var row = tableRow.replace(/\|/g, function (match, offset, str) {
@@ -312,8 +293,7 @@
// /c*$/ is vulnerable to REDOS.
// invert: Remove suffix of non-c chars instead. Default falsey.
-
- function rtrim$1(str, c, invert) {
+ function rtrim(str, c, invert) {
var l = str.length;
if (l === 0) {
@@ -337,8 +317,7 @@
return str.substr(0, l - suffLen);
}
-
- function findClosingBracket$1(str, b) {
+ function findClosingBracket(str, b) {
if (str.indexOf(b[1]) === -1) {
return -1;
}
@@ -363,15 +342,13 @@
return -1;
}
-
- function checkSanitizeDeprecation$1(opt) {
+ function checkSanitizeDeprecation(opt) {
if (opt && opt.sanitize && !opt.silent) {
console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options');
}
} // copied from https://stackoverflow.com/a/5450113/806777
-
- function repeatString$1(pattern, count) {
+ function repeatString(pattern, count) {
if (count < 1) {
return '';
}
@@ -390,30 +367,9 @@
return result + pattern;
}
- var helpers = {
- escape: escape$2,
- unescape: unescape$1,
- edit: edit$1,
- cleanUrl: cleanUrl$1,
- resolveUrl: resolveUrl,
- noopTest: noopTest$1,
- merge: merge$2,
- splitCells: splitCells$1,
- rtrim: rtrim$1,
- findClosingBracket: findClosingBracket$1,
- checkSanitizeDeprecation: checkSanitizeDeprecation$1,
- repeatString: repeatString$1
- };
-
- var defaults$4 = defaults$5.exports.defaults;
- var rtrim = helpers.rtrim,
- splitCells = helpers.splitCells,
- _escape = helpers.escape,
- findClosingBracket = helpers.findClosingBracket;
-
function outputLink(cap, link, raw, lexer) {
var href = link.href;
- var title = link.title ? _escape(link.title) : null;
+ var title = link.title ? escape(link.title) : null;
var text = cap[1].replace(/\\([\[\]])/g, '$1');
if (cap[0].charAt(0) !== '!') {
@@ -434,7 +390,7 @@
raw: raw,
href: href,
title: title,
- text: _escape(text)
+ text: escape(text)
};
}
}
@@ -468,9 +424,9 @@
*/
- var Tokenizer_1 = /*#__PURE__*/function () {
+ var Tokenizer = /*#__PURE__*/function () {
function Tokenizer(options) {
- this.options = options || defaults$4;
+ this.options = options || defaults;
}
var _proto = Tokenizer.prototype;
@@ -742,7 +698,7 @@
if (this.options.sanitize) {
token.type = 'paragraph';
- token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : _escape(cap[0]);
+ token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]);
token.tokens = [];
this.lexer.inline(token.text, token.tokens);
}
@@ -881,14 +837,14 @@
}
};
- _proto.escape = function escape(src) {
+ _proto.escape = function escape$1(src) {
var cap = this.rules.inline.escape.exec(src);
if (cap) {
return {
type: 'escape',
raw: cap[0],
- text: _escape(cap[1])
+ text: escape(cap[1])
};
}
};
@@ -914,7 +870,7 @@
raw: cap[0],
inLink: this.lexer.state.inLink,
inRawBlock: this.lexer.state.inRawBlock,
- text: this.options.sanitize ? this.options.sanitizer ? this.options.sanitizer(cap[0]) : _escape(cap[0]) : cap[0]
+ text: this.options.sanitize ? this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]) : cap[0]
};
}
};
@@ -1084,7 +1040,7 @@
text = text.substring(1, text.length - 1);
}
- text = _escape(text, true);
+ text = escape(text, true);
return {
type: 'codespan',
raw: cap[0],
@@ -1124,10 +1080,10 @@
var text, href;
if (cap[2] === '@') {
- text = _escape(this.options.mangle ? mangle(cap[1]) : cap[1]);
+ text = escape(this.options.mangle ? mangle(cap[1]) : cap[1]);
href = 'mailto:' + text;
} else {
- text = _escape(cap[1]);
+ text = escape(cap[1]);
href = text;
}
@@ -1152,7 +1108,7 @@
var text, href;
if (cap[2] === '@') {
- text = _escape(this.options.mangle ? mangle(cap[0]) : cap[0]);
+ text = escape(this.options.mangle ? mangle(cap[0]) : cap[0]);
href = 'mailto:' + text;
} else {
// do extended autolink path validation
@@ -1163,7 +1119,7 @@
cap[0] = this.rules.inline._backpedal.exec(cap[0])[0];
} while (prevCapZero !== cap[0]);
- text = _escape(cap[0]);
+ text = escape(cap[0]);
if (cap[1] === 'www.') {
href = 'http://' + text;
@@ -1193,9 +1149,9 @@
var text;
if (this.lexer.state.inRawBlock) {
- text = this.options.sanitize ? this.options.sanitizer ? this.options.sanitizer(cap[0]) : _escape(cap[0]) : cap[0];
+ text = this.options.sanitize ? this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]) : cap[0];
} else {
- text = _escape(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
+ text = escape(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
}
return {
@@ -1209,14 +1165,11 @@
return Tokenizer;
}();
- var noopTest = helpers.noopTest,
- edit = helpers.edit,
- merge$1 = helpers.merge;
/**
* Block-Level Grammar
*/
- var block$1 = {
+ var block = {
newline: /^(?: *(?:\n|$))+/,
code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?=\n|$)|$)/,
@@ -1242,56 +1195,56 @@
_paragraph: /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html| +\n)[^\n]+)*)/,
text: /^[^\n]+/
};
- block$1._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/;
- block$1._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
- block$1.def = edit(block$1.def).replace('label', block$1._label).replace('title', block$1._title).getRegex();
- block$1.bullet = /(?:[*+-]|\d{1,9}[.)])/;
- block$1.listItemStart = edit(/^( *)(bull) */).replace('bull', block$1.bullet).getRegex();
- block$1.list = edit(block$1.list).replace(/bull/g, block$1.bullet).replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))').replace('def', '\\n+(?=' + block$1.def.source + ')').getRegex();
- block$1._tag = 'address|article|aside|base|basefont|blockquote|body|caption' + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption' + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe' + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option' + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr' + '|track|ul';
- block$1._comment = /|$)/;
- block$1.html = edit(block$1.html, 'i').replace('comment', block$1._comment).replace('tag', block$1._tag).replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex();
- block$1.paragraph = edit(block$1._paragraph).replace('hr', block$1.hr).replace('heading', ' {0,3}#{1,6} ').replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs
+ block._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/;
+ block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
+ block.def = edit(block.def).replace('label', block._label).replace('title', block._title).getRegex();
+ block.bullet = /(?:[*+-]|\d{1,9}[.)])/;
+ block.listItemStart = edit(/^( *)(bull) */).replace('bull', block.bullet).getRegex();
+ block.list = edit(block.list).replace(/bull/g, block.bullet).replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))').replace('def', '\\n+(?=' + block.def.source + ')').getRegex();
+ block._tag = 'address|article|aside|base|basefont|blockquote|body|caption' + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption' + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe' + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option' + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr' + '|track|ul';
+ block._comment = /|$)/;
+ block.html = edit(block.html, 'i').replace('comment', block._comment).replace('tag', block._tag).replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex();
+ block.paragraph = edit(block._paragraph).replace('hr', block.hr).replace('heading', ' {0,3}#{1,6} ').replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs
.replace('blockquote', ' {0,3}>').replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n').replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
- .replace('html', '?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)').replace('tag', block$1._tag) // pars can be interrupted by type (6) html blocks
+ .replace('html', '?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)').replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
.getRegex();
- block$1.blockquote = edit(block$1.blockquote).replace('paragraph', block$1.paragraph).getRegex();
+ block.blockquote = edit(block.blockquote).replace('paragraph', block.paragraph).getRegex();
/**
* Normal Block Grammar
*/
- block$1.normal = merge$1({}, block$1);
+ block.normal = merge({}, block);
/**
* GFM Block Grammar
*/
- block$1.gfm = merge$1({}, block$1.normal, {
+ block.gfm = merge({}, block.normal, {
table: '^ *([^\\n ].*\\|.*)\\n' // Header
+ ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?' // Align
+ '(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
});
- block$1.gfm.table = edit(block$1.gfm.table).replace('hr', block$1.hr).replace('heading', ' {0,3}#{1,6} ').replace('blockquote', ' {0,3}>').replace('code', ' {4}[^\\n]').replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n').replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
- .replace('html', '?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)').replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks
+ block.gfm.table = edit(block.gfm.table).replace('hr', block.hr).replace('heading', ' {0,3}#{1,6} ').replace('blockquote', ' {0,3}>').replace('code', ' {4}[^\\n]').replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n').replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
+ .replace('html', '?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)').replace('tag', block._tag) // tables can be interrupted by type (6) html blocks
.getRegex();
/**
* Pedantic grammar (original John Gruber's loose markdown specification)
*/
- block$1.pedantic = merge$1({}, block$1.normal, {
+ block.pedantic = merge({}, block.normal, {
html: edit('^ *(?:comment *(?:\\n|\\s*$)' + '|<(tag)[\\s\\S]+?\\1> *(?:\\n{2,}|\\s*$)' // closed tag
- + '|' + (escaped ? _code : escape$1(_code, true)) + '
\n';
+ return '' + (escaped ? _code : escape(_code, true)) + '
\n';
}
- return '' + (escaped ? _code : escape$1(_code, true)) + '
\n';
+ return '' + (escaped ? _code : escape(_code, true)) + '
\n';
};
_proto.blockquote = function blockquote(quote) {
@@ -2070,7 +2011,7 @@
return text;
}
- var out = 'e.length)&&(t=e.length);for(var u=0,n=new Array(t);u'+(u?e:V(e,!0))+"
\n":""+(u?e:V(e,!0))+"
\n"},t.blockquote=function(e){return"\n"+e+"\n"},t.html=function(e){return e},t.heading=function(e,t,u,n){return this.options.headerIds?"
"+e+"
\n"},t.table=function(e,t){return""+e+"
"},t.br=function(){return this.options.xhtml?""+se(e.message+"",!0)+"";throw e}}return ae.options=ae.setOptions=function(e){return re(ae.defaults,e),le(ae.defaults),ae},ae.getDefaults=$,ae.defaults=x,ae.use=function(){for(var u=this,e=arguments.length,t=new Array(e),n=0;n
"+se(e.message+"",!0)+"";throw e}},ae.Parser=te,ae.parser=te.parse,ae.Renderer=ne,ae.TextRenderer=S,ae.Lexer=ee,ae.lexer=ee.lex,ae.Tokenizer=ue,ae.Slugger=B,ae.parse=ae}); \ No newline at end of file +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).marked=t()}(this,function(){"use strict";function r(e,t){for(var u=0;u
'+(u?e:c(e,!0))+"
\n":""+(u?e:c(e,!0))+"
\n"},t.blockquote=function(e){return"\n"+e+"\n"},t.html=function(e){return e},t.heading=function(e,t,u,n){return this.options.headerIds?"
"+e+"
\n"},t.table=function(e,t){return""+e+"
"},t.br=function(){return this.options.xhtml?""+c(e.message+"",!0)+"";throw e}}return L.options=L.setOptions=function(e){return b(L.defaults,e),e=L.defaults,module.exports.defaults=e,L},L.getDefaults=e,L.defaults=s,L.use=function(){for(var u=this,e=arguments.length,t=new Array(e),n=0;n
"+c(e.message+"",!0)+"";throw e}},L.Parser=j,L.parser=j.parse,L.Renderer=Z,L.TextRenderer=q,L.Lexer=I,L.lexer=I.lex,L.Tokenizer=z,L.Slugger=O,L.parse=L}); \ No newline at end of file diff --git a/src/Lexer.js b/src/Lexer.js index d34aa47bc0..72c311bdba 100644 --- a/src/Lexer.js +++ b/src/Lexer.js @@ -1,7 +1,7 @@ -const Tokenizer = require('./Tokenizer.js'); -const { defaults } = require('./defaults.js'); -const { block, inline } = require('./rules.js'); -const { repeatString } = require('./helpers.js'); +import Tokenizer from './Tokenizer.js'; +import { defaults } from './defaults.js'; +import { block, inline } from './rules.js'; +import { repeatString } from './helpers.js'; /** * smartypants text replacement @@ -47,7 +47,7 @@ function mangle(text) { /** * Block Lexer */ -module.exports = class Lexer { +export default class Lexer { constructor(options) { this.tokens = []; this.tokens.links = Object.create(null); @@ -488,4 +488,4 @@ module.exports = class Lexer { return tokens; } -}; +} diff --git a/src/Parser.js b/src/Parser.js index a7519f9134..3455aee428 100644 --- a/src/Parser.js +++ b/src/Parser.js @@ -1,15 +1,15 @@ -const Renderer = require('./Renderer.js'); -const TextRenderer = require('./TextRenderer.js'); -const Slugger = require('./Slugger.js'); -const { defaults } = require('./defaults.js'); -const { +import Renderer from './Renderer.js'; +import TextRenderer from './TextRenderer.js'; +import Slugger from './Slugger.js'; +import { defaults } from './defaults.js'; +import { unescape -} = require('./helpers.js'); +} from './helpers.js'; /** * Parsing & Compiling */ -module.exports = class Parser { +export default class Parser { constructor(options) { this.options = options || defaults; this.options.renderer = this.options.renderer || new Renderer(); @@ -283,4 +283,4 @@ module.exports = class Parser { } return out; } -}; +} diff --git a/src/Renderer.js b/src/Renderer.js index 1fa9714602..c412d915c7 100644 --- a/src/Renderer.js +++ b/src/Renderer.js @@ -1,13 
+1,13 @@ -const { defaults } = require('./defaults.js'); -const { +import { defaults } from './defaults.js'; +import { cleanUrl, escape -} = require('./helpers.js'); +} from './helpers.js'; /** * Renderer */ -module.exports = class Renderer { +export default class Renderer { constructor(options) { this.options = options || defaults; } @@ -163,4 +163,4 @@ module.exports = class Renderer { text(text) { return text; } -}; +} diff --git a/src/Slugger.js b/src/Slugger.js index db385f54dd..a7995ebd49 100644 --- a/src/Slugger.js +++ b/src/Slugger.js @@ -1,7 +1,7 @@ /** * Slugger generates header id */ -module.exports = class Slugger { +export default class Slugger { constructor() { this.seen = {}; } @@ -46,4 +46,4 @@ module.exports = class Slugger { const slug = this.serialize(value); return this.getNextSafeSlug(slug, options.dryrun); } -}; +} diff --git a/src/TextRenderer.js b/src/TextRenderer.js index 48c36e6999..657043ff86 100644 --- a/src/TextRenderer.js +++ b/src/TextRenderer.js @@ -2,7 +2,7 @@ * TextRenderer * returns only the textual part of the token */ -module.exports = class TextRenderer { +export default class TextRenderer { // no need for block level renderers strong(text) { return text; @@ -39,4 +39,4 @@ module.exports = class TextRenderer { br() { return ''; } -}; +} diff --git a/src/Tokenizer.js b/src/Tokenizer.js index b834832a02..c3f644d8fa 100644 --- a/src/Tokenizer.js +++ b/src/Tokenizer.js @@ -1,10 +1,10 @@ -const { defaults } = require('./defaults.js'); -const { +import { defaults } from './defaults.js'; +import { rtrim, splitCells, escape, findClosingBracket -} = require('./helpers.js'); +} from './helpers.js'; function outputLink(cap, link, raw, lexer) { const href = link.href; @@ -65,7 +65,7 @@ function indentCodeCompensation(raw, text) { /** * Tokenizer */ -module.exports = class Tokenizer { +export default class Tokenizer { constructor(options) { this.options = options || defaults; } @@ -752,4 +752,4 @@ module.exports = class Tokenizer { }; } } 
-}; +} diff --git a/src/defaults.js b/src/defaults.js index a4b451fe2f..63f4b6276d 100644 --- a/src/defaults.js +++ b/src/defaults.js @@ -1,4 +1,4 @@ -function getDefaults() { +export function getDefaults() { return { baseUrl: null, breaks: false, @@ -22,12 +22,8 @@ function getDefaults() { }; } -function changeDefaults(newDefaults) { +export function changeDefaults(newDefaults) { module.exports.defaults = newDefaults; } -module.exports = { - defaults: getDefaults(), - getDefaults, - changeDefaults -}; +export const defaults = getDefaults(); diff --git a/src/esm-entry.js b/src/esm-entry.js index 72056d6da0..c5163d3f82 100644 --- a/src/esm-entry.js +++ b/src/esm-entry.js @@ -1,18 +1,21 @@ -const marked = require('./marked.js'); -const Lexer = require('./Lexer.js'); -const Parser = require('./Parser.js'); -const Tokenizer = require('./Tokenizer.js'); -const Renderer = require('./Renderer.js'); -const TextRenderer = require('./TextRenderer.js'); -const Slugger = require('./Slugger.js'); +import marked from './marked.js'; +import Lexer from './Lexer.js'; +import Parser from './Parser.js'; -module.exports = marked; -module.exports.parse = marked; -module.exports.Parser = Parser; -module.exports.parser = Parser.parse; -module.exports.Renderer = Renderer; -module.exports.TextRenderer = TextRenderer; -module.exports.Lexer = Lexer; -module.exports.lexer = Lexer.lex; -module.exports.Tokenizer = Tokenizer; -module.exports.Slugger = Slugger; +export default marked; +export const options = marked.options; +export const setOptions = marked.setOptions; +export const getDefaults = marked.getDefaults; +export const defaults = marked.defaults; +export const use = marked.use; +export const walkTokens = marked.walkTokens; +export const parseInline = marked.parseInline; +export const parse = marked; +export const parser = Parser.parse; +export const lexer = Lexer.lex; +export { default as Lexer } from './Lexer.js'; +export { default as Parser } from './Parser.js'; +export { default as 
Tokenizer } from './Tokenizer.js'; +export { default as Renderer } from './Renderer.js'; +export { default as TextRenderer } from './TextRenderer.js'; +export { default as Slugger } from './Slugger.js'; diff --git a/src/helpers.js b/src/helpers.js index e0b48d74ea..cd4b73291e 100644 --- a/src/helpers.js +++ b/src/helpers.js @@ -13,7 +13,7 @@ const escapeReplacements = { "'": ''' }; const getEscapeReplacement = (ch) => escapeReplacements[ch]; -function escape(html, encode) { +export function escape(html, encode) { if (encode) { if (escapeTest.test(html)) { return html.replace(escapeReplace, getEscapeReplacement); @@ -29,7 +29,7 @@ function escape(html, encode) { const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig; -function unescape(html) { +export function unescape(html) { // explicitly match decimal, hex, and named HTML entities return html.replace(unescapeTest, (_, n) => { n = n.toLowerCase(); @@ -44,7 +44,7 @@ function unescape(html) { } const caret = /(^|[^\[])\^/g; -function edit(regex, opt) { +export function edit(regex, opt) { regex = regex.source || regex; opt = opt || ''; const obj = { @@ -63,7 +63,7 @@ function edit(regex, opt) { const nonWordAndColonTest = /[^\w:]/g; const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i; -function cleanUrl(sanitize, base, href) { +export function cleanUrl(sanitize, base, href) { if (sanitize) { let prot; try { @@ -93,7 +93,7 @@ const justDomain = /^[^:]+:\/*[^/]*$/; const protocol = /^([^:]+:)[\s\S]*$/; const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/; -function resolveUrl(base, href) { +export function resolveUrl(base, href) { if (!baseUrls[' ' + base]) { // we can ignore everything in base after the last slash of its path component, // but we might need to add _that_ @@ -122,9 +122,9 @@ function resolveUrl(base, href) { } } -const noopTest = { exec: function noopTest() {} }; +export const noopTest = { exec: function noopTest() {} }; -function merge(obj) { +export function merge(obj) { let i = 1, target, 
key; @@ -141,7 +141,7 @@ function merge(obj) { return obj; } -function splitCells(tableRow, count) { +export function splitCells(tableRow, count) { // ensure that every cell-delimiting pipe has a space // before it to distinguish it from an escaped pipe const row = tableRow.replace(/\|/g, (match, offset, str) => { @@ -180,7 +180,7 @@ function splitCells(tableRow, count) { // Remove trailing 'c's. Equivalent to str.replace(/c*$/, ''). // /c*$/ is vulnerable to REDOS. // invert: Remove suffix of non-c chars instead. Default falsey. -function rtrim(str, c, invert) { +export function rtrim(str, c, invert) { const l = str.length; if (l === 0) { return ''; @@ -204,7 +204,7 @@ function rtrim(str, c, invert) { return str.substr(0, l - suffLen); } -function findClosingBracket(str, b) { +export function findClosingBracket(str, b) { if (str.indexOf(b[1]) === -1) { return -1; } @@ -226,14 +226,14 @@ function findClosingBracket(str, b) { return -1; } -function checkSanitizeDeprecation(opt) { +export function checkSanitizeDeprecation(opt) { if (opt && opt.sanitize && !opt.silent) { console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. 
Read more here: https://marked.js.org/#/USING_ADVANCED.md#options'); } } // copied from https://stackoverflow.com/a/5450113/806777 -function repeatString(pattern, count) { +export function repeatString(pattern, count) { if (count < 1) { return ''; } @@ -247,18 +247,3 @@ function repeatString(pattern, count) { } return result + pattern; } - -module.exports = { - escape, - unescape, - edit, - cleanUrl, - resolveUrl, - noopTest, - merge, - splitCells, - rtrim, - findClosingBracket, - checkSanitizeDeprecation, - repeatString -}; diff --git a/src/marked.js b/src/marked.js index f9186bb372..0e889cdf89 100644 --- a/src/marked.js +++ b/src/marked.js @@ -1,19 +1,19 @@ -const Lexer = require('./Lexer.js'); -const Parser = require('./Parser.js'); -const Tokenizer = require('./Tokenizer.js'); -const Renderer = require('./Renderer.js'); -const TextRenderer = require('./TextRenderer.js'); -const Slugger = require('./Slugger.js'); -const { +import Lexer from './Lexer.js'; +import Parser from './Parser.js'; +import Tokenizer from './Tokenizer.js'; +import Renderer from './Renderer.js'; +import TextRenderer from './TextRenderer.js'; +import Slugger from './Slugger.js'; +import { merge, checkSanitizeDeprecation, escape -} = require('./helpers.js'); -const { +} from './helpers.js'; +import { getDefaults, changeDefaults, defaults -} = require('./defaults.js'); +} from './defaults.js'; /** * Marked @@ -334,4 +334,4 @@ marked.Tokenizer = Tokenizer; marked.Slugger = Slugger; marked.parse = marked; -module.exports = marked; +export default marked; diff --git a/src/rules.js b/src/rules.js index e99d87b365..f64242a15f 100644 --- a/src/rules.js +++ b/src/rules.js @@ -1,13 +1,13 @@ -const { +import { noopTest, edit, merge -} = require('./helpers.js'); +} from './helpers.js'; /** * Block-Level Grammar */ -const block = { +export const block = { newline: /^(?: *(?:\n|$))+/, code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/, fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: 
{0,3}\1[~`]* *(?=\n|$)|$)/, @@ -139,7 +139,7 @@ block.pedantic = merge({}, block.normal, { /** * Inline-Level Grammar */ -const inline = { +export const inline = { escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/, autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/, url: noopTest, @@ -283,8 +283,3 @@ inline.breaks = merge({}, inline.gfm, { .replace(/\{2,\}/g, '*') .getRegex() }); - -module.exports = { - block, - inline -};