
Commit

Merge pull request #1652 from UziTech/codespan-newline
Codespan newline
styfle committed May 3, 2020
2 parents f9388f1 + c2d6d2f commit 0a57e47
Showing 6 changed files with 144 additions and 24 deletions.
2 changes: 1 addition & 1 deletion docs/USING_PRO.md
@@ -155,7 +155,7 @@ console.log(marked('$ latex code $\n\n` other code `'));
- table(*string* src)
- lheading(*string* src)
- paragraph(*string* src)
- - text(*string* src)
+ - text(*string* src, *array* tokens)

### Inline level tokenizer methods

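Side note on the docs change above: the block-level text tokenizer now also receives the block tokens lexed so far as a second argument. A minimal sketch of an override using the new signature, assuming the marked.use({ tokenizer }) extension API that USING_PRO.md documents; this hypothetical override only logs and always returns false, so the built-in tokenizer still runs.

const marked = require('marked');

marked.use({
  tokenizer: {
    text(src, tokens) {
      // `tokens` is the new second argument: the block tokens lexed so far,
      // so an override can inspect the previous token before emitting its own.
      const prev = tokens[tokens.length - 1];
      console.log('text tokenizer, previous token:', prev ? prev.type : 'none');
      return false; // fall back to the built-in text tokenizer
    }
  }
});

console.log(marked('- first line\n  second line'));
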
20 changes: 16 additions & 4 deletions src/Lexer.js
@@ -112,7 +112,7 @@ module.exports = class Lexer {
*/
blockTokens(src, tokens = [], top = true) {
src = src.replace(/^ +$/gm, '');
-    let token, i, l;
+    let token, i, l, lastToken;

while (src) {
// newline
@@ -127,7 +127,13 @@ module.exports = class Lexer {
// code
if (token = this.tokenizer.code(src, tokens)) {
src = src.substring(token.raw.length);
-        tokens.push(token);
+        if (token.type) {
+          tokens.push(token);
+        } else {
+          lastToken = tokens[tokens.length - 1];
+          lastToken.raw += '\n' + token.raw;
+          lastToken.text += '\n' + token.text;
+        }
continue;
}

@@ -219,9 +225,15 @@
}

// text
-      if (token = this.tokenizer.text(src)) {
+      if (token = this.tokenizer.text(src, tokens)) {
src = src.substring(token.raw.length);
-        tokens.push(token);
+        if (token.type) {
+          tokens.push(token);
+        } else {
+          lastToken = tokens[tokens.length - 1];
+          lastToken.raw += '\n' + token.raw;
+          lastToken.text += '\n' + token.text;
+        }
continue;
}

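The two branches above share one new convention: a tokenizer may now return a token without a type, which tells the lexer to fold the matched text into the previous token instead of pushing a new one. A standalone sketch of that merge step, with made-up sample tokens:

function pushOrMerge(tokens, token) {
  if (token.type) {
    // A typed token starts a new block.
    tokens.push(token);
  } else {
    // A typeless token is a continuation: append it to the previous token.
    const lastToken = tokens[tokens.length - 1];
    lastToken.raw += '\n' + token.raw;
    lastToken.text += '\n' + token.text;
  }
}

const tokens = [{ type: 'paragraph', raw: 'hello', text: 'hello' }];
pushOrMerge(tokens, { raw: '    not a code block', text: 'not a code block' });
console.log(tokens);
// [ { type: 'paragraph', raw: 'hello\n    not a code block', text: 'hello\nnot a code block' } ]
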
41 changes: 28 additions & 13 deletions src/Tokenizer.js
@@ -84,21 +84,21 @@ module.exports = class Tokenizer {
const lastToken = tokens[tokens.length - 1];
// An indented code block cannot interrupt a paragraph.
if (lastToken && lastToken.type === 'paragraph') {
-        tokens.pop();
-        lastToken.text += '\n' + cap[0].trimRight();
-        lastToken.raw += '\n' + cap[0];
-        return lastToken;
-      } else {
-        const text = cap[0].replace(/^ {4}/gm, '');
        return {
-          type: 'code',
          raw: cap[0],
-          codeBlockStyle: 'indented',
-          text: !this.options.pedantic
-            ? rtrim(text, '\n')
-            : text
+          text: cap[0].trimRight()
        };
      }
+
+      const text = cap[0].replace(/^ {4}/gm, '');
+      return {
+        type: 'code',
+        raw: cap[0],
+        codeBlockStyle: 'indented',
+        text: !this.options.pedantic
+          ? rtrim(text, '\n')
+          : text
+      };
}
}

@@ -374,9 +374,17 @@ module.exports = class Tokenizer {
}
}

-  text(src) {
+  text(src, tokens) {
const cap = this.rules.block.text.exec(src);
if (cap) {
+      const lastToken = tokens[tokens.length - 1];
+      if (lastToken && lastToken.type === 'text') {
+        return {
+          raw: cap[0],
+          text: cap[0]
+        };
+      }
+
return {
type: 'text',
raw: cap[0],
@@ -504,10 +512,17 @@ module.exports = class Tokenizer {
codespan(src) {
const cap = this.rules.inline.code.exec(src);
if (cap) {
+      let text = cap[2].replace(/\n/g, ' ');
+      const hasNonSpaceChars = /[^ ]/.test(text);
+      const hasSpaceCharsOnBothEnds = text.startsWith(' ') && text.endsWith(' ');
+      if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
+        text = text.substring(1, text.length - 1);
+      }
+      text = escape(text, true);
return {
type: 'codespan',
raw: cap[0],
-        text: escape(cap[2].trim(), true)
+        text
};
}
}
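
The codespan change above matches the CommonMark code-span rule: newlines inside a span become spaces, and exactly one leading and one trailing space is stripped, but only when both ends are spaces and the span is not all spaces. A standalone sketch mirroring that logic, with escape() left out for brevity:

function normalizeCodespan(text) {
  text = text.replace(/\n/g, ' '); // newlines become spaces
  const hasNonSpaceChars = /[^ ]/.test(text);
  const hasSpaceCharsOnBothEnds = text.startsWith(' ') && text.endsWith(' ');
  if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
    text = text.substring(1, text.length - 1); // strip exactly one space from each end
  }
  return text;
}

console.log(normalizeCodespan('\na\n')); // 'a'
console.log(normalizeCodespan(' a '));   // 'a'
console.log(normalizeCodespan('  a  ')); // ' a '
console.log(normalizeCodespan('   '));   // '   ' (all spaces: nothing stripped)
console.log(normalizeCodespan('a\nb'));  // 'a b'
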
5 changes: 5 additions & 0 deletions test/specs/new/codespan_newline.html
@@ -0,0 +1,5 @@
+<p><code>code code</code></p>
+
+<ul>
+<li><code>code code</code></li>
+</ul>
5 changes: 5 additions & 0 deletions test/specs/new/codespan_newline.md
@@ -0,0 +1,5 @@
+`code
+code`
+
+- `code
+code`
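
Run through marked, the markdown fixture above should produce the HTML fixture shown before it; roughly (a sketch, assuming the default renderer):

const marked = require('marked');

console.log(marked('`code\ncode`'));
// <p><code>code code</code></p>

console.log(marked('- `code\ncode`'));
// <ul>
// <li><code>code code</code></li>
// </ul>
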
95 changes: 89 additions & 6 deletions test/unit/Lexer-spec.js
@@ -752,12 +752,95 @@ a | b
});
});

-    it('code', () => {
-      expectInlineTokens({
-        md: '`code`',
-        tokens: [
-          { type: 'codespan', raw: '`code`', text: 'code' }
-        ]
+    describe('codespan', () => {
+      it('code', () => {
+        expectInlineTokens({
+          md: '`code`',
+          tokens: [
+            { type: 'codespan', raw: '`code`', text: 'code' }
+          ]
+        });
+      });
+
+      it('only spaces not stripped', () => {
+        expectInlineTokens({
+          md: '` `',
+          tokens: [
+            { type: 'codespan', raw: '` `', text: ' ' }
+          ]
+        });
+      });
+
+      it('beginning space only not stripped', () => {
+        expectInlineTokens({
+          md: '` a`',
+          tokens: [
+            { type: 'codespan', raw: '` a`', text: ' a' }
+          ]
+        });
+      });
+
+      it('end space only not stripped', () => {
+        expectInlineTokens({
+          md: '`a `',
+          tokens: [
+            { type: 'codespan', raw: '`a `', text: 'a ' }
+          ]
+        });
+      });
+
+      it('begin and end spaces are stripped', () => {
+        expectInlineTokens({
+          md: '` a `',
+          tokens: [
+            { type: 'codespan', raw: '` a `', text: 'a' }
+          ]
+        });
+      });
+
+      it('begin and end newlines are stripped', () => {
+        expectInlineTokens({
+          md: '`\na\n`',
+          tokens: [
+            { type: 'codespan', raw: '`\na\n`', text: 'a' }
+          ]
+        });
+      });
+
+      it('begin and end tabs are not stripped', () => {
+        expectInlineTokens({
+          md: '`\ta\t`',
+          tokens: [
+            { type: 'codespan', raw: '`\ta\t`', text: '\ta\t' }
+          ]
+        });
+      });
+
+      it('begin and end newlines', () => {
+        expectInlineTokens({
+          md: '`\na\n`',
+          tokens: [
+            { type: 'codespan', raw: '`\na\n`', text: 'a' }
+          ]
+        });
+      });
+
+      it('begin and end multiple spaces only one stripped', () => {
+        expectInlineTokens({
+          md: '`  a  `',
+          tokens: [
+            { type: 'codespan', raw: '`  a  `', text: ' a ' }
+          ]
+        });
+      });

+      it('newline to space', () => {
+        expectInlineTokens({
+          md: '`a\nb`',
+          tokens: [
+            { type: 'codespan', raw: '`a\nb`', text: 'a b' }
+          ]
+        });
});
});

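The unit tests above check the token stream; through the public API the same inputs render roughly as follows (a sketch, assuming the default renderer):

const marked = require('marked');
console.log(marked('` a `'));   // <p><code>a</code></p>     one wrapping space stripped per side
console.log(marked('`  a  `')); // <p><code> a </code></p>   only one space stripped per side
console.log(marked('`\ta\t`')); // <p><code>\ta\t</code></p> tabs are not stripped (literal tabs in the output)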

1 comment on commit 0a57e47

@vercel vercel bot commented on 0a57e47 May 3, 2020
