diff --git a/cmd/esbuild/main.go b/cmd/esbuild/main.go
index 19cd7e1fca1..7e8dd728f5f 100644
--- a/cmd/esbuild/main.go
+++ b/cmd/esbuild/main.go
@@ -80,7 +80,7 @@ var helpText = func(colors logger.Colors) string {
   --jsx-fragment=...        What to use for JSX instead of React.Fragment
   --jsx=...                 Set to "preserve" to disable transforming JSX to JS
   --keep-names              Preserve "name" on functions and classes
-  --legal-comments=...      Where to place license comments (none | inline |
+  --legal-comments=...      Where to place legal comments (none | inline |
                             eof | linked | external, default eof when bundling
                             and inline otherwise)
   --log-level=...           Disable logging (verbose | debug | info | warning |
diff --git a/internal/bundler/linker.go b/internal/bundler/linker.go
index e26e307b72a..77b312b5aac 100644
--- a/internal/bundler/linker.go
+++ b/internal/bundler/linker.go
@@ -5021,7 +5021,7 @@ func (c *linkerContext) generateChunkCSS(chunks []chunkInfo, chunkIndex int, chu
 	chunkWaitGroup.Done()
 }
 
-// Add all unique license comments to the end of the file. These are
+// Add all unique legal comments to the end of the file. These are
 // deduplicated because some projects have thousands of files with the same
 // comment. The comment must be preserved in the output for legal reasons but
 // at the same time we want to generate a small bundle when minifying.
diff --git a/internal/css_lexer/css_lexer.go b/internal/css_lexer/css_lexer.go
index 0fa72f76f9c..94e21aca021 100644
--- a/internal/css_lexer/css_lexer.go
+++ b/internal/css_lexer/css_lexer.go
@@ -158,7 +158,7 @@ type lexer struct {
 	current                 int
 	codePoint               rune
 	Token                   Token
-	licenseCommentsBefore   []Comment
+	legalCommentsBefore     []Comment
 	approximateNewlineCount int
 	sourceMappingURL        logger.Span
 }
@@ -171,7 +171,7 @@ type Comment struct {
 
 type TokenizeResult struct {
 	Tokens               []Token
-	LicenseComments      []Comment
+	LegalComments        []Comment
 	ApproximateLineCount int32
 	SourceMapComment     logger.Span
 }
@@ -198,26 +198,26 @@ func Tokenize(log logger.Log, source logger.Source) TokenizeResult {
 	var tokens []Token
 	var comments []Comment
 	for lexer.Token.Kind != TEndOfFile {
-		if lexer.licenseCommentsBefore != nil {
-			for _, comment := range lexer.licenseCommentsBefore {
+		if lexer.legalCommentsBefore != nil {
+			for _, comment := range lexer.legalCommentsBefore {
 				comment.TokenIndexAfter = uint32(len(tokens))
 				comments = append(comments, comment)
 			}
-			lexer.licenseCommentsBefore = nil
+			lexer.legalCommentsBefore = nil
 		}
 		tokens = append(tokens, lexer.Token)
 		lexer.next()
 	}
-	if lexer.licenseCommentsBefore != nil {
-		for _, comment := range lexer.licenseCommentsBefore {
+	if lexer.legalCommentsBefore != nil {
+		for _, comment := range lexer.legalCommentsBefore {
 			comment.TokenIndexAfter = uint32(len(tokens))
 			comments = append(comments, comment)
 		}
-		lexer.licenseCommentsBefore = nil
+		lexer.legalCommentsBefore = nil
 	}
 	return TokenizeResult{
 		Tokens:               tokens,
-		LicenseComments:      comments,
+		LegalComments:        comments,
 		ApproximateLineCount: int32(lexer.approximateNewlineCount) + 1,
 		SourceMapComment:     lexer.sourceMappingURL,
 	}
@@ -452,7 +452,7 @@ func (lexer *lexer) next() {
 
 func (lexer *lexer) consumeToEndOfMultiLineComment(startRange logger.Range) {
 	startOfSourceMappingURL := 0
-	isLicenseComment := false
+	isLegalComment := false
 
 	switch lexer.codePoint {
 	case '#', '@':
@@ -462,8 +462,8 @@ func (lexer *lexer) consumeToEndOfMultiLineComment(startRange logger.Range) {
 		}
 
 	case '!':
-		// Remember if this is a license comment
-		isLicenseComment = true
+		// Remember if this is a legal comment
+		isLegalComment = true
 	}
 
 	for {
@@ -485,10 +485,10 @@ func (lexer *lexer) consumeToEndOfMultiLineComment(startRange logger.Range) {
 					lexer.sourceMappingURL = logger.Span{Text: text[:r.Len], Range: r}
 				}
 
-				// Record license comments
-				if text := lexer.source.Contents[startRange.Loc.Start:commentEnd]; isLicenseComment || containsAtPreserveOrAtLicense(text) {
+				// Record legal comments
+				if text := lexer.source.Contents[startRange.Loc.Start:commentEnd]; isLegalComment || containsAtPreserveOrAtLicense(text) {
 					text = helpers.RemoveMultiLineCommentIndent(lexer.source.Contents[:startRange.Loc.Start], text)
-					lexer.licenseCommentsBefore = append(lexer.licenseCommentsBefore, Comment{Loc: startRange.Loc, Text: text})
+					lexer.legalCommentsBefore = append(lexer.legalCommentsBefore, Comment{Loc: startRange.Loc, Text: text})
 				}
 				return
 			}
diff --git a/internal/css_parser/css_parser.go b/internal/css_parser/css_parser.go
index 53043be3293..7911ada83c3 100644
--- a/internal/css_parser/css_parser.go
+++ b/internal/css_parser/css_parser.go
@@ -15,18 +15,18 @@ import (
 
 // support for parsing https://drafts.csswg.org/css-nesting-1/.
 type parser struct {
-	log                 logger.Log
-	source              logger.Source
-	tracker             logger.LineColumnTracker
-	options             Options
-	tokens              []css_lexer.Token
-	licenseComments     []css_lexer.Comment
-	stack               []css_lexer.T
-	index               int
-	end                 int
-	licenseCommentIndex int
-	prevError           logger.Loc
-	importRecords       []ast.ImportRecord
+	log               logger.Log
+	source            logger.Source
+	tracker           logger.LineColumnTracker
+	options           Options
+	tokens            []css_lexer.Token
+	legalComments     []css_lexer.Comment
+	stack             []css_lexer.T
+	index             int
+	end               int
+	legalCommentIndex int
+	prevError         logger.Loc
+	importRecords     []ast.ImportRecord
 }
 
 type Options struct {
@@ -38,13 +38,13 @@ type Options struct {
 func Parse(log logger.Log, source logger.Source, options Options) css_ast.AST {
 	result := css_lexer.Tokenize(log, source)
 	p := parser{
-		log:             log,
-		source:          source,
-		tracker:         logger.MakeLineColumnTracker(&source),
-		options:         options,
-		tokens:          result.Tokens,
-		licenseComments: result.LicenseComments,
-		prevError:       logger.Loc{Start: -1},
+		log:           log,
+		source:        source,
+		tracker:       logger.MakeLineColumnTracker(&source),
+		options:       options,
+		tokens:        result.Tokens,
+		legalComments: result.LegalComments,
+		prevError:     logger.Loc{Start: -1},
 	}
 	p.end = len(p.tokens)
 	rules := p.parseListOfRules(ruleContext{
@@ -169,10 +169,10 @@ func (p *parser) parseListOfRules(context ruleContext) []css_ast.Rule {
 
 loop:
 	for {
-		// If there are any license comments immediately before the current token,
+		// If there are any legal comments immediately before the current token,
 		// turn them all into comment rules and append them to the current rule list
-		for p.licenseCommentIndex < len(p.licenseComments) {
-			comment := p.licenseComments[p.licenseCommentIndex]
+		for p.legalCommentIndex < len(p.legalComments) {
+			comment := p.legalComments[p.legalCommentIndex]
 			if comment.TokenIndexAfter != uint32(p.index) {
 				break
 			}
@@ -180,7 +180,7 @@ loop:
 			if context.isTopLevel {
 				locs = append(locs, comment.Loc)
 			}
-			p.licenseCommentIndex++
+			p.legalCommentIndex++
 		}
 
 		switch p.current().Kind {
diff --git a/internal/css_parser/css_parser_test.go b/internal/css_parser/css_parser_test.go
index f5a19c989d2..d0f8dafec70 100644
--- a/internal/css_parser/css_parser_test.go
+++ b/internal/css_parser/css_parser_test.go
@@ -839,7 +839,7 @@ func TestAtImport(t *testing.T) {
 	expectParseError(t, "@import \"foo.css\" {}", "<stdin>: warning: Expected \";\" but found end of file\n")
 }
 
-func TestLicenseComment(t *testing.T) {
+func TestLegalComment(t *testing.T) {
 	expectPrinted(t, "/*!*/@import \"x\";", "/*!*/\n@import \"x\";\n")
 	expectPrinted(t, "/*!*/@charset \"UTF-8\";", "/*!*/\n@charset \"UTF-8\";\n")
 	expectPrinted(t, "/*!*/ @import \"x\";", "/*!*/\n@import \"x\";\n")
diff --git a/internal/js_parser/js_parser_test.go b/internal/js_parser/js_parser_test.go
index c45de1a8689..a120c239c85 100644
--- a/internal/js_parser/js_parser_test.go
+++ b/internal/js_parser/js_parser_test.go
@@ -234,14 +234,14 @@ func TestStrictMode(t *testing.T) {
 	expectPrinted(t, "'use strict'", "\"use strict\";\n")
 	expectPrinted(t, "`use strict`", "`use strict`;\n")
 
-	expectPrinted(t, "//! @license comment\n 'use strict'", "\"use strict\";\n//! @license comment\n")
-	expectPrinted(t, "/*! @license comment */ 'use strict'", "\"use strict\";\n/*! @license comment */\n")
-	expectPrinted(t, "function f() { //! @license comment\n 'use strict' }", "function f() {\n  //! @license comment\n  \"use strict\";\n}\n")
-	expectPrinted(t, "function f() { /*! @license comment */ 'use strict' }", "function f() {\n  /*! @license comment */\n  \"use strict\";\n}\n")
-	expectParseError(t, "//! @license comment\n 'use strict'", "")
-	expectParseError(t, "/*! @license comment */ 'use strict'", "")
-	expectParseError(t, "function f() { //! @license comment\n 'use strict' }", "")
-	expectParseError(t, "function f() { /*! @license comment */ 'use strict' }", "")
+	expectPrinted(t, "//! @legal comment\n 'use strict'", "\"use strict\";\n//! @legal comment\n")
+	expectPrinted(t, "/*! @legal comment */ 'use strict'", "\"use strict\";\n/*! @legal comment */\n")
+	expectPrinted(t, "function f() { //! @legal comment\n 'use strict' }", "function f() {\n  //! @legal comment\n  \"use strict\";\n}\n")
+	expectPrinted(t, "function f() { /*! @legal comment */ 'use strict' }", "function f() {\n  /*! @legal comment */\n  \"use strict\";\n}\n")
+	expectParseError(t, "//! @legal comment\n 'use strict'", "")
+	expectParseError(t, "/*! @legal comment */ 'use strict'", "")
+	expectParseError(t, "function f() { //! @legal comment\n 'use strict' }", "")
+	expectParseError(t, "function f() { /*! @legal comment */ 'use strict' }", "")
 
 	nonSimple := "<stdin>: error: Cannot use a \"use strict\" directive in a function with a non-simple parameter list\n"
 	expectParseError(t, "function f() { 'use strict' }", "")
diff --git a/scripts/end-to-end-tests.js b/scripts/end-to-end-tests.js
index abfcefe402e..f08be85207e 100644
--- a/scripts/end-to-end-tests.js
+++ b/scripts/end-to-end-tests.js
@@ -2138,7 +2138,7 @@
     }),
     test(['entry.js', '--outfile=node.js', '--target=es6'], {
       'entry.js': `
-        //! @license comment
+        //! @legal comment
         'use strict'
         function f(a) {
           a **= 2