Merge pull request #2118 from antoniusanggito/cwthedev-Iss2062_records
Add support for new record types (cont.)
bitwiseman authored Nov 28, 2022
2 parents ca52d26 + 40020a4 commit 12bc378
Showing 3 changed files with 75 additions and 0 deletions.
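For context, issue #2062 concerns record literals written with the '#{ ... }' syntax. The change below adds a _read_pair step to the tokenizer so that '#{' is emitted as a single START_BLOCK token and record bodies get the same brace-style indentation as object literals. A rough usage sketch follows; this driver code is not part of the commit, it assumes the js-beautify Node API with this patch applied, and the expected output is copied from the new "Record data type" test case added below.

// Sketch only: hypothetical usage, assuming js-beautify with this patch applied.
var beautify = require('js-beautify').js;

var formatted = beautify('a = #{ b:"c", d:1, e:true };', { indent_size: 4 });
console.log(formatted);
// Expected (matches the new test case in test/data/javascript/tests.js):
// a = #{
//     b: "c",
//     d: 1,
//     e: true
// };
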
14 changes: 14 additions & 0 deletions js/src/javascript/tokenizer.js
@@ -167,6 +167,7 @@ Tokenizer.prototype._get_next_token = function(previous_token, open_token) { //

token = token || this._read_non_javascript(c);
token = token || this._read_string(c);
token = token || this._read_pair(c, this._input.peek(1)); // Issue #2062 hack for record type '#{'
token = token || this._read_word(previous_token);
token = token || this._read_singles(c);
token = token || this._read_comment(c);
@@ -225,6 +226,19 @@ Tokenizer.prototype._read_singles = function(c) {
return token;
};

Tokenizer.prototype._read_pair = function(c, d) {
var token = null;
if (c === '#' && d === '{') {
token = this._create_token(TOKEN.START_BLOCK, c + d);
}

if (token) {
this._input.next();
this._input.next();
}
return token;
};

Tokenizer.prototype._read_punctuation = function() {
var resulting_string = this.__patterns.punct.read();

15 changes: 15 additions & 0 deletions python/jsbeautifier/javascript/tokenizer.py
@@ -220,6 +220,9 @@ def _get_next_token(self, previous_token, open_token):

token = token or self._read_non_javascript(c)
token = token or self._read_string(c)
token = token or self._read_pair(
c, self._input.peek(1)
) # Issue #2062 hack for record type '#{'
token = token or self._read_word(previous_token)
token = token or self._read_singles(c)
token = token or self._read_comment(c)
@@ -257,6 +260,18 @@ def _read_singles(self, c):

return token

def _read_pair(self, c, d):
token = None

if c == "#" and d == "{":
token = self._create_token(TOKEN.START_BLOCK, c + d)

if token is not None:
self._input.next()
self._input.next()

return token

def _read_word(self, previous_token):
resulting_string = self._patterns.identifier.read()

46 changes: 46 additions & 0 deletions test/data/javascript/tests.js
@@ -5377,6 +5377,52 @@ exports.test_data = {
]
}
]
}, {
name: "Record data type",
description: "",
tests: [{
comment: 'regular record with primitive',
input: 'a = #{ b:"c", d:1, e:true };',
output: [
'a = #{',
' b: "c",',
' d: 1,',
' e: true',
'};'
]
},
{
comment: 'nested record',
input: 'a = #{b:#{ c:1,d:2,}, e:"f"};',
output: [
'a = #{',
' b: #{',
' c: 1,',
' d: 2,',
' },',
' e: "f"',
'};'
]
},
{
comment: '# not directly followed by { is not handled as record',
unchanged: [
'a = # {',
' b: 1,',
' d: true',
'};'
]
},
{
comment: 'example of already valid and beautified record',
unchanged: [
'a = #{',
' b: 1,',
' d: true',
'};'
]
}
]
}, {
// =======================================================
// New tests groups should be added above this line.
