Commit 11c36a3

ammaraskar authored and taleinat committed
[3.6] bpo-33899: Make tokenize module mirror end-of-file is end-of-line behavior (GH-7891) (GH-8134)
Most of the change involves fixing up the test suite, which previously made the assumption that there wouldn't be a new line if the input didn't end in one. Contributed by Ammar Askar. (cherry picked from commit c4ef489)
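
To see the resulting behavior, here is a minimal sketch (not part of the patch) that mirrors the new test_implicit_newline test; it assumes a Python build that includes this fix:

    from io import BytesIO
    from tokenize import tokenize, NEWLINE, ENDMARKER

    # "x" has no trailing newline; the tokenizer now synthesizes a NEWLINE
    # token just before the ENDMARKER instead of omitting it.
    tokens = list(tokenize(BytesIO(b"x").readline))
    assert tokens[-2].type == NEWLINE
    assert tokens[-1].type == ENDMARKER
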
1 parent c6671ae commit 11c36a3

File tree

3 files changed: +59 −18 lines changed

Lib/test/test_tokenize.py

Lines changed: 45 additions & 17 deletions
@@ -1,7 +1,8 @@
 from test import support
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
-                     open as tokenize_open, Untokenizer)
+                     open as tokenize_open, Untokenizer, generate_tokens,
+                     NEWLINE)
 from io import BytesIO
 import unittest
 from unittest import TestCase, mock
@@ -11,27 +12,51 @@
 import token
 
 
+# Converts a source string into a list of textual representation
+# of the tokens such as:
+# `    NAME       'if'          (1, 0) (1, 2)`
+# to make writing tests easier.
+def stringify_tokens_from_source(token_generator, source_string):
+    result = []
+    num_lines = len(source_string.splitlines())
+    missing_trailing_nl = source_string[-1] not in '\r\n'
+
+    for type, token, start, end, line in token_generator:
+        if type == ENDMARKER:
+            break
+        # Ignore the new line on the last line if the input lacks one
+        if missing_trailing_nl and type == NEWLINE and end[0] == num_lines:
+            continue
+        type = tok_name[type]
+        result.append(f"    {type:10} {token!r:13} {start} {end}")
+
+    return result
+
 class TokenizeTest(TestCase):
     # Tests for the tokenize module.
 
     # The tests can be really simple. Given a small fragment of source
-    # code, print out a table with tokens. The ENDMARKER is omitted for
-    # brevity.
+    # code, print out a table with tokens. The ENDMARKER, ENCODING and
+    # final NEWLINE are omitted for brevity.
 
     def check_tokenize(self, s, expected):
         # Format the tokens in s in a table format.
-        # The ENDMARKER is omitted.
-        result = []
+        # The ENDMARKER and final NEWLINE are omitted.
         f = BytesIO(s.encode('utf-8'))
-        for type, token, start, end, line in tokenize(f.readline):
-            if type == ENDMARKER:
-                break
-            type = tok_name[type]
-            result.append(f"    {type:10} {token!r:13} {start} {end}")
+        result = stringify_tokens_from_source(tokenize(f.readline), s)
+
         self.assertEqual(result,
                          ["    ENCODING   'utf-8'       (0, 0) (0, 0)"] +
                          expected.rstrip().splitlines())
 
+    def test_implicit_newline(self):
+        # Make sure that the tokenizer puts in an implicit NEWLINE
+        # when the input lacks a trailing new line.
+        f = BytesIO("x".encode('utf-8'))
+        tokens = list(tokenize(f.readline))
+        self.assertEqual(tokens[-2].type, NEWLINE)
+        self.assertEqual(tokens[-1].type, ENDMARKER)
+
     def test_basic(self):
         self.check_tokenize("1 + 1", """\
     NUMBER     '1'           (1, 0) (1, 1)
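
As an aside, a hypothetical interactive use of the new stringify_tokens_from_source helper (a sketch, assuming the patched test module is importable as test.test_tokenize); each row follows the helper's f-string layout:

    from io import BytesIO
    from tokenize import tokenize
    from test.test_tokenize import stringify_tokens_from_source

    source = "1 + 1"
    rows = stringify_tokens_from_source(
        tokenize(BytesIO(source.encode('utf-8')).readline), source)
    # rows[0] is the ENCODING entry, followed by the same table test_basic expects;
    # the implicit NEWLINE on the last line is filtered out because the source
    # lacks a trailing newline:
    #     ENCODING   'utf-8'       (0, 0) (0, 0)
    #     NUMBER     '1'           (1, 0) (1, 1)
    #     OP         '+'           (1, 2) (1, 3)
    #     NUMBER     '1'           (1, 4) (1, 5)
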
@@ -993,8 +1018,8 @@ def readline():
             else:
                 return b''
 
-        # skip the initial encoding token and the end token
-        tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1]
+        # skip the initial encoding token and the end tokens
+        tokens = list(_tokenize(readline, encoding='utf-8'))[1:-2]
         expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
         self.assertEqual(tokens, expected_tokens,
                          "bytes not decoded with encoding")
@@ -1010,8 +1035,8 @@ def readline():
             else:
                 return b''
 
-        # skip the end token
-        tokens = list(_tokenize(readline, encoding=None))[:-1]
+        # skip the end tokens
+        tokens = list(_tokenize(readline, encoding=None))[:-2]
         expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
         self.assertEqual(tokens, expected_tokens,
                          "string not tokenized when encoding is None")
@@ -1322,18 +1347,21 @@ def test_oneline_defs(self):
 
         # Test that 500 consequent, one-line defs is OK
         toks = list(tokenize(BytesIO(buf.encode('utf-8')).readline))
-        self.assertEqual(toks[-2].string, 'OK') # [-1] is always ENDMARKER
+        self.assertEqual(toks[-3].string, 'OK') # [-1] is always ENDMARKER
+                                                # [-2] is always NEWLINE
 
     def assertExactTypeEqual(self, opstr, *optypes):
         tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
         num_optypes = len(optypes)
-        self.assertEqual(len(tokens), 2 + num_optypes)
+        self.assertEqual(len(tokens), 3 + num_optypes)
         self.assertEqual(token.tok_name[tokens[0].exact_type],
                          token.tok_name[ENCODING])
         for i in range(num_optypes):
             self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                              token.tok_name[optypes[i]])
         self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
+                         token.tok_name[token.NEWLINE])
+        self.assertEqual(token.tok_name[tokens[2 + num_optypes].exact_type],
                          token.tok_name[token.ENDMARKER])
 
     def test_exact_type(self):
@@ -1484,7 +1512,7 @@ def test_roundtrip(self):
         self.check_roundtrip("if x == 1:\n"
                              "    print(x)\n")
         self.check_roundtrip("# This is a comment\n"
-                             "# This also")
+                             "# This also\n")
 
         # Some people use different formatting conventions, which makes
         # untokenize a little trickier. Note that this test involves trailing

Lib/tokenize.py

Lines changed: 11 additions & 1 deletion
@@ -507,8 +507,15 @@ def _tokenize(readline, encoding):
             # BOM will already have been stripped.
             encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
-    while True:                                # loop over lines in stream
+    last_line = b''
+    line = b''
+    while True:                                # loop over lines in stream
         try:
+            # We capture the value of the line variable here because
+            # readline uses the empty string '' to signal end of input,
+            # hence `line` itself will always be overwritten at the end
+            # of this loop.
+            last_line = line
             line = readline()
         except StopIteration:
             line = b''
@@ -719,6 +726,9 @@ def _tokenize(readline, encoding):
             yield stashed
             stashed = None
 
+    # Add an implicit NEWLINE if the input doesn't end in one
+    if last_line and last_line[-1] not in '\r\n':
+        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
     for indent in indents[1:]:                 # pop remaining indent levels
         yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
     yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
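
For reference, a worked example (not part of the patch) of where the synthesized NEWLINE lands: for the one-line source "x" the loop finishes with lnum == 2 and last_line == 'x', so the token spans (lnum - 1, len(last_line)) to (lnum - 1, len(last_line) + 1), i.e. just past the last character of line 1:

    from io import BytesIO
    from tokenize import tokenize, tok_name

    # Print every token with its type name, string and positions.
    for tok in tokenize(BytesIO(b"x").readline):
        print(f"{tok_name[tok.exact_type]:10} {tok.string!r:10} {tok.start} {tok.end}")
    # ENCODING   'utf-8'    (0, 0) (0, 0)
    # NAME       'x'        (1, 0) (1, 1)
    # NEWLINE    ''         (1, 1) (1, 2)
    # ENDMARKER  ''         (2, 0) (2, 0)
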
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+Tokenize module now implicitly emits a NEWLINE when provided with input that
+does not have a trailing new line. This behavior now matches what the C
+tokenizer does internally. Contributed by Ammar Askar.
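
One way to check the "matches the C tokenizer" claim at the Python level is to compare the token types produced with and without a trailing newline; with this change the two streams are identical. A sketch using a hypothetical token_types helper:

    from io import BytesIO
    from tokenize import tokenize

    def token_types(source):
        # Collect just the token types for the given source string.
        return [tok.type for tok in tokenize(BytesIO(source.encode('utf-8')).readline)]

    # Previously the right-hand list lacked the NEWLINE entry; now both match.
    assert token_types("pass\n") == token_types("pass")
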
