parent 6780f9f22a
commit bfbdf435f1

534 lex2.py
@@ -1,12 +1,15 @@
 import re
 
-class Token:
-    def __init__(self, rule, y, x, s, role='single'):
-        self.rule = rule
+valid_name_re = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
+reserved_names = ['start', 'null', 'end']
+
+class Token(object):
+    def __init__(self, name, y, x, s, **vargs):
+        self.name = name
         self.y = y
         self.x = x
         self.string = s
-        self.role = role
+        self.vargs = vargs
     def add_to_string(self, s):
         self.string += s
     def __repr__(self):
@@ -14,203 +17,394 @@ class Token:
             s = self.string
         else:
             s = self.string[:10] + '...'
-        return "<Token(%r, %d, %d, %r)>" % (self.rule, self.y, self.x, s)
+        return "<Token(%r, %d, %d, %r)>" % (self.name, self.y, self.x, s)
     def render(self):
         return (self,)
 
 class Rule:
-    def __init__(self):
-        self.name = 'null'
-    def add_token(self, lexer, s, role='single'):
-        t = Token(self, lexer.y, lexer.x, s, role)
-        lexer.curr_tokens.append(t)
-        lexer.x += len(s)
-    def add_to_last_token(self, lexer, s):
-        assert lexer.curr_tokens
-        lexer.curr_tokens[-1].add_to_string(s)
-        lexer.x += len(s)
-    def match(self):
-        raise Exception, "not implemented"
-
-class NullRule(Rule):
-    def __init__(self):
-        self.name = 'null'
-    def match(self):
-        raise Exception, "null rule does not match!"
-
-class NewlineRule(Rule):
-    def __init__(self):
-        self.name = 'newline'
-    def match(self):
-        raise Exception, "newline rule does not match!"
+    name = 'abstract'
+    def match(self, lexer, context=[]):
+        raise Exception, "%s rule cannot match!" % self.name
+    def make_token(self, lexer, s, name, **vargs):
+        return Token(name, lexer.y, lexer.x, s, **vargs)
 
 class ConstantRule(Rule):
-    def __init__(self, name="unnamed_constant", const="foo"):
-        self.name = name
-        self.const = const
-    def match(self, lexer):
-        if lexer.lines[lexer.y][lexer.x:].startswith(self.const):
-            self.add_token(lexer, self.const)
+    def __init__(self, name, constant):
+        assert valid_name_re.match(name), 'invalid name %r' % name
+        assert name not in reserved_names, "reserved rule name: %r" % name
+        self.name = name
+        self.constant = constant
+    def match(self, lexer, context=[]):
+        if lexer.lines[lexer.y][lexer.x:].startswith(self.constant):
+            lexer.add_token(self.make_token(lexer, self.constant, self.name))
+            lexer.x += len(self.constant)
+            return True
+        else:
+            return False
 
-class RegexRule(Rule):
-    def __init__(self, name="unnamed_regex", expr="[^ ]+"):
-        self.name = name
-        self.expr = expr
-        self.re = re.compile(expr)
-    def match(self, lexer):
+class PatternRule(Rule):
+    def __init__(self, name, pattern):
+        assert valid_name_re.match(name), 'invalid name %r' % name
+        assert name not in reserved_names, "reserved rule name: %r" % name
+        self.name = name
+        self.pattern = pattern
+        self.re = re.compile(pattern)
+    def match(self, lexer, context=[]):
         m = self.re.match(lexer.lines[lexer.y], lexer.x)
         if m:
-            self.add_token(lexer, m.group(0))
+            lexer.add_token(self.make_token(lexer, m.group(0), self.name))
+            lexer.x += len(m.group(0))
             return True
         else:
             return False
 
 class RegionRule(Rule):
-    def __init__(self, name, start, mid, end):
-        self.name = name
+    def __init__(self, name, start, grammar, end):
+        assert valid_name_re.match(name), 'invalid name %r' % name
+        assert name not in reserved_names, "reserved rule name: %r" % name
+        self.name = name
+        self.start = start
+        self.grammar = grammar
+        self.end = end
         self.start_re = re.compile(start)
-        self.mid_re = re.compile(mid)
-        self.end_re = re.compile(end)
-    def match(self, lexer):
-        lt = lexer.last_token
-        l = lexer.lines[lexer.y]
-        if lt is not None and lt.rule.name == self.name and lt.role != 'end':
-            saw_mid = False
-            while lexer.x < len(l):
-                m_end = self.end_re.match(l, lexer.x)
-                if m_end:
-                    self.add_token(lexer, m_end.group(0), 'end')
-                    return True
-                m_mid = self.mid_re.match(l, lexer.x)
-                if m_mid:
-                    s = m_mid.group(0)
-                else:
-                    s = l[lexer.x]
-                if saw_mid:
-                    self.add_to_last_token(lexer, s)
-                else:
-                    self.add_token(lexer, s, 'mid')
-                    saw_mid = True
-            return True
-        else:
-            m = self.start_re.match(l, lexer.x)
-            if m:
-                self.add_token(lexer, m.group(0), 'start')
-                return True
-            else:
-                return False
+    def _add_from_regex(self, context, name, lexer, m):
+        t_name = '.'.join(context + [self.name, name])
+        t = self.make_token(lexer, m.group(0), t_name)
+        lexer.add_token(t)
+        lexer.x += len(m.group(0))
+    def match(self, lexer, context=[]):
+        m = self.start_re.match(lexer.lines[lexer.y], lexer.x)
+        if m:
+            self._add_from_regex(context, 'start', lexer, m)
-
-class DynamicRegionRule(Rule):
-    def __init__(self, name, start, mid, end_fmt):
-        self.name = name
-        self.start_re = re.compile(start)
-        self.mid_re = re.compile(mid)
-        self.end_fmt = end_fmt
-    def add_token(self, lexer, s, role, end_re):
-        t = Token(self, lexer.y, lexer.x, s, role)
-        t.end_re = end_re
-        lexer.curr_tokens.append(t)
-        lexer.x += len(s)
-    def match(self, lexer):
-        lt = lexer.last_token
-        l = lexer.lines[lexer.y]
-        if lt is not None and lt.rule.name == self.name and lt.role != 'end':
-            saw_mid = False
-            while lexer.x < len(l):
-                m_end = self.end_re.match(l, lexer.x)
-                if m_end:
-                    self.add_token(lexer, m_end.group(0), 'end', None)
-                    return True
-                m_mid = self.mid_re.match(l, lexer.x)
-                if m_mid:
-                    s = m_mid.group(0)
-                else:
-                    s = l[lexer.x]
-                if saw_mid:
-                    self.add_to_last_token(lexer, s)
-                else:
-                    self.add_token(lexer, s, 'mid', lt.end_re)
-                    saw_mid = True
+            null_t_name = '.'.join(context + [self.name, 'null'])
+            null_t = None
+
+            if self.end:
+                end_re = re.compile(self.end % m.groupdict())
+
+            done = False
+            while not done and lexer.y < len(lexer.lines):
+                line = lexer.lines[lexer.y]
+                if len(line) == 0:
+                    null_t = Token(null_t_name, lexer.y, lexer.x, '')
+                    lexer.add_token(null_t)
+                while not done and lexer.x < len(line):
+                    if self.end:
+                        m = end_re.match(line, lexer.x)
+                        if m:
+                            self._add_from_regex(context, 'end', lexer, m)
+                            done = True
+                            continue
+
+                    found = False
+                    for rule in self.grammar.rules:
+                        if rule.match(lexer, context + [self.name]):
+                            found = True
+                            null_t = None
+                            break
+                    if not found:
+                        if null_t is None:
+                            null_t = Token(null_t_name, lexer.y, lexer.x, '')
+                            lexer.add_token(null_t)
+                        null_t.add_to_string(line[lexer.x])
+                        lexer.x += 1
+
+                null_t = None
+                lexer.y += 1
+                lexer.x = 0
+            return True
-            return True
-        else:
-            m = self.start_re.match(l, lexer.x)
-            if m:
-                end_re = re.compile(self.end_fmt % m.groups())
-                self.add_token(lexer, m.group(0), 'start', end_re)
-                return True
-            else:
-                return False
+        return False
 
+class Grammar:
+    rules = []
+
+class NullGrammar(Grammar):
+    pass
+
+class PodGrammar(Grammar):
+    pass
+
+class StringGrammar(Grammar):
+    rules = [
+        PatternRule('escaped', '\\.'),
+    ]
+
+class TestGrammar(Grammar):
+    rules = [
+        RegionRule(
+            name='heredoc',
+            start="<< *(?P<heredoc>[a-zA-Z0-9_]+) *;",
+            grammar=StringGrammar(),
+            end='^%(heredoc)s$',
+        ),
+        RegionRule(
+            name='string1',
+            start='"',
+            grammar=StringGrammar(),
+            end='"',
+        ),
+        RegionRule(
+            name='string2',
+            start="'",
+            grammar=StringGrammar(),
+            end="'",
+        ),
+        PatternRule(
+            name='word',
+            pattern='[^ \t\n]+',
+        ),
+    ]
+
+class PerlGrammar(Grammar):
+    rules = [
+        RegionRule(
+            name='heredoc',
+            start="<< *(?P<heredoc>[a-zA-Z0-9_]+) *;",
+            grammar=StringGrammar(),
+            end='^%(heredoc)s$',
+        ),
+        RegionRule(
+            name='endblock',
+            start="^__END__|__DATA__ *$",
+            grammar=NullGrammar(),
+            end='',
+        ),
+        RegionRule(
+            name='pod',
+            start='^=[a-zA-Z0-9_]+',
+            grammar=PodGrammar(),
+            end='^=cut',
+        ),
+        PatternRule(
+            name='comment',
+            pattern='#.*$',
+        ),
+        RegionRule(
+            name='string1',
+            start='"',
+            grammar=StringGrammar(),
+            end='"',
+        ),
+        RegionRule(
+            name='string2',
+            start="'",
+            grammar=StringGrammar(),
+            end="'",
+        ),
+        RegionRule(
+            name='evalstring',
+            start="`",
+            grammar=StringGrammar(),
+            end="`",
+        ),
+        PatternRule(
+            name='number',
+            pattern='0?\.[0-9]+|[0-9]+(?:\.[0-9]+)?',
+        ),
+        PatternRule(
+            name='keyword',
+            pattern="(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z0-9_])",
+        ),
+        PatternRule(
+            name='hash_bareword_index',
+            pattern='(?<={) *[A-Za-z0-9_]+(?=})',
+        ),
+        PatternRule(
+            name='literal_hash_bareword_index',
+            pattern='[A-Za-z0-9_]+(?= *=>)',
+        ),
+        PatternRule(
+            name='length_scalar',
+            pattern=r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*",
+        ),
+        PatternRule(
+            name='system_scalar',
+            pattern=r"\$[][><ab/'\"_@\?#\$!%^|&*()](?![A-Za-z0-9_])",
+        ),
+        PatternRule(
+            name='system_array',
+            pattern="@_",
+        ),
+        PatternRule(
+            name='scalar',
+            pattern="\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*",
+        ),
+        PatternRule(
+            name='array',
+            pattern="@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*",
+        ),
+        PatternRule(
+            name='hash',
+            pattern="%\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*",
+        ),
+        PatternRule(
+            name='dereference',
+            pattern="[@%\$&\*](?={)",
+        ),
+        RegionRule(
+            name='quoted_region1',
+            start='q[rqwx]? *(?P<delim>[^ #])',
+            grammar=Grammar(),
+            end='%(delim)s',
+        ),
+        RegionRule(
+            name='quoted_region2',
+            start='q[rqwx]?#',
+            grammar=Grammar(),
+            end='#',
+        ),
+        RegionRule(
+            name='bracket_quoted_region1',
+            start='q[rqwx]? *\(',
+            grammar=Grammar(),
+            end='\)',
+        ),
+        RegionRule(
+            name='bracket_quoted_region2',
+            start='q[rqwx]? *{',
+            grammar=Grammar(),
+            end='}',
+        ),
+        RegionRule(
+            name='bracket_quoted_region3',
+            start='q[rqwx]? *<',
+            grammar=Grammar(),
+            end='>',
+        ),
+        RegionRule(
+            name='bracket_quoted_region4',
+            start='q[rqwx]? *\[',
+            grammar=Grammar(),
+            end='\]',
+        ),
+        RegionRule(
+            name='implicit_match_regex',
+            start='(?:(?<==~)|(?<=!~)|(?<=\()) */',
+            grammar=StringGrammar(),
+            end='/',
+        ),
+        RegionRule(
+            name='explicit_match_regex1',
+            start='m *(?P<delim>[^ #])',
+            grammar=StringGrammar(),
+            end='%(delim)s',
+        ),
+        RegionRule(
+            name='explicit_match_regex1',
+            start='m#',
+            grammar=StringGrammar(),
+            end='#',
+        ),
+
+        # we officially don't support the bullshit s{a}{b} thing perl has going.
+        # those guys are on crack. we only support things like s#a#b# or s/a/b/.
+        # same comments as above apply
+        #{'name': 'replace regex',
+        # 'expr': r"""(?:y|tr|s)([^<[{(A-Za-z0-9 \t\n])(?:\\.|[^\\])*?\1(?:\\.|[^\\])*?\1[a-z]*""",
+        # 'action': lex.make_token},
+
+        PatternRule(
+            name='package',
+            pattern="(?<=package )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*",
+        ),
+        PatternRule(
+            name='sub',
+            pattern="(?<=sub )[a-zA-Z_][a-zA-Z_0-9]*(?=[ \n]*{)",
+        ),
+        PatternRule(
+            name='use',
+            pattern="(?<=use )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*",
+        ),
+        PatternRule(
+            name='label',
+            pattern='[a-zA-Z_][a-zA-Z0-9_]*:',
+        ),
+        PatternRule(
+            name='instance_method',
+            pattern="(?<=->)[a-zA-Z_][a-zA-Z_0-9]*",
+        ),
+        PatternRule(
+            name='static_method',
+            pattern="&?(?:[a-zA-Z_][a-zA-Z_0-9]*::)+[a-zA-Z_][a-zA-Z_0-9]*",
+        ),
+        PatternRule(
+            name='builtin_method',
+            pattern="(?<!->)&?(?:write|warn|wantarray|waitpid|wait|vec|values|utime|use|untie|unshift|unpack|unlink|undef|umask|ucfirst|uc|truncate|times|time|tied|tie|telldir|tell|syswrite|system|sysseek|sysread|sysopen|syscall|symlink|substr|sub|study|stat|srand|sqrt|sprintf|split|splice|sort|socketpair|socket|sleep|sin|shutdown|shmwrite|shmread|shmget|shmctl|shift|setsockopt|setservent|setpwent|setprotoent|setpriority|setpgrp|setnetent|sethostent|setgrent|send|semop|semget|semctl|select|seekdir|seek|scalar|rmdir|rindex|rewinddir|reverse|return|reset|require|rename|ref|redo|recv|readpipe|readlink|readline|readdir|read|rand|quotemeta|push|prototype|printf|print|pos|pop|pipe|package|pack|our|ord|opendir|open|oct|no|next|my|msgsnd|msgrcv|msgget|msgctl|mkdir|map|lstat|log|lock|localtime|local|listen|link|length|lcfirst|lc|last|kill|keys|join|ioctl|int|index|import|hex|grep|goto|gmtime|glob|getsockopt|getsockname|getservent|getservbyport|getservbyname|getpwuid|getpwnam|getpwent|getprotoent|getprotobynumber|getprotobyname|getpriority|getppid|getpgrp|getpeername|getnetent|getnetbyname|getnetbyaddr|getlogin|gethostent|gethostbyname|gethostbyaddr|getgrnam|getgrgid|getgrent|getc|formline|format|fork|flock|fileno|fcntl|exp|exit|exists|exec|eval|eof|endservent|endpwent|endprotoent|endnetent|endhostent|endgrent|each|dump|do|die|delete|defined|dbmopen|dbmclose|crypt|cos|continue|connect|closedir|close|chroot|chr|chown|chop|chomp|chmod|chdir|caller|bless|binmode|bind|atan2|alarm|accept|abs)(?![a-zA-Z0-9_])",
+        ),
+        PatternRule(
+            name='method',
+            pattern="&(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*",
+        ),
+        PatternRule(
+            name='ref_method',
+            pattern="&\$(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*",
+        ),
+        PatternRule(
+            name='bareword_method',
+            pattern="(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]* *\(",
+        ),
+        PatternRule(
+            name='delimiter',
+            pattern="\(|\)|\[|\]|{|}|,|;|->|=>|=|\?|(?<!:):(?!=:)",
+        ),
+        PatternRule(
+            name='unary_operator',
+            pattern="\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*=",
+        ),
+        PatternRule(
+            name='operator',
+            pattern="\+|<=>|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|=~|!~|!=|%|!|\.",
+        ),
+        PatternRule(
+            name='bareword',
+            pattern='(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*',
+        ),
+    ]
+
 class Lexer:
-    rules = [
-        RegionRule('heredoc', "<< *([a-zA-Z0-9_]+) *;", '.', '^%s$'),
-        RegionRule('string1', '"', '\\.|.', '"'),
-        RegexRule('word'),
-    ]
-    null = NullRule()
-    newline = NewlineRule()
-    def __init__(self):
-        self.lines = None
-        self.y = 0
-        self.x = 0
-        self.last_token = None
-        self.curr_tokens = []
+    def __init__(self, name, grammar):
+        self.name = name
+        self.grammar = grammar
+        self.y = 0
+        self.x = 0
+        self.lines = None
+        self.tokens = []
 
-    def lex(self, lines, y=0, x=0, last_token=None, next_token=None):
-        self.lines = lines
-        self.y = y
-        self.x = x
-        self.last_token = None
-        self.curr_tokens = []
+    def add_token(self, t):
+        self.tokens.append(t)
+
+    def lex(self, lines, y=0, x=0):
+        self.y = y
+        self.x = x
+        self.lines = lines
+        self.tokens = []
 
     def __iter__(self):
         if self.lines is None:
             raise Exception, "no lines to lex"
         return self
 
-    def match(self):
-        for rule in self.rules:
-            match = rule.match(self)
-            if match:
-                assert self.curr_tokens
-                return True
-        return False
-
-    def add_to_null_token(self):
-        c = self.lines[self.y][self.x]
-        if self.curr_tokens:
-            assert self.curr_tokens[0].rule.name == 'null', self.curr_tokens[0].rule.name
-            self.curr_tokens[0].add_to_string(c)
-        else:
-            self.curr_tokens.append(self.make_null_token(c))
-        self.x += 1
-
-    def make_null_token(self, c):
-        return Token(self.null, self.y, self.x, c)
-    def make_newline_token(self):
-        return Token(self.newline, self.y, self.x, '\n')
-
-    def pop_curr_token(self):
-        t = self.curr_tokens.pop(0)
-        self.last_token = t
-        return t
-
     def next(self):
-        if self.curr_tokens:
-            return self.pop_curr_token()
+        null_t_name = 'null'
+        null_t = None
+
         while self.y < len(self.lines):
-            while self.x < len(self.lines[self.y]):
-                t = self.match()
-                if t:
-                    return self.pop_curr_token()
-                else:
-                    self.add_to_null_token()
+            line = self.lines[self.y]
+            while self.x < len(line):
+                for rule in self.grammar.rules:
+                    if rule.match(self):
+                        assert self.tokens, "AAAAA %s" % repr(self.tokens)
+                        return self.tokens.pop(0)
+                if null_t is None:
+                    null_t = Token(null_t_name, self.y, self.x, '')
+                    self.add_token(null_t)
+                null_t.add_to_string(line[self.x])
+                self.x += 1
             self.y += 1
             self.x = 0
-            #self.curr_tokens.append(self.make_newline_token())
-            if self.curr_tokens:
-                return self.pop_curr_token()
-
-        raise StopIteration
+        if self.tokens:
+            return self.tokens.pop(0)
+        else:
+            raise StopIteration
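
Note on the mechanism above (a minimal standalone sketch, not part of the
commit): RegionRule.match compiles its end pattern only after the start
regex has matched, interpolating the start match's named groups into the
end format string. That is what lets the heredoc rule close on the exact
terminator it captured:

    import re

    start_re = re.compile('<< *(?P<heredoc>[a-zA-Z0-9_]+) *;')
    m = start_re.match('<< EOF;')
    # end='^%(heredoc)s$' becomes '^EOF$' once the captured name is filled in
    end_re = re.compile('^%(heredoc)s$' % m.groupdict())
    assert end_re.match('EOF')
    assert not end_re.match('NOT_EOF')
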
7 test.py

@@ -1,7 +1,7 @@
 import lex2
 
 lines = [
-    'this is the first line',
+    'this is the first line',
     '<< EOF;'
     ' abracadra hocus pocus',
     ' EOF',
@@ -9,7 +9,8 @@ lines = [
     '"this is a double-quoted string" and this is not...',
     "we're done",
 ]
-l = lex2.Lexer()
+g = lex2.TestGrammar()
+l = lex2.Lexer('lexer', g)
 l.lex(lines)
 for t in l:
-    print '%-20r %r' % (t.rule.name, t.string)
+    print '%-20r %r' % (t.name, t.string)
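
Tokens produced by RegionRule._add_from_regex carry dotted names built by
joining the grammar nesting context with the rule name and the part name.
A small sketch of the naming scheme (not part of the commit; the 'outer'
nesting level is hypothetical, since this commit's grammars only nest one
level deep):

    context = []                                    # top-level match
    print '.'.join(context + ['heredoc', 'start'])  # -> heredoc.start
    print '.'.join(context + ['heredoc', 'null'])   # -> heredoc.null
    print '.'.join(['outer'] + ['heredoc', 'end'])  # -> outer.heredoc.end
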
(new file)

@@ -0,0 +1,18 @@
+import sys
+import lex2
+
+paths = sys.argv[1:]
+for path in paths:
+    f = open(path, 'r')
+    data = f.read()
+    f.close()
+
+    lines = data.split('\n')
+
+    grammar = lex2.PerlGrammar()
+    lexer = lex2.Lexer('lexer', grammar)
+
+    lexer.lex(lines)
+    print path
+    for token in lexer:
+        print '%-20s| %s' % (token.name, token.string)
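
This driver leans on the Python 2 iterator protocol that Lexer implements
directly: lex() primes the lexer's state, __iter__ returns the lexer
itself, and next() pops one token at a time until it raises StopIteration.
A minimal sketch, assuming the lex2 module from this commit is importable:

    import lex2

    lexer = lex2.Lexer('lexer', lex2.PerlGrammar())
    lexer.lex(['my $foo = 1;'])
    it = iter(lexer)    # __iter__ returns the lexer itself
    t = it.next()       # the same call a for-loop makes on each pass
    print '%-20s| %s' % (t.name, t.string)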