pmacs3/lex2.py

import re

valid_name_re = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
reserved_names = ['start', 'middle', 'end', 'null']

class RuleContext:
    # to be clear:
    #   x, y:    where the rule processing began
    #   rule:    the rule which began
    #   flag:    a signal to be used to resume the rule correctly
    #   context: the previous rule namespace(s)
    #   matchd:  the dictionary returned by the rule's matching
    def __init__(self, y, x, rule, flag, context, matchd):
        self.y = y
        self.x = x
        self.rule = rule
        self.flag = flag
        self.context = context
        self.matchd = matchd

class Token(object):
    def __init__(self, name, rule, y, x, s, **vargs):
        self.name = name
        self.rule = rule
        self.y = y
        self.x = x
        self.string = s
        self.vargs = vargs
    def copy(self):
        return Token(self.name, None, self.y, self.x, self.string, **self.vargs)
    def add_to_string(self, s):
        self.string += s
    def end_x(self):
        return self.x + len(self.string)
    def __eq__(self, other):
        return (self.y == other.y and
                self.x == other.x and
                self.string == other.string and
                self.name == other.name and
                self.vargs == other.vargs)
    def __repr__(self):
        if len(self.string) < 10:
            s = self.string
        else:
            s = self.string[:10] + '...'
        return "<Token(%r, %r, %d, %d, %r)>" % (self.name, self.rule, self.y, self.x, s)
    def render(self):
        return (self,)

class Rule:
    name = 'abstract'
    def match(self, lexer, context=[], d={}):
        raise Exception, "%s rule cannot match!" % self.name
    def make_token(self, lexer, s, name, **vargs):
        return Token(name, self, lexer.y, lexer.x, s, **vargs)

class ConstantRule(Rule):
    def __init__(self, name, constant):
        assert valid_name_re.match(name), 'invalid name %r' % name
        assert name not in reserved_names, "reserved rule name: %r" % name
        self.name = name
        self.constant = constant
    def match(self, lexer, context=[], d={}):
        if lexer.lines[lexer.y][lexer.x:].startswith(self.constant):
            name = '.'.join(context + [self.name])
            lexer.add_token(self.make_token(lexer, self.constant, name, grammar=lexer.grammar))
            lexer.x += len(self.constant)
            return True
        else:
            return False

class PatternRule(Rule):
    def __init__(self, name, pattern):
        assert valid_name_re.match(name), 'invalid name %r' % name
        assert name not in reserved_names, "reserved rule name: %r" % name
        self.name = name
        self.pattern = pattern
        self.re = re.compile(pattern)
    def match(self, lexer, context=[], d={}):
        m = self.re.match(lexer.lines[lexer.y], lexer.x)
        if m:
            name = '.'.join(context + [self.name])
            lexer.add_token(self.make_token(lexer, m.group(0), name, grammar=lexer.grammar))
            lexer.x += len(m.group(0))
            return True
        else:
            return False

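# For instance (illustrative only, not from the original file), a rule like
#     PatternRule('number', r'[0-9]+')
# emits tokens named 'number' at the top level, or 'string.number' when it
# matches inside a region named 'string', since match() joins the enclosing
# context onto the rule's own name.
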
class ContextPatternRule(Rule):
    def __init__(self, name, pattern, fallback):
        assert valid_name_re.match(name), 'invalid name %r' % name
        assert name not in reserved_names, "reserved rule name: %r" % name
        self.name = name
        self.pattern = pattern
        self.fallback = fallback
        self.fallback_re = re.compile(fallback)
    def match(self, lexer, context=[], d={}):
        # interpolate the context dictionary into the pattern; if it refers
        # to a key we don't have, fall back to the static pattern
        try:
            r = re.compile(self.pattern % d)
        except KeyError:
            r = self.fallback_re
        m = r.match(lexer.lines[lexer.y], lexer.x)
        if m:
            name = '.'.join(context + [self.name])
            lexer.add_token(self.make_token(lexer, m.group(0), name, grammar=lexer.grammar))
            lexer.x += len(m.group(0))
            return True
        else:
            return False

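# Illustrative sketch (hypothetical rule, not from the original file): inside
# a region whose start token captured (?P<tag>[a-z]+), a rule like
#     ContextPatternRule('close', r'</%(tag)s>', r'</[a-z]+>')
# builds its pattern from the region's match dictionary, and falls back to
# the static pattern when 'tag' is absent from that dictionary.
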
class RegionRule(Rule):
    def __init__(self, name, start, grammar, end):
        assert valid_name_re.match(name), 'invalid name %r' % name
        assert name not in reserved_names, "reserved rule name: %r" % name
        self.name = name
        self.start = start
        self.grammar = grammar
        self.end = end
        self.start_re = re.compile(start)
    def _add_from_regex(self, context, name, lexer, m, grammar):
        t_name = '.'.join(context + [self.name, name])
        t = self.make_token(lexer, m.group(0), t_name, grammar=grammar)
        lexer.add_token(t)
        lexer.x += len(m.group(0))
    def restart(self, lexer, rulecontext):
        pass
    def match(self, lexer, context=[], d={}):
        # see if we can match our start token
        m = self.start_re.match(lexer.lines[lexer.y], lexer.x)
        if m:
            # ok, so create our start token, and get ready to start reading data
            d = m.groupdict()
            lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'start',
                                             list(context), dict(d)))
            self._add_from_regex(context, 'start', lexer, m, lexer.grammar)
            null_t_name = '.'.join(context + [self.name, 'null'])
            null_t = None

            # if we have an end token, then build it here. notice that it can
            # reference named groups from the start token. if we have no end,
            # well, then, we're never getting out of here alive!
            if self.end:
                end_re = re.compile(self.end % d)

            # ok, so as long as we aren't done (we haven't found an end token),
            # keep reading input
            done = False
            while not done and lexer.y < len(lexer.lines):
                old_y = lexer.y
                # if this line is empty, then we skip it, but here we insert
                # an empty null token just so we have something
                if len(lexer.lines[lexer.y]) == 0:
                    null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
                    lexer.add_token(null_t)
                    null_t = None

                # ok, as long as we haven't found the end token, and have more
                # data on the current line to read, we will process tokens
                while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
                    # if we are looking for an end token, then see if we've
                    # found it. if so, then we are done!
                    if self.end:
                        m = end_re.match(lexer.lines[lexer.y], lexer.x)
                        if m:
                            self._add_from_regex(context, 'end', lexer, m, None)
                            done = True
                            break

                    # ok, we need to check all our rules now, in order. if we
                    # find a token, note that we found one and exit the loop
                    found = False
                    for rule in self.grammar.rules:
                        if rule.match(lexer, context + [self.name], d):
                            found = True
                            null_t = None
                            break

                    # if we never found a token, then we need to add another
                    # character to the current null token (which we should
                    # create if it isn't set).
                    if not found:
                        if null_t is None:
                            null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
                            lexer.add_token(null_t)
                        null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                        lexer.x += 1

                # ok, since we're soon going to be on a different line (or
                # already are), we want a new null token. so forget about the
                # current one (i.e. stop adding to it).
                null_t = None

                # if we're still on the same line at this point (and not done)
                # then that means we're finished with the line and should move
                # on to the next one here
                if not done and old_y == lexer.y:
                    lexer.save_context()
                    lexer.y += 1
                    lexer.x = 0

            # alright, we're finally done processing the region, so return true
            lexer.context.pop(-1)
            return True
        else:
            # region was not matched; we never started. so return false
            return False

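# Illustrative sketch (hypothetical grammar names, not from the original
# file): a region covering single-quoted string literals could be declared as
#     RegionRule('string', r"'", StringGrammar(), r"'")
# and the end pattern may interpolate named groups from the start match, e.g.
#     RegionRule('heredoc', r'<<(?P<tag>[A-Z]+)', HeredocGrammar(), r'%(tag)s')
# Passing grammar=None lets Grammar.__init__ point the region back at the
# enclosing grammar, so regions can nest recursively.
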
class DualRegionRule(Rule):
    def __init__(self, name, start, grammar1, middle, grammar2, end):
        assert valid_name_re.match(name), 'invalid name %r' % name
        assert name not in reserved_names, "reserved rule name: %r" % name
        self.name = name
        self.start = start
        self.grammar1 = grammar1
        self.middle = middle
        self.grammar2 = grammar2
        self.end = end
        self.start_re = re.compile(start)
    def _add_from_regex(self, context, name, lexer, m, grammar=None):
        t_name = '.'.join(context + [self.name, name])
        t = self.make_token(lexer, m.group(0), t_name, grammar=grammar)
        lexer.add_token(t)
        lexer.x += len(m.group(0))
    def match(self, lexer, context=[], d={}):
        # see if we can match our start token
        m1 = self.start_re.match(lexer.lines[lexer.y], lexer.x)
        if m1:
            # ok, so create our start token, and get ready to start reading data
            self._add_from_regex(context, 'start', lexer, m1, lexer.grammar)
            null_t_name = '.'.join(context + [self.name, 'null'])
            null_t = None

            d1 = m1.groupdict()
            lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'start',
                                             list(context), dict(d1)))
            d2 = {}

            # the middle token can reference named groups from the start token
            middle_re = re.compile(self.middle % d1)

            # ok, so as long as we aren't done (we haven't found a middle
            # token), keep reading input
            done = False
            while not done and lexer.y < len(lexer.lines):
                old_y = lexer.y
                # if this line is empty, then we will skip it, but here we
                # insert an empty null token just so we have something
                if len(lexer.lines[lexer.y]) == 0:
                    null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
                    lexer.add_token(null_t)
                    null_t = None

                # ok, as long as we haven't found the middle token, and have
                # more data on the current line to read, we will process tokens
                while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
                    # see if we have found the middle token. if so, we can then
                    # proceed to "stage 2"
                    m2 = middle_re.match(lexer.lines[lexer.y], lexer.x)
                    if m2:
                        d2 = m2.groupdict()
                        self._add_from_regex(context, 'middle', lexer, m2, None)
                        done = True
                        break

                    # ok, we need to check all our rules now, in order. if we
                    # find a token, note that we found one and exit the loop
                    found = False
                    for rule in self.grammar1.rules:
                        if rule.match(lexer, context + [self.name], d1):
                            found = True
                            null_t = None
                            break

                    # if we never found a token, then we need to add another
                    # character to the current null token (which we should
                    # create if it isn't set).
                    if not found:
                        if null_t is None:
                            null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
                            lexer.add_token(null_t)
                        null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                        lexer.x += 1

                # ok, since we're soon going to be on a different line (or
                # already are), we want a new null token. so forget about the
                # current one.
                null_t = None

                # if we're still on the same line at this point (and not done)
                # then that means we're finished with the line and should move
                # on to the next one here
                if not done and old_y == lexer.y:
                    lexer.save_context()
                    lexer.y += 1
                    lexer.x = 0

            # ok, stage 2 is like stage 1, only we are looking for end tokens
            # instead of middle tokens
            d3 = dict(d1.items() + d2.items())
            end_re = re.compile(self.end % d3)
            lexer.context.pop(-1)
            lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'middle',
                                             list(context), dict(d3)))

            # ok, so as long as we aren't done (we haven't found an end token),
            # keep reading input
            done = False
            while not done and lexer.y < len(lexer.lines):
                old_y = lexer.y
                # if this line is empty, then we will skip it, but here we
                # insert an empty null token just so we have something
                if len(lexer.lines[lexer.y]) == 0:
                    null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
                    lexer.add_token(null_t)
                    null_t = None

                # ok, as long as we haven't found the end token, and have more
                # data on the current line to read, we will process tokens
                while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
                    # see if we have found the end token. if so, then we are
                    # done!
                    m3 = end_re.match(lexer.lines[lexer.y], lexer.x)
                    if m3:
                        self._add_from_regex(context, 'end', lexer, m3, None)
                        done = True
                        break

                    # ok, we need to check all our rules now, in order. if we
                    # find a token, note that we found one and exit the loop
                    found = False
                    for rule in self.grammar2.rules:
                        if rule.match(lexer, context + [self.name], d3):
                            found = True
                            null_t = None
                            break

                    # if we never found a token, then we need to add another
                    # character to the current null token (which we should
                    # create if it isn't set).
                    if not found:
                        if null_t is None:
                            null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
                            lexer.add_token(null_t)
                        null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                        lexer.x += 1

                # ok, since we're soon going to be on a different line (or
                # already are), we want a new null token. so forget about the
                # current one.
                null_t = None

                # if we're still on the same line at this point (and not done)
                # then that means we're finished with the line and should move
                # on to the next one here
                if not done and old_y == lexer.y:
                    lexer.save_context()
                    lexer.y += 1
                    lexer.x = 0

            # alright, we're finally done processing; return true
            lexer.context.pop(-1)
            return True
        else:
            # dual region was not matched; we never started. so return false
            return False

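# Illustrative sketch (hypothetical rule and grammar names, not from the
# original file): a dual region suits constructs with three delimiters, such
# as a sed/perl-style substitution, where grammar1 lexes the pattern half and
# grammar2 the replacement half:
#     DualRegionRule('replace', r's(?P<delim>/)', PatternGrammar(),
#                    r'%(delim)s', ReplaceGrammar(), r'%(delim)s')
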
class Grammar:
    rules = []
    def __init__(self):
        # let region rules that were built with grammar=None recurse into
        # this grammar itself
        for rule in self.rules:
            if hasattr(rule, 'grammar') and rule.grammar is None:
                rule.grammar = self

class Lexer:
    def __init__(self, name, grammar):
        self.name = name
        self.grammar = grammar
        self.y = 0
        self.x = 0
        self.lines = None
        self.tokens = []
        self.context = []
        self.line_contexts = {}
    def add_token(self, t):
        self.tokens.append(t)
    def lex(self, lines, y=0, x=0):
        self.y = y
        self.x = x
        self.lines = lines
        self.tokens = []
        self.context = []
        self.line_contexts = {}
    def __iter__(self):
        if self.lines is None:
            raise Exception, "no lines to lex"
        return self
    def save_context(self):
        self.line_contexts[self.y] = list(self.context)
    def next(self):
        null_t_name = 'null'
        null_t = None
        while self.y < len(self.lines):
            line = self.lines[self.y]
            while self.x < len(line):
                curr_t = None
                for rule in self.grammar.rules:
                    if rule.match(self):
                        assert self.tokens, "match rendered no tokens?"
                        return self.tokens.pop(0)
                if null_t is None:
                    null_t = Token(null_t_name, None, self.y, self.x, '')
                    self.add_token(null_t)
                null_t.add_to_string(line[self.x])
                self.x += 1
            self.save_context()
            null_t = None
            self.y += 1
            self.x = 0
        if self.tokens:
            return self.tokens.pop(0)
        else:
            raise StopIteration
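
# A minimal usage sketch (added for illustration; 'DemoGrammar' and the input
# lines are hypothetical, not part of the original module). Unmatched
# characters (the space below) come back as 'null' tokens.
if __name__ == '__main__':
    class DemoGrammar(Grammar):
        rules = [
            PatternRule('word', r'[a-zA-Z_]+'),
            PatternRule('number', r'[0-9]+'),
        ]
    lexer = Lexer('demo', DemoGrammar())
    lexer.lex(['foo 123', 'bar'])
    for token in lexer:
        print token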