--HG--
branch : pmacs2
Author: moculus
Date:   2007-04-01 01:37:42 +00:00
parent: 3f895ac112
commit: af6306f67e

3 changed files with 72 additions and 67 deletions

lex2.py

@@ -78,7 +78,6 @@ class RegionRule(Rule):
         m = self.start_re.match(lexer.lines[lexer.y], lexer.x)
         if m:
             self._add_from_regex(context, 'start', lexer, m)
             null_t_name = '.'.join(context + [self.name, 'null'])
             null_t = None
@@ -86,14 +85,18 @@ class RegionRule(Rule):
             end_re = re.compile(self.end % m.groupdict())
             done = False
+            # NOTE: need to better handle matches that might consume more than
+            # one line of input. #### also, seems like some "region" matching isn't
+            # working, and finally, like the end token(s) might not be handled correctly
             while not done and lexer.y < len(lexer.lines):
-                line = lexer.lines[lexer.y]
-                if len(line) == 0:
+                old_y = lexer.y
+                if len(lexer.lines[lexer.y]) == 0:
                     null_t = Token(null_t_name, lexer.y, lexer.x, '')
                     lexer.add_token(null_t)
-                while not done and lexer.x < len(line):
+                    null_t = None
+                while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
                     if self.end:
-                        m = end_re.match(line, lexer.x)
+                        m = end_re.match(lexer.lines[lexer.y], lexer.x)
                         if m:
                             self._add_from_regex(context, 'end', lexer, m)
                             done = True
@@ -109,17 +112,19 @@ class RegionRule(Rule):
                     if null_t is None:
                         null_t = Token(null_t_name, lexer.y, lexer.x, '')
                         lexer.add_token(null_t)
-                    null_t.add_to_string(line[lexer.x])
+                    null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                     lexer.x += 1
                 null_t = None
-            if not done:
+            if not done and old_y == lexer.y:
                 lexer.y += 1
                 lexer.x = 0
             return True
         else:
             return False
+# NOTE: this needs to get synced up with RegionRule's changes...
+# right now, it has at least 2-3 different bugs. suck!
 class DualRegionRule(Rule):
     def __init__(self, name, start, grammar1, middle, grammar2, end):
         assert valid_name_re.match(name), 'invalid name %r' % name
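
Note on the RegionRule change: the old code cached the current line in a local (line = lexer.lines[lexer.y]), so a match that consumed a newline and advanced lexer.y left the inner loop scanning a stale line. The new code re-reads lexer.lines[lexer.y] and guards both loops with old_y. Below is a minimal sketch of that loop shape; Lexer here is a hypothetical stand-in and match_end a hypothetical callback, not the real pmacs classes.

# Minimal sketch, assuming a Lexer with lines plus x/y cursors;
# match_end stands in for end_re.match() and token handling.
class Lexer:
    def __init__(self, lines):
        self.lines = lines
        self.x, self.y = 0, 0

def scan_region(lexer, match_end):
    done = False
    while not done and lexer.y < len(lexer.lines):
        # Remember which line this pass started on; if a handler moves
        # lexer.y (a match consumed a newline), the inner loop must stop
        # rather than keep indexing the old line.
        old_y = lexer.y
        while (not done and lexer.y == old_y
               and lexer.x < len(lexer.lines[lexer.y])):
            if match_end(lexer.lines[lexer.y], lexer.x):
                done = True
            else:
                lexer.x += 1
        # Only advance to the next line if nothing else already did.
        if not done and old_y == lexer.y:
            lexer.y += 1
            lexer.x = 0
    return done

# e.g. find the first '*' across lines:
lx = Lexer(['abc', 'de*f'])
print(scan_region(lx, lambda line, x: line[x] == '*'), lx.y, lx.x)  # True 1 2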


@@ -116,7 +116,7 @@ class PerlGrammar(Grammar):
         ),
         PatternRule(
             name=r'bareword_hash_index',
-            pattern=r'(?<={) *[A-Za-z0-9_]+(?=})',
+            pattern=r'(?<={)[A-Za-z0-9_]+(?=})',
         ),
         PatternRule(
             name=r'bareword_hash_key',
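
Note on the grammar change: dropping the " *" after the lookbehind means the bareword must sit flush against the opening brace, so the matched token no longer carries a leading space. The two regexes below come from the diff; the comparison script itself is only an illustration.

import re

old = re.compile(r'(?<={) *[A-Za-z0-9_]+(?=})')
new = re.compile(r'(?<={)[A-Za-z0-9_]+(?=})')

print(old.search('$h{ foo}').group())  # ' foo' -- leading space inside the token
print(new.search('$h{ foo}'))          # None -- space no longer tolerated
print(new.search('$h{foo}').group())   # 'foo'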

test3.py

@@ -17,64 +17,64 @@ for i in range(0, len(color_list)):
     color_dict[color_names[i]] = color_list[i]
 token_colors = {
     'null': 'white',
     'delimiter': 'white',
     'pod.start': 'lred',
     'pod.null': 'lred',
     'pod.end': 'lred',
     'pod.header': 'lpurple',
     'sub': 'lcyan',
     'number': 'white',
     'operator': 'white',
     'heredoc': 'lgreen',
     'endblock': 'lred',
     'pod': 'lred',
     'comment': 'lred',
-    'string1': 'lgreen',
+    #'string1': 'lgreen',
     'string1.start': 'lgreen',
     'string1.null': 'lgreen',
     'string1.escaped': 'lpurple',
-    'string1.scalar': 'yellow',
-    'string1.system_scalar': 'yellow',
+    #'string1.scalar': 'yellow',
+    #'string1.system_scalar': 'yellow',
     'string1.hash_deref': 'yellow',
-    'string1.hash_bareword_index': 'lgreen',
+    #'string1.hash_bareword_index': 'lgreen',
     'string1.end': 'lgreen',
-    'string2': 'lgreen',
+    #'string2': 'lgreen',
     'string2.start': 'lgreen',
     'string2.null': 'lgreen',
     'string2.end': 'lgreen',
     'evalstring': 'lcyan',
     'default_string': 'lgreen',
     'keyword': 'lpurple',
     'length_scalar': 'yellow',
     'system_scalar': 'yellow',
     'system_array': 'yellow',
     'scalar': 'yellow',
     'dereference': 'yellow',
     'array': 'yellow',
     'hash': 'yellow',
-    'hash_bareword_index': 'lgreen',
+    'bareword_hash_index': 'lgreen',
     'quoted_region': 'lcyan',
     'match_regex': 'lcyan',
     'replace_regex': 'lcyan',
-    'literal_hash_bareword_index': 'lgreen',
+    'bareword_hash_key': 'lgreen',
     'interpolated_scalar': 'yellow',
     'interpolated_system_scalar': 'yellow',
     'interpolated_array': 'yellow',
     'interpolated_system_array': 'yellow',
     'interpolated_hash': 'yellow',
     'label': 'lcyan',
     'package': 'lcyan',
     'use': 'lcyan',
     'method': 'lcyan',
     'methodref': 'lcyan',
     'method_declaration': 'lcyan',
     'instance_method': 'lcyan',
     'static_method': 'lcyan',
     'builtin_method': 'lpurple',
     'bareword_method': 'lcyan',
     'bareword': 'yellow',
     'bizzaro': 'lpurple',
 }
 paths = sys.argv[1:]
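
Note on test3.py: token_colors maps lexer token names, including dotted region names like 'string1.end', to display colors; the renames here track the bareword_hash_index / bareword_hash_key rules in the Perl grammar. One plausible way such a table gets consumed is a lookup that falls back through dotted parents to a default. The color_for helper below is hypothetical, not part of test3.py.

# Hypothetical consumer of a token->color table: try the full dotted
# name first, then each parent ('string1.end' -> 'string1'), then a
# default, so region sub-tokens inherit a color when unlisted.
def color_for(token_name, token_colors, default='white'):
    name = token_name
    while name:
        if name in token_colors:
            return token_colors[name]
        name, _, _ = name.rpartition('.')  # strip the last dotted part
    return default

assert color_for('string1.end', {'string1.end': 'lgreen'}) == 'lgreen'
assert color_for('pod.header', {'pod': 'lred'}) == 'lred'
assert color_for('unknown', {}) == 'white'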