diff --git a/lex.py b/lex.py index fcc30d5..6f54733 100755 --- a/lex.py +++ b/lex.py @@ -2,6 +2,9 @@ import curses, re import regex, util from point import Point +def escape(s): + return re.escape(s) + class Token(object): def __init__(self, name, rule, y, x, s, color=None, parent=None, matchd={}, link=None): self.name = name @@ -199,6 +202,8 @@ class RegionRule(Rule): # ok, so since we had a match, we need to create our start token, who # will be the ancestor to all other tokens matched in this region matchd = m.groupdict() + for (key, val) in matchd.iteritems(): + matchd[key] = escape(val) if val is not None else val parent = self.make_token(lexer, m.group(0), 'start', parent, matchd, 'start') yield parent diff --git a/mode/perl.py b/mode/perl.py index e6a5320..f2cc18c 100644 --- a/mode/perl.py +++ b/mode/perl.py @@ -77,7 +77,7 @@ class PerlGrammar(Grammar): PatternRule(r'deref', r"[@%\$&\*](?={)"), # match regexes - RegionRule(r'match', r'(?:(?<==~)|(?<=!~)|(?<=\()|(?<=split)) *(?P/)', StringGrammar, r'/[a-z]*'), + RegionRule(r'match', r'(?:(?<==~)|(?<=!~)|(?<=\()|(?<=split)|(?<=if)|(?<=unless)|(?<=while)|(?<=until)) *(?P/)', StringGrammar, r'/[a-z]*'), RegionRule(r'match', r'm *(?P[^ #a-zA-Z0-9_])', StringGrammar, r'%(delim)s[a-z]*'), RegionRule(r'match', r'm(?P#)', StringGrammar, r'#[a-z]*'),