diff --git a/application.py b/application.py
index 733676d..8a7ab25 100755
--- a/application.py
+++ b/application.py
@@ -487,6 +487,7 @@ class Application:
                         break
 
                     name = token.fqname()
+                    #assert name in w.mode.colors, name
                     c = w.mode.colors.get(name, w.mode.default_color)
 
                     if DARK_BACKGROUND:
diff --git a/code_examples/Reporting2.pm b/code_examples/Reporting2.pm
index 5ee277a..86dc5d0 100644
--- a/code_examples/Reporting2.pm
+++ b/code_examples/Reporting2.pm
@@ -1,5 +1,8 @@
 package TBB::Reporting2;
 
+my $cat = "cat";
+$cat =~ s/cat/dog/g;
+
 use strict;
 use warnings;
 
diff --git a/lex2.py b/lex2.py
index 9413868..0bef583 100755
--- a/lex2.py
+++ b/lex2.py
@@ -20,11 +20,14 @@ class Token(object):
         else:
             return []
     def domain(self):
-        names = []
         if self.parent is not None:
+            names = []
             names.extend(self.parent.domain())
-        names.append(self.rule.name)
-        return names
+            if names[-1] != self.rule.name:
+                names.append(self.rule.name)
+            return names
+        else:
+            return [self.rule.name]
     def fqlist(self):
         names = []
         if self.parent is not None:
@@ -264,16 +267,14 @@ class DualRegionRule(Rule):
             lexer.x += len(s)
         return token
     def resume(self, lexer, toresume):
-        assert toresume
+        assert toresume, "can't resume without tokens to resume!"
         token = toresume[0]
-        d = token.matchd
         if token.name == 'start':
-            stoken = toresume[0]
-            mtoken = self._match_first(lexer, stoken, None, toresume)
-            self._match_second(lexer, mtoken, [])
+            t2 = self._match_first(lexer, token, toresume)
+            t3 = self._match_second(lexer, t2, [])
+            return True
         elif token.name == 'middle':
-            d3 = token.matchd
-            self._match_second(lexer, token.parent, d3, toresume)
+            t3 = self._match_second(lexer, token, toresume)
         else:
             raise Exception, "invalid flag %r" % flag
         return True
@@ -281,32 +282,27 @@ class DualRegionRule(Rule):
         # see if we can match our start token
         m = self.start_re.match(lexer.lines[lexer.y], lexer.x)
         if m:
-            # region was match, so let's do this
-            d1 = m.groupdict()
-            d2 = self._match_first(lexer, parent, m, [])
-            d3 = dict(d1.items() + d2.items())
-            self._match_second(lexer, parent, d3, None, [])
+            t1 = self._add_from_regex('start', lexer, parent, m, m.groupdict())
+            t2 = self._match_first(lexer, t1, [])
+            t3 = self._match_second(lexer, t2, [])
             return True
         else:
             # region was not matched; we never started. so return false
             return False
 
-    def _match_first(self, lexer, context, d1, m1, toresume=[]):
-        # if we have been given rulecontext, then we are going to "resume" a
-        # parse that can already be assumed to have started
+    def _match_first(self, lexer, parent, toresume=[]):
         reenter = len(toresume) > 1
-        assert m1 or reenter
-
-        # ok, so create our start token, and get ready to start reading data
-        if m1 is not None:
-            self._add_from_regex('start', lexer, parent, m1, m1.groupdict())
+        if reenter:
+            assert parent is toresume[0]
+        d1 = parent.matchd
+        assert parent.name == 'start'
 
         null_t = None
-        middle_re = re.compile(self.middle % d1)
         d2 = {}
 
         # ok, so as long as we aren't done (we haven't found an end token),
         # keep reading input
+        t2 = None
         done = False
         while not done and lexer.y < len(lexer.lines):
             old_y = lexer.y
@@ -314,7 +310,7 @@ class DualRegionRule(Rule):
             # if this line is empty, then we will skip it, but here weinsert
             # an empty null token just so we have something
             if len(lexer.lines[lexer.y]) == 0:
-                null_t = Token('null', None, lexer.y, lexer.x, '')
+                null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                 lexer.add_token(null_t)
                 null_t = None
 
@@ -337,8 +333,8 @@
             # proceed to "stage 2"
             m2 = middle_re.match(lexer.lines[lexer.y], lexer.x)
             if m2:
-                d2 = m2.groupdict()
-                self._add_from_regex('middle', lexer, parent, m2, {})
+                d2 = dict(d1.items() + m2.groupdict().items())
+                t2 = self._add_from_regex('middle', lexer, parent, m2, d2)
                 done = True
                 break
 
@@ -356,7 +352,7 @@
             # create if it isn't set).
             if not found:
                 if null_t is None:
-                    null_t = Token('null', None, lexer.y, lexer.x, '')
+                    null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                     lexer.add_token(null_t)
                 null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                 lexer.x += 1
@@ -370,23 +366,23 @@
             # then that means we're finished with the line and should move
             # on to the next one here
             if not done and old_y == lexer.y:
-                lexer.save_context()
                 lexer.y += 1
                 lexer.x = 0
 
-        return d2
+        return t2
-    def _match_second(self, lexer, context, d3, m, toresume=[]):
-        # if we have been given rulecontext, then we are going to "resume" a
-        # parse that can already be assumed to have started
+    def _match_second(self, lexer, parent, toresume=[]):
         reenter = len(toresume) > 1
-
-        # ok stage 2 is like stage 1, only we are looking for end tokens
-        # instead of middle tokens
+        if reenter:
+            assert parent is toresume[0]
+        assert parent.name == 'middle'
+        #assert parent.name == 'middle'
+        d3 = parent.matchd
         null_t = None
         end_re = re.compile(self.end % d3)
 
         # ok, so as long as we aren't done (we haven't found an end token),
         # keep reading input
+        t3 = None
         done = False
         while not done and lexer.y < len(lexer.lines):
             old_y = lexer.y
@@ -406,7 +402,7 @@
             # if this line is empty, then we will skip it, but here weinsert
             # an empty null token just so we have something
             if len(lexer.lines[lexer.y]) == 0:
-                null_t = Token('null', None, lexer.y, lexer.x, '')
+                null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                 lexer.add_token(null_t)
                 null_t = None
 
@@ -417,7 +413,7 @@
             # proceed to "stage 2"
             m3 = end_re.match(lexer.lines[lexer.y], lexer.x)
             if m3:
-                self._add_from_regex('end', lexer, parent, m3, {})
+                t3 = self._add_from_regex('end', lexer, parent, m3, {})
                 done = True
                 break
 
@@ -435,7 +431,7 @@
             # create if it isn't set).
             if not found:
                 if null_t is None:
-                    null_t = Token('null', None, lexer.y, lexer.x, '')
+                    null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                     lexer.add_token(null_t)
                 null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                 lexer.x += 1
@@ -449,12 +445,11 @@
             # then that means we're finished with the line and should move
             # on to the next one here
             if not done and old_y == lexer.y:
-                lexer.save_context()
                 lexer.y += 1
                 lexer.x = 0
 
         # alright, we're finally done processing; return true
-        return True
+        return t3
 
 class Grammar:
     rules = []
diff --git a/lex2_perl.py b/lex2_perl.py
index c91aa67..5f33561 100755
--- a/lex2_perl.py
+++ b/lex2_perl.py
@@ -43,14 +43,18 @@ class StringGrammar(Grammar):
             pattern=r"\$[^A-Za-z0-9 %(delim)s](?![A-Za-z0-9_])",
             fallback=r"\$[^A-Za-z0-9 ](?![A-Za-z0-9_])",
         ),
-        PatternRule(
-            name=r'array',
-            pattern=r"@_",
-        ),
+        #PatternRule(
+        #    name=r'array',
+        #    pattern=r"@_",
+        #),
         PatternRule(
             name=r'scalar',
             pattern=r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*",
         ),
+        PatternRule(
+            name=r'cast',
+            pattern=r"[\$\@\%\&]{.*?}",
+        ),
         PatternRule(
             name=r'array',
             pattern=r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*",
diff --git a/mode_perl.py b/mode_perl.py
index f6082ee..32e8c80 100644
--- a/mode_perl.py
+++ b/mode_perl.py
@@ -96,10 +96,16 @@ class Perl(mode2.Fundamental):
         'match.null':        color.build('cyan', 'default'),
 
         # replace regex
-        'replace.start':    color.build('cyan', 'default'),
-        'replace.middle':   color.build('cyan', 'default'),
-        'replace.end':      color.build('cyan', 'default'),
-        'replace.null':     color.build('cyan', 'default'),
+        'replace.start':     color.build('cyan', 'default'),
+        'replace.middle':    color.build('cyan', 'default'),
+        'replace.end':       color.build('cyan', 'default'),
+        'replace.null':      color.build('cyan', 'default'),
+        'replace.escaped':   color.build('magenta', 'default'),
+        'replace.deref':     color.build('yellow', 'default'),
+        'replace.length':    color.build('yellow', 'default'),
+        'replace.scalar':    color.build('yellow', 'default'),
+        'replace.hash':      color.build('yellow', 'default'),
+        'replace.cast':      color.build('yellow', 'default'),
 
         # translate regex
         'translate.start':   color.build('magenta', 'default'),
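
Note on the Token.domain() change in lex2.py: with the new logic, a token whose parent chain repeats the same rule contributes that rule name to its domain only once. A minimal standalone sketch of just that logic (the Node class here is hypothetical; the real code walks Token.parent and Token.rule.name):

class Node:
    def __init__(self, name, parent=None):
        self.name   = name
        self.parent = parent
    def domain(self):
        # mirrors the new Token.domain(): extend the parent's domain and
        # only append our own rule name if it isn't already on top
        if self.parent is not None:
            names = []
            names.extend(self.parent.domain())
            if names[-1] != self.name:
                names.append(self.name)
            return names
        else:
            return [self.name]

root  = Node('perl')
outer = Node('replace', root)
inner = Node('replace', outer)  # directly nested under the same rule name
print(outer.domain())           # ['perl', 'replace']
print(inner.domain())           # ['perl', 'replace'] -- duplicate collapsed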
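
The DualRegionRule rework threads tokens through _match_first()/_match_second() instead of raw match dicts: each start/middle token carries its groupdict in matchd, which is why resume() can rebuild the middle/end regexes later (d3 = parent.matchd; end_re = re.compile(self.end % d3)). A small sketch of that interpolation mechanism; the pattern and input here are made up for the demo, only the % substitution matches lex2.py:

import re

start    = r"s(?P<delim>/)"        # start regex captures the delimiter
end_tmpl = r"%(delim)s"            # end pattern is a template over it

m  = re.compile(start).match("s/cat/dog/g")
d1 = m.groupdict()                 # {'delim': '/'} rides along as token.matchd
end_re = re.compile(end_tmpl % d1) # resume can rebuild the end regex later
print(end_re.pattern)              # '/'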
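
Quick check of the new 'cast' rule in lex2_perl.py: assuming PatternRule patterns are ordinary Python re patterns (as elsewhere in lex2), [\$\@\%\&]{.*?} picks up sigil-plus-block dereferences while leaving plain variables to the scalar/array rules:

import re

cast_re = re.compile(r"[\$\@\%\&]{.*?}")

for snippet in ('${name}', '@{$aref}', '%{ $h }', '&{$code}', '$plain'):
    m = cast_re.match(snippet)
    print('%s -> %s' % (snippet, m.group(0) if m else 'no match'))
# ${name} -> ${name} ... $plain -> no match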