From 94ee3cd017ecddae04bd46786dfd2f417faea30d Mon Sep 17 00:00:00 2001
From: Erik Osheim
Date: Wed, 22 Jul 2009 14:54:43 -0400
Subject: [PATCH] indenting bug fix

--HG--
branch : pmacs2
---
 mode/perl.py | 20 ++++++++++++--------
 tab.py       |  6 +++++-
 2 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/mode/perl.py b/mode/perl.py
index b2a9c8d..3bbe9f2 100644
--- a/mode/perl.py
+++ b/mode/perl.py
@@ -257,7 +257,7 @@ class PerlGrammar(Grammar):
         PatternRule('eol', r"\n$"),
     ]
 
-class PerlTabber2(StackTabber2):
+class PerlTabber(StackTabber2):
     is_ignored_tokens = ('spaces', 'eol', 'perl.comment')
     open_tokens = {'delimiter': {'{': '}', '(': ')', '[': ']'}}
     close_tokens = {'delimiter': {'}': '{', ')': '(', ']': '['}}
@@ -265,11 +265,16 @@ class PerlTabber2(StackTabber2):
     end_at_tokens = {'delimiter': {';': 1}}
     nocontinue_tokens = {'delimiter': {';': 1, ',': 1, '}': 1},
                          'perl.heredoc.end': 1,
-                         'perl.evaldoc.end': 1, 'perl.pod.end': 1}
-    start_free_tokens = {'perl.string.start': 1, 'perl.pod.start': 1,
-                         'perl.heredoc.start': 1, 'perl.evaldoc.start': 1}
-    end_free_tokens = {'perl.string.end': 1, 'perl.pod.end': 1,
-                       'perl.heredoc.end': 1, 'perl.evaldoc.start': 1}
+                         'perl.evaldoc.end': 1,
+                         'perl.pod.end': 1}
+    start_free_tokens = {'perl.string.start': 1,
+                         'perl.pod.start': 1,
+                         'perl.heredoc.start': 1,
+                         'perl.evaldoc.start': 1}
+    end_free_tokens = {'perl.string.end': 1,
+                       'perl.pod.end': 1,
+                       'perl.heredoc.end': 1,
+                       'perl.evaldoc.start': 1}
 
 class PerlSetLib(Method):
     '''Set the path(s) to find perl modules'''
@@ -718,9 +723,8 @@ class PerlContext(context.Context):
 class Perl(Fundamental):
     name = 'Perl'
     extensions = ['.pl', '.pm', '.pod']
-    #detection = ['perl']
     detection = [re.compile('^#!(?:.+/)?perl')]
-    tabbercls = PerlTabber2
+    tabbercls = PerlTabber
     grammar = PerlGrammar
     commentc = '#'
     opentokens = ('delimiter',)
diff --git a/tab.py b/tab.py
index ad3eeb8..33ddeb4 100644
--- a/tab.py
+++ b/tab.py
@@ -411,7 +411,11 @@ class StackTabber2(Tabber):
         if i == end - start or self.fixed_indent:
             level = self._get_next_level()
         else:
-            level = tokens[i + 1].x + 1
+            level = tokens[i + 1].x
+            # i'm not sure why this is necessary; seems like there is a bug
+            # somewhere else that this is compensating for.
+            if self.curr_level > 0:
+                level += 1
         self._append(t.string, t.name, level, y)
 
     def _handle_other_token(self, y, tokens, start, end, i, t):
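
For context, the tab.py hunk only changes how the continuation level is
computed for an open token that still has tokens after it on the same line:
the level now starts at the next token's column and is bumped by one column
only when the tabber is already nested. Below is a minimal, standalone Python
sketch of that computation, not pmacs2 code; Token, continuation_level,
curr_level, fixed_indent, and indent_width are hypothetical stand-ins for
StackTabber2's real token objects and state.

    from dataclasses import dataclass

    @dataclass
    class Token:
        string: str  # the token's text
        name: str    # the grammar rule that produced it
        x: int       # column where the token starts

    def continuation_level(tokens, i, curr_level, fixed_indent=False,
                           indent_width=4):
        """Indent level recorded when the open token at index i is handled."""
        if i == len(tokens) - 1 or fixed_indent:
            # Nothing follows the brace on this line: fall back to the next
            # stock indentation level.
            return curr_level + indent_width
        # Something follows the brace: hang continuation lines off the next
        # token's column. After the fix, the extra column is only added when
        # the tabber is already inside a nested construct (curr_level > 0).
        level = tokens[i + 1].x
        if curr_level > 0:
            level += 1
        return level

    # "foo(bar," -- the open paren sits at column 3, "bar" starts at column 4.
    line = [Token('foo', 'identifier', 0), Token('(', 'delimiter', 3),
            Token('bar', 'identifier', 4), Token(',', 'delimiter', 7)]

    print(continuation_level(line, 1, curr_level=0))  # 4: top level, no bump
    print(continuation_level(line, 1, curr_level=4))  # 5: nested, bumped by one

Under those assumptions, the new guard leaves top-level hanging indents at the
following token's column, while nested ones keep the previous one-extra-column
behaviour.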