more bug fixes

--HG--
branch : pmacs2
moculus 2008-10-03 03:57:32 +00:00
parent db2675aac5
commit 5e8ad6f831
3 changed files with 127 additions and 87 deletions


@@ -3,6 +3,7 @@ import buffer, color, commands, completer, context, default, method, mode, regex
from point import Point
from lex import Grammar, PatternRule, ContextPatternRule, RegionRule, OverridePatternRule, PatternGroupRule
from method import Argument, Method, WrapParagraph
from tab import StackTabber, StackTabber2
class PodGrammar(Grammar):
rules = [
@@ -37,8 +38,28 @@ class StrictStringGrammar(Grammar):
class StringGrammar(Grammar):
rules = _make_string_rules('"')
class MatchGrammar(Grammar):
class EvalGrammar(Grammar):
rules = _make_string_rules('`')
class TranslateGrammar1(Grammar):
rules = [PatternRule(r'data', r"(?:\\.|[^\\/])")]
class TranslateGrammar2(Grammar):
rules = [PatternRule(r'data', r"(?:\\.|[^\\#])")]
class TranslateGrammarX(Grammar):
rules = [PatternRule(r'data', r"(?:\\.|[^\\%(delim)s])")]
class MatchGrammar1(Grammar):
rules = _make_string_rules('/')
class MatchGrammar2(Grammar):
rules = _make_string_rules('#')
class MatchGrammar3(Grammar):
rules = _make_string_rules(')')
class MatchGrammar4(Grammar):
rules = _make_string_rules(']')
class MatchGrammar5(Grammar):
rules = _make_string_rules('}')
class MatchGrammar6(Grammar):
rules = _make_string_rules('>')
class QuotedGrammar1(Grammar):
rules = _make_string_rules(')')
@@ -67,7 +88,7 @@ class PerlGrammar(Grammar):
PatternRule(r'comment', r'#.*$'),
RegionRule(r'perl_string', r'"', StringGrammar, r'"'),
RegionRule(r'perl_string', r"'", StrictStringGrammar, r"'"),
RegionRule(r'evalstring', r"`", StringGrammar, r"`"),
RegionRule(r'evalstring', r"`", EvalGrammar, r"`"),
PatternRule(r'number', r'0?\.[0-9]+|[0-9]+(?:\.[0-9]+)?'),
PatternRule(r'perl_keyword', r"(?<!->)(?:STDIN|STDERR|STDOUT|continue|do|else|elsif|eval|foreach|for|if|last|my|next|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z0-9_])"),
PatternRule(r'hash_key', r'(?<={)[A-Za-z0-9_]+(?=})'),
@@ -84,30 +105,32 @@ class PerlGrammar(Grammar):
PatternRule(r'deref', r"[@%\$&\*](?={)"),
# match regexes; paired delimiters
RegionRule(r'match', r'm *(?P<delim>\()', StringGrammar, r'\)[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>\[)', StringGrammar, r'\][a-z]*'),
RegionRule(r'match', r'm *(?P<delim>\{)', StringGrammar, r'\}[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>\<)', StringGrammar, r'\>[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>\()', MatchGrammar3, r'\)[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>\[)', MatchGrammar4, r'\][a-z]*'),
RegionRule(r'match', r'm *(?P<delim>\{)', MatchGrammar5, r'\}[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>\<)', MatchGrammar6, r'\>[a-z]*'),
# match regexes
RegionRule(r'match', r'(?:(?<==~)|(?<=!~)|(?<=\()|(?<=split)|(?<=if)|(?<=unless)|(?<=while)|(?<=until)) *(?P<delim>/)', StringGrammar, r'/[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>/)', MatchGrammar, r'/[a-z]*'),
RegionRule(r'match', r'(?:(?<==~)|(?<=!~)|(?<=\()|(?<=split)|(?<=if)|(?<=unless)|(?<=while)|(?<=until)) *(?P<delim>/)', MatchGrammar1, r'/[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>/)', MatchGrammar1, r'/[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>[^ #a-zA-Z0-9_])', StringGrammar, r'%(delim)s[a-z]*'),
RegionRule(r'match', r'm(?P<delim>#)', StringGrammar, r'#[a-z]*'),
RegionRule(r'match', r'm(?P<delim>#)', MatchGrammar2, r'#[a-z]*'),
# replace regexes; paired delimiters
RegionRule(r'replace', r's *(?P<delim>\()', StringGrammar, r'\) *\(', StringGrammar, r'\)[a-z]*'),
RegionRule(r'replace', r's *(?P<delim>\[)', StringGrammar, r'\] *\[', StringGrammar, r'\][a-z]*'),
RegionRule(r'replace', r's *(?P<delim>\{)', StringGrammar, r'\} *\{', StringGrammar, r'\}[a-z]*'),
RegionRule(r'replace', r's *(?P<delim>\<)', StringGrammar, r'\> *\<', StringGrammar, r'\>[a-z]*'),
RegionRule(r'replace', r's *(?P<delim>\()', MatchGrammar3, r'\) *\(', MatchGrammar3, r'\)[a-z]*'),
RegionRule(r'replace', r's *(?P<delim>\[)', MatchGrammar4, r'\] *\[', MatchGrammar4, r'\][a-z]*'),
RegionRule(r'replace', r's *(?P<delim>\{)', MatchGrammar5, r'\} *\{', MatchGrammar5, r'\}[a-z]*'),
RegionRule(r'replace', r's *(?P<delim>\<)', MatchGrammar6, r'\> *\<', MatchGrammar6, r'\>[a-z]*'),
# replace regexes
RegionRule(r'replace', r's *(?P<delim>/)', MatchGrammar1, r'/', MatchGrammar1, r'/[a-z]*'),
RegionRule(r'replace', r's *(?P<delim>[^ a-zA-Z0-9_])', StringGrammar, r'%(delim)s', StringGrammar, r'%(delim)s[a-z]*'),
RegionRule(r'replace', r's(?P<delim>#)', StringGrammar, r'#', StringGrammar, r'#[a-z]*'),
RegionRule(r'replace', r's(?P<delim>#)', MatchGrammar2, r'#', MatchGrammar2, r'#[a-z]*'),
# translate operator
RegionRule(r'translate', r'(?:y|tr) *(?P<delim>[^ a-zA-Z0-9_])', Grammar, r'%(delim)s', Grammar, r'%(delim)s[a-z]*'),
RegionRule(r'translate', r'(?:y|tr)#', Grammar, r'#', Grammar, r'#[a-z]*'),
RegionRule(r'translate', r'(?:y|tr) *(?P<delim>/)', TranslateGrammar1, r'/', TranslateGrammar1, r'/[a-z]*'),
RegionRule(r'translate', r'(?:y|tr)#', TranslateGrammar2, r'#', TranslateGrammar2, r'#[a-z]*'),
RegionRule(r'translate', r'(?:y|tr) *(?P<delim>[^ a-zA-Z0-9_])', TranslateGrammarX, r'%(delim)s', TranslateGrammarX, r'%(delim)s[a-z]*'),
# some more basic stuff
PatternRule(r'package', r"(?<=package )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
@@ -151,73 +174,84 @@ class PerlGrammar(Grammar):
PatternRule(r"eol", r"\n$"),
]
class PerlTabber(tab.StackTabber):
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
t = highlighter.tokens[y][0]
return t.name == 'perl_keyword' and t.string == 'sub'
def _handle_open_token(self, currlvl, y, i):
currlvl = tab.StackTabber._handle_open_token(self, currlvl, y, i)
return currlvl
def _handle_close_token(self, currlvl, y, i):
w = self.mode.tabwidth
self._opt_pop('cont')
currlvl = tab.StackTabber._handle_close_token(self, currlvl, y, i)
token = self.get_token(y, i)
if self.is_rightmost_token(y, i):
if token.string == '}':
self._opt_pop('cont')
else:
self._opt_append('cont', currlvl + w)
return currlvl
def _handle_other_token(self, currlvl, y, i):
w = self.mode.tabwidth
token = self.get_token(y, i)
fqname = token.fqname()
if fqname == 'delimiter' and token.string == ';':
self._opt_pop('cont')
elif fqname == 'heredoc.start':
self._opt_append('heredoc', None)
elif fqname == 'heredoc.end':
self._opt_pop('heredoc')
self._opt_pop('cont')
elif fqname == 'quoted.start':
self._opt_append('quoted', currlvl + w)
elif fqname == 'quoted.end':
self._opt_pop('cont')
self._opt_pop('quoted')
elif fqname == 'evaldoc.start':
self._opt_append('evaldoc', None)
elif fqname == 'evaldoc.end':
self._opt_pop('evaldoc')
self._opt_pop('cont')
elif fqname == 'pod.start':
self._opt_append('pod', None)
elif fqname == 'pod.end':
self._opt_pop('pod')
currlvl = 0
elif fqname == 'perl_string.start':
self._opt_append('string', None)
elif fqname == 'perl_string.end':
self._opt_pop('string')
if self.is_rightmost_token(y, i):
self._opt_append('cont', currlvl + w)
if self.is_rightmost_token(y, i):
if(not fqname.startswith('pod') and
not fqname.startswith('heredoc') and
not fqname.startswith('perl_string') and
not fqname.startswith('endblock') and
fqname != 'eol' and
fqname != 'comment' and
fqname != 'spaces' and
fqname != 'null' and
token.string not in ('}', ';', '(', '{', '[', ',')):
self._opt_append('cont', currlvl + w)
return currlvl
class PerlTabber2(StackTabber2):
open_tokens = {'delimiter': {'{': '}', '(': ')', '[': ']'}}
close_tokens = {'delimiter': {'}': '{', ')': '(', ']': '['}}
end_at_eof = False
end_at_tokens = {'delimiter': {';': 1}}
nocontinue_tokens = {'delimiter': {';': 1}}
start_free_tokens = {'string.start': 1, 'pod.start': 1, 'heredoc.start': 1,
'evaldoc.start': 1}
end_free_tokens = {'string.end': 1, 'pod.end': 1, 'heredoc.end': 1,
'evaldoc.end': 1}
#class PerlTabber(StackTabber):
# def is_base(self, y):
# if y == 0:
# return True
# highlighter = self.mode.window.buffer.highlights[self.mode.name()]
# if not highlighter.tokens[y]:
# return False
# t = highlighter.tokens[y][0]
# return t.name == 'perl_keyword' and t.string == 'sub'
# def _handle_open_token(self, currlvl, y, i):
# currlvl = StackTabber._handle_open_token(self, currlvl, y, i)
# return currlvl
# def _handle_close_token(self, currlvl, y, i):
# w = self.mode.tabwidth
# self._opt_pop('cont')
# currlvl = StackTabber._handle_close_token(self, currlvl, y, i)
# token = self.get_token(y, i)
# if self.is_rightmost_token(y, i):
# if token.string == '}':
# self._opt_pop('cont')
# else:
# self._opt_append('cont', currlvl + w)
# return currlvl
# def _handle_other_token(self, currlvl, y, i):
# w = self.mode.tabwidth
# token = self.get_token(y, i)
# fqname = token.fqname()
# if fqname == 'delimiter' and token.string == ';':
# self._opt_pop('cont')
# elif fqname == 'heredoc.start':
# self._opt_append('heredoc', None)
# elif fqname == 'heredoc.end':
# self._opt_pop('heredoc')
# self._opt_pop('cont')
# elif fqname == 'quoted.start':
# self._opt_append('quoted', currlvl + w)
# elif fqname == 'quoted.end':
# self._opt_pop('cont')
# self._opt_pop('quoted')
# elif fqname == 'evaldoc.start':
# self._opt_append('evaldoc', None)
# elif fqname == 'evaldoc.end':
# self._opt_pop('evaldoc')
# self._opt_pop('cont')
# elif fqname == 'pod.start':
# self._opt_append('pod', None)
# elif fqname == 'pod.end':
# self._opt_pop('pod')
# currlvl = 0
# elif fqname == 'perl_string.start':
# self._opt_append('string', None)
# elif fqname == 'perl_string.end':
# self._opt_pop('string')
# if self.is_rightmost_token(y, i):
# self._opt_append('cont', currlvl + w)
# if self.is_rightmost_token(y, i):
# if(not fqname.startswith('pod') and
# not fqname.startswith('heredoc') and
# not fqname.startswith('perl_string') and
# not fqname.startswith('endblock') and
# fqname != 'eol' and
# fqname != 'comment' and
# fqname != 'spaces' and
# fqname != 'null' and
# token.string not in ('}', ';', '(', '{', '[', ',')):
# self._opt_append('cont', currlvl + w)
# return currlvl
class PerlSetLib(Method):
'''Set the path(s) to find perl modules'''
@@ -640,7 +674,8 @@ class Perl(mode.Fundamental):
modename = 'Perl'
extensions = ['.pl', '.pm']
detection = ['perl']
tabbercls = PerlTabber
#tabbercls = PerlTabber
tabbercls = PerlTabber2
grammar = PerlGrammar
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}


@@ -1,6 +1,6 @@
import commands
import color, mode, tab
from lex import Grammar, PatternRule, RegionRule, OverridePatternRule
from lex import Grammar, PatternRule, RegionRule, PatternGroupRule, OverridePatternRule
from method import Method
class StringGrammar1(Grammar):
@@ -88,6 +88,10 @@ class TestGrammar(Grammar):
class ShGrammar(Grammar):
rules = [
PatternGroupRule(r'vardecl', r'spaces', r'^ +', r'variable', r'[a-zA-Z_][a-zA-Z0-9_]*', r'delimiter', r'='),
PatternGroupRule(r'vardecl', r'sh_builtin', r'export', r'spaces', r' +', r'variable', r'[a-zA-Z_][a-zA-Z0-9_]*', r'delimiter', r'='),
PatternRule(r'variable', r"^[a-zA-Z_][a-zA-Z0-9_]*(?==)"),
PatternRule(r'spaces', r' +'),
RegionRule(r'heredoc', r"<<[<\\]?(?P<heredoc>[a-zA-Z_][a-zA-Z0-9_]*)", None, "\n", HereGrammar, r'^%(heredoc)s$'),
RegionRule(r'heredoc', r"<<-(?P<heredoc>[a-zA-Z_][a-zA-Z0-9_]*)", None, "\n", HereGrammar, r'^ *%(heredoc)s$'),
@@ -195,10 +199,12 @@ class Sh(mode.Fundamental):
'eval.start': ('cyan', 'default', 'bold'),
'eval.variable': ('yellow', 'default', 'bold'),
'eval.data': ('cyan', 'default', 'bold'),
'eval.null': ('cyan', 'default', 'bold'),
'eval.end': ('cyan', 'default', 'bold'),
'neval.start': ('yellow', 'default', 'bold'),
'neval.variable': ('yellow', 'default', 'bold'),
'neval.data': ('cyan', 'default', 'bold'),
'neval.null': ('cyan', 'default', 'bold'),
'neval.end': ('yellow', 'default', 'bold'),
}

tab.py

@@ -231,7 +231,6 @@ class Marker2(object):
def __repr__(self):
return '<Marker2(%r, %r, %r)>' % (self.name, self.type_, self.level)
#class StackTabber2(tab.StackTabber):
class StackTabber2(Tabber):
open_tokens = {'delimiter': {'{': '}', '(': ')', '[': ']'}}
close_tokens = {'delimiter': {'}': '{', ')': '(', ']': '['}}
@@ -404,7 +403,7 @@ class StackTabber2(Tabber):
# add implicit continuation
top = self._peek()
if top and top.name in self.scope_tokens.get(top.type_, {}):
if i + start == end and top and top.name in self.scope_tokens.get(top.type_, {}):
if self.continue_tokens:
if s in self.continue_tokens.get(name, {}):
self._append_unless('continue', name, self._get_next_level())
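
For context on the grammar changes in the first file: a minimal standalone sketch, assuming nothing about pmacs2 internals, of why each region delimiter needs its own data pattern (as in TranslateGrammar1/2/X and MatchGrammar1-6) instead of one generic StringGrammar. An escaped delimiter must be consumed as data, while a bare delimiter closes the region. The split_region helper below is hypothetical, for illustration only.

import re

# Hypothetical helper, not pmacs2 code: scan a region body with a
# per-delimiter data pattern (an escaped character, or any character
# that is neither a backslash nor the closing delimiter).
def split_region(text, delim):
    data = re.compile(r"(?:\\.|[^\\%s])" % re.escape(delim))
    body = []
    i = 0
    while i < len(text):
        if text[i] == delim:              # bare delimiter closes the region
            return ''.join(body), text[i + 1:]
        m = data.match(text, i)
        if not m:                         # trailing lone backslash: stop
            break
        body.append(m.group(0))           # escaped delimiters stay in the body
        i = m.end()
    return ''.join(body), ''

print(split_region(r"a\/b/c", "/"))       # -> ('a\\/b', 'c')

A single generic string grammar cannot express this, because the character class that excludes the delimiter differs for each delimiter, which is presumably why the commit introduces one small grammar class per closing character.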