This commit is contained in: branch pmacs2
moculus 2008-10-02 13:35:28 +00:00
parent ab0a0717a3
commit 72e7eed05a
2 changed files with 160 additions and 79 deletions

View File

@@ -35,26 +35,26 @@ BEGIN {
 }
 /^\*/ {
     this_len = match($0,/\*([^*]|$)/); # get number of stars in 1st field
     array[this_len]++;                 # increment index of current leaf
     if ( this_len - last_len > 1 ) {   # check for invalid outline levels
-        if (FILENAME == "-" ) myfile = "(piped from standard input)"
-        else myfile = FILENAME
+        if (FILENAME == "-" )
+            myfile = "(piped from standard input)"
+        else
+            myfile = FILENAME
         error_message = "\a\a" \
             "************************************************\n" \
             " WARNING! The input file has an invalid number \n" \
             " of asterisks on line " NR ", below. \n\n" \
             " The previous outline level had " last_len " asterisks, \n" \
             " but the current/next level has " this_len " asterisks!\n\n" \
             " You have inadvertently skipped one level of \n" \
             " indentation. Processing halted so you can fix \n" \
             " the input file, \x22" myfile "\x22. \n" \
             "************************************************\n" \
             ">>>\n" \
             "Error on Line #" NR " :" ;
         print error_message, $0 > "/dev/stderr" ;
         exit 1;

View File

@@ -2,6 +2,7 @@ import commands
 import color, mode, tab
 from lex import Grammar, PatternRule, RegionRule
 from mode.python import StringGrammar2
+from tab import Marker

 class RegexGrammar(Grammar):
     rules = [
@@ -16,8 +17,8 @@ class AwkGrammar(Grammar):
         RegionRule(r'awk_regex', r'/(?! )', RegexGrammar, r'/'),
         PatternRule(r'awk_global', r'(?:TEXTDOMAIN|SUBSEP|RLENGTH|RSTART|RT|RS|PROCINFO|ORS|OFS|OFMT|NR|NF|LINT|IGNORECASE|FS|FNR|FILENAME|FIELDWIDTHS|ERRNO|ENVIRON|CONVFMT|BINMODE|ARGV|ARGIND|ARGC)(?![a-zA-Z0-9_])'),
         PatternRule(r'delimiter', r'(?:[\{\}()\[\]?:;,]|=(?!=)|\+=|-=|\*=|/=|\%=|\^=)'),
-        PatternRule(r'keyword', r'(?:if|else|while|do|for|break|continue|delete|exit)(?![a-zA-Z0-9_])'),
-        PatternRule(r'builtin', r'(?:BEGIN|END|close|getline|nextfile|next|printf|print|system|fflush|atan2|cos|exp|int|log|rand|sin|sqrt|srand|asorti|asort|gensub|gsub|index|length|match|split|sprintf|strtonum|substr|sub|tolower|toupper|mktime|strftime|systime|and|compl|lshift|or|xor|rshift|bindtextdomain|dcgettext|dcngettext|function|extension)(?![a-zA-Z0-9_])'),
+        PatternRule(r'keyword', r'(?:BEGIN|END|if|else|while|do|for|break|continue|delete|exit)(?![a-zA-Z0-9_])'),
+        PatternRule(r'builtin', r'(?:close|getline|nextfile|next|printf|print|system|fflush|atan2|cos|exp|int|log|rand|sin|sqrt|srand|asorti|asort|gensub|gsub|index|length|match|split|sprintf|strtonum|substr|sub|tolower|toupper|mktime|strftime|systime|and|compl|lshift|or|xor|rshift|bindtextdomain|dcgettext|dcngettext|function|extension)(?![a-zA-Z0-9_])'),
         PatternRule(r'awk_field', r'\$\d*'),
@@ -37,6 +38,8 @@ class AwkGrammar(Grammar):
     ]

 class AwkTabber(tab.StackTabber):
+    open_tokens = {'{': '}', '(': ')', '[': ']'}
+    close_tokens = {'}': '{', ')': '(', ']': '['}
     def __init__(self, m):
         self.mode = m
         self.name = m.name()
@@ -53,62 +56,140 @@ class AwkTabber(tab.StackTabber):
     def _calc_level(self, y):
         target = y
-        while not self.is_base(y) and y > 0:
+        while not self._is_base(y) and y > 0:
             y -= 1
         self._reset()
         while y <= target:
-            self._set_currlvl()
+            self._save_curr_level()
             self._handle_tokens(y)
+            y += 1
     def _is_base(self, y):
-        return False
-    def _reset(self):
-        self.stack = []
-        self.curr = 0
-    def _set_currlvl(self):
-        if self.markers:
-            self.curr = self.markers[-1].level
-        else:
-            self.curr = 0
+        if y == 0:
+            return True
+        t = self._get_tokens(y)[0]
+        if t.fqname() == 'awk_regex.start':
+            return True
+        elif t.name in ('awk_field', 'awk_global'):
+            return True
+        elif t.name == 'keyword' and t.string in ('BEGIN', 'END'):
+            return True
+        else:
+            return False
+    def _reset(self):
+        self.record = {}
+        self.stack = []
+        self.markers = self.stack
+        self.curr_level = 0
+    def _get_curr_level(self):
+        if self.stack:
+            return self.stack[-1].level
+        else:
+            return 0
+    def _get_next_level(self):
+        return self._get_curr_level() + self.mode.tabwidth
+    def _save_curr_level(self):
+        self.curr_level = self._get_curr_level()
+    def _match(self, *names):
+        return self.stack and self.stack[-1].name in names
+    def _nomatch(self, *names):
+        return self.stack and self.stack[-1].name not in names
+    def _pop(self, *names):
+        if self._match(*names):
+            self.stack.pop()
+    def _pop_while(self, *names):
+        while self._match(*names):
+            self.stack.pop()
+    def _pop_until(self, *names):
+        while self._nomatch(*names):
+            self.stack.pop()
+    def _append(self, name, level):
+        self.stack.append(Marker(name, level))
+    def _append_unless(self, name, level):
+        if self._nomatch(name):
+            self.stack.append(Marker(name, level))
     def _get_tokens(self, y):
         return self.mode.window.buffer.highlights[self.name].tokens[y]
     def _handle_tokens(self, y):
         tokens = self._get_tokens(y)
-        for i in range(0, len(tokens)):
-            token = tokens[i]
-            if self._is_indent(token):
-                pass
-            elif self._is_ignored(token):
+        assert tokens
+        start = int(self._is_indent(tokens[0]))
+        end = len(tokens) - 1
+        while end > 0 and self._is_ignored(tokens[end]):
+            end -= 1
+        for i in range(0, end + 1 - start):
+            token = tokens[start + i]
+            if self._is_ignored(token):
                 pass
             elif self._is_close_token(token):
-                self._handle_close_token(y, tokens, i, token)
+                self._handle_close_token(y, tokens, start, end, i, token)
             elif self._is_open_token(token):
-                self._handle_open_token(y, tokens, i, token)
+                self._handle_open_token(y, tokens, start, end, i, token)
             else:
-                self._handle_other_token(y, tokens, i, token)
-        self.lines[y] = self.curr
+                self._handle_other_token(y, tokens, start, end, i, token)
+        self.lines[y] = self.curr_level
+        self.record[y] = tuple(self.stack)
     def _is_indent(self, token):
-        return False
+        return token.name == 'spaces'
     def _is_ignored(self, token):
-        return False
+        return token.name in ('spaces', 'eol', 'comment')
     def _is_close_token(self, token):
-        return True
-    def _close_match(self, opentoken, closetoken):
-        return True
-    def _handle_close_token(self, y, tokens, i, token):
+        return token.name == 'delimiter' and token.string in self.close_tokens
+    def _handle_close_token(self, y, tokens, start, end, i, token):
         if not self.stack:
-            raise Exception, "unmatched %r on line %d" % (token.string, y)
-        token2 = self.stack.pop()
-        if not self._close_match(token2, token):
-            raise Exception, "mismatched %r on line %d (expected %r)" % \
-                (token.string, y, token2.string)
-        if False:
-            self._set_currlvl()
+            raise Exception, "unmatched %r, line %d" % (token.string, y)
+        while True:
+            marker = self.stack[-1]
+            if marker.name in ('control', 'continue'):
+                self.stack.pop()
+            elif marker.name in self.open_tokens:
+                if self.open_tokens[marker.name] == token.string:
+                    self.stack.pop()
+                    break
+                else:
+                    raise Exception, "mismatched %r, line %d (expected %r)" % \
+                        (token.string, y, self.open_tokens[marker.name])
+            else:
+                raise Exception, "what? %r" % marker.name
+        if i == 0:
+            self._save_curr_level()
+    def _is_open_token(self, token):
+        return token.name == 'delimiter' and token.string in self.open_tokens
+    def _handle_open_token(self, y, tokens, start, end, i, token):
+        if i == 0 and self.stack and self.stack[-1].name == 'continue':
+            self.stack.pop()
+        if token.string == '{':
+            self._pop_while('continue', 'control')
+        if i == end - start:
+            level = self._get_next_level()
+        else:
+            level = tokens[i + 1].x
+        self._append(token.string, level)
+    def _handle_other_token(self, y, tokens, start, end, i, token):
+        name, s = token.name, token.string
+        if i + start == end:
+            self._pop_while('continue', 'control')
+        if name == 'continuation':
+            self._append_unless('continue', self._get_next_level())
+        elif name == 'keyword' and s in ('if', 'else', 'while', 'do', 'for'):
+            if i == start:
+                self._save_curr_level()
+            self._append_unless('control', self._get_next_level())
 class Awk(mode.Fundamental):
+    tabbercls = AwkTabber
     modename = 'awk'
     extensions = ['.awk']
     grammar = AwkGrammar
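
For readers unfamiliar with the stack-tabber idea the new AwkTabber builds on, here is a minimal, self-contained sketch (not part of this commit, and not pmacs2 API): each opening delimiter pushes a Marker carrying the indent level that lines inside the block should receive, a closing delimiter pops it, and a line's indent is whatever level is on top of the stack when the line is considered. The names OPEN, CLOSE, indent_levels and tabwidth below are illustrative stand-ins; only the Marker(name, level) shape mirrors the diff above.

# Toy illustration of marker-stack indentation (simplified; not pmacs2 code).
OPEN  = {'{': '}', '(': ')', '[': ']'}
CLOSE = {'}': '{', ')': '(', ']': '['}

class Marker(object):
    def __init__(self, name, level):
        self.name  = name   # which delimiter opened this block
        self.level = level  # indent level for lines inside the block

def indent_levels(lines, tabwidth=4):
    """Compute an indent level for each line of a brace-delimited snippet."""
    stack  = []
    levels = []
    for line in lines:
        text = line.strip()
        # A line that starts with a closing delimiter dedents immediately.
        if text[:1] in CLOSE and stack:
            stack.pop()
            text = text[1:]
        levels.append(stack[-1].level if stack else 0)
        # Remaining delimiters on the line adjust the stack for later lines.
        for ch in text:
            if ch in OPEN:
                base = stack[-1].level if stack else 0
                stack.append(Marker(ch, base + tabwidth))
            elif ch in CLOSE and stack:
                stack.pop()
    return levels

if __name__ == '__main__':
    src = ['BEGIN {', 'x = 1', 'if (x) {', 'print x', '}', '}']
    for lvl, line in zip(indent_levels(src), src):
        print(' ' * lvl + line)

Run on the sample input, this prints the snippet re-indented to levels 0, 4, 4, 8, 4, 0, which is the same shape of result the real tabber records in self.lines[y]; the commit's version differs in that it walks lexer tokens rather than raw characters and also tracks 'control' and 'continue' markers for unbraced if/else bodies and line continuations.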