branch: pmacs2
moculus 2007-06-27 03:54:23 +00:00
parent 865b3b33bc
commit c17738be12
4 changed files with 0 additions and 450 deletions


@@ -1,83 +0,0 @@
import tab

class PerlTabber(tab.TokenStackTabber):
    close_tags = {'}': '{',
                  ')': '(',
                  ']': '['}

    def error(self, s):
        self.mode.window.application.set_error(s)
        self.errors = True

    def base_indentation_level(self, y):
        if y == 0:
            return True
        lines = self.mode.window.buffer.lines
        if y < len(lines) and lines[y].startswith('sub '):
            return True
        return False

    def stack_append_const(self, c):
        if self.tab_stack[-1][0] != c:
            self.stack_append((c, self.tab_stack[-1][1] + 4))
    def stack_pop_const(self, *c_args):
        if self.tab_stack[-1][0] in c_args:
            self.stack_pop()
    def stack_pop_all_const(self, *c_args):
        while self.tab_stack[-1][0] in c_args:
            self.stack_pop()
    def handle_token(self, prev_token, token, next_token, y=None):
        buffer = self.mode.window.buffer
        name = token.name
        s = token.string

        if name == "delimiter":
            if s == "{" or s == "(" or s == "[":
                if prev_token is None:
                    self.stack_pop_all_const("cont")
                if next_token is None:
                    self.stack_append((s, self.tab_stack[-1][1] + 4))
                else:
                    p = buffer.get_offset_point(next_token.start)
                    self.stack_append((s, p.x))
            elif s == "}" or s == ")" or s == "]":
                self.stack_pop_all_const("cont")
                if self.tab_stack[-1][0] == self.close_tags[s]:
                    self.stack_pop()
                    if prev_token is None:
                        self.line_depth = self.tab_stack[-1][1]
                elif self.errors is False:
                    self.error("tag mismatch, line %d: expected %r, got %r" %
                               (self.y, self.tab_stack[-1][0], s))
                if s == "}":
                    self.stack_pop_all_const("cont")
                else:
                    pass
            elif s == ";":
                self.stack_pop_all_const("cont")
        elif name == "heredoc":
            if token.start > self.start_offset:
                self.stack_append(('heredoc', -1))
            elif token.end <= self.end_offset:
                self.stack_pop_all_const("heredoc")
                self.stack_pop_all_const("cont")
        elif name == "pod":
            if token.start >= self.start_offset:
                self.stack_append(('pod', -1))
            elif token.end <= self.end_offset:
                assert self.tab_stack[-1][0] == 'pod', "expected 'pod' on tab stack"
                self.stack_pop()
                self.line_depth = self.tab_stack[-1][1]
        if (name != "heredoc" and
            name != "endblock" and
            name != "pod" and
            name != "comment" and
            s != "}" and
            s != ";" and
            s != "(" and
            s != "{" and
            s != "[" and
            s != ",") and next_token is None:
            self.stack_append_const("cont")
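
For reference, the tabber above tracks indentation as a stack of (tag, depth) pairs: openers push a deeper level, closers pop back to the enclosing one, and "cont" entries mark continuation lines. A minimal standalone sketch of the stack idea (names and the 4-space width here are illustrative, not pmacs2 API):

def depths_for(tokens, width=4):
    stack = [('', 0)]    # sentinel keeps top-level code at depth 0
    out = []
    for s in tokens:
        if s in ('{', '(', '['):
            stack.append((s, stack[-1][1] + width))    # opener: one level deeper
        elif s in ('}', ')', ']') and len(stack) > 1:
            stack.pop()                                # closer: back to enclosing depth
        out.append(stack[-1][1])
    return out

print depths_for(['{', 'x', ';', '}'])    # => [4, 4, 4, 0]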


@@ -1,141 +0,0 @@
import point, regex, util, tab

class PythonTabber(tab.Tabber):
    start_tags = {'(': ')',
                  '{': '}',
                  '[': ']'}
    close_tags = {')': '(',
                  '}': '{',
                  ']': '['}
    def __init__(self, m):
        tab.Tabber.__init__(self, m)
        self.y = None
        self.index = None
        self.tab_stack = None
        self.line_depth = None
        # per-line indentation cache consulted by get_indentation_level()
        self.levels = None
    def stack_append(self, item):
        self.tab_stack.append(item)

    def stack_pop(self):
        self.tab_stack.pop(-1)

    def base_indentation_level(self, y):
        return y == 0

    def calculate_tabs(self, start=0, goal=None):
        lines = self.mode.window.buffer.lines
        tokens = self.mode.highlighter.tokens
        buffer = self.mode.window.buffer

        if self.levels is None:
            self.levels = [None] * len(lines)

        self.index = 0
        self.y = start
        self.base = 0
        self.tab_stack = []

        # we want to process every logical line in the file
        while self.y < len(lines):
            line = lines[self.y]
            start_index = self.index

            start_point = point.Point(0, self.y)
            start_offset = buffer.get_point_offset(start_point)
            end_point = point.Point(len(line), self.y)
            end_offset = buffer.get_point_offset(end_point)

            # we want to find all the tokens on the line we are currently processing
            while self.index < len(tokens):
                token = tokens[self.index]
                if token.end > end_offset:
                    break
                self.index += 1

            self.handle_line(line,
                             start_offset, start_index,
                             end_offset, self.index)

            self.levels[self.y] = self.line_depth
            self.y += 1
            if goal is not None and self.y > goal:
                break

    def get_line_depth(self):
        if len(self.tab_stack) > 0:
            return self.tab_stack[-1][1]
        else:
            return self.base
    def handle_line(self, line, start_offset, start_index, end_offset, end_index):
        self.line_depth = self.get_line_depth()
        tokens = self.mode.highlighter.tokens

        if start_index >= len(tokens):
            return
        if regex.whitespace.match(line):
            return

        if len(self.tab_stack) == 0 and tokens[start_index].start >= start_offset:
            self.base = util.count_leading_whitespace(line)

        for i in range(start_index, end_index):
            token = tokens[i]
            s = token.string
            if s in self.start_tags:
                # indent to just past the opener, or one level deeper if it
                # ends the line
                if i < end_index - 1:
                    depth = tokens[i+1].start - start_offset
                elif len(self.tab_stack) > 0:
                    depth = self.tab_stack[-1][1] + 4
                else:
                    depth = self.base + 4
                self.stack_append((s, depth))
            elif s in self.close_tags:
                assert len(self.tab_stack), "Unbalanced closing tag"
                assert self.tab_stack[-1][0] == self.close_tags[s], "Unmatched closing tag"
                self.stack_pop()
                if i == start_index:
                    self.line_depth = self.get_line_depth()

        if tokens[start_index].start < start_offset:
            self.line_depth = -1

        prebase = self.base
        s = tokens[start_index].string
        e = tokens[end_index-1].string

        if s == "except" or s == "elif" or s == "else":
            if self.y > 0 and self.line_depth == self.levels[self.y - 1]:
                self.line_depth = max(0, self.line_depth - 4)
        elif (s == "return" or s == "raise" or s == "yield" or s == "break" or
              s == "pass" or s == 'continue'):
            self.base = max(0, self.base - 4)

        if e == "\\":
            if len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
                pass
            else:
                self.stack_append(("\\", prebase + 4))
            return
        elif e == ":":
            self.base += 4
        elif len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
            self.stack_pop()
    def get_indentation_level(self, y):
        if self.levels is not None and self.levels[y] is not None:
            result = self.levels[y]
        else:
            i = max(0, y - 1)
            while i > 0:
                if self.base_indentation_level(i):
                    break
                i -= 1
            self.calculate_tabs(i, y)
            result = self.levels[y]
        if result == -1:
            return None
        return result
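
For reference, get_indentation_level answers from the levels cache when it can; on a miss it rewinds to the nearest base indentation line and recomputes forward through line y. A standalone sketch of that memoization pattern, with hypothetical names (compute_range stands in for calculate_tabs):

def lazy_level(levels, y, is_base, compute_range):
    # levels: per-line cache, None where nothing is computed yet
    if levels[y] is None:
        i = max(0, y - 1)
        while i > 0 and not is_base(i):
            i -= 1                 # rewind to a safe re-tab starting point
        compute_range(i, y)        # fills levels[i] .. levels[y]
    return levels[y]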


@@ -1,21 +0,0 @@
#!/usr/bin/python

import sys
import lex2, lex2_perl

paths = sys.argv[1:]

for path in paths:
    f = open(path, 'r')
    data = f.read()
    f.close()

    lines = data.split('\n')

    grammar = lex2_perl.PerlGrammar()
    lexer = lex2.Lexer('lexer', grammar)
    lexer.lex(lines)

    print path
    for token in lexer:
        print '%-30s| %-6s | %r' % (token.name,
                                    '(%d,%d)' % (token.x, token.y),
                                    token.string)
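
Run against one or more Perl files, the script prints each file path followed by one formatted row per token. The exact rows depend on PerlGrammar, so the sample below (for a hypothetical foo.pl whose first line is '$x = 1;') is only indicative of the shape:

foo.pl
scalar                        | (0,0)  | '$x'
null                          | (2,0)  | ' '
operator                      | (3,0)  | '='
null                          | (4,0)  | ' '
number                        | (5,0)  | '1'
delimiter                     | (6,0)  | ';'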

test3.py

@@ -1,205 +0,0 @@
#!/usr/bin/python

import sys
import lex2, lex2_perl, lex2_python, highlight2

color_list = []
color_list.extend(['\033[3%dm' % x for x in range(0, 8)])
color_list.extend(['\033[3%d;1m' % x for x in range(0, 8)])
color_list.append('\033[0m')

color_names = [
    'black', 'dred', 'dgreen', 'brown', 'dblue', 'dpurple', 'dcyan', 'lgrey',
    'dgrey', 'lred', 'lgreen', 'yellow', 'lblue', 'lpurple', 'lcyan', 'white',
    'unset',
]

color_dict = {}
for i in range(0, len(color_list)):
    color_dict[color_names[i]] = color_list[i]
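# e.g. color_dict['dred'] == '\033[31m' (the normal run), color_dict['lred']
# == '\033[31;1m' (the bold run), and color_dict['unset'] == '\033[0m',
# which clears all attributes.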
token_colors = {
    'perl': {
        # basic stuff
        'escaped': 'lpurple',
        'null': 'white',
        'delimiter': 'white',
        'sub': 'lcyan',
        'number': 'white',
        'operator': 'white',
        'endblock': 'lred',
        'keyword': 'lpurple',
        'scalar': 'yellow',
        'array': 'yellow',
        'deref': 'yellow',
        'hash': 'yellow',
        'hash_key': 'lgreen',
        'comment': 'lred',
        'function': 'lcyan',
        'builtin': 'lpurple',
        'method': 'lcyan',
        'bareword': 'white',
        'label': 'lcyan',
        'package': 'lcyan',
        'class': 'lcyan',
        'use': 'lcyan',

        # heredoc
        'heredoc1.start': 'lgreen',
        'heredoc1.null': 'lgreen',
        'heredoc1.end': 'lgreen',
        'heredoc2.start': 'lgreen',
        'heredoc2.null': 'lgreen',
        'heredoc2.end': 'lgreen',
        'eval_heredoc.start': 'lcyan',
        'eval_heredoc.null': 'lcyan',
        'eval_heredoc.end': 'lcyan',

        # pod
        'pod.start': 'lred',
        'pod.null': 'lred',
        'pod.entry': 'lpurple',
        'pod.end': 'lred',

        # "" strings
        'string1.start': 'lgreen',
        'string1.null': 'lgreen',
        'string1.escaped': 'lpurple',
        'string1.deref': 'yellow',
        'string1.end': 'lgreen',

        # '' strings
        'string2.start': 'lgreen',
        'string2.null': 'lgreen',
        'string2.end': 'lgreen',

        # `` strings
        'evalstring': 'lcyan',

        # quoted region
        'quoted': 'lcyan',
        'quoted.start': 'lcyan',
        'quoted.null': 'lcyan',
        'quoted.end': 'lcyan',

        # match regex
        'match.start': 'lcyan',
        'match.end': 'lcyan',
        'match.null': 'lcyan',

        # replace regex
        'replace.start': 'lcyan',
        'replace.middle': 'lcyan',
        'replace.end': 'lcyan',
        'replace.null': 'lcyan',

        # translate regex
        'translate.start': 'lpurple',
        'translate.middle': 'lpurple',
        'translate.end': 'lpurple',
        'translate.null': 'lpurple',
    },
    'python': {
        'keyword': 'lcyan',
        'builtin_method': 'lcyan',
        'methodname': 'lblue',
        'classname': 'lgreen',
        'string.start': 'lgreen',
        'string.null': 'lgreen',
        'string.escaped': 'lpurple',
        'string.octal': 'lpurple',
        'string.format': 'yellow',
        'string.end': 'lgreen',
        'integer': 'lred',
        'float': 'lred',
        'imaginary': 'lred',
        'tq_string.start': 'lgreen',
        'tq_string.null': 'lgreen',
        'tq_string.end': 'lgreen',
        'docstring.start': 'lgreen',
        'docstring.null': 'lgreen',
        'docstring.end': 'lgreen',
        'comment': 'lred',
        'continuation': 'lred',
        #'operator': 'yellow',
        #'delimiter': 'lpurple',
        'system_identifier': 'lcyan',
        #'bound method': color.build('yellow', 'default'),
        'import': 'lpurple',
        #'bizzaro': 'lpurple',
    },
}

grammars = {
    'perl': lex2_perl.PerlGrammar,
    'python': lex2_python.PythonGrammar,
}

import optparse

parser = optparse.OptionParser()
parser.add_option('-d', '--dump', dest='dump', action='store_true', default=False)
parser.add_option('-g', '--grammar', dest='grammar', action='store', default='python')
parser.add_option('-n', '--normal', dest='normal', action='store_true', default=False)
(opts, args) = parser.parse_args()
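
# Illustrative invocation (the file argument is whatever you want lexed):
#     ./test3.py -g perl -n somefile.pl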
for path in args:
    f = open(path, 'r')
    data = f.read()
    f.close()

    lines = data.split('\n')
    lexer = lex2.Lexer('lexer', grammars[opts.grammar]())

    h = highlight2.Highlighter(lexer)
    h.highlight(lines)

    if opts.normal:
        if opts.dump:
            h.dump()
        else:
            h.display(token_colors[opts.grammar])
    elif False:
        # exercise deletion: remove the region between (y1, x1) and (y2, x2)
        (y1, x1) = (5, 9)
        (y2, x2) = (7, 14)
        #(y2, x2) = (82, 2)
        for i in range(y1 + 1, y2):
            del lines[y1 + 1]
        lines[y1] = lines[y1][0:x1] + lines[y1 + 1][x2:]
        del lines[y1 + 1]
        h.relex_del(lines, y1, x1, y2, x2)
        #h.update_del(lines, y1, x1, y2, x2)
        #h.highlight(lines)
        if opts.dump:
            h.dump()
        else:
            h.display(token_colors[opts.grammar])
    else:
        # exercise insertion: splice newlines into the buffer at (y1, x1)
        #newlines = ['one two three']
        newlines = ['one two three', 'cat', 'dog', 'del self.foo[3]', 'oops']
        (y1, x1) = (5, 9)
        if len(newlines) > 1:
            lines.insert(y1 + 1, newlines[-1] + lines[y1][x1:])
            lines[y1] = lines[y1][:x1] + newlines[0]
            for i in range(1, len(newlines) - 1):
                newline = newlines[i]
                lines.insert(y1 + i, newline)
        else:
            lines[y1] = lines[y1][:x1] + newlines[0] + lines[y1][x1:]
        h.relex_add(lines, y1, x1, newlines)
        #h.update_add(lines, y1, x1, newlines)
        #h.highlight(lines)
        if opts.dump:
            h.dump()
        else:
            h.display(token_colors[opts.grammar])
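
The insertion branch splices newlines into the buffer and then asks the highlighter to relex only the affected region via relex_add. A quick standalone check of just the splice arithmetic (the buffer contents below are made up):

lines = ['0123456789ABCDEF'] * 8
newlines = ['one two three', 'cat', 'dog', 'del self.foo[3]', 'oops']
(y1, x1) = (5, 9)
lines.insert(y1 + 1, newlines[-1] + lines[y1][x1:])    # tail of the split line
lines[y1] = lines[y1][:x1] + newlines[0]               # head of the split line
for i in range(1, len(newlines) - 1):
    lines.insert(y1 + i, newlines[i])                  # middle lines, in order
print lines[5:10]
# => ['012345678one two three', 'cat', 'dog', 'del self.foo[3]',
#     'oops9ABCDEF']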