#!/usr/bin/python

import sys
import optparse

import lex2, lex2_perl, lex2_python, highlight2

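# Build the table of ANSI escape sequences that color names will map onto:
# '\033[3%dm' produces the eight normal foreground colors, '\033[3%d;1m'
# their bold/bright variants, and '\033[0m' resets the terminal attributes.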
color_list = []
color_list.extend(['\033[3%dm' % x for x in range(0, 8)])
color_list.extend(['\033[3%d;1m' % x for x in range(0, 8)])
color_list.append('\033[0m')

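# Readable names for the 17 escape sequences above, in the same order, plus
# a color_dict that lets callers look an escape sequence up by name.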
color_names = [
    'black', 'dred', 'dgreen', 'brown', 'dblue', 'dpurple', 'dcyan', 'lgrey',
    'dgrey', 'lred', 'lgreen', 'yellow', 'lblue', 'lpurple', 'lcyan', 'white',
    'unset',
]

color_dict = {}
for i in range(0, len(color_list)):
    color_dict[color_names[i]] = color_list[i]

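# Per-grammar color schemes: map the token names each lexer emits onto the
# color names defined above.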
token_colors = {
    'perl': {
        # basic stuff
        'escaped': 'lpurple',
        'null': 'white',
        'delimiter': 'white',
        'sub': 'lcyan',
        'number': 'white',
        'operator': 'white',
        'endblock': 'lred',
        'keyword': 'lpurple',
        'scalar': 'yellow',
        'array': 'yellow',
        'deref': 'yellow',
        'hash': 'yellow',
        'hash_key': 'lgreen',
        'comment': 'lred',
        'function': 'lcyan',
        'builtin': 'lpurple',
        'method': 'lcyan',
        'bareword': 'white',
        'label': 'lcyan',
        'package': 'lcyan',
        'class': 'lcyan',
        'use': 'lcyan',

        # heredoc
        'heredoc1.start': 'lgreen',
        'heredoc1.null': 'lgreen',
        'heredoc1.end': 'lgreen',
        'heredoc2.start': 'lgreen',
        'heredoc2.null': 'lgreen',
        'heredoc2.end': 'lgreen',
        'eval_heredoc.start': 'lcyan',
        'eval_heredoc.null': 'lcyan',
        'eval_heredoc.end': 'lcyan',

        # pod
        'pod.start': 'lred',
        'pod.null': 'lred',
        'pod.entry': 'lpurple',
        'pod.end': 'lred',

        # "" strings
        'string1.start': 'lgreen',
        'string1.null': 'lgreen',
        'string1.escaped': 'lpurple',
        'string1.deref': 'yellow',
        'string1.end': 'lgreen',

        # '' strings
        'string2.start': 'lgreen',
        'string2.null': 'lgreen',
        'string2.end': 'lgreen',

        # `` strings
        'evalstring': 'lcyan',

        # quoted region
        'quoted': 'lcyan',
        'quoted.start': 'lcyan',
        'quoted.null': 'lcyan',
        'quoted.end': 'lcyan',

        # match regex
        'match.start': 'lcyan',
        'match.end': 'lcyan',
        'match.null': 'lcyan',

        # replace regex
        'replace.start': 'lcyan',
        'replace.middle': 'lcyan',
        'replace.end': 'lcyan',
        'replace.null': 'lcyan',

        # translate regex
        'translate.start': 'lpurple',
        'translate.middle': 'lpurple',
        'translate.end': 'lpurple',
        'translate.null': 'lpurple',
    },

    'python': {
        'keyword': 'lcyan',
        'builtin_method': 'lcyan',
        'methodname': 'lblue',
        'classname': 'lgreen',

        'string.start': 'lgreen',
        'string.null': 'lgreen',
        'string.escaped': 'lpurple',
        'string.octal': 'lpurple',
        'string.format': 'yellow',
        'string.end': 'lgreen',

        'integer': 'lred',
        'float': 'lred',
        'imaginary': 'lred',

        'tq_string.start': 'lgreen',
        'tq_string.null': 'lgreen',
        'tq_string.end': 'lgreen',

        'docstring.start': 'lgreen',
        'docstring.null': 'lgreen',
        'docstring.end': 'lgreen',

        'comment': 'lred',
        'continuation': 'lred',
        #'operator': 'yellow',
        #'delimiter': 'lpurple',
        'system_identifier': 'lcyan',
        #'bound method': color.build('yellow', 'default'),
        'import': 'lpurple',
        #'bizzaro': 'lpurple',
    },
}

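# The grammars this script knows how to lex; the -g/--grammar option below
# picks one of these keys.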
grammars = {
    'perl': lex2_perl.PerlGrammar,
    'python': lex2_python.PythonGrammar,
}

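# Command-line options (defaults: grammar='python', dump and normal off):
#   -d/--dump     call h.dump() on the highlighter instead of h.display()
#   -g/--grammar  which grammar to lex with ('python' or 'perl')
#   -n/--normal   highlight and show the file as-is, skipping the edit tests
# Example invocation (script and file names here are just placeholders):
#   python <this_script> -n -g perl some_file.pl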
parser = optparse.OptionParser()
parser.add_option('-d', '--dump', dest='dump', action='store_true', default=False)
parser.add_option('-g', '--grammar', dest='grammar', action='store', default='python')
parser.add_option('-n', '--normal', dest='normal', action='store_true', default=False)

(opts, args) = parser.parse_args()

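# For each file named on the command line: read it, lex and highlight it,
# then run one of three test paths -- plain display, a (currently disabled)
# deletion test, or an insertion test.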
for path in args:
    f = open(path, 'r')
    data = f.read()
    f.close()

    lines = data.split('\n')
    lexer = lex2.Lexer('lexer', grammars[opts.grammar]())

    h = highlight2.Highlighter(lexer)
    h.highlight(lines)

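    # Mode 1 (-n/--normal): just dump or display the freshly highlighted file.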
    if opts.normal:
        if opts.dump:
            h.dump()
        else:
            h.display(token_colors[opts.grammar])
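    # Mode 2 (currently disabled via 'elif False'): simulate deleting the
    # region between (y1, x1) and (y2, x2) from the buffer, then hand the
    # edit to relex_del, which presumably relexes only the affected lines.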
    elif False:
        (y1, x1) = (5, 9)
        (y2, x2) = (7, 14)
        #(y2, x2) = (82, 2)
        for i in range(y1 + 1, y2):
            del lines[y1 + 1]
        lines[y1] = lines[y1][0:x1] + lines[y1 + 1][x2:]
        del lines[y1 + 1]

        h.relex_del(lines, y1, x1, y2, x2)
        #h.update_del(lines, y1, x1, y2, x2)
        #h.highlight(lines)
        if opts.dump:
            h.dump()
        else:
            h.display(token_colors[opts.grammar])
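    # Mode 3 (the default): simulate typing 'newlines' into the buffer at
    # (y1, x1), splicing the surrounding line apart the way an editor would,
    # then hand the edit to relex_add to refresh the highlighting.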
    else:
        #newlines = ['one two three']
        newlines = ['one two three', 'cat', 'dog', 'del self.foo[3]', 'oops']
        (y1, x1) = (5, 9)

        if len(newlines) > 1:
            lines.insert(y1 + 1, newlines[-1] + lines[y1][x1:])
            lines[y1] = lines[y1][:x1] + newlines[0]
            for i in range(1, len(newlines) - 1):
                newline = newlines[i]
                lines.insert(y1 + i, newline)
        else:
            lines[y1] = lines[y1][:x1] + newlines[0] + lines[y1][x1:]

        h.relex_add(lines, y1, x1, newlines)
        #h.update_add(lines, y1, x1, newlines)
        #h.highlight(lines)
        if opts.dump:
            h.dump()
        else:
            h.display(token_colors[opts.grammar])