parent 683afdf129
commit 74ab76836c

@@ -25,7 +25,6 @@ import keyinput
 import method
 from minibuffer import MiniBuffer, MiniBufferError
 import mode
 from point import Point
 import util
 from window import Window
 
@@ -59,31 +58,31 @@ class Application(object):
         default = ('default', 'default')
 
         # magenta is for keywords/builtins, translation, globs
-        lo_magenta = ('magenta202', 'default')
+        #lo_magenta = ('magenta202', 'default')
         hi_magenta = ('magenta505', 'default')
 
         # red is for comments, pods, endblocks
-        lo_red = ('red300', 'default')
+        #lo_red = ('red300', 'default')
         hi_red = ('red511', 'default')
 
         # orange are for arrays and hashes
-        hi_orange = ('yellow531', 'default')
-        lo_orange = ('yellow520', 'default')
+        #hi_orange = ('yellow531', 'default')
+        #lo_orange = ('yellow520', 'default')
 
         # yellow is for scalars and prototypes
-        hi_yellow = ('yellow551', 'default')
-        lo_yellow = ('yellow330', 'default')
+        #hi_yellow = ('yellow551', 'default')
+        #lo_yellow = ('yellow330', 'default')
 
         # green is for strings and hash keys
         lo_green = ('green030', 'default')
         hi_green = ('green050', 'default')
 
         # cyan is for quotes, evals, regexes, subs
-        lo_cyan = ('cyan033', 'default')
+        #lo_cyan = ('cyan033', 'default')
         hi_cyan = ('cyan155', 'default')
 
         # blue is unused
-        lo_blue = ('blue113', 'default')
+        #lo_blue = ('blue113', 'default')
         hi_blue = ('blue225', 'default')
 
         # let's prepopulate some default token colors
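The color names above look like an RGB triple in the xterm-256 6x6x6 color cube appended to a hue label: 'magenta505' reads as (r=5, g=0, b=5), 'yellow551' as (5, 5, 1). Assuming that scheme (this hunk does not spell it out), such a name maps to a terminal color index like so:

    import re

    def xterm_index(name):
        # assuming 'red511'-style names encode cube coordinates 0-5
        m = re.match(r'[a-z]+([0-5])([0-5])([0-5])$', name)
        if m is None:
            raise ValueError(name)
        r, g, b = (int(d) for d in m.groups())
        return 16 + 36 * r + 6 * g + b    # the xterm cube starts at index 16

    print(xterm_index('magenta505'))      # 201, xterm's bright magenta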
gdb.py | 1

@@ -1,7 +1,6 @@
 #!/usr/bin/env python
 import os, re, sys
 from subprocess import Popen, PIPE, STDOUT
 import cache
 
 _can_debug = os.system('which gdb > /dev/null 2>&1') == 0
 
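The module-level probe in gdb.py runs once at import time: os.system returns the shell's exit status, so zero means `which` found a gdb executable on the PATH. A minimal sketch of how a caller might consume that flag (ensure_gdb is a hypothetical helper, not part of this codebase):

    import os

    _can_debug = os.system('which gdb > /dev/null 2>&1') == 0

    def ensure_gdb():
        # hypothetical guard before spawning a debugger subprocess
        if not _can_debug:
            raise RuntimeError('gdb not found on PATH')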
highlight.py | 19

@@ -19,21 +19,6 @@ def setup():
         color_dict[color_names[i]] = color_list[i]
 setup()
 
-#def token_match(self, token, name, data=None):
-#    return token.fqname() == name and data is None or token.string == data
-#def token_match2(self, token, name, regex):
-#    return token.fqname() == name and regex.match(token.string)
-#def token_vmatch(self, token, *pairs):
-#    for (name, data) in pairs:
-#        if token_match(token, name, data):
-#            return True
-#    return False
-#def token_vmatch2(self, token, *pairs):
-#    for (name, regex) in pairs:
-#        if token_match(token, name, regex):
-#            return True
-#    return False
-
 class Highlighter(object):
     def __init__(self, lexer):
         self.lexer = lexer

@@ -87,7 +72,7 @@ class Highlighter(object):
             y += 1
 
     def highlight(self, lines):
-        self.tokens = [[] for l in lines]
+        self.tokens = [[] for _ in lines]
         for token in self.lexer.lex(lines, y=0, x=0):
             self.tokens[token.y].append(token)
 
@@ -203,7 +188,7 @@ class Highlighter(object):
 
         # ok, so now we need to "adjust" the (x,y) coordinates of all the tokens
        # after the change. first we will copy over the pre-deletion tokens.
-        newtokens = [[] for x in range(0, len(self.tokens) - y2 + y1)]
+        newtokens = [[] for _ in range(0, len(self.tokens) - y2 + y1)]
 
         for y in range(0, y1):
             for token in self.tokens[y]:
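The `[[] for l in lines]` to `[[] for _ in lines]` changes here and in lex.py only rename an unused loop variable to the conventional underscore, but the comprehension itself is load-bearing: it builds one fresh list per line, whereas the shorter-looking `[[]] * len(lines)` would alias a single list. A quick illustration:

    rows = [[] for _ in range(3)]   # three distinct lists
    bad = [[]] * 3                  # three references to one list
    bad[0].append('x')
    print(bad)                      # [['x'], ['x'], ['x']]
    print(rows)                     # [[], [], []]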
lex.py | 2

@@ -488,7 +488,7 @@ class Lexer(object):
     def get_line(self):
         return self.lines[self.y] + '\n'
     def lex_all(self, lines):
-        lextokens = [[] for l in lines]
+        lextokens = [[] for _ in lines]
         for t in self.lex(lines):
             lextokens[t.y].append(t)
         return lextokens
miniparse.py | 19

@@ -1,4 +1,6 @@
-import parser, symbol, sys, token
+import parser
+import sys
+import token
 from pprint import pprint
 
 def proc(asttup):

@@ -15,10 +17,11 @@ def proc(asttup):
            queue.insert(i, node[i + 1])
     return pairs
 
-for name in sys.argv[1:]:
-    f = open(name, 'r')
-    code = f.read()
-    f.close()
-    ast = parser.suite(code)
-    pairs = proc(ast.totuple())
-    pprint(pairs)
+if __name__ == "__main__":
+    for name in sys.argv[1:]:
+        f = open(name, 'r')
+        code = f.read()
+        f.close()
+        ast = parser.suite(code)
+        pairs = proc(ast.totuple())
+        pprint(pairs)
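Moving miniparse.py's driver loop under a main guard means importing the module no longer executes it; only direct invocation does. The idiom in isolation:

    def main():
        # runs when executed as `python thisfile.py`,
        # not when another module does `import thisfile`
        print('running as a script')

    if __name__ == "__main__":
        main()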
parse.py | 12

@@ -43,10 +43,10 @@ class String(Rule):
     def __init__(self, s):
         self.string = s
     def _match(self, tokens):
-        if token.string == self.string:
-            return [1]
-        else:
-            return []
+        for token in tokens:
+            if token.string == self.string:
+                return [1]
+        return []
 class Strings(Rule):
     def __init__(self, ss):
         self.strings = ss

@@ -113,12 +113,12 @@ class Repeat(Rule):
         self.maximum = maximum
     def match(self, tokens):
         n = 0
-        for i in range(0, self.minimum):
+        for _ in range(0, self.minimum):
             result = self.rule.match(tokens[n:])
             if not result:
                 return []
             n += result[0]
-        for i in range(self.minimum, self.maximum):
+        for _ in range(self.minimum, self.maximum):
             result = self.rule.match(tokens[n:])
             if not result:
                 return [n]
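The old String._match consulted a name, token, that nothing in scope bound (or, if a module named token happened to be imported, the wrong object entirely); the rewrite binds it by iterating the token stream. Python defers name resolution to call time, which is why the broken definition itself raised nothing. A stripped-down reproduction of the failure mode:

    def broken(tokens):
        # 'token' was never assigned here or in an enclosing scope
        if token.string == 'x':
            return [1]
        return []

    try:
        broken([])
    except NameError as e:
        print(e)    # name 'token' is not defined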
render.py

@@ -1,3 +1,4 @@
+import curses
 import color
 from point import Point
 
@@ -37,7 +38,7 @@ class RenderString(object):
             s = self.string
             s2 = s.encode('utf-8')
             cwin.addstr(self.y + y, self.x + x, s2, self.attrs)
-        except Exception, e:
+        except curses.error:
             raise
             #v = (self.y, y, self.x, x, self.string, self.attrs, str(e))
             #raise Exception, "cwin.addstr(%d + %d, %d + %d, %r, %r) failed:\n%s" % v
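Two things change in the RenderString handler: the Python 2-only `except Exception, e:` comma syntax goes away, and the catch narrows from Exception to curses.error, so unrelated bugs are no longer caught by the clause that absorbs curses quirks. A sketch of the narrowed form in a hypothetical helper (curses.addstr raises curses.error when, for example, painting the bottom-right cell of a window):

    import curses

    def safe_addstr(cwin, y, x, s, attrs=0):
        # hypothetical wrapper: swallow only curses' own complaint,
        # and let genuine programming errors propagate
        try:
            cwin.addstr(y, x, s, attrs)
        except curses.error:
            pass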
@@ -1,5 +1,4 @@
-import re
 import regex
 from point import Point
 from render import HighlightRegion
 
tab.py | 2

@@ -1,4 +1,4 @@
-import regex, util
+import regex
 from point import Point
 
 class Marker(object):
term.py | 10

@@ -36,6 +36,10 @@ def make_cbuf(fg, bg, xt):
 class Dumb:
     name = 'dumb'
     cbuf = False
+    # style info
+    _fg = 'default'
+    _bg = 'default'
+    _xt = set()
     def _term_insert(self, s):
         assert self.i <= len(self.outc)
         if self.i == len(self.outc):

@@ -48,13 +52,15 @@
         self.outs = ''
         self.i = 0
         self.outc = []
-        if self.cbuf: self._term_insert(make_cbuf(self._fg, self._bg, self._xt))
+        if self.cbuf:
+            self._term_insert(make_cbuf(self._fg, self._bg, self._xt))
 
     def term_do_clear_bol(self):
         pass
     def term_do_clear_eol(self):
         del self.outc[self.i:]
-        if self.cbuf: self._term_insert(make_cbuf(self._fg, self._bg, self._xt))
+        if self.cbuf:
+            self._term_insert(make_cbuf(self._fg, self._bg, self._xt))
     def term_do_clear_eos(self):
         pass
     def term_do_home(self):
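The new _fg/_bg/_xt attributes on Dumb are class-level defaults: every instance reads them until it assigns its own. That is harmless for the two strings, but a mutable default like set() is shared in-place across instances unless each one rebinds it, which is worth keeping in mind here:

    class Dumb(object):
        _xt = set()                # one set shared by the whole class

    a, b = Dumb(), Dumb()
    a._xt.add('bold')              # mutates the shared default
    print('bold' in b._xt)         # True: b sees a's change
    a._xt = set()                  # rebinding creates a per-instance set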
util.py | 4

@@ -137,9 +137,9 @@ except:
                 dict.__repr__(self))
 
 def decode(s):
-    for format in ('utf-8', 'latin-1'):
+    for coding in ('utf-8', 'latin-1'):
         try:
-            return s.decode(format)
+            return s.decode(coding)
         except:
             pass
     return s.decode('ascii', 'replace')
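The decode fix renames the loop variable from format, which shadows the format() builtin, to coding; behavior is unchanged. For reference, the resulting fallback chain as a standalone Python 2 sketch, with the source's bare except narrowed to UnicodeDecodeError (strictly, latin-1 accepts every byte sequence, so the final ascii line is purely defensive):

    def decode(s):
        # s is a Python 2 byte string
        for coding in ('utf-8', 'latin-1'):
            try:
                return s.decode(coding)
            except UnicodeDecodeError:
                pass
        return s.decode('ascii', 'replace')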
window.py

@@ -1,5 +1,7 @@
-import os.path, string
-import color, highlight, regex
+import os.path
+import color
+import highlight
+import regex
 from point import Point
 from render import RenderString
 

@@ -453,7 +455,7 @@ class Window(object):
         self.goto(Point(0, y))
     def forward_chars(self, n):
         (x, y) = self.logical_cursor().xy()
-        for i in range(0, n):
+        for _ in range(0, n):
             if x == len(self.buffer.lines[y]):
                 y += 1
                 x = 0
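forward_chars advances the cursor one cell at a time, wrapping to column 0 of the next line whenever x sits at the end of the current one. The same walk as a standalone sketch over a plain list of strings (forward is a hypothetical free function, and the non-wrapping branch is assumed from context since the hunk cuts off below it):

    def forward(lines, x, y, n):
        for _ in range(n):
            if x == len(lines[y]):  # at end of line: wrap down
                y += 1
                x = 0
            else:
                x += 1
        return x, y

    print(forward(['ab', 'cd'], 1, 0, 3))   # (1, 1)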