parent 75d354c2f5
commit 683afdf129
@@ -1,5 +1,6 @@
-import glob, os, pwd
-import method, util
+import glob
+import os
+import util
 
 _completers = {}
 
@@ -16,7 +17,6 @@ def find_common_string(candidates):
     elif len(candidates) == 1:
         return candidates[0]
     else:
-        done = False
         index = 0
         test = candidates[0]
         while True:
@@ -30,7 +30,7 @@ class Completer(object):
    def __init__(self, application):
        self.application = application
    def get_candidates(self, s, w=None):
-        assert "Not implemented"
+        raise Exception("Not implemented")
    def tab_string(self, s, w=None):
        '''returns a tuple of three things:
        1. the new string

highlight.py
@@ -1,10 +1,7 @@
-import re, sys
+import sys
 from lex import Token
 
 color_list = []
-color_list.extend(['\033[3%dm' % x for x in range(0, 8)])
-color_list.extend(['\033[3%d;1m' % x for x in range(0, 8)])
-color_list.extend(['\033[0m'])
 
 color_names = [
     'black', 'dred', 'dgreen', 'brown', 'dblue', 'dpurple', 'dcyan', 'lgrey',
@@ -13,23 +10,29 @@ color_names = [
 ]
 
 color_dict ={}
-for i in range(0, len(color_list)):
-    color_dict[color_names[i]] = color_list[i]
 
-def token_match(self, token, name, data=None):
-    return token.fqname() == name and data is None or token.string == data
-def token_match2(self, token, name, regex):
-    return token.fqname() == name and regex.match(token.string)
-def token_vmatch(self, token, *pairs):
-    for (name, data) in pairs:
-        if token_match(token, name, data):
-            return True
-    return False
-def token_vmatch2(self, token, *pairs):
-    for (name, regex) in pairs:
-        if token_match(token, name, regex):
-            return True
-    return False
+def setup():
+    color_list.extend(['\033[3%dm' % x for x in range(0, 8)])
+    color_list.extend(['\033[3%d;1m' % x for x in range(0, 8)])
+    color_list.extend(['\033[0m'])
+    for i in range(0, len(color_list)):
+        color_dict[color_names[i]] = color_list[i]
+setup()
+
+#def token_match(self, token, name, data=None):
+#    return token.fqname() == name and data is None or token.string == data
+#def token_match2(self, token, name, regex):
+#    return token.fqname() == name and regex.match(token.string)
+#def token_vmatch(self, token, *pairs):
+#    for (name, data) in pairs:
+#        if token_match(token, name, data):
+#            return True
+#    return False
+#def token_vmatch2(self, token, *pairs):
+#    for (name, regex) in pairs:
+#        if token_match(token, name, regex):
+#            return True
+#    return False
 
 class Highlighter(object):
     def __init__(self, lexer):
@@ -266,8 +269,6 @@ class Highlighter(object):
        for t in self.tokens[y1]:
            tx1 = t.x
            tx2 = t.x + len(t.string)
-            ty = t.y
-            ts = t.string
            if tx2 <= x1:
                # '*| ' before the insertion
                newtokens[y1].append(t)

@@ -14,6 +14,7 @@ def get_speller():
        _speller = Speller()
    return _speller
 def free():
+    global _speller
    if _speller:
        _speller.stop()
        _speller = None

keyinput.py
@@ -1,8 +1,9 @@
-import curses, sys, termios
+import sys
+import termios
 
-# this is a huge map of ASCII keycode sequences it should include all
-# the "standard" ones for a US 104 key keyboard. this module may need
-# to support some kind of subclassing in order to be localizable.
+# this is a huge map of ASCII keycode sequences it should include all the
+# "standard" ones for a US 104 key keyboard. this module may need to support
+# some kind of subclassing in order to be localizable.
 #
 # of course, i'd be crazy to try to localize a curses app
 MAP = { 0: "C-@",
@@ -75,15 +76,16 @@ MAP = { 0: "C-@",
         32: "SPACE",
        127: "DELETE" }
 
-# add the meta/control-char combinations
-for key in MAP.iterkeys():
+def setup():
+    # add the meta/control-char combinations
+    for key in MAP:
        if key == 27:
            # we don't want to define ESC-ESC
            continue
        MAP[27][key] = "M-%s" % (MAP[key])
 
    # add meta character stuff
    for i in range(33, 126):
        if i == 79 or i == 91:
            # these keys are used in other sequences
            continue
@@ -91,20 +93,18 @@ MAP = { 0: "C-@",
        MAP[27][i] = "M-%s" % (chr(i))
        # 8bit meta characters
        MAP[128+i] = "M-%s" % (chr(i))
 MAP[255] = "M-DELETE"
+setup()
 
 def disable_control_chars():
-    #terminal settings are for chumps
+    # terminal settings are for chumps
    attr = termios.tcgetattr(sys.stdin)
 
-    global OLD_ATTR
-    OLD_ATTR = attr
-
    # don't listen to allow input START/STOP (C-s,C-q)
    attr[0] = attr[0] & ~(termios.IXON | termios.IXOFF)
 
-    # remove as many signal handlers as we can; we want to
-    # leave C-d and C-z probably
+    # remove as many signal handlers as we can; we want to leave C-d and C-z
+    # probably
    for pos in range(0,len(attr[6])):
        if pos == termios.VEOF or pos == termios.VSUSP:
            continue

lex.py
@@ -1,5 +1,5 @@
-import curses, re
-import regex, util
+import re
+import regex
 from point import Point
 
 def escape(s):
@@ -475,7 +475,6 @@ class OverrideRegionRule(RegionRule):
 
 class Grammar(object):
     rules = []
-grammar = Grammar()
 
 class Lexer(object):
     def __init__(self, mode, grammar):
@@ -490,7 +489,7 @@ class Lexer(object):
        return self.lines[self.y] + '\n'
    def lex_all(self, lines):
        lextokens = [[] for l in lines]
-        for t in lexer.lex(lines):
+        for t in self.lex(lines):
            lextokens[t.y].append(t)
        return lextokens
 