more pychecker cleanup

--HG--
branch : pmacs2
Erik Osheim 2009-11-11 23:22:14 -05:00
parent 75d354c2f5
commit 683afdf129
5 changed files with 58 additions and 57 deletions

View File

@@ -1,5 +1,6 @@
-import glob, os, pwd
-import method, util
+import glob
+import os
+import util
 
 _completers = {}
@@ -16,7 +17,6 @@ def find_common_string(candidates):
     elif len(candidates) == 1:
         return candidates[0]
     else:
-        done = False
         index = 0
         test = candidates[0]
         while True:
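The removed done = False was dead state: the flag is presumably never read, since the while True loop exits via break/return, and unused locals are exactly what pychecker flags. For context, a minimal sketch of the common-prefix computation this helper presumably performs (not the actual implementation):

def find_common_string(candidates):
    # longest common prefix shared by all candidate strings
    if not candidates:
        return ''
    elif len(candidates) == 1:
        return candidates[0]
    prefix = candidates[0]
    for s in candidates[1:]:
        while not s.startswith(prefix):
            prefix = prefix[:-1]
    return prefix

assert find_common_string(['foobar', 'foobaz', 'food']) == 'foo'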
@@ -30,7 +30,7 @@ class Completer(object):
     def __init__(self, application):
         self.application = application
     def get_candidates(self, s, w=None):
-        assert "Not implemented"
+        raise Exception("Not implemented")
     def tab_string(self, s, w=None):
         '''returns a tuple of three things:
        1. the new string
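The old assert was a no-op: assert "Not implemented" tests a non-empty string, which is always truthy, so a subclass that forgot to override get_candidates would sail past it and return None. Raising makes the missing override visible. A stripped-down illustration (the Broken subclass is hypothetical):

class Completer(object):
    def get_candidates(self, s, w=None):
        # old version: always passes (and is skipped entirely under -O)
        assert "Not implemented"

class Completer2(object):
    def get_candidates(self, s, w=None):
        # new version: callers find out immediately
        raise Exception("Not implemented")

class Broken(Completer):
    pass

result = Broken().get_candidates("spam")   # returns None without complaint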

View File

@@ -1,10 +1,7 @@
-import re, sys
+import sys
 from lex import Token
 
 color_list = []
-color_list.extend(['\033[3%dm' % x for x in range(0, 8)])
-color_list.extend(['\033[3%d;1m' % x for x in range(0, 8)])
-color_list.extend(['\033[0m'])
 
 color_names = [
     'black', 'dred', 'dgreen', 'brown', 'dblue', 'dpurple', 'dcyan', 'lgrey',
@@ -13,23 +10,29 @@ color_names = [
 ]
 
 color_dict ={}
 
-for i in range(0, len(color_list)):
-    color_dict[color_names[i]] = color_list[i]
+def setup():
+    color_list.extend(['\033[3%dm' % x for x in range(0, 8)])
+    color_list.extend(['\033[3%d;1m' % x for x in range(0, 8)])
+    color_list.extend(['\033[0m'])
+    for i in range(0, len(color_list)):
+        color_dict[color_names[i]] = color_list[i]
+setup()
 
-def token_match(self, token, name, data=None):
-    return token.fqname() == name and data is None or token.string == data
-def token_match2(self, token, name, regex):
-    return token.fqname() == name and regex.match(token.string)
-def token_vmatch(self, token, *pairs):
-    for (name, data) in pairs:
-        if token_match(token, name, data):
-            return True
-    return False
-def token_vmatch2(self, token, *pairs):
-    for (name, regex) in pairs:
-        if token_match(token, name, regex):
-            return True
-    return False
+#def token_match(self, token, name, data=None):
+#    return token.fqname() == name and data is None or token.string == data
+#def token_match2(self, token, name, regex):
+#    return token.fqname() == name and regex.match(token.string)
+#def token_vmatch(self, token, *pairs):
+#    for (name, data) in pairs:
+#        if token_match(token, name, data):
+#            return True
+#    return False
+#def token_vmatch2(self, token, *pairs):
+#    for (name, regex) in pairs:
+#        if token_match(token, name, regex):
+#            return True
+#    return False
 
 class Highlighter(object):
     def __init__(self, lexer):
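For reference, the strings being built in setup() are standard ANSI SGR escapes: '\033[3%dm' yields the eight normal foreground colours (codes 30-37), the ';1m' variants are their bold/bright counterparts, and '\033[0m' resets attributes. A minimal example of what the finished color_dict lets the highlighter emit (not from the repo):

import sys

# '\033[31m' = dark red foreground, '\033[31;1m' = bright red, '\033[0m' = reset
sys.stdout.write('\033[31m' + 'warning: ' + '\033[0m' + 'plain text\n')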
@@ -266,8 +269,6 @@ class Highlighter(object):
             for t in self.tokens[y1]:
                 tx1 = t.x
                 tx2 = t.x + len(t.string)
-                ty = t.y
-                ts = t.string
                 if tx2 <= x1:
                     # '*| ' before the insertion
                     newtokens[y1].append(t)

View File

@@ -14,6 +14,7 @@ def get_speller():
         _speller = Speller()
     return _speller
 
 def free():
+    global _speller
     if _speller:
         _speller.stop()
         _speller = None
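Without the global statement, the _speller = None at the bottom of free() makes _speller a local name for the whole function body, so the if _speller: check raises UnboundLocalError and the module-level instance is never cleared. A stripped-down reproduction (the Speller stub is hypothetical):

_speller = None

class Speller(object):
    def stop(self):
        pass

def free_broken():
    if _speller:            # UnboundLocalError: the assignment below makes it local
        _speller.stop()
    _speller = None

def free_fixed():
    global _speller         # rebind the module-level name instead
    if _speller:
        _speller.stop()
    _speller = None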

View File

@@ -1,8 +1,9 @@
-import curses, sys, termios
+import sys
+import termios
 
-# this is a huge map of ASCII keycode sequences it should include all
-# the "standard" ones for a US 104 key keyboard. this module may need
-# to support some kind of subclassing in order to be localizable.
+# this is a huge map of ASCII keycode sequences it should include all the
+# "standard" ones for a US 104 key keyboard. this module may need to support
+# some kind of subclassing in order to be localizable.
 #
 # of course, i'd be crazy to try to localize a curses app
 MAP = { 0: "C-@",
@@ -75,8 +76,9 @@ MAP = { 0: "C-@",
         32: "SPACE",
         127: "DELETE" }
 
-# add the meta/control-char combinations
-for key in MAP.iterkeys():
-    if key == 27:
-        # we don't want to define ESC-ESC
-        continue
+def setup():
+    # add the meta/control-char combinations
+    for key in MAP:
+        if key == 27:
+            # we don't want to define ESC-ESC
+            continue
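Iterating a dict directly yields its keys, so for key in MAP: is equivalent to the old for key in MAP.iterkeys(): while avoiding a Python-2-only method. One caveat worth noting: if the loop body also inserts new entries into MAP, looping over a snapshot of the keys is the safe pattern. A small sketch (the tuple encoding of the meta key is made up for illustration):

MAP = {1: "C-a", 27: "ESC"}

for key in list(MAP):                  # snapshot, so inserting below is safe
    if key == 27:
        continue                       # don't define ESC-ESC
    MAP[(27, key)] = "M-" + MAP[key]   # hypothetical meta encoding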
@@ -92,19 +94,17 @@ for i in range(33, 126):
     # 8bit meta characters
     MAP[128+i] = "M-%s" % (chr(i))
 MAP[255] = "M-DELETE"
+setup()
 
 def disable_control_chars():
     # terminal settings are for chumps
     attr = termios.tcgetattr(sys.stdin)
-    global OLD_ATTR
-    OLD_ATTR = attr
     # don't listen to allow input START/STOP (C-s,C-q)
     attr[0] = attr[0] & ~(termios.IXON | termios.IXOFF)
-    # remove as many signal handlers as we can; we want to
-    # leave C-d and C-z probably
+    # remove as many signal handlers as we can; we want to leave C-d and C-z
+    # probably
     for pos in range(0,len(attr[6])):
         if pos == termios.VEOF or pos == termios.VSUSP:
             continue
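For context, attr[0] is the iflag word returned by tcgetattr, and clearing IXON/IXOFF turns off software flow control so C-s and C-q reach the editor instead of freezing the terminal; attr[6] is the cc array of control characters that the following loop blanks out. A minimal hedged sketch of the flow-control part (the helper name is made up):

import sys
import termios

def disable_flow_control():
    fd = sys.stdin.fileno()
    attr = termios.tcgetattr(fd)
    attr[0] = attr[0] & ~(termios.IXON | termios.IXOFF)   # let C-s / C-q through
    termios.tcsetattr(fd, termios.TCSANOW, attr)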

lex.py
View File

@@ -1,5 +1,5 @@
-import curses, re
-import regex, util
+import re
+import regex
 from point import Point
 
 def escape(s):
@@ -475,7 +475,6 @@ class OverrideRegionRule(RegionRule):
 class Grammar(object):
     rules = []
-grammar = Grammar()
 
 class Lexer(object):
     def __init__(self, mode, grammar):
@@ -490,7 +489,7 @@ class Lexer(object):
         return self.lines[self.y] + '\n'
     def lex_all(self, lines):
         lextokens = [[] for l in lines]
-        for t in lexer.lex(lines):
+        for t in self.lex(lines):
             lextokens[t.y].append(t)
         return lextokens
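Pychecker flags lexer here as an undefined name: inside the method only self and the parameters are in scope, so the old code would raise NameError the first time lex_all ran. A hypothetical minimal reproduction:

class Lexer(object):
    def lex(self, lines):
        return iter(())
    def lex_all(self, lines):
        return list(lexer.lex(lines))   # NameError: 'lexer' is not defined

Lexer().lex_all(['x'])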