parent a5b35c1c0e
commit efadfdf4b5
@@ -487,6 +487,7 @@ class Application:
                         break

                 name = token.fqname()
+                #assert name in w.mode.colors, name
                 c = w.mode.colors.get(name, w.mode.default_color)

                 if DARK_BACKGROUND:
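Note on the hunk above: the new assert is committed commented out, because token
names with no entry in w.mode.colors are expected and are meant to fall back to
the mode's default color via dict.get. A minimal sketch of that fallback, with
illustrative names and values (not from this commit):

    # Unknown fully-qualified token names never raise; they get default_color.
    colors = {'replace.scalar': ('yellow', 'default')}   # per-mode color table
    default_color = ('default', 'default')
    c = colors.get('replace.unknown', default_color)     # -> default_color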
@@ -1,5 +1,8 @@
 package TBB::Reporting2;

+my $cat = "cat";
+$cat =~ s/cat/dog/g;
+
 use strict;
 use warnings;

lex2.py (77 changes)
@@ -20,11 +20,14 @@ class Token(object):
         else:
             return []
     def domain(self):
-        names = []
         if self.parent is not None:
+            names = []
             names.extend(self.parent.domain())
-            names.append(self.rule.name)
-        return names
+            if names[-1] != self.rule.name:
+                names.append(self.rule.name)
+            return names
+        else:
+            return [self.rule.name]
     def fqlist(self):
         names = []
         if self.parent is not None:
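For context on the domain() change above: nested tokens can share a rule name,
and the new guard keeps the domain list from repeating it. A reduced, runnable
sketch, with Token and Rule stripped down to the fields this method touches
(not the real classes):

    class Rule:
        def __init__(self, name):
            self.name = name

    class Token:
        def __init__(self, rule, parent=None):
            self.rule   = rule
            self.parent = parent
        def domain(self):
            if self.parent is not None:
                names = []
                names.extend(self.parent.domain())
                if names[-1] != self.rule.name:
                    names.append(self.rule.name)
                return names
            else:
                return [self.rule.name]

    outer = Token(Rule('replace'))
    inner = Token(Rule('replace'), parent=outer)
    print(inner.domain())   # ['replace'], not ['replace', 'replace']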
@@ -264,16 +267,14 @@ class DualRegionRule(Rule):
         lexer.x += len(s)
         return token
     def resume(self, lexer, toresume):
-        assert toresume
+        assert toresume, "can't resume without tokens to resume!"
         token = toresume[0]
-        d = token.matchd
         if token.name == 'start':
-            stoken = toresume[0]
-            mtoken = self._match_first(lexer, stoken, None, toresume)
-            self._match_second(lexer, mtoken, [])
+            t2 = self._match_first(lexer, token, toresume)
+            t3 = self._match_second(lexer, t2, [])
+            return True
         elif token.name == 'middle':
-            d3 = token.matchd
-            self._match_second(lexer, token.parent, d3, toresume)
+            t3 = self._match_second(lexer, token, toresume)
         else:
             raise Exception, "invalid flag %r" % flag
         return True
@@ -281,32 +282,27 @@ class DualRegionRule(Rule):
         # see if we can match our start token
         m = self.start_re.match(lexer.lines[lexer.y], lexer.x)
         if m:
-            # region was match, so let's do this
-            d1 = m.groupdict()
-            d2 = self._match_first(lexer, parent, m, [])
-            d3 = dict(d1.items() + d2.items())
-            self._match_second(lexer, parent, d3, None, [])
+            t1 = self._add_from_regex('start', lexer, parent, m, m.groupdict())
+            t2 = self._match_first(lexer, t1, [])
+            t3 = self._match_second(lexer, t2, [])
             return True
         else:
             # region was not matched; we never started. so return false
             return False

-    def _match_first(self, lexer, context, d1, m1, toresume=[]):
-        # if we have been given rulecontext, then we are going to "resume" a
-        # parse that can already be assumed to have started
+    def _match_first(self, lexer, parent, toresume=[]):
         reenter = len(toresume) > 1
-        assert m1 or reenter
-        # ok, so create our start token, and get ready to start reading data
-        if m1 is not None:
-            self._add_from_regex('start', lexer, parent, m1, m1.groupdict())
+        if reenter:
+            assert parent is toresume[0]
+        d1 = parent.matchd
+        assert parent.name == 'start'
         null_t = None

         middle_re = re.compile(self.middle % d1)
         d2 = {}

         # ok, so as long as we aren't done (we haven't found an end token),
         # keep reading input
+        t2 = None
         done = False
         while not done and lexer.y < len(lexer.lines):
             old_y = lexer.y
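One detail worth calling out in the hunk above: middle_re is compiled from a
%-interpolated template, so the middle pattern can depend on whatever the start
match captured. A standalone illustration; the pattern and input here are
invented, real templates live on the rule definitions:

    import re

    start_re = re.compile(r"<<(?P<tag>[A-Z]+)")   # start match captures a name
    m1 = start_re.match("<<EOF")
    d1 = m1.groupdict()                           # {'tag': 'EOF'}
    middle_template = r"^%(tag)s$"                # template with a %(name)s slot
    middle_re = re.compile(middle_template % d1)  # compiles to r'^EOF$'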
@@ -314,7 +310,7 @@ class DualRegionRule(Rule):
             # if this line is empty, then we will skip it, but here weinsert
             # an empty null token just so we have something
             if len(lexer.lines[lexer.y]) == 0:
-                null_t = Token('null', None, lexer.y, lexer.x, '')
+                null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                 lexer.add_token(null_t)
                 null_t = None

@@ -337,8 +333,8 @@ class DualRegionRule(Rule):
                 # proceed to "stage 2"
                 m2 = middle_re.match(lexer.lines[lexer.y], lexer.x)
                 if m2:
-                    d2 = m2.groupdict()
-                    self._add_from_regex('middle', lexer, parent, m2, {})
+                    d2 = dict(d1.items() + m2.groupdict().items())
+                    t2 = self._add_from_regex('middle', lexer, parent, m2, d2)
                     done = True
                     break

@@ -356,7 +352,7 @@ class DualRegionRule(Rule):
                 # create if it isn't set).
                 if not found:
                     if null_t is None:
-                        null_t = Token('null', None, lexer.y, lexer.x, '')
+                        null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                         lexer.add_token(null_t)
                     null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                     lexer.x += 1
@@ -370,23 +366,23 @@ class DualRegionRule(Rule):
             # then that means we're finished with the line and should move
             # on to the next one here
             if not done and old_y == lexer.y:
-                lexer.save_context()
                 lexer.y += 1
                 lexer.x = 0
-        return d2
+        return t2

-    def _match_second(self, lexer, context, d3, m, toresume=[]):
-        # if we have been given rulecontext, then we are going to "resume" a
-        # parse that can already be assumed to have started
+    def _match_second(self, lexer, parent, toresume=[]):
         reenter = len(toresume) > 1
-        # ok stage 2 is like stage 1, only we are looking for end tokens
-        # instead of middle tokens
+        if reenter:
+            assert parent is toresume[0]
+            assert parent.name == 'middle'
+        #assert parent.name == 'middle'
+        d3 = parent.matchd
         null_t = None
         end_re = re.compile(self.end % d3)

         # ok, so as long as we aren't done (we haven't found an end token),
         # keep reading input
+        t3 = None
         done = False
         while not done and lexer.y < len(lexer.lines):
             old_y = lexer.y
@@ -406,7 +402,7 @@ class DualRegionRule(Rule):
             # if this line is empty, then we will skip it, but here weinsert
             # an empty null token just so we have something
             if len(lexer.lines[lexer.y]) == 0:
-                null_t = Token('null', None, lexer.y, lexer.x, '')
+                null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                 lexer.add_token(null_t)
                 null_t = None

@@ -417,7 +413,7 @@ class DualRegionRule(Rule):
                 # proceed to "stage 2"
                 m3 = end_re.match(lexer.lines[lexer.y], lexer.x)
                 if m3:
-                    self._add_from_regex('end', lexer, parent, m3, {})
+                    t3 = self._add_from_regex('end', lexer, parent, m3, {})
                     done = True
                     break

@@ -435,7 +431,7 @@ class DualRegionRule(Rule):
                 # create if it isn't set).
                 if not found:
                     if null_t is None:
-                        null_t = Token('null', None, lexer.y, lexer.x, '')
+                        null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                         lexer.add_token(null_t)
                     null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                     lexer.x += 1
@@ -449,12 +445,11 @@ class DualRegionRule(Rule):
             # then that means we're finished with the line and should move
             # on to the next one here
             if not done and old_y == lexer.y:
-                lexer.save_context()
                 lexer.y += 1
                 lexer.x = 0

         # alright, we're finally done processing; return true
-        return True
+        return t3

 class Grammar:
     rules = []

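Summing up the lex2.py refactor: _match_first and _match_second no longer take
match dictionaries and regex objects; they take the start or middle token, read
its matchd, and return the next token, which is what lets resume() restart a
parse from either stage. A reduced sketch of the resulting flow, with invented
stub bodies (the real methods consume input line by line):

    class Token:
        def __init__(self, name, matchd):
            self.name   = name
            self.matchd = matchd   # groupdict captured when the token was made

    class DualRegionSketch:
        def _match_first(self, lexer, start_token, toresume=[]):
            # ...read input until the middle pattern matches...
            return Token('middle', dict(start_token.matchd))
        def _match_second(self, lexer, middle_token, toresume=[]):
            # ...read input until the end pattern matches...
            return Token('end', {})
        def resume(self, lexer, toresume):
            assert toresume, "can't resume without tokens to resume!"
            token = toresume[0]
            if token.name == 'start':
                t2 = self._match_first(lexer, token, toresume)
                t3 = self._match_second(lexer, t2, [])
            elif token.name == 'middle':
                t3 = self._match_second(lexer, token, toresume)
            return True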
lex2_perl.py (12 changes)
@@ -43,14 +43,18 @@ class StringGrammar(Grammar):
            pattern=r"\$[^A-Za-z0-9 %(delim)s](?![A-Za-z0-9_])",
            fallback=r"\$[^A-Za-z0-9 ](?![A-Za-z0-9_])",
        ),
-       PatternRule(
-           name=r'array',
-           pattern=r"@_",
-       ),
+       #PatternRule(
+       #    name=r'array',
+       #    pattern=r"@_",
+       #),
        PatternRule(
            name=r'scalar',
            pattern=r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*",
        ),
+       PatternRule(
+           name=r'cast',
+           pattern=r"[\$\@\%\&]{.*?}",
+       ),
        PatternRule(
            name=r'array',
            pattern=r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*",
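The new 'cast' rule highlights Perl sigil casts such as ${...}, @{...}, %{...},
and &{...}. Since these rules are ordinary Python regexes, the pattern can be
checked standalone; the sample strings below are invented:

    import re

    cast_re = re.compile(r"[\$\@\%\&]{.*?}")
    for s in ('${$ref}', '@{$aref}', '%{$href}', '&{$code}', '$plain'):
        m = cast_re.match(s)
        print("%s -> %r" % (s, m.group(0) if m else None))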
mode_perl.py (14 changes)
@@ -96,10 +96,16 @@ class Perl(mode2.Fundamental):
             'match.null': color.build('cyan', 'default'),

             # replace regex
             'replace.start': color.build('cyan', 'default'),
             'replace.middle': color.build('cyan', 'default'),
             'replace.end': color.build('cyan', 'default'),
             'replace.null': color.build('cyan', 'default'),
+            'replace.escaped': color.build('magenta', 'default'),
+            'replace.deref': color.build('yellow', 'default'),
+            'replace.length': color.build('yellow', 'default'),
+            'replace.scalar': color.build('yellow', 'default'),
+            'replace.hash': color.build('yellow', 'default'),
+            'replace.cast': color.build('yellow', 'default'),

             # translate regex
             'translate.start': color.build('magenta', 'default'),