text
stringlengths 22
301k
|
---|
@PATH _ROOT _LINE
# Pass: pick out the human name on a resume.
# Context gates (X vars on the _LINE parent, presumably set by earlier
# passes — TODO confirm): "AfterResumeByItselfLine", "resumeOf",
# and "Found cap already" (used here as a once-per-context latch).
# flag that.
@CHECK
if (
(X("AfterResumeByItselfLine") || X("resumeOf"))
&& !X("Found cap already")
)
succeed();
fail();
@POST
# Mark that a name was found and latch so later matches are skipped.
N("name found") = 1;
++X("Found cap already");
# noop()
@RULES
_xNIL <- _humanName [s] @@
# Take the first cap phrase on the line following "Resume of".
# Or on same line, if it's "Resume of X".
@CHECK
if (
(X("AfterResumeByItselfLine") || X("resumeOf"))
&& !X("Found cap already")
)
succeed();
fail();
@POST
# A bare capitalized phrase is a weaker match: tag it "unlabeled"
# and rename the matched _Caps node to _humanName via [ren=...].
N("unlabeled") = "true";
N("name found") = 1;
++X("Found cap already");
# noop()
@RULES
_xNIL <- _Caps [ren=( _humanName )] @@
|
@CODE
# Persist the in-memory KB to disk once, after the last input file
# of the batch has been processed.
if (G("$islastfile")) {
SaveKB("mykb.kbb",G("kb"),2);
}
@@CODE |
@PATH _ROOT _labelEntry
# Pass: accumulate KB word indices for anatomical-vocabulary tokens.
# For each token NOT in the anatomy stop list (element 1, consumed and
# excised) optionally followed by one that IS in the list (element 2),
# look the element-2 text up under G("adjMatrixData") and append its
# "index" attribute to the X("words") array on the _labelEntry parent.
@POST
if ( N("$text", 2) ) {
# NOTE(review): findconcept may return 0 if the word is missing from
# adjMatrixData; numval(0,...) is then relied on to yield 0 — confirm.
L("con") = findconcept(G("adjMatrixData"), strtolower(N("$text", 2)));
if ( X("words", 2) ) {
# Append to the existing array (arraylength gives next free slot).
X("words", 2)[arraylength(X("words", 2))] = numval(L("con"), "index");
}
else {
# First entry: create the array with a single value.
X("words", 2) = numval(L("con"), "index");
}
}
# Remove element 1 (the non-anatomy run) from the parse tree.
excise(1,1);
noop();
@RULES
_xNIL <-
_xWILD [star fails=(
nerve
left
right
branch
segment
posterior
anterior
trunk
lateral
area
spinal
artery
cervical
brodmann
thoracic
part
root
medial
layer
nucleus
dorsal
muscle
cord
gyrus
superior
ramus
inferior
lumbar
gray
surface
matter
tendon
component
third
first
cutaneous
internal
fourth
second
proper
column
body
ligament
fifth
vein
distal
zone
intercostal
set
proximal
external
lobe
middle
head
region
white
tract
ventral
sacral
sign
bone
median
sixth
wall
toe
meningeal
rootlet
communicating
temporal
joint
frontal
network
seventh
division
eighth
mater
ganglion
transverse
articular
node
lymph
dura
plexus
finger
neural
central
granular
pyramidal
subdivision
lymphatic
radial
digital
occipital
minor
tube
deep
epiphysis
peripheral
ulnar
interosseous
)] ### (1)
_xWILD [opt matches=(
nerve
left
right
branch
segment
posterior
anterior
trunk
lateral
area
spinal
artery
cervical
brodmann
thoracic
part
root
medial
layer
nucleus
dorsal
muscle
cord
gyrus
superior
ramus
inferior
lumbar
gray
surface
matter
tendon
component
third
first
cutaneous
internal
fourth
second
proper
column
body
ligament
fifth
vein
distal
zone
intercostal
set
proximal
external
lobe
middle
head
region
white
tract
ventral
sacral
sign
bone
median
sixth
wall
toe
meningeal
rootlet
communicating
temporal
joint
frontal
network
seventh
division
eighth
mater
ganglion
transverse
articular
node
lymph
dura
plexus
finger
neural
central
granular
pyramidal
subdivision
lymphatic
radial
digital
occipital
minor
tube
deep
epiphysis
peripheral
ulnar
interosseous
)] ### (2)
@@
|
@DECL
########
# FUNC: GETWORDCODEMAPPINGS
# SUBJ: Map each alphabetic child token of a parse node to the node's
#       "code" attribute, under the G("word2codes") KB hierarchy.
# NOTE: Also logs the code to "test.txt" (debug output, one per call).
########
GetWordCodeMappings(L("pn_node")) {
L("this_node") = L("pn_node");
L("code") = pnvar(L("pn_node"), "code");
"test.txt" << L("code") << "\n";
# Walk the immediate children of the node.
L("child_node") = pndown(L("this_node"));
while (L("child_node")) {
# Only index purely alphabetic tokens (skips punctuation, numbers).
if (strisalpha(pnvar(L("child_node"), "$text"))) {
L("text") = pnvar(L("child_node"), "$text");
L("text") = strtolower(L("text"));
# word2codes -> <word> -> <code> (AddUniqueCon avoids duplicates).
L("con") = AddUniqueCon(G("word2codes"), L("text"));
AddUniqueCon(L("con"), L("code"));
}
L("child_node") = pnnext(L("child_node"));
}
}
@@DECL |
@CODE
# Bind G("kb") to the "kb" concept under the KB root (created if absent).
G("kb") = getconcept(findroot(),"kb");
@@CODE |
@NODES _LINE
# Rule 1: detect a single-character bullet at line start (control char,
# punctuation, or emoji — but not "("), and tally it in the KB under
# G("format") -> bullet -> <char> with a "count" attribute.
@PRE
<2,2> length(1);
@POST
L("text") = N("$text",2);
if (L("text") != "(") {
L("bullet") = getconcept(G("format"),"bullet");
L("con") = AddUniqueCon(L("bullet"),L("text"));
IncrementCount(L("con"),"count");
# single() only runs inside the if, so non-bullet "(" never reduces.
single();
}
@RULES
_bullet <-
_xSTART ### (1)
_xWILD [one match=(_xCTRL _xPUNCT _xEMOJI) fail=(\()] ### (2)
@@
# Rule 2: tag plausible calendar years (1800-2050) on the number node.
@PRE
<1,1> numrange(1800,2050);
@POST
N("year") = num(N("$text"));
@RULES
_xNIL <-
_xNUM ### (1)
@@
|
# Execute commands in a knowledge base command file (.KB file)
# NOTE(review): L("file_str") must be set by the enclosing context —
# this fragment does not define it; confirm against the caller.
take(L("file_str")); |
@NODES _header
# Pass: parse "KEY: value" style header lines into _identifier nodes.
# Constraint: the key (element 1) must be all uppercase.
@PRE
<1,1> uppercase();
@POST
# Capture key (element 1) and value (element 5) on the new node.
S("key") = N("$text", 1);
S("value") = N("$text", 5);
single();
@RULES
_identifier <-
_xALPHA ### (1)
_xBLANK [opt] ### (2)
_xPUNCT [min=1 max=2] ### (3)
_xBLANK ### (4)
_xWILD ### (5)
_xWILD [one matches=(\n \r)] ### (6)
@@
|
# Fetch a row from the current database result
@CODE
# Open a MySQL/ODBC connection, run a query, and stream the
# "employee name" column of every row to output.txt.
dbopen("test","root","mypassword");
dballocstmt();
dbexecstmt("SELECT * FROM employee;");
# Bind column 1 into G("employee name"); G("result1") flags non-NULL.
dbbindcol(1,"varchar",50,&G("employee name"),&G("result1"));
while (dbfetch())
{
"output.txt" << "employee name: ";
if (G("result1"))
"output.txt" << G("employee name") << "\n";
else
"output.txt" << "NULL" << "\n";
}
# Release the statement and connection handles.
dbfreestmt();
dbclose();
@@CODE |
@PATH _ROOT _textZone _headerZone _LINE
# Pass: promote morphological features from matched child nodes up to
# the _headerZone ancestor (context level 3 in the @PATH).
# Variable names are Portuguese: pessoa=person, numero=number,
# tempo=tense, root=stem.
@POST X("pessoa",3) = N("pessoa");
@RULES _xNIL <- _pessoa @@
@POST X("numero",3) = N("numero");
@RULES _xNIL <- _numero @@
@POST X("tempo",3) = N("tempo");
@RULES _xNIL <- _tempo @@
@POST X("root",3) = N("root");
@RULES _xNIL <- _root @@ |
@DECL
########
# FUNC: CLEARPOS
# SUBJ: Mark as not receiving a part of speech.
# NOTE: Find parts-of-speech below this node, if any.
#       Resets posarr len/nopos/mypos to 0 and stamps the node with
#       the caller's "ignorepos" and "bracket" flags.
########
clearpos(
L("n"), # Node to clear.
L("ignorepos"), # If putting the ignore feature on.
L("bracket") # If putting noun phrase bracket on.
)
{
if (!L("n"))
return;
# Only write when the value actually changes (avoids churn).
if (L("ignorepos") != pnvar(L("n"),"ignorepos"))
pnreplaceval(L("n"),"ignorepos",L("ignorepos"));
if (L("bracket") != pnvar(L("n"),"bracket"))
pnreplaceval(L("n"),"bracket",L("bracket"));
if (pnvar(L("n"),"posarr len"))
pnreplaceval(L("n"),"posarr len",0);
if (pnvar(L("n"),"nopos"))
pnreplaceval(L("n"),"nopos",0);
if (pnvar(L("n"),"mypos"))
pnreplaceval(L("n"),"mypos",0);
}
########
# FUNC: SCLEARPOS
# SUBJ: Mark sugg as not receiving a part of speech.
# NOTE: Find parts-of-speech below this node, if any.
#       Same contract as clearpos(), but operates on the suggested
#       node's S() variables instead of an explicit node argument.
########
sclearpos(
L("ignorepos"), # If putting the ignore feature on.
L("bracket") # If putting noun phrase bracket on.
)
{
if (L("ignorepos") != S("ignorepos"))
S("ignorepos") = L("ignorepos");
if (L("bracket") != S("bracket"))
S("bracket") = L("bracket");
if (S("posarr len"))
S("posarr len") = 0;
if (S("nopos"))
S("nopos") = 0;
if (S("mypos"))
S("mypos") = 0;
}
########
# FUNC: SAMEPOSRANGE
# SUBJ: Intersection of pos for a range of nodes.
# RET: L("posarr") = array of intersected pos values.
#      0 if no start node or the intersection becomes empty.
########
sameposrange(L("start"),L("end"))
{
if (!L("start"))
return 0;
if (L("end"))
L("end") = pnnext(L("end")); # Easy end boundary (exclusive sentinel).
L("n") = L("start");
L("posarr") = pnvar(L("n"),"posarr"); # Start with first.
L("n") = pnnext(L("n"));
# Note: if L("end") is 0, this walks to the end of the sibling list.
while (L("n") != L("end"))
{
# Intersect pos arrays.
L("posarr") = intersect(L("posarr"),pnvar(L("n"),"posarr"));
if (!L("posarr")) # No intersection.
return 0;
L("n") = pnnext(L("n"));
}
return L("posarr");
}
########
# FUNC: POSACCTINI
# SUBJ: Accounting for rule performance.
# NOTE: No-op unless the G("posacct") flag is on; otherwise creates
#       the "posacct" concept under the KB root for stats storage.
########
posacctini()
{
if (!G("posacct"))
return;
# Set up a global area.
G("posacct root") = getconcept(findroot(),"posacct");
}
########
# FUNC: POSACCT
# SUBJ: Accounting for rule performance.
# NOTE: Records, per pass/rule, how often the pos assigned to a node
#       agrees with the first entry of its gold "posarr". Stats live
#       under G("posacct root") -> <passnum> -> <rulenum> with
#       "fired" and "good" counters. No-op unless G("posacct") is on.
########
posacct(L("n"))
{
if (!G("posacct"))
return;
if (!L("n"))
return;
L("rule") = str(G("$rulenum"));
if (!L("rule"))
return; # Not inside a rule.
# Strip the leading underscore from the node name for pos lookup.
L("nm") = pnname(L("n"));
L("ln") = strlength(L("nm"));
if (strpiece(L("nm"),0,0) == "_")
L("nm") = strpiece(L("nm"),1,L("ln")-1);
L("npos") = nodepos(L("n"),L("nm"));
# Get kb concept for pass.
L("pass") = str(G("$passnum"));
L("cpass") = getconcept(G("posacct root"),L("pass"));
# Get kb concept for rule.
L("crule") = getconcept(L("cpass"),L("rule"));
# Keep global count.
L("fired") = numval(L("crule"),"fired");
replaceval(L("crule"),"fired",++L("fired"));
# Score
L("arr") = pnvar(L("n"),"posarr");
#"posdump.txt" << str(L("npos")) << "\t" << L("arr")[0] << "\n"; # *VERBOSE*
if (L("arr")[0] == L("npos")) # Correct.
{
L("good") = numval(L("crule"),"good");
replaceval(L("crule"),"good",++L("good"));
# ++G("good arr")[G("$rulenum")];
}
}
########
# FUNC: POSACCTDUMP
# SUBJ: Dump pos rule accounting for a pass.
# NOTE: Walks G("posacct root") -> <pass> -> <rule> and prints a
#       tab-separated table (pass, rule, good, total, pct).
########
posacctdump()
{
if (!G("posacct") || !G("posacct root"))
return;
if (!G("LINUX"))
{
L("fname") = G("$apppath") + "\\data\\posdump.txt"; # *VERBOSE*
L("out") = openfile(L("fname"));
}
else
{
L("fname") = ".." + "/data/posdump.txt"; # *VERBOSE*
# L("out") = openfile(L("fname"));
# On Linux, stream straight to the path instead of a file handle.
L("out") = L("fname"); # workaround.
}
L("out")
<< "pass rule good total pct" <<"\n"
<< "=============================================" << "\n";
# Now storing in kb.
# For each pass.
L("cpass") = down(G("posacct root"));
while (L("cpass"))
{
# For each rule in pass.
L("crule") = down(L("cpass"));
while (L("crule"))
{
# Print rule stats.
L("gd") = numval(L("crule"),"good");
L("tt") = numval(L("crule"),"fired");
if (L("tt"))
L("pct") = 100 * L("gd") / L("tt");
else
L("pct") = 0;
L("out")
<< rightjustifynum(num(conceptname(L("cpass"))),4) << "\t"
<< rightjustifynum(num(conceptname(L("crule"))),4) << "\t"
<< rightjustifynum(L("gd"),9) << "\t"
<< rightjustifynum(L("tt"),9) << "\t"
<< rightjustifynum(L("pct"),4)
<< "\n"
;
L("crule") = next(L("crule"));
}
L("cpass") = next(L("cpass"));
}
# NOTE(review): on Linux L("out") is a path string, not a handle —
# closefile on it relies on engine tolerance; confirm.
if (L("out"))
closefile(L("out"));
}
########
# FUNC: CHPOS
# SUBJ: Rudimentary set pos.
# NOTE: For accounting convenience.
#       Note there's a SETPOS function in this pass, more complex.
#       A truthy pos is stored in "mypos" and fed to posacct();
#       an empty pos clears any existing "mypos" instead.
########
chpos(L("n"),L("pos"))
{
if (!L("n"))
return;
if (!L("pos"))
{
# Clearing: only touch the node if "mypos" is currently set.
if (pnvar(L("n"),"mypos"))
pnreplaceval(L("n"),"mypos",0);
return;
}
pnreplaceval(L("n"),"mypos",L("pos"));
posacct(L("n")); # Record the assignment for rule accounting.
}
########
# FUNC: NODEPOS
# SUBJ: Get part of speech for node.
# NOTE: Returns an existing "mypos" if set; otherwise derives a
#       Penn-style tag from the node's category name and features.
#       Returns 0 for unknown categories or ignorepos nodes.
########
nodepos(L("n"),L("name"))
{
if (!L("n") || !L("name"))
return 0;
if (pnvar(L("n"),"ignorepos"))
return 0;
L("pos") = pnvar(L("n"),"mypos");
if (L("pos"))
return L("pos");
if (L("name") == "conj")
return "CC";
if (L("name") == "det")
return "DT";
if (L("name") == "num")
return "CD";
if (L("name") == "interj")
return "UH";
if (L("name") == "prep")
{
# "to" gets its own tag per the Penn tagset.
L("txt") = strtolower(pnvar(L("n"),"$text"));
if (L("txt") == "to")
return "TO";
return "IN";
}
if (L("name") == "adj")
{
# Comparative/superlative adjectives resolved by adjconj().
L("txt") = strtolower(pnvar(L("n"),"$text"));
L("myp") = adjconj(L("txt"));
if (L("myp"))
return L("myp");
return "JJ";
}
if (L("name") == "adv")
{
# if (pnvar(L("n"),"comparative"))
# return "RBR";
# if (pnvar(L("n"),"superlative"))
# return "RBS";
fixadv(L("n")); # 06/19/06 AM.
L("pos") = pnvar(L("n"),"mypos");
if (L("pos"))
return L("pos");
return "RB";
}
if (L("name") == "pro")
return "PP";
if (L("name") == "noun")
{
# "dump.txt" << pnvar(L("n"),"$text") << " "; # *VERBOSE*
# Capitalization implies proper noun — unless at sentence start.
L("cap") = pnvar(L("n"),"cap");
if (pnvar(L("n"),"newsent"))
L("cap") = 0; # 07/11/06 AM.
if (!(L("num") = pnvar(L("n"),"number") ))
L("num") = number(L("n"));
# "dump.txt" << L("num") << "\n"; # *VERBOSE*
if (L("num") == "singular")
{
if (L("cap"))
return "NP";
else
return "NN";
}
else if (L("num") == "plural")
{
if (L("cap"))
return "NPS";
else
return "NNS";
}
return "noun";
}
if (L("name") == "verb")
{
if (pnvar(L("n"),"-en"))
return "VBN";
if (pnvar(L("n"),"-s"))
return "VBZ";
if (pnvar(L("n"),"inf"))
return "VBP"; # verb/VBP # [DEFAULT] for verb inf # 05/27/07
if (pnvar(L("n"),"-ing"))
return "VBG";
if (pnvar(L("n"),"-ed"))
return "VBD";
# VBP ... don't have a marker for this yet.
return 0;
}
return 0;
}
########
# FUNC: SCORENODEPOS
# SUBJ: Score a single part-of-speech against key.
# RET: 1 if L("npos") matches the first gold pos in "posarr", else 0.
# NOTE: Updates currtot/currgood counters on the G("scorepos") concept.
########
scorenodepos(L("n"),L("npos"))
{
if (!G("scorepos") || !L("n")) # FIX. # 09/09/05 AM.
return 0;
L("arr") = pnvar(L("n"),"posarr");
# Update score.
L("num") = numval(G("scorepos"),"currtot");
replaceval(G("scorepos"),"currtot",++L("num"));
if (L("arr")[0] == L("npos"))
{
# Update score.
L("num") = numval(G("scorepos"),"currgood");
replaceval(G("scorepos"),"currgood",++L("num"));
return 1;
}
return 0;
}
########
# FUNC: CAPPOS
# SUBJ: Handle pos for capitalized word.
# NOTE: Assigns NP, or NPS when the text looks like an uppercase
#       plural (ends in "s" with an uppercase letter before it and
#       length > 2). L("override") forces reassignment even when a
#       "mypos" value is already present.
########
cappos(L("n"), L("override"))
{
if (!L("n"))
return;
# DON'T WANT TO DEAL WITH ARTIFICIAL NPS ISSUES. #
# BUG FIX: was !L("overrride") (misspelled, three r's). The
# misspelled local was always empty, so the override parameter
# could never force reassignment of an existing "mypos".
if (pnvar(L("n"),"mypos") && !L("override"))
return;
L("t") = pnvar(L("n"),"$text");
if (strendswith(L("t"),"s"))
{
L("l") = strlength(L("t"));
if (L("l") == 1 || L("l") == 2)
; # Too short for the plural heuristic (e.g. "Us").
else if (strisupper(strpiece(L("t"),L("l")-2,L("l")-2)))
{
chpos(L("n"),"NPS");
return;
}
}
chpos(L("n"),"NP");
}
########
# FUNC: FINDVEN
# SUBJ: Search inside a vg for first verb, which must be ven.
# RET: L("ven") = ven-able verb, if found.
# NOTE: Descends through nested _vg nodes, then scans siblings for a
#       _verb node, and requires it to conjugate as "-en".
########
findven(L("n"))
{
# Traverse down to find verb.
L("done") = 0;
while (!L("done"))
{
if (!L("n"))
return 0;
L("nm") = pnname(L("n"));
if (L("nm") == "_vg")
L("n") = pndown(L("n"));
else
L("done") = 1;
}
# Here we're within a vg. Todo: traverse...
L("done") = 0;
while (!L("done"))
{
if (L("nm") == "_verb")
L("done") = 1;
else
{
# Advance along siblings; stop when the list runs out.
if (L("n") = pnnext(L("n")))
L("nm") = pnname(L("n"));
else
L("done") = 1;
}
}
# At a verb.
# Could be "having (been) eaten..."
# Else should not be "have" or "be".
# NOTE(review): if the sibling scan ran off the end, L("n") is 0
# here and vconjq(0,...) is relied on to return false — confirm.
if (vconjq(L("n"),"-en"))
return L("n");
return 0;
}
########
# FUNC: VGAMBIGEDN
# SUBJ: See if verb group is ambiguous wrt -edn.
# RET: Verb node, if ambiguous wrt -edn.
# NOTE: A vg already marked active, or whose verb has a hard "mypos",
#       is considered resolved and returns 0.
########
vgambigedn(L("vg"))
{
if (!L("vg"))
return 0;
if (pnvar(L("vg"),"voice") == "active")
return 0;
# NOT the first verb in the group.
L("v") = pnvar(L("vg"), "verb node");
if (!L("v"))
{
"err.txt" << "[No verb node ptr: " << pnvar(L("vg"),"$text")
<< "\n";
return 0;
}
if (pnvar(L("v"),"mypos"))
return 0;
L("vc") = vconj(L("v"));
if (L("vc") == "-edn")
return L("v");
return 0;
}
########
# FUNC: VGASSIGNED
# SUBJ: See if verb group has been handled wrt pos tagging.
# RET: 1 if done, else 0.
# NOTE: "Done" means: it is a _vg with a voice assigned AND its main
#       (or first) verb already carries a "mypos" tag.
########
vgassigned(L("vg"))
{
if (!L("vg"))
return 0;
# Handle compound/complex vg.
if (pnvar(L("vg"),"first vg"))
L("vg")= pnvar(L("vg"),"first vg");
if (pnname(L("vg")) != "_vg")
return 0;
if (!pnvar(L("vg"),"voice"))
return 0; # If no voice, not done.
L("v") = pnvar(L("vg"),"first verb");
if (!L("v"))
L("v") = pnvar(L("vg"),"verb node");
if (!L("v"))
{
if (G("error")) "err.txt" << "Vg with no verb pointer = " << pnvar(L("vg"),"$text") << "\n";
return 0;
}
if (pnvar(L("v"),"mypos"))
return 1;
return 0;
}
########
# FUNC: MHBV
# SUBJ: Handle verb group in a rule.
# RET: L("voice") — "active", "passive", "AMBIG", or 0.
# NOTE: Update the group node.
#       Args: n=vg group node, neg=negation seen, m=modal, h=have,
#       b=be, being=being, v=main verb (any may be 0).
#       Composes negation, voice, semantics, stem, and first-verb
#       bookkeeping onto the group node.
########
mhbv(L("n"),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"))
{
if (!L("n"))
return 0;
# Look at verb components for negation.
if (L("m"))
{
if (pnvar(L("m"),"neg"))
++L("ng");
}
if (L("h"))
{
if (pnvar(L("h"),"neg"))
++L("ng");
}
if (L("b"))
{
if (pnvar(L("b"),"neg"))
++L("ng");
}
if (L("neg"))
++L("ng");
# Double negation.
if (L("ng") == 2 || L("ng") == 4)
L("neg") = 0;
else if (L("ng"))
L("neg") = 1;
if (L("neg"))
pnreplaceval(L("n"),"neg",L("neg"));
if (!L("m") && !L("h") && !L("b") && !L("being") && !L("v"))
{
if (G("error"))
"err.txt" << "[No verb parts: " << pnvar(L("n"),"$text")
<< "]\n";
return 0;
}
# Prefer a voice already computed on the main verb, unless AMBIG.
if (L("v")) # 09/24/19 AM,
{
L("vv") = pnvar(L("v"),"voice"); # 08/22/19 AM.
# "xxx.txt" << "mhbv in: v= " << pnvar(L("v"),"$text") << "\n";
# "xxx.txt" << " voice in = " << L("vv") << "\n";
}
if (L("vv") && L("vv") != "AMBIG") # 09/24/19 AM.
L("voice") = L("vv"); # 08/22/19 AM.
else
L("voice") = mhbvfix(L("n"),L("m"),L("h"),L("b"),L("being"),L("v"));
if (L("voice"))
{
# "xxx.txt" << " voice=" << L("voice") << "\n";
pnreplaceval(L("n"),"voice",L("voice"));
}
semvg(L("n"),L("v"),L("b"),L("being"));
# TODO: Compose semantics.
# Pick the most contentful verb as the group's "verb node".
if (L("v"))
L("vn") = L("v");
else if (L("b"))
L("vn") = L("b");
else if (L("h"))
L("vn") = L("h");
if (L("vn"))
{
semcomposenp(L("n"),L("vn"),L("vn"));
pnreplaceval(L("n"),"verb node",L("vn"));
L("stem") = pnvar(L("vn"),"stem");
if (L("stem"))
pnreplaceval(L("n"),"stem",L("stem"));
else
pnreplaceval(L("n"),"stem",pnvar(L("vn"),"$text"));
# VERB TO VG COPY ATTRS
if (pnvar(L("vn"),"helping-verb") ) # 09/21/19 AM.
pnreplaceval(L("n"),"helping-verb",1);
if (pnvar(L("vn"),"prepositional-verb") ) # 09/21/19 AM.
pnreplaceval(L("n"),"prepositional-verb",1);
}
else
{
# Get stem.
# Only "being" or modal remain at this point.
if (L("being"))
L("sv") = L("being");
else
L("sv") = L("m");
semcomposenp(L("n"),L("sv"),L("sv"));
L("stem") = pnvar(L("sv"),"stem");
if (L("stem"))
pnreplaceval(L("n"),"stem",L("stem"));
else
pnreplaceval(L("n"),"stem",pnvar(L("sv"),"$text"));
}
# First verb. #
if (L("m"))
{
L("mstem") = pnvar(L("m"),"stem"); # 06/05/06 AM.
if (L("mstem") == "do") # 06/05/06 AM.
L("first") = L("m"); # 06/05/06 AM.
else
L("first") = 0; # NO "first" verb to conjugate. # 02/26/05 AM.
# After a modal, the next verb is a bare infinitive.
if (L("h"))
chpos(L("h"),"VB"); # have/VB
else if (L("b"))
chpos(L("b"),"VB"); # be/VB
else if (L("v"))
chpos(L("v"),"VB"); # verb/VB
}
else if (L("h"))
{
L("first") = L("h");
if (pnvar(L("h"),"-edn")) # had
chpos(L("h"),"VBD"); # had/VBD
}
else if (L("b"))
L("first") = L("b");
else if (L("v"))
L("first") = L("v");
if (L("first"))
pnreplaceval(L("n"),"first verb",L("first"));
# Finite vs nofinite.
# (shallow assignment based on mhbv alone, not clause).
return L("voice");
}
########
# FUNC: MHBVFIX
# SUBJ: Fix up a verb group.
# RET: "active", "passive", "AMBIG", or 0 if it didn't work out.
# NOTE: First honors hard-wired conjugation flags on the verb
#       (-s/-ing/-en/-ed/-edn/inf) in combination with the m/h/b/being
#       helpers; failing that, falls back to vconj() for regular
#       verbs. Each resolved branch also calls fixvgattrs() to stamp
#       tense/voice attributes on the group node.
########
mhbvfix(L("n"),L("m"),L("h"),L("b"),L("being"),L("v"))
{
if (!L("n"))
{
if (G("error"))
"err.txt" << "[mhbvfix error:] " << phrasetext() << "\n";
return 0;
}
if (!L("v"))
{
if (G("error"))
"err.txt" << "[mhbvfix error(2):] " << phrasetext() << "\n";
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),0);
return 0;
}
#"xxx.txt" << "mhbvfix: in = " << pnvar(L("v"),"$text") << "\n";
# If assigned already, handle that...
L("assig") = 0; # Track if verb has hard-wired assignments.
if (pnvar(L("v"),"-s"))
{
++L("assig");
if (!L("m") && !L("h") && !L("b") && !L("being"))
{
chpos(L("v"),"VBZ");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-s");
return "active"; # eats.
}
# will/have/been/being eats.
# Look for an alternative...
}
if (pnvar(L("v"),"-ing"))
{
++L("assig");
if (L("b") || L("being"))
{
chpos(L("v"),"VBG");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-ing");
return "active"; # been/being eating.
}
if (!L("m") && !L("h"))
{
chpos(L("v"),"VBG");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-ing");
return "active"; # eating.
}
# will/have eating.
# Check alternative...
}
if (pnvar(L("v"),"-en"))
{
++L("assig");
if (L("b") || L("being"))
{
chpos(L("v"),"VBN");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-en");
return "passive"; # been eaten.
}
if (L("h"))
{
chpos(L("v"),"VBN");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-en");
return "active"; # have eaten.
}
# will eaten ... so look for alternative.
}
if (pnvar(L("v"),"-ed"))
{
++L("assig");
if (!L("b") && !L("being") && !L("h") && !L("m"))
{
chpos(L("v"),"VBD"); # ate.
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-ed");
return "active";
}
# will/have/been ate. Informal, disagree, or error.
# Look for alternative...
# -ED has been corrupted to be AMBIGUOUS.
# SHOULD BE UNAMBIGUOUS.
}
if (pnvar(L("v"),"-edn"))
{
++L("assig");
if (L("b") || L("being"))
{
# "xxx.txt" << "mhbvfix: -edn + b" << "\n";
chpos(L("v"),"VBN");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-en");
return "passive"; # been worked.
}
if (L("h"))
{
chpos(L("v"),"VBN");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-en");
return "active"; # have worked.
}
if (!L("m"))
{
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-edn");
return 0; # worked. Ambiguous.
}
# will worked ... so look for alternative.
}
if (pnvar(L("v"),"inf")) # 01/05/05 AM.
{
++L("assig");
# Unambiguous infinitive.
if (!L("h") && !L("b") && !L("being"))
{
if (L("m")) # will work.
{
chpos(L("v"),"VB"); # will eat.
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"inf");
return "active";
}
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"inf");
return "active"; # be or am.
}
# have/been/being work...
}
if (L("assig")) # Hard-wired conjugations.
{
# NOTE(review): falls back to "inf" attrs here even though the
# hard-wired flag may have been -s/-ing/etc. — confirm intended.
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"inf");
# "xxx.txt" << "error" << "\n";
return 0; # Didn't work out.
}
# Should assume regular verbs below here. #
# Irregs should all be hardwired, handled above.
L("x") = vconj(L("v"));
if (L("x") == "inf")
{
pnreplaceval(L("v"),"inf",1);
# Unambiguous infinitive.
if (L("m") && !L("h") && !L("b") && !L("being"))
chpos(L("v"),"VB");
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"inf");
return "active";
}
if (L("x") == "-s")
{
pnreplaceval(L("v"),"-s",1);
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-s");
return "active";
}
if (L("x") == "-ed")
{
pnreplaceval(L("v"),"-ed",1);
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-ed");
return "active";
}
if (L("x") == "-en")
{
pnreplaceval(L("v"),"-en",1);
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-en");
return "passive";
}
if (L("x") == "-ing")
{
pnreplaceval(L("v"),"-ing",1);
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-ing");
return "active";
}
# -edn
if (!L("h") && !L("b") && !L("being"))
{
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-edn");
return "AMBIG"; # ambiguous.
}
if (L("x") != "-edn")
return 0; # error.
# Helpers present: resolve -edn to -en (past participle).
pnreplaceval(L("v"),"-en",1);
pnreplaceval(L("v"),"-edn",0);
chpos(L("v"),"VBN"); # 06/02/06 AM.
if (L("b") || L("being"))
{
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-en");
return "passive";
}
else if (L("h"))
{
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-en");
return "active";
}
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),"-edn");
return "AMBIG"; # 09/24/19 AM.
}
########
# FUNC: FIXVGATTRS
# SUBJ: Fix up tense and aspect for a verb group.
# NOTE: Will overwrite even if exists.
#       Stamps tense (future/past/present), voice
#       (active/passive/AMBIG), suffix flags, and m/h/b/being pointers
#       on the group node, based on which helpers are present and the
#       main verb's conjugation string L("vconj").
########
fixvgattrs(L("n"),L("m"),L("h"),L("b"),L("being"),L("vconj"))
{
if (!L("vconj")|| !L("n"))
{
if (G("error"))
"err.txt" << "[fixvgattrs error: " << phrasetext() << "]\n";
return 0;
}
# Fix voice...
if (!L("b") && !L("being")) # 09/08/19 AM.
{
if (!L("m") || L("h"))
pnreplaceval(L("n"),"voice","active"); # 09/08/19 AM.
}
# Tense: modal => future; have/be carry their own tense; else derive
# from the conjugation suffix.
if (L("m"))
pnreplaceval(L("n"),"tense","future");
else if (L("h"))
pnreplaceval(L("n"),"tense",pnvar(L("h"),"tense"));
else if (L("b"))
pnreplaceval(L("n"),"tense",pnvar(L("b"),"tense"));
else if (L("being"))
pnreplaceval(L("n"),"tense","present");
else if (L("vconj") == "-ed"
|| L("vconj") == "-en"
|| L("vconj") == "-edn")
pnreplaceval(L("n"),"tense","past");
else
{
if (L("vconj") == "-ing") # 11/10/07 AM.
pnreplaceval(L("n"),"lone-ing",1); # 11/10/07 AM.
pnreplaceval(L("n"),"tense","present");
}
# Record which helper verbs were present.
if (L("m")) pnreplaceval(L("n"),"m",L("m"));
if (L("h")) pnreplaceval(L("n"),"h",L("h"));
if (L("b")) pnreplaceval(L("n"),"b",L("b"));
if (L("being")) pnreplaceval(L("n"),"being",1);
#"xxx.txt" << phrasetext() << "\n";
#"xxx.txt" << L("vconj") << "\n";
if (L("b"))
{
# be + past participle => passive.
if (L("vconj") == "-ed"
|| L("vconj") == "-en"
|| L("vconj") == "-edn"
)
{
pnreplaceval(L("n"),"voice","passive");
pnreplaceval(L("n"),"suff","en");
pnreplaceval(L("n"),"-en",1);
pnreplaceval(L("n"),"-ed",0);
pnreplaceval(L("n"),"-edn",0);
}
}
else if (L("vconj") == "-en")
{
pnreplaceval(L("n"),"voice","passive");
pnreplaceval(L("n"),"suff","en");
pnreplaceval(L("n"),"-en",1);
pnreplaceval(L("n"),"-ed",0);
pnreplaceval(L("n"),"-edn",0);
}
else if (L("vconj") == "-edn")
{
# Unresolved -ed/-en: keep both flags and mark voice ambiguous.
pnreplaceval(L("n"),"voice","AMBIG"); # NEW.
pnreplaceval(L("n"),"suff","edn");
pnreplaceval(L("n"),"-en",1);
pnreplaceval(L("n"),"-ed",1);
pnreplaceval(L("n"),"-edn",1);
}
else
{
pnreplaceval(L("n"),"voice","active");
}
#pnreplaceval(L("n"),"vconj",L("vconj"));
# NEVER USED THIS GARBAGE! #
#if (L("vconj") == "-s")
# pnreplaceval(L("n"),"aspect","none");
#else if (L("b") && L("vconj") == "-en") # Passive.
# {
# pnreplaceval(L("n"),"voice","passive"); #
# if (L("being") && L("h"))
# pnreplaceval(L("n"),"aspect","perfective-progressive");
# else if (L("h"))
# pnreplaceval(L("n"),"aspect","perfective");
# else if (L("being"))
# pnreplaceval(L("n"),"aspect","progressive");
# else
# pnreplaceval(L("n"),"aspect","none");
# }
#else # Active.
# {
# if (L("b") && L("h"))
# pnreplaceval(L("n"),"aspect","perfective-progressive");
# else if (L("h"))
# pnreplaceval(L("n"),"aspect","perfective");
# else if (L("b") || L("vconj") == "-ing")
# pnreplaceval(L("n"),"aspect","progressive");
# else
# pnreplaceval(L("n"),"aspect","none");
# }
}
########
# FUNC: FIXVG
# SUBJ: Fix up a verb group.
# RET: "active", "passive", or 0 if ambiguous.
# NOTE: Simple vgs delegate to fixvgsimple(); compound vgs (with a
#       "first vg"/"last vg" chain) recurse over each member.
########
fixvg(L("vg"),L("active/passive"),L("pos"))
{
#"output.txt" << "fixvg: " << L("pos") << "\n";
if (!L("vg"))
return;
L("fst") = pnvar(L("vg"),"first vg");
if (!L("fst"))
return fixvgsimple(L("vg"),L("active/passive"),L("pos"));
# Traverse the list of vgs.
L("lst") = pnvar(L("vg"),"last vg");
if (L("lst"))
L("lst") = pnnext(L("lst")); # End mark (exclusive).
L("vg") = L("fst");
while (L("vg") && (L("vg") != L("lst")))
{
fixvg(L("vg"),L("active/passive"),L("pos"));
L("vg") = pnnext(L("vg"));
}
}
########
# FUNC: FIXVGSIMPLE
# SUBJ: Fix up a verb group.
# RET: "active", "passive", or 0 if ambiguous.
# NOTE: Resolves the vg's main verb via fixverb() and copies the
#       resulting voice onto the vg node; logs a mismatch when the
#       caller's requested voice disagrees with the derived one.
########
fixvgsimple(L("vg"),L("active/passive"),L("pos"))
{
#"output.txt" << "fixvgsimple: " << L("pos") << "\n";
if (!L("vg"))
return;
if (L("tmp") = pnvar(L("vg"),"first vg"))
return; # Too nested to bother with, for now.
L("v") = pnvar(L("vg"),"first verb");
if (!L("v"))
L("v") = pnvar(L("vg"),"verb node");
if (L("v"))
L("voice") = fixverb(L("v"),L("active/passive"),L("pos"));
if (!L("voice") && !L("active/passive"))
return;
if (!L("active/passive") || L("voice") == L("active/passive"))
{
pnreplaceval(L("vg"),"voice",L("voice"));
return;
}
if (L("active/passive") && L("voice"))
{
if (G("error")) "err.txt" << "Voice mismatch: " << pnvar(L("vg"),"$text") << "\n";
# "err.txt" << " " << phrasetext() << "\n";
# "err.txt" << " voice=" << L("voice") << "\n";
# "err.txt" << " a/p= " << L("active/passive") << "\n";
}
}
########
# FUNC: FIXVERB
# SUBJ: Fix up a lone verb.
# RET: voice = active, passive or 0 if couldn't deduce
#      (also "AMBIG" for an unresolved VBD/-edn case).
# NOTE: Resolution order: existing "mypos" wins; then try to match
#       the reference L("pos"); then hard conjugation flags; then
#       vconj(). "Highly ambig" = inf + past flags both present.
########
fixverb(L("v"),
L("active/passive"), # What to do if ambiguous. 0 let fn decide.
L("pos") # A reference pos, mainly from previous verb.
)
{
#"xxx.txt" << "fixverb: " << L("pos") << " | " << phrasetext() << "\n";
#"xxx.txt" << "voice=" << L("active/passive") << "\n";
#"output.txt" << G("$passnum") << "," << G("$rulenum") << "\n";
if (!L("v"))
return 0;
#L("dn") = pndown(L("v")); # UNUSED #
L("my") = pnvar(L("v"), "mypos");
if (L("my"))
{
# Already tagged: only VBN counts as passive.
if (L("my") == "VBN")
return "passive";
return "active";
}
# A verb flagged both infinitive and past is too ambiguous for most
# of the defaulting below; restrict to explicit matches.
if (pnvar(L("v"),"inf")
&& (
pnvar(L("v"),"-ed") ||
pnvar(L("v"),"-edn")
|| pnvar(L("v"),"-en")
) )
L("highly ambig") = 1;
# TRY TO MATCH THE DESIRED POS.
# Note: mainly due to ambiguous tenses.
if (L("pos"))
{
if (L("pos") == "VB" || L("pos") == "VBP")
{
#"output.txt" << "*" << pnvar(L("v"),"inf") << "\n";
#"output.txt" << L("pos") << "\n";
#"output.txt" << pnname(L("v")) << "\n";
if (pnvar(L("v"),"inf"))
{
chpos(L("v"),L("pos"));
return "active";
}
}
else if (L("pos") == "VBZ")
{
if (pnvar(L("v"),"-s"))
{
chpos(L("v"),"VBZ");
return "active";
}
}
else if (L("pos") == "VBG")
{
if (pnvar(L("v"),"-ing"))
{
chpos(L("v"),"VBG");
return "active";
}
}
else if (L("pos") == "VBD")
{
if (pnvar(L("v"),"-ed")) # 09/24/19 AM.
{
chpos(L("v"),"VBD");
return "active";
}
if (pnvar(L("v"),"-edn"))
{
# chpos(L("v"),"VBD");
return "AMBIG"; # 09/24/19 AM.
}
}
else if (L("pos") == "VBN")
{
# "xxx.txt" << "vbn -edn=" << pnvar(L("v"),"-edn") << "\n";
if (pnvar(L("v"),"-en") || pnvar(L("v"),"-edn"))
{
chpos(L("v"),"VBN");
# "xxx.txt" << "fixverb: passive;" << phrasetext() << "\n";
return "passive";
}
}
}
# If assigned already, done.
if (pnvar(L("v"),"-en"))
{
if (!L("highly ambig") || L("active/passive") == "passive")
{
chpos(L("v"),"VBN");
return "passive";
}
}
if (pnvar(L("v"),"-ed") && !L("highly ambig"))
{
chpos(L("v"),"VBD");
return "active";
}
if (pnvar(L("v"),"-s"))
{
chpos(L("v"),"VBZ");
return "active";
}
if (pnvar(L("v"),"-ing"))
{
chpos(L("v"),"VBG");
return "active";
}
# No hard flags: fall back to computed conjugation.
L("x") = vconj(L("v"));
if (!L("highly ambig") && L("x") == "inf")
{
pnreplaceval(L("v"),"inf",1);
if (L("pos") == "VB" || L("pos") == "VBP")
chpos(L("v"),L("pos"));
return "active";
}
if (L("x") == "-s")
{
pnreplaceval(L("v"),"-s",1);
chpos(L("v"),"VBZ");
return "active";
}
if (!L("highly ambig") && L("x") == "-ed")
{
pnreplaceval(L("v"),"-ed",1);
chpos(L("v"),"VBD");
return "active";
}
if (!L("highly ambig") && L("x") == "-en")
{
pnreplaceval(L("v"),"-en",1);
chpos(L("v"),"VBN");
return "passive";
}
if (L("x") == "-ing")
{
pnreplaceval(L("v"),"-ing",1);
chpos(L("v"),"VBG");
return "active";
}
if (L("highly ambig"))
return 0; # 05/27/07 AM.
if (L("x") != "-edn")
return 0; # Some kind of error or special case...
# -edn: let the caller's hint break the tie.
if (L("active/passive") == "passive" || L("pos") == "VBN")
{
pnreplaceval(L("v"),"-en",1);
pnreplaceval(L("v"),"-edn",0);
chpos(L("v"),"VBN");
return "passive";
}
else if (L("active/passive") == "active" # 10/09/04 AM.
|| L("pos") == "VBD" )
{
pnreplaceval(L("v"),"-ed",1);
pnreplaceval(L("v"),"-edn",0);
chpos(L("v"),"VBD");
return "active";
}
return 0;
}
########
# FUNC: FIXAPOSS
# SUBJ: Fix up an apostrophe-S to a verb
# NOTE: When the verb node carries the "apos-s" flag (contracted
#       "'s" = "is"), stamp the verb group with the attributes of
#       third-person singular present "be".
########
fixaposs(L("vg"),L("v"))
{
if (!L("vg"))
return;
if (!L("v"))
return;
if (!pnvar(L("v"),"apos-s"))
return;
# These attribute writes are independent of one another.
pnreplaceval(L("vg"),"voice","active");
pnreplaceval(L("vg"),"tense","present"); # Maybe.
pnreplaceval(L("vg"),"number","singular");
pnreplaceval(L("vg"),"person",3);
pnreplaceval(L("vg"),"sem","be");
pnreplaceval(L("vg"),"stem","be");
}
########
# FUNC: CLAUSEPOS
# SUBJ: Get verb pos for clause.
# NOTE: For pos scoring, etc.
#       NOTE(review): reads N(...,1) — so despite being a @DECL
#       function it assumes it is called from inside a rule @POST
#       where element 1 is the clause node; confirm with callers.
########
clausepos(L("clause"))
{
if (!L("clause"))
return 0;
# Prefer the last vg of the clause; fall back to the single vg.
if (N("last vg",1))
L("vg") = N("last vg",1);
else if (N("vg node",1))
L("vg") = N("vg node",1);
return vgpos(L("vg"));
}
########
# FUNC: VGPOS
# SUBJ: Get verb pos for vg.
# NOTE: For pos scoring, etc.
#       Returns the "mypos" tag of the vg's main verb node, or an
#       empty value when the vg or its verb pointer is missing.
########
vgpos(L("vg"))
{
if (!L("vg"))
return 0;
L("verb") = pnvar(L("vg"),"verb node");
if (L("verb"))
L("tag") = pnvar(L("verb"),"mypos");
return L("tag");
}
########
# FUNC: FIXNOUNS
# SUBJ: Fixup nouns in an np.
# NOTE: For pos scoring, etc.
#       Walks siblings [first, last): each non-head noun takes its
#       "pos_np" override or falls back to fixnoun(); the head noun
#       (L("last")) is handled separately by fixnounhead().
########
fixnouns(
L("first"),
L("last"))
{
if (!L("first") || !L("last"))
return;
L("n") = L("first");
while (L("n") && (L("n") != L("last")))
{
L("pos") = pnvar(L("n"),"pos_np");
if (L("pos"))
chpos(L("n"),L("pos"));
else
fixnoun(L("n"));
L("n") = pnnext(L("n"));
}
# If the walk fell off the sibling list before reaching L("last"),
# the range was invalid — abort the pass.
if (L("n") != L("last"))
{
if (G("error"))
"err.txt" << "[Error in fixnouns.]" << "\n";
exitpass();
}
# Fix head noun.
#fixnoun(L("last"));
fixnounhead(L("last"));
}
########
# FUNC: FIXNOUN
# SUBJ: Fixup noun info.
# NOTE: For pos scoring, etc.
# Need to account for first word of sentence.
# Need to account for mass nouns.
# ARGS: n = noun node. Skipped if already tagged ("mypos") or
#       marked "ignorepos". Chooses NN/NNS vs NP/NPS: a capitalized
#       token is treated as proper noun when it is an unknown word
#       or is not sentence-initial.
########
fixnoun(L("n"))
{
if (!L("n"))
	return;
if (pnvar(L("n"),"mypos"))
	return;	# 01/15/05 AM.
if (pnvar(L("n"),"ignorepos"))
	return;	# 06/10/06 AM.
L("cap") = pnvar(L("n"),"cap");
L("newsent") = pnvar(L("n"),"newsent");
L("unknown") = pnvar(L("n"),"unknown");
# Use precomputed number if present, else derive it.
if (!(L("num") = pnvar(L("n"),"number") ))
	L("num") = number(L("n"));
if (L("num") == "singular")
	{
	if (L("cap") && (L("unknown") || !L("newsent")))
		chpos(L("n"),"NP");
	else
		chpos(L("n"),"NN");
	}
else if (L("num") == "plural")
	{
	if (L("cap") && (L("unknown") || !L("newsent")))
		chpos(L("n"),"NPS");
	else
		chpos(L("n"),"NNS");
	}
}
########
# FUNC: FIXNOUNNONHEAD
# SUBJ: Fixup nonhead noun in an np.
# NOTE: For pos scoring, etc.
# ARGS: noun = non-head noun node. Prefers an explicit "pos_np"
#       override; otherwise falls back to generic fixnoun().
########
fixnounnonhead(
	L("noun"))
{
if (!L("noun"))
	return;
L("pos") = pnvar(L("noun"),"pos_np");
if (L("pos"))
	chpos(L("noun"),L("pos"));
else
	fixnoun(L("noun"));	# FIX: was fixnoun(L("n")) -- undefined local, so
				# the fallback never tagged the node. # (review)
}
########
# FUNC: FIXNOUNHEAD
# SUBJ: Fixup head noun in an np.
# NOTE: For pos scoring, etc.
# ARGS: noun = head noun node. Prefers the head-position override
#       attribute "pos_np_head"; else falls back to fixnoun().
########
fixnounhead(
	L("noun"))
{
if (!L("noun"))
	return;
L("pos") = pnvar(L("noun"),"pos_np_head");
if (L("pos"))
	chpos(L("noun"),L("pos"));
else
	fixnoun(L("noun"));
}
########
# FUNC: FIXADJS
# SUBJ: Fixup adjs in an np.
# NOTE: For pos scoring, etc.
# Assumes adj is not head of noun phrase...
# ARGS: first..last = inclusive node range ("last" is advanced to
#       pnnext so the loop bound is exclusive). Only _adj nodes
#       without "ignorepos" are touched.
########
fixadjs(
	L("first"),
	L("last"))
{
if (!L("first") || !L("last"))
	return;
L("n") = L("first");
if (L("last"))
	L("last") = pnnext(L("last"));
while (L("n") != L("last"))
	{
	L("pos") = pnvar(L("n"),"pos_np");
	if (pnname(L("n")) != "_adj")
		;	# Skip it.
	else if (pnvar(L("n"),"ignorepos"))
		;	# Skip it.
	else if (L("pos"))
		chpos(L("n"),L("pos"));
	else
		fixadj(L("n"));
	L("n") = pnnext(L("n"));
	}
}
########
# FUNC: FIXADJ
# SUBJ: Fixup adj.
# NOTE: For pos scoring, etc.
#       First tries verbal-adjective handling (fixvadj); if that
#       tagged the node, stop. Else tags JJ/JJR/JJS via adjconj().
########
fixadj(L("n"))
{
if (!L("n"))
	return;
if (pnvar(L("n"),"mypos"))
	return;
fixvadj(L("n"));
if (pnvar(L("n"),"mypos"))
	return;
# Else get comparative, superlative.
L("txt") = nodetext(L("n"));
if (!L("txt"))
	return;
if (L("myp") = adjconj(L("txt")))
	chpos(L("n"),L("myp"));
}
########
# FUNC: FIXVADJ
# SUBJ: Fixup adj that's also a ven or ving.
# NOTE: For pos scoring, etc.
#       Precedence: explicit "pos_np" override > hyphenated => JJ
#       > pure adj in dictionary (leave untagged) > verb conjugation
#       (VBN for -en/-edn, VBG for -ing).
########
fixvadj(L("n"))
{
if (!L("n"))
	return;
if (pnvar(L("n"),"mypos"))
	return;
# Use preference if any. #
# Assuming we're in a noun phrase...
if (L("pos") = pnvar(L("n"),"pos_np"))
	{
	chpos(L("n"),L("pos"));
	return;
	}
if (pnvar(L("n"),"hyphenated"))
	{
	chpos(L("n"),"JJ");
	return;
	}
if (!pnvar(L("n"),"verb"))
	return;	# 10/09/06 AM.
# IF OUR DICTIONARY LISTS AN ADJ, USE IT. #
if (pnvar(L("n"),"adj"))
	return;	# 05/22/07 AM.
L("vc") = vconj(L("n"));
if (L("vc") == "-en" || L("vc") == "-edn")
	chpos(L("n"),"VBN");
else if (L("vc") == "-ing")
	chpos(L("n"),"VBG");
}
########
# FUNC: ADJCONJ
# SUBJ: Get adjective form.
# NOTE: For pos scoring, etc.
# Short for adj conjugation.
# RET:  "JJ" (plain), "JJR" (comparative), "JJS" (superlative),
#       or 0 for short/exceptional "-er" words.
#       Dictionary attributes take precedence over suffix guesses.
########
adjconj(L("word"))
{
if (!L("word"))
	return 0;
if (finddictattr(L("word"),"adj-plain"))
	return "JJ";	# 01/15/05 AM.
if (finddictattr(L("word"),"-er"))
	return "JJR";
if (finddictattr(L("word"),"-est"))
	return "JJS";
if (strendswith(L("word"),"est"))
	return "JJS";
if (strendswith(L("word"),"er"))
	{
	# Watch for exceptions.
	L("len") = strlength(L("word"));
	if (L("len") < 3)
		return 0;
	# Reject when the char before "er" is "e" (e.g. "...eer"
	# words) -- presumably not a comparative. (review: confirm
	# the intended exception class.)
	if (strpiece(L("word"),
		L("len")-3, L("len")-3) == "e")
		return 0;
	return "JJR";
	}
return "JJ";
}
########
# FUNC: FIXADV
# SUBJ: Fixup adverb.
# NOTE: For pos scoring, etc.
#       Tags RB/RBR/RBS via advconj() unless already tagged.
########
fixadv(L("n"))
{
if (!L("n"))
	return;
if (pnvar(L("n"),"mypos"))
	return;
# Get comparative, superlative.
L("txt") = nodetext(L("n"));
if (!L("txt"))
	return;
if (L("myp") = advconj(L("txt")))
	chpos(L("n"),L("myp"));
}
########
# FUNC: ADVCONJ
# SUBJ: Get adverb form.
# NOTE: For pos scoring, etc.
# Short for adverb conjugation.
# RET:  "RB" (plain), "RBR" (comparative), "RBS" (superlative),
#       or 0 for short/exceptional "-er" words.
#       Mirrors adjconj() with RB* tags.
########
advconj(L("word"))
{
if (!L("word"))
	return 0;
if (finddictattr(L("word"),"adv-plain"))
	return "RB";
if (finddictattr(L("word"),"-er"))
	return "RBR";
if (finddictattr(L("word"),"-est"))
	return "RBS";
if (strendswith(L("word"),"est"))
	return "RBS";
if (strendswith(L("word"),"er"))
	{
	# Watch for exceptions.
	L("len") = strlength(L("word"));
	if (L("len") < 3)
		return 0;
	if (strpiece(L("word"),
		L("len")-3, L("len")-3) == "e")
		return 0;
	return "RBR";
	}
return "RB";
}
########
# FUNC: FIXNPNONHEAD
# SUBJ: Fix adj,noun,alpha in np. Non-head node.
# NOTE: For pos scoring, etc.
# EFFECT: Assumes we're in rule match.
# ARGS: ord = element position in the current rule phrase.
# RET:  The (possibly reduced) node, or nothing on bad input.
#       An _xALPHA is reduced in place to _adj or _noun depending
#       on its dictionary "adj" attribute.
########
fixnpnonhead(
	L("ord")	# Position in current rule.
	)
{
L("node") = eltnode(L("ord"));
if (!L("node") || !L("ord"))
	return;
L("name") = pnname(L("node"));
if (L("name") == "_adj")
	fixadjs(L("node"),L("node"));
else if (L("name") == "_noun")
	fixnouns(L("node"),L("node"));
else if (literal(L("node")))	# _xALPHA
	{
	# DO A REDUCE RIGHT HERE!
	if (pnvar(L("node"),"adj"))
		{
		L("red") = group(L("ord"),L("ord"),"_adj");
		pncopyvars(L("node"),L("red"));
		fixadjs(L("red"),L("red"));
		}
	else
		{
		L("red") = group(L("ord"),L("ord"),"_noun");
		pncopyvars(L("node"),L("red"));
		fixnounnonhead(L("red"));
		}
	L("node") = L("red");	# 05/22/07 AM.
	}
return L("node");	# 05/22/07 AM.
}
########
# FUNC: FIXNPHEAD
# SUBJ: Fix head noun of np. (alphatonoun)
# NOTE: For pos scoring, etc.
# EFFECT: Assumes we're in rule match.
# RET:  The new _noun node, or 0 on bad input.
########
fixnphead(L("ord"))
{
L("n") = eltnode(L("ord"));
if (!L("n") || !L("ord"))
	return 0;
L("red") = group(L("ord"),L("ord"),"_noun");
pncopyvars(L("n"),L("red"));
fixnounhead(L("red"));
return L("red");
}
########
# FUNC: SETPOS
# SUBJ: Set POS if possible.
# NOTE: Tags the given node when it has pre-tag info
#       ("posarr len"); otherwise descends through single-child
#       chains looking for a taggable descendant. Stops at any
#       node with a sibling.
########
setpos(L("node"),L("pos"))
{
if (!L("node"))
	return;
if (pnvar(L("node"),"posarr len"))
	{
	chpos(L("node"),L("pos"));
	return;
	}
L("node") = pndown(L("node"));
while (L("node"))
	{
	if (pnnext(L("node")))
		return;	# Not a singleton chain; give up.
	if (pnvar(L("node"),"posarr len"))
		{
		chpos(L("node"),L("pos"));
		return;
		}
	L("node") = pndown(L("node"));
	}
}
########
# FUNC: SETPOSRANGE
# SUBJ: Assign POS to a list of nodes.
# NOTE: Weaning away from dependence on the pre-tagged
#	stuff, like N("posarr len").
#	This doesn't wipe out existing tags and can't serve
#	to zero out tags.
# TODO: A function like this to handle capitalized phrases more
#	generally.
# ARGS: first..last = inclusive node range; pos = default tag.
#       Per-node "pos_np" overrides the default.
########
setposrange(
	L("first"),
	L("last"),
	L("pos")
	)
{
if (!L("first") || !L("pos"))
	return;
if (L("last"))
	L("last") = pnnext(L("last"));	# Good end boundary.
L("node") = L("first");
while (L("node") != L("last"))
	{
	if (!L("node"))
		return;
	if (!pnvar(L("node"),"mypos"))
		{
		L("p") = pnvar(L("node"),"pos_np");	# 04/19/07 AM.
		if (L("p"))
			chpos(L("node"),L("p"));	# 04/19/07 AM.
		else
			chpos(L("node"),L("pos"));
		}
	L("node") = pnnext(L("node"));
	}
}
########
# FUNC: SETPOSCAPS
# SUBJ: Assign POS to a list of caps.
# NOTE: Tags every non-hyphenated node in the range as "NP".
#       (The plural "NPS" branch is commented out, so the
#       unknown/else branches currently coincide.)
########
setposcaps(
	L("first"),
	L("last")
	)
{
if (!L("first"))
	return;
if (L("last"))
	L("last") = pnnext(L("last"));	# Good end boundary.
L("node") = L("first");
while (L("node") != L("last"))
	{
	if (!L("node"))
		return;
	if (pnvar(L("node"),"hyphenated"))	# 04/22/07 AM.
		;	# Leave it alone.
	else if (pnvar(L("node"),"unknown"))
		chpos(L("node"),"NP");
#	else if (plural(L("node")))
#		chpos(L("node"),"NPS");
	else
		chpos(L("node"),"NP");
	L("node") = pnnext(L("node"));
	}
}
########
# FUNC: FORCEPOSRANGE
# SUBJ: Assign POS to a list of nodes.
# NOTE: Unlike setposrange(), this overwrites unconditionally --
#       no "mypos" check and no "pos_np" override.
# TODO: A function like this to handle capitalized phrases more
#	generally.
########
forceposrange(
	L("first"),
	L("last"),
	L("pos")
	)
{
if (!L("first") || !L("pos"))
	return;
if (L("last"))
	L("last") = pnnext(L("last"));	# Good end boundary.
L("node") = L("first");
while (L("node") != L("last"))
	{
	if (!L("node"))
		return;
	#
	chpos(L("node"),L("pos"));
	#
	L("node") = pnnext(L("node"));
	}
}
########
# FUNC: GROUPONE
# SUBJ: Smart group of one node.
# NOTE: Do the right thing with tagging info.
#       Reduces one rule element to the given nonterminal,
#       optionally copying node variables upward; clears POS
#       bookkeeping when the source was already tagged.
########
groupone(
	L("n"),		# Node to be grouped.
	L("o"),		# Order in rule phrase.
	L("name"),	# Name to reduce node to.
	L("copy")	# 1 if copying variables up.
	)
{
if (!L("n") || !L("o") || !L("name"))
	return;
# Do the reduce here.
L("red") = group(L("o"),L("o"),L("name"));
if (L("copy"))
	{
	pncopyvars(L("n"),L("red"));
	if (pnvar(L("n"),"mypos"))
		clearpos(L("red"),1,0);
	}
}
########
# FUNC: GROUPNP
# SUBJ: Smart group of nodes to a noun phrase.
# NOTE: Do the right thing with tagging info.
# ASS: Assumes dqaninfo and rule have set up S variables.
# RET: The np node.
# FLOW: (1) validate S ranges; (2) pre-reduce fixups (possessive
#       det, adjs, nouns, ne array, negation); (3) reduce to _np;
#       (4) copy head-noun attrs to the np and normalize sem/ne
#       attrs; (5) propagate glom/neg/det flags; (6) domain
#       semantic composition; (7) flag contained pronouns.
########
groupnp()
{
if (!S("ofirst") || !S("olast") || !S("first") || !S("last"))
	{
	if (G("error")) "err.txt" << "groupnp fail" << "\n";
	return 0;
	}
L("name") = "_np";
#"glubby.txt" << phrasetext() << "\n";
# glomming attrs. #
if (pnvar(S("first"),"no-glom-left"))
	++L("no-glom-left");
if (pnvar(S("first"),"no-glom-right"))
	++L("no-glom-right");
# Look for possessive pronoun.
if (pnvar(S("first"),"proposs")
	&& !pnvar(S("first"),"mypos"))
	chpos(S("first"),"PP$");
if (pnname(S("first")) == "_det")
	L("det stem") = pnvar(S("first"),"stem");
if (S("firstj"))
	fixadjs(S("firstj"),S("lastj"));
fixnouns(S("firstn"),S("lastn"));
L("ne arr") = nenoderange(S("firstan"),S("lastan"));
# Look for negation.
L("neg") = attrinrange("neg",S("first"),S("last"));
# Do the reduce here.
L("red") = group(S("ofirst"),S("olast"),L("name"));
#if (!L("red"))
#	"glubby.txt" << "NO RED" << "\n";
#if (L("neg"))
#	"sem.txt" << "neg np=" << pnvar(L("red"),"$text") << "\n";
# COPY ATTRS FROM HEAD NOUN TO NP.
pncopyvars(S("last"),L("red"));
# Todo: Clear domain semantics.
clearpos(L("red"),1,1);
#if (S("firstn") && S("firstn") != S("last"))	#
#	semrange(L("red"),S("firstn"),S("last"));	#
semrange(L("red"),S("first"),S("last"));	# 2/8/11 AM.
#if (S("firstan") && S("firstan") != S("last"))
#	semnpnouns(L("red"),S("firstan"),S("last"),L("neg"));
# Try to compose with context.
#nodevsarray(L("red"),G("context entities"));
# Try to get an np semantics.
# Precedence for np "sem": ne type > head sem > clear stale value.
L("sem") = pnvar(S("last"),"sem");
L("ne") = pnvar(S("last"),"ne");
L("typ") = pnvar(S("last"),"ne type");
#if (!L("typ") && L("ne"))	#
#	L("typ") = "name";	#
if (L("typ"))
	pnreplaceval(L("red"),"sem",L("typ"));
else if (L("sem"))
	pnreplaceval(L("red"),"sem",L("sem"));
else if (pnvar(L("red"),"sem"))
	pnreplaceval(L("red"),"sem",0);
# ne attrs belong to the head word, not the np; zero them out.
if (pnvar(L("red"),"ne"))
	pnreplaceval(L("red"),"ne",0);
if (pnvar(L("red"),"ne text"))
	pnreplaceval(L("red"),"ne text",0);
if (pnvar(L("red"),"ne type"))
	pnreplaceval(L("red"),"ne type",0);
if (L("ne arr"))
	pnreplaceval(L("red"),"ne arr",L("ne arr"));
if (L("neg"))
	pnreplaceval(L("red"),"neg",L("neg"));
if (L("no-glom-left"))
	pnreplaceval(L("red"),"no-glom-left",1);
if (L("no-glom-right"))
	pnreplaceval(L("red"),"no-glom-right",1);
if (L("det stem"))
	pnreplaceval(L("red"),"det stem",L("det stem"));
# DOMAIN SPECIFIC.
if (S("firstj"))
	semcomposenp(L("red"),S("firstj"),S("last"));
else if (S("firstn"))
	semcomposenp(L("red"),S("firstn"),S("last"));
#domnpsem(L("red"),S("firstj"),S("lastj"),S("firstn"),S("lastn"));	#
# Pronoun looks. #
if (S("firstd"))
	{
	L("dd") = S("firstd");
	if (S("lastd"))
		L("ee") = pnnext(S("lastd"));
	else
		L("ee") = S("lastd");
	while (L("dd") != L("ee"))
		{
		# Mark if NP contains pronouns.
		if (pnname(L("dd")) == "_pro")
			pnreplaceval(L("red"),"pro",1);
		L("dd") = pnnext(L("dd"));
		}
	}
return L("red");
}
########
# FUNC: NOUNTONP
# SUBJ: Reduce noun to np.
# RET: Return the np node created here.
# ARGS: o = rule-element position (may span several nodes);
#       bracket = passed through to clearpos().
# NOTE: Non-head _adj/_noun nodes in the span are fixed up before
#       the reduce; a lone _det is flagged as pronoun-like.
########
nountonp(L("o"),L("bracket"))
{
if (!L("o"))
	return;
L("n") = eltnode(L("o"));
if (!L("n"))
	return;
# If more than one noun, process nonheads.
L("e") = lasteltnode(L("o"));
# Gather ne array. #
L("ne arr") = nenoderange(L("n"),L("e"));	# 11/16/10 AM.
if (L("e") != L("n"))
	{
	# Walk backward from the node before the head.
	L("tmp") = pnprev(L("e"));
	L("start") = pnprev(L("n"));
	while (L("tmp") != L("start"))
		{
		L("name") = pnname(L("tmp"));
		if (L("name") == "_adj")
			fixadjs(L("tmp"),L("tmp"));
		else if (L("name") == "_noun")
			fixnouns(L("tmp"),L("tmp"));
		L("tmp") = pnprev(L("tmp"));
		}
	}
else
	{
	if (pnname(L("n")) == "_det")	# 11/30/07 AM.
		L("pro") = 1;	# Flag pronoun-like det.
	}
# Do the reduce here.
L("red") = group(L("o"),L("o"),"_np");
pncopyvars(L("e"),L("red"));
clearpos(L("red"),1,L("bracket"));
if (L("pro"))
	pnreplaceval(L("red"),"pro",1);	# 11/30/07 AM.
# DOMAIN SPECIFiC.
semcomposenp(L("red"),L("n"),L("n"));
if (L("ne arr"))	# 11/16/10 AM.
	pnreplaceval(L("red"),"ne arr",L("ne arr"));	# 11/16/10 AM.
return L("red");
}
########
# FUNC: VERBTOVG
# SUBJ: Reduce verb to vg.
# RET: Return the vg node created here.
# ARGS: o..e = rule-element range to reduce; voice/pos forwarded
#       to fixvg().
# NOTE(review): L("neg") is never assigned here, so mhbv() always
#       receives 0 for that slot -- confirm this is intended.
########
verbtovg(L("o"),L("e"),L("voice"),L("pos"))
{
if (!L("o") || !L("e"))
	return;
L("n") = eltnode(L("o"));
if (!L("n"))
	return;
# Do the reduce here.
# L("e") allows for a range of nodes to be reduced.
L("red") = group(L("o"),L("e"),"_vg");
L("voicex") = mhbv(L("red"),L("neg"),0,0,0,0,L("n"));
pncopyvars(L("n"),L("red"));
clearpos(L("red"),1,0);
pnreplaceval(L("red"),"first verb",L("n"));
pnreplaceval(L("red"),"verb node",L("n"));
fixvg(L("red"),L("voice"),L("pos"));
return L("red");
}
########
# FUNC: VGVGCOMPOUND
# SUBJ: Glom two vgs together.
# RET: Return the vg node created here.
# NOTE: "has helped build..."
#       Records both component vgs and the matched pattern on the
#       compound; voice is inherited from the first vg.
########
vgvgcompound(L("o_vg1"),L("o_vg2"),L("pattern"))
{
if (!L("o_vg1") || !L("o_vg2"))
	return;
if (L("o_vg1") > L("o_vg2"))
	{
	"err.txt" << "vgvgcompound: Bad range=" << L("o_vg1")
		<< " " << L("o_vg2") << "\n";
	exitpass();
	}
L("vg1") = eltnode(L("o_vg1"));
L("vg2") = eltnode(L("o_vg2"));
if (!L("vg1") || !L("vg2"))
	return;
# Do the reduce here.
# L("e") allows for a range of nodes to be reduced.
L("red") = group(L("o_vg1"),L("o_vg2"),"_vg");
pnreplaceval(L("red"),"compound-vg",1);
pnreplaceval(L("red"),"first vg",L("vg1"));
pnreplaceval(L("red"),"last vg",L("vg2"));
pnreplaceval(L("red"),"pattern",L("pattern"));
L("voice") = pnvar(L("vg1"),"voice");
if (L("voice"))
	pnreplaceval(L("red"),"voice",L("voice"));
return L("red");
}
########
# FUNC: GROUPVGPREP
# SUBJ: Join a verb and particle/preposition.
# RET: Return the np node created here.
#      (Actually returns the new _vg node; the vg's attrs are
#      copied from the verb-group side and the prep is recorded
#      under "prep node".)
########
groupvgprep(L("o_vg"),L("o_prep"))
{
if (!L("o_vg") || !L("o_prep"))
	return 0;
if (L("o_vg") > L("o_prep"))
	{
	"err.txt" << "groupvgprep: Bad range=" << L("o_vg")
		<< " " << L("o_prep") << "\n";
	exitpass();
	}
L("vg") = eltnode(L("o_vg"));
L("prep") = eltnode(L("o_prep"));
if (!L("vg") || !L("prep"))
	return 0;
# Do the reduce here.
# L("e") allows for a range of nodes to be reduced.
L("red") = group(L("o_vg"),L("o_prep"),"_vg");
pncopyvars(L("vg"),L("red"));
pnreplaceval(L("red"),"prep/phrasal",1);
pnreplaceval(L("red"),"prep node",L("prep"));
return L("red");
}
########
# FUNC: PREPNPTOADVL
# SUBJ: Reduce prep+np to advl.
# RET: Return the advl node created here.
# NOTE: Marks the result as a pp and records the np node. A "by"
#       preposition additionally flags by-np / by-actor (for
#       passive-agent handling downstream).
########
prepnptoadvl(L("o prep"),L("o np"))
{
if (!L("o prep") || !L("o np"))
	return 0;
if (L("o prep") > L("o np"))
	{
	"err.txt" << "prepnptoadvl: Bad range=" << L("o prep")
		<< " " << L("o np") << "\n";
	exitpass();
	}
L("n prep") = eltnode(L("o prep"));
L("n np") = eltnode(L("o np"));
if (!L("n prep"))
	return 0;
# Todo: by-np and by-actor semantics can go here...
L("p stem") = pnvar(L("n prep"),"stem");
L("red") = group(L("o prep"),L("o np"),"_advl");
pnreplaceval(L("red"),"pp",1);
pnreplaceval(L("red"),"np",L("n np"));	# 11/12/07 AM.
if (L("p stem") == "by")
	{
	pnreplaceval(L("red"),"by-np",1);
	if (semactornode(L("n np")))
		pnreplaceval(L("red"),"by-actor",L("n np"));
	}
return L("red");
}
########
# FUNC: ALPHATOVERB
# SUBJ: Smart group of alpha to verb.
# NOTE: Do the right thing with tagging info.
# RET:  The new _verb node.
# FLOW: Reduce _xALPHA to _verb; apply explicit voice if given
#       (non-"AMBIG"), else defer to fixverb(); backfill stem/suffix
#       from nvstem() when the token had no suffix analysis.
########
alphatoverb(
	L("ord"),	# order of alpha elt in phrase.
	L("voice"),	# Verb voice.
	L("pos")	# Verb pos.
	)
{
L("n") = eltnode(L("ord"));
if (!L("n") || !L("ord"))
	return;
# Do the reduce here.
L("v") = group(L("ord"),L("ord"),"_verb");
pncopyvars(L("n"),L("v"));
if (L("voice") && L("voice") != "AMBIG")
	{
	pnreplaceval(L("v"),"voice",L("voice"));	# Ok
	if (L("voice") == "active")
		{
		pnreplaceval(L("v"),"-edn",0);
		# -ed if appropriate...
		}
	else if (L("voice") == "passive")
		{
		pnreplaceval(L("v"),"-edn",0);
		pnreplaceval(L("v"),"suff","en");
		pnreplaceval(L("v"),"-en",1);
		}
	}
else if (L("voice") || L("pos"))
	{
	L("vc") = fixverb(L("v"),L("voice"),L("pos"));
	if (L("vc"))	# FIX.	# 09/22/19 AM.
		pnreplaceval(L("v"),"voice",L("vc"));	# FIX.	# 09/22/19 AM.
	}
# Other fixups as needed. #
if (!pnvar(L("n"),"suff"))	# 09/09/19 AM.
	{
	L("txt") = strtolower(pnvar(L("n"),"$text"));
	L("stem arr") = nvstem(L("txt"));
	if (L("stem") = L("stem arr")[0] )
		{
		pnreplaceval(L("n"),"stem",L("stem"));
		pnreplaceval(L("n"),"sem",L("stem"));
		}
	if (L("suff") = L("stem arr")[1] )
		pnreplaceval(L("n"),"suff",L("suff"));
	}
# Weird NLP++ bug, this test isn't working right. #
# X(3) returns true when there is no X(3)...
# (What may happen is that the last context node gets used by default...a bug..)
if (pnname(X()) == "_sent")	# [WORKAROUND]	# 09/13/19 AM.
	{
	++X("verb_count");	# 09/10/19 AM.	# [WORKAROUND]
	}
return L("v");
}
########
# FUNC: ALPHATOVG
# SUBJ: Smart group of alpha to verb to vg.
# NOTE: Do the right thing with tagging info.
# RET:  The new _vg node (wrapping a freshly-reduced _verb).
########
alphatovg(
	L("ord"),	# order of alpha elt in phrase.
	L("voice"),	# Verb voice.
	L("pos")	# Verb pos.
	)
{
L("n") = eltnode(L("ord"));
if (!L("n") || !L("ord"))
	return;
# Do the reduce here.
L("v") = group(L("ord"),L("ord"),"_verb");
if (X()) ++X("verb_count");	# 09/10/19 AM.
pncopyvars(L("n"),L("v"));
if (L("voice") || L("pos"))
	{
	L("vc") = fixverb(L("v"),L("voice"),L("pos"));
	if (L("vc"))	# FIX.	# 09/22/19 AM.
		pnreplaceval(L("v"),"voice",L("vc"));	# FIX.	# 09/22/19 AM.
	}
# Do the reduce here.
L("vg") = group(L("ord"),L("ord"),"_vg");
mhbv(L("vg"),0,0,0,0,0,L("v"));
pncopyvars(L("v"),L("vg"));
pnreplaceval(L("vg"),"voice",L("voice"));
pnreplaceval(L("vg"),"verb node",L("v"));
pnreplaceval(L("vg"),"first verb",L("v"));
clearpos(L("vg"),1,0);	# Zero out token info.
return L("vg");
}
########
# FUNC: ALPHATOVGCOPY
# SUBJ: Smart group of alpha to verb to vg. Copy from given v.
# NOTE: Do the right thing with tagging info.
#       Reuses the "mypos" tag of an existing verb/vg node when
#       reducing the alpha at position n.
########
alphatovgcopy(L("n"),L("v"))
{
if (!L("n") || !L("v"))
	return;	# Error.
if (pnname(L("v")) == "_vg")
	L("v") = pnvar(L("v"),"verb node");
L("pos") = pnvar(L("v"),"mypos");
alphatovg(L("n"),0,L("pos"));
}
########
# FUNC: VTREEBANKTOPOS
# SUBJ: Convert treebank to our pos notation.
# RET:  Conjugation label for a Penn Treebank verb tag, or 0 for
#       anything unrecognized.
########
vtreebanktopos(L("tag"))
{
if (L("tag") == "VB" || L("tag") == "VBP")
	return "inf";	# Base form / non-3sg present.
else if (L("tag") == "VBG")
	return "-ing";	# Present participle.
else if (L("tag") == "VBZ")
	return "-s";	# 3rd-singular present.
else if (L("tag") == "VBD")
	return "-ed";	# Simple past.
else if (L("tag") == "VBN")
	return "-en";	# Past participle.
return 0;
}
########
# FUNC: ALPHAUNAMBIGRED
# SUBJ: Reduce unambiguous alpha.
# OLD: Renamed from ALPHAUNAMBIG. #
# RET:  1 if the alpha had exactly one possible part of speech and
#       was reduced to it; 0 otherwise.
########
alphaunambigred(L("ord"))
{
if (!L("ord"))
	return 0;
L("n") = eltnode(L("ord"));
if (!L("n"))
	return 0;
if (pnvar(L("n"),"pos num") != 1)
	return 0;	# Still ambiguous (or untagged).
if (pnvar(L("n"),"noun"))
	alphatonoun(L("ord"));
else if (pnvar(L("n"),"verb"))
	alphatoverb(L("ord"),0,0);
else if (pnvar(L("n"),"adj"))
	alphatoadj(L("ord"));
else if (pnvar(L("n"),"adv"))
	alphatoadv(L("ord"));
else
	return 0;
return 1;
}
########
# FUNC: ALPHAUNAMBIG
# SUBJ: Reduce unambiguous alpha. [DE-POS]
# ARGS: ord = alpha element position; pos = part-of-speech to
#       disregard (e.g. "noun" to rule out the noun reading).
# RET:  1 if removing that reading left exactly one alternative and
#       the node was reduced to it; 0 otherwise (the reading is
#       removed, "pos num" decremented, and the discard recorded
#       under the "ref <pos>" attribute).
########
alphaunambig(
	L("ord"),
	L("pos")	# part-of-speech to disregard. [DE-POS]
	)
{
if (!L("ord") || !L("pos"))
	return 0;
L("n") = eltnode(L("ord"));
if (!L("n"))
	return 0;
if (!pnvar(L("n"),L("pos")))
	return 0;	# Node doesn't carry that reading anyway.
L("pos num") = pnvar(L("n"),"pos num");
if (L("pos num") > 2)
	{
	# Still ambiguous after removal; just decrement.
	pnreplaceval(L("n"),"pos num",--L("pos num"));
	pnreplaceval(L("n"),L("pos"),0);
	pnreplaceval(L("n"),"ref "+L("pos"),1);	# For reference.
	return 0;
	}
if (L("pos") != "noun" && pnvar(L("n"),"noun"))
	alphatonoun(L("ord"));
else if (L("pos") != "verb" && pnvar(L("n"),"verb"))
	alphatoverb(L("ord"),0,0);
else if (L("pos") != "adj" && pnvar(L("n"),"adj"))
	alphatoadj(L("ord"));
else if (L("pos") != "adv" && pnvar(L("n"),"adv"))
	alphatoadv(L("ord"));
else
	{
	# Decrement.
	pnreplaceval(L("n"),"pos num",--L("pos num"));
	pnreplaceval(L("n"),L("pos"),0);
	pnreplaceval(L("n"),"ref "+L("pos"),1);	# For reference.
	# FIX: was "ref"+pos (no space), inconsistent with the
	# "ref "+pos attribute written above.
	return 0;
	}
return 1;
}
########
# FUNC: ALPHATONOUN
# SUBJ: Smart group of alpha to noun.
# NOTE: Don't know if np head or not.
# RET:  The new _noun node.
#       Flags "ne" on capitalized tokens whose lowercase form is
#       not in the spell dictionary; seeds "sem" from "stem" when
#       the node has no semantics yet.
########
alphatonoun(L("ord"))
{
L("n") = eltnode(L("ord"));
if (!L("n") || !L("ord"))
	return 0;
# Do the reduce here.
L("noun") = group(L("ord"),L("ord"),"_noun");
pncopyvars(L("n"),L("noun"));
L("t") = nodetreetext(L("n"));
L("lc") = strtolower(L("t"));
if (L("t") != L("lc"))	# Has uppercase char.
	{
	if (!spellword(L("lc")))
		pnreplaceval(L("noun"),"ne",1);
	}
# Add some "semantics". #
if (L("sem") = pnvar(L("n"),"sem"))
	return L("noun");	# FIX: was bare "return;", which dropped
				# the documented return value.
L("stem") = pnvar(L("n"),"stem");
if (L("stem"))
	pnreplaceval(L("noun"),"sem",L("stem"));	# FIX.	# 02/19/11 AM.
return L("noun");
}
########
# FUNC: ALPHATOADJ
# SUBJ: Smart group of alpha to adj.
# NOTE: Don't know if in np or not.
# RET:  The new _adj node. Also records comparative/superlative
#       hints on the context node X() when present.
########
alphatoadj(L("ord"))
{
L("n") = eltnode(L("ord"));
if (!L("n") || !L("ord"))
	return 0;
# Do the reduce here.
L("adj") = group(L("ord"),L("ord"),"_adj");
pncopyvars(L("n"),L("adj"));
fixadj(L("adj"));
# Record stuff in sentence. #
if (X())
	{
	# (Could count these)
	if (pnvar(L("adj"),"-er")) pnreplaceval(X(),"er",1);
	if (pnvar(L("adj"),"-est")) pnreplaceval(X(),"est",1);
	pnreplaceval(X(),"comparative-adj",1);
	}
return L("adj");
}
########
# FUNC: ALPHATOADV
# SUBJ: Smart group of alpha to adv.
# RET:  The new _adv node, tagged via fixadv().
########
alphatoadv(L("ord"))
{
L("n") = eltnode(L("ord"));
if (!L("n") || !L("ord"))
	return 0;
# Do the reduce here.
L("adv") = group(L("ord"),L("ord"),"_adv");
pncopyvars(L("n"),L("adv"));
fixadv(L("adv"));
return L("adv");
}
########
# FUNC: DQANINFO
# SUBJ: Set up some variables for dqan.
# ASS: Assume in dqan rule context.
# ARGS: Element positions for det, quantifier, adj, noun (any may
#       be 0/absent).
# EFFECT: Populates S("first"/"last") overall span, S("ofirst"/
#       "olast") element-order span, and per-category firsts/lasts
#       (firstd/lastd, firstq/lastq, firstj/lastj, firstn/lastn)
#       plus the adj+noun span (firstan/lastan) used by groupnp().
########
dqaninfo(
	L("ndet"),
	L("nquan"),
	L("nadj"),
	L("nnoun")
	)
{
if (L("ndet"))
	{
	S("first") = S("firstd") = S("det") = eltnode(L("ndet"));
	S("last") = S("lastd") = lasteltnode(L("ndet"));
	S("ofirst") = S("olast") = L("ndet");
	}
if (L("nquan"))
	{
	S("firstq") = eltnode(L("nquan"));
	S("lastq") = lasteltnode(L("nquan"));
	if (!S("first"))
		S("first") = S("firstq");
	if (S("lastq"))
		{
		S("last") = S("lastq");
		S("olast") = L("nquan");
		}
	if (!S("ofirst"))
		S("ofirst") = L("nquan");
	}
if (L("nadj"))
	{
	S("firstj") = eltnode(L("nadj"));
	S("lastj") = lasteltnode(L("nadj"));
	if (!S("first"))
		S("first") = S("firstj");
	S("firstan") = S("firstj");
	if (S("lastj"))
		{
		S("last") = S("lastan") = S("lastj");
		S("olast") = L("nadj");
		}
	if (!S("ofirst"))
		S("ofirst") = L("nadj");
	}
if (L("nnoun"))
	{
	S("firstn") = eltnode(L("nnoun"));
	S("lastn") = lasteltnode(L("nnoun"));
	if (!S("first"))
		S("first") = S("firstn");
	if (!S("firstan"))
		S("firstan") = S("firstn");
	if (S("lastn"))
		{
		S("last") = S("lastan") = S("lastn");
		S("olast") = L("nnoun");
		}
	if (!S("ofirst"))
		S("ofirst") = L("nnoun");
	}
if (!S("ofirst") || !S("olast")
	|| !S("first") || !S("last"))
	if (G("error")) "err.txt" << "dqaninfo fail." << "\n";
}
########
# FUNC: COPULAQ
# SUBJ: See if verbal noun is a copula.
# RET:  1 if the node (or its vg's verb node) is a copula:
#       a 's contraction, an explicit "copula" attribute, a "be"
#       semantic, or a dictionary "copula" attribute on its stem.
#       Else 0.
########
copulaq(L("n"))
{
if (!L("n"))
	return 0;
if (pnname(L("n")) == "_vg")
	L("n") = pnvar(L("n"),"verb node");
if (!L("n"))
	return 0;
if (pnvar(L("n"),"apos-s"))
	return 1;
if (pnvar(L("n"),"copula"))
	return 1;
L("txt") = nodestem(L("n"));
L("sem") = nodesem(L("n"));
if (L("sem") == "be" || L("sem") == "being"
	|| finddictattr(L("txt"),"copula"))
	return 1;
return 0;
}
@CODE
# Placeholder @CODE region: assignment has no effect (keeps the
# pass file well-formed).
L("hello") = 0;
@@CODE
|
@CODE
# Debug dump: write the count and names of the concepts queued in
# G("nodesToTraverse") to test.txt.
"test.txt" << "\nlength: " << arraylength(G("nodesToTraverse")) << "\n";
L("i") = 0;
"test.txt" << "\n";
while (L("i") < arraylength(G("nodesToTraverse"))) {
  "test.txt" << conceptname(G("nodesToTraverse")[L("i")]) << "\n";
  L("i") = L("i") + 1;
}
@@CODE
|
# Fetch entire path of given concept as a string
# Builds parent>child under the KB root, then prints the child's
# full concept path to output.txt.
G("childConcept") = makeconcept(makeconcept(findroot(), "parent"), "child");
"output.txt" << conceptpath(G("childConcept"));
@NODES _LINE
# Recognize hard-coded person names and pronouns on a line,
# registering each via AddPerson with its gender and word class.
@POST
S("gender") = "male";
S("con") = AddPerson(X("con"),"Fred","male","noun");
single();
@RULES
_name <- Fred @@
@POST
S("gender") = "female";
S("con") = AddPerson(X("con"),"Sally","female","noun");
single();
@RULES
_name <- Sally @@
@POST
S("gender") = "male";
S("con") = AddPerson(X("con"),"he","male","pro");
single();
@RULES
_pro <- he @@
@POST
S("gender") = "female";
S("con") = AddPerson(X("con"),"she","female","pro");
single();
@RULES
_pro <- she @@
|
@NODES _ROOT
# Match a line beginning with "@" (through its newline) as _icdRoot.
@RULES
_icdRoot <-
    _xWILD [one matches=(\@)]
    _xWILD [fails=(\n \r)]
    _xWILD [one matches=(\n \r)]
    @@
|
@MULTI _section _sentence _subsection
# Walk each matched region's children; collect nodes flagged
# "keyword" into G("note_words") and log them to words.txt.
@POST
L("pn_iter") = N(1);
L("wrote") = 0;
while (L("pn_iter")) {
  if (pnvar(L("pn_iter"), "keyword") == 1) {
    AddUniqueCon(G("note_words"), pnname(L("pn_iter")));
    "words.txt" << "\t" << pnname(L("pn_iter")) << "\n";
    L("wrote") = 1;
  }
  L("pn_iter") = pnnext(L("pn_iter"));
  # Blank separator line after the last keyword of a region.
  if ((!L("pn_iter")) && L("wrote")) {
    "words.txt" << "\n";
  }
}
@RULES
_xNIL <-
  _xANY
  _xWILD [fails=(_xEND)]
  _xEND
  @@
|
@DECL
# Map a spelled-out part-of-speech name to its abbreviation.
# Unrecognized names pass through unchanged.
PartOfSpeech(L("pos")) {
  if (L("pos") == "noun")
    return "n";
  if (L("pos") == "verb")
    return "v";
  if (L("pos") == "adjective")
    return "adj";
  if (L("pos") == "adverb")
    return "adv";
  if (L("pos") == "pronoun")
    return "pro";
  if (L("pos") == "interj")
    return "int";
  if (L("pos") == "conjunction")
    return "conj";
  return L("pos");
}
@@DECL
@NODES _LINE
# Match a single postal secondary-unit designator (USPS-style
# abbreviations and spelled-out forms) as _PostalUnit.
@RULES
# Ex: ofc
_PostalUnit <- _xWILD [min=1 max=1 s match=("ofc" "apt" "basement" "bldg" "bsmt" "building" "department" "dept" "fl"
    "floor" "frnt" "front" "hangar" "hngr" "lbby" "lobby" "lot" "lower" "lowr"
    "apartment" "office" "penthouse" "ph" "pier" "rear" "rm" "room" "side" "slip"
    "space" "spcstop" "ste" "stop" "suite" "trailer" "trlr" "unit" "upper" "uppr")] @@
|
@PATH _ROOT _RULES
# Parse the body of a rule file: recursive sub-grammars build up
# elements -> suggested concept / rule elements -> phrases, then the
# top rule assembles _RULE from suggestion, arrow, phrase, end mark.
@RECURSE rulelt
@POST
rfanonlitelt(1)
single()
@RULES
_ELEMENT [base] <- _NONLIT @@
@POST
rfalitelt(1)
single()
@RULES
_ELEMENT [base] <- _LIT @@
_ELEMENT [base] <- _NUM @@
@@RECURSE rulelt
@RECURSE sugg
@POST
rfasugg(1)
single()
@RULES
_SUGG <- _ELEMENT @@
@@RECURSE sugg
@RECURSE elt
@POST
rfaelt(1)
single()
@RULES
_ELT <- _ELEMENT @@
@@RECURSE elt
@RECURSE rulelts
@POST
rfarulelts(1)
single()
@RULES
_PHRASE [base] <- _ELT [plus] @@
@POST
rfberror(1)	# 04/12/03 AM.
@RULES
_xNIL <- _xANY [plus] @@	# 04/12/03 AM.
@@RECURSE rulelts
@POST
rfarule(1, 3)
single()
@RULES
_RULE [base] <-
	_xWILD [one match=(_NONLIT _ELEMENT _LIT) recurse=(rulelt sugg)]
	_ARROW [trig]
	_xWILD [recurse=(rulelt elt rulelts) fail=(_ENDRULE _ARROW)]
	_ENDRULE
	@@
# Can trap missing end rule with a rule like the above whose last elt is
# an _ARROW. Can similarly trap missing arrow.
# Could do such things if an error has been detected in the parse. Or if
# the above rule partially matched, can set a flag.
# Could have a @COND -- conditions for executing rule at all.
@PATH _ROOT _LINE _Caps
# Count school-related subphrases inside a _Caps phrase, noting
# whether each occurs at the end of the phrase.
@POST
++X("schoolroots");
if (N("$end"))
	++X("end schoolroot");
@RULES
_xNIL <- _SchoolRoot [s] @@
_xNIL <- _SchoolType [s] @@	# 12/15/99 AM.
@POST
++X("schoolnames");
if (N("$end"))
	++X("end schoolname");
@RULES
_xNIL <- _SchoolName [s] @@
_xNIL <- _CompleteSchoolName [s] @@
@NODES _LINE
# Match a common male first name (top-frequency list) as _maleName,
# layered as a _humanNamepart.
@RULES
# Ex: joe
_maleName [layer=(_humanNamepart )] <- _xWILD [min=1 max=1 s match=("joe" "john" "robert" "michael" "william" "david" "richard" "charles" "joseph"
	"thomas" "christopher" "daniel" "paul" "mark" "donald" "george" "kenneth" "steven" "edward"
	"brian" "ronald" "anthony" "kevin" "jason" "matthew" "gary" "timothy" "jose" "larry"
	"jeffrey" "frank" "scott" "eric" "stephen" "andrew" "raymond" "gregory" "joshua" "jerry"
	"dennis" "walter" "patrick" "peter" "harold" "douglas" "henry" "carl" "arthur" "ryan"
	"roger" "james" "juan" "jack" "albert" "jonathan" "justin" "terry" "gerald" "keith"
	"samuel" "willie" "ralph" "lawrence" "nicholas" "roy" "benjamin" "bruce" "brandon" "adam"
	"harry" "fred" "wayne" "billy" "steve" "louis" "jeremy" "aaron" "randy" "howard"
	"eugene" "carlos" "russell" "bobby" "victor" "martin" "ernest" "phillip" "todd" "jesse"
	"craig" "alan" "shawn" "clarence" "sean" "philip" "chris" "johnny" "earl" "jimmy"
	"antonio" "CARY")] @@
@NODES _ROOT
# Group a _headerTwo plus everything up to the next _headerTwo (or
# end of input) into a _headerZone, carrying the header along.
@POST
S("header") = N("header",1);
S("count") = 2;
single();
@RULES
_headerZone <-
	_headerTwo	### (1)
	_xWILD [plus fails=(_headerTwo _xEND)]	### (2)
	@@
@CODE
# Sequence checker for numbered lists: G("i") is the next expected
# item number; G("reset") flags that a match advanced it.
G("i") = 1;
G("reset") = 0;
@@CODE
@MULTI _ROOT
# Match "N. text" at a line start only when N equals the expected
# counter; reduce elements 4-7 (indent, number, dot, space).
@CHECK
if (num(N("$text", 5)) == (G("i"))) {
	G("reset") = 1;
	succeed();
}
@POST
S("num") = num(N("$text", 5));
singler(4,7);
@RULES
_listItem <-
	_xWILD [opt matches=(_xSTART _listItem)]	### (1)
	_xWILD [min=0 fails=(\n \r _listItem _xEND)]	### (2)
	_xWILD [one matches=(\n \r)]	### (3)
	_xWILD [opt matches=(_xWHITE) except=(\n \r)]	### (4)
	_xNUM [trig]	### (5)
	\.	### (6)
	_xWILD [one matches=(_xWHITE) except=(\n \r)]	### (7)
	_xWILD [fails=(_xEND)]	### (8)
	@@
# Second rule: after a successful item, bump the expected counter
# exactly once (noop -- no reduction).
@CHECK
#	"debug.txt" << N("$text", 1) << "\n";
if (G("reset")==1) {
	G("reset") = 0;
	"debug.txt" << G("i") << "\n";
	G("i")++;
	succeed();
}
else {
	fail();
}
@POST
noop();
@RULES
_xNIL <-
	_xANY
	@@
@PATH _ROOT _experienceZone _experienceInstance _LINE
# skip single-char alphabetics, as they can be bullets!
@PRE
<1,1> length(1)
@RULES
_xNIL <- _xALPHA [s] @@
# Want all kinds of English stuff here.
# Make caps in English prose invisible.
# More generally, should characterize texty lines with lots of
# lowercase words, verbs, etc.
# A lowercase word (other than and/of) preceding a _Caps phrase
# marks the caps as prose chaff: demote and rename it.
@PRE
<1,1> lowercase()
@POST
N("hi conf",3) = 0;
N("hi class",3) = 0;
setbase(3,"true");
noop();
@RULES
_xNIL <-
	_xALPHA [s except=(and of)]
	_xWHITE [s star]
	_Caps [s rename=(_CHAFF)]
	@@
@DECL
###############################################
# General functions
###############################################
# Find-or-create: return the child of "concept" named "name",
# creating it first when it does not already exist.
AddUniqueCon(L("concept"),L("name")) {
L("existing") = findconcept(L("concept"),L("name"));
if (L("existing"))
return L("existing");
return makeconcept(L("concept"),L("name"));
}
# Add string "value" to attribute "attr" of "concept" unless an equal
# string value is already present. Returns 1 if added, 0 otherwise
# (duplicate or empty value).
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("str") = getstrval(L("val"));
if (L("str") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addstrval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Numeric twin of AddUniqueStr: add number "value" to "attr" of
# "concept" only if not already present. Returns 1 if added, 0 otherwise.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Concept-valued twin: add concept "value" to "attr" of "concept"
# unless one with the same KB path is already present.
# Uniqueness is by conceptpath comparison, not concept identity.
# NOTE(review): writes trace output to unique.txt on every call —
# looks like leftover debugging; confirm before removing.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
# Copy the string value of attribute "attr" from concept "from" to
# concept "to", but only when "to" has no value for that attribute yet
# (an existing target value is never clobbered).
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
# Single combined guard; the original re-tested "from value" inside a
# branch that had already established it was non-empty.
if (L("from value") && !strval(L("to"),L("attr")))
addstrval(L("to"),L("attr"),L("from value"));
}
# Like CopyAttr, but the source and target attributes may have
# different names: copy string "attr from" of "from" into "attr to"
# of "to", only when the target attribute is still empty.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
# Concept-valued variant of CopyAttr: copy a conval attribute from
# "from" to "to" when the target does not already have one.
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
# Return the value list of attribute "attr" on concept "con",
# or 0 when the concept has no attribute by that name.
AttrValues(L("con"),L("attr")) {
L("found") = findattr(L("con"),L("attr"));
if (!L("found"))
return 0;
return attrvals(L("found"));
}
# Return the last (rightmost) child concept of "parent",
# or 0 when "parent" has no children.
LastChild(L("parent")) {
# Guard: without this initialization the original returned an
# uninitialized local when the parent was childless.
L("last") = 0;
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
# Create a child of "con" with a uniquified name: "count name" plus a
# running counter (e.g. "item1", "item2", ...). Returns the new concept.
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# Increment the numeric attribute "countname" on "con", creating it
# with value 1 on first use. Returns the new count.
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
# Build a unique name "<root><n>" by bumping the per-concept counter
# stored under attribute "root".
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip a trailing run of digits from "name" (e.g. "item12" -> "item").
# Returns 0 when the whole string is digits. The loop walks "i" left
# while name[i..len] is all digits; on exit name[i] is the last
# non-digit character and the prefix through it is returned.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functins
###############################################
# Dump the KB subtree rooted at "con" to <app>/kb/<file>.kb,
# logging success or failure to kb.txt.
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# Load (take) a previously dumped KB file <app>/kb/<filename>.kb,
# logging the outcome to kb.txt.
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << "  Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << "  Taken FAILED: " << L("path") << "\n";
}
}
# Count the immediate children of concept "con".
ChildCount(L("con")) {
L("n") = 0;
L("kid") = down(L("con"));
while (L("kid")) {
L("n") = L("n") + 1;
L("kid") = next(L("kid"));
}
return L("n");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
# Write a human-readable .kbb listing of the subtree under "top con"
# to the per-pass display file. "full" selects verbose (one attribute
# per line) formatting. Returns "top con".
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
# Write a banner comment with "text" to the current display file.
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# Build the display filename "anaNNN.kbb", zero-padding the current
# pass number to three digits.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
# Depth-first print of "con" and its siblings at "level" indent.
# Recurses into children; at level 0 it stops after the first concept
# (siblings of the top concept are not printed).
DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) {
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full"));
}
if (L("level") == 0)
return 0;
L("con") = next(L("con"));
}
}
# Print a concept's attributes as attr=[v1,v2,...]. In non-"full" mode
# string values longer than 20 chars are truncated with "...". Values
# may be concept paths, numbers, or strings.
# NOTE(review): the local L("con") here shadows and clobbers the
# concept parameter inside the value loop; harmless because the
# parameter is not used afterwards, but fragile under edits.
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
# Returns a string of "num" spaces (one space when num <= 1).
SpacesStr(L("num")) {
L("spaces") = " ";
L("i") = L("num");
while (L("i") > 1) {
L("spaces") = L("spaces") + " ";
L("i")--;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
# Open the attrs.kb dictionary-attribute file for appending entries.
# NOTE(review): the path uses backslashes (Windows-specific), unlike
# DumpKB/TakeKB which use forward slashes — confirm intended platform.
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
# Emit one dictionary-word attribute record in kb file syntax.
# "attrType" selects the value encoding: "str", "num", or "con".
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
addword(L("word"));
addword(L("attrName"));
G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
G("attrs") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
G("attrs") << "pst\n" << "\"" << L("value") << "\"";
else if (L("attrType") == "num")
G("attrs") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
G("attrs") << "pcon\n" << conceptpath(L("value"));
G("attrs") << "\nend ind\n\n";
}
# Terminate the attrs.kb file and close it.
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
@@DECL
|
@CODE
# Open an output file for writing trend info.
G("trend file") = ofile("trend.txt");
# Get the root concept of the dictionary.
G("sys") = getconcept(G("root"), "sys" );
G("dict") = getconcept(G("sys" ), "dict");
G("dict alpha") = getconcept(G("dict"), "a" ); # Alphabetic dictionary hierarchy.
@@CODE
|
# Tally resume-section header words/phrases on the containing _LINE,
# bumping per-category counters; "$end" marks a header ending the line.
@PATH _ROOT _LINE _header
@POST
if (N("$end"))
++X("end experience hdr");
++X("experience hdrs");
@RULES
_xNIL <- _ExperienceHeaderWord [s] @@
_xNIL <- _ExperienceHeaderPhrase [s] @@
@POST
if (N("$end"))
++X("end education hdr");
++X("education hdrs");
@RULES
_xNIL <- _EducationHeaderWord [s] @@
_xNIL <- _EducationHeaderPhrase [s] @@
@POST
if (N("$end"))
++X("end skills hdr");
++X("skills hdrs");
@RULES
_xNIL <- _SkillsHeaderWord [s] @@
_xNIL <- _SkillsHeaderPhrase [s] @@
|
# For each _term, register its leading alphanumeric token via
# AddPhrase (defined elsewhere) and log it.
# NOTE(review): the debug.txt write looks like leftover tracing.
@NODES _term
@POST
AddPhrase(N(2));
"debug.txt" << N("$text", 2) << "\n";
@RULES
_xNIL <-
_xSTART ### (1)
_xWILD [one match=(_xALPHA _xNUM)] ### (2)
@@
|
@CODE
L("hello") = 0;
@@CODE
@NODES _sent
# Resolve 2-3 uncharacterized alphas after a det/quan/adj prefix and
# reduce the whole span to _np. Each alpha must be noun- or
# adj-capable; alphas are retagged (noun vs adj) before grouping.
@CHECK
if (!N("noun",3) && !N("adj",3))
fail();
if (!N("noun",2) && !N("adj",2))
fail();
if (N(4))
{
if (!N("noun",4) && !N("adj",4))
fail();
}
@POST
L("tmp3") = N(3);
L("tmp2") = N(2);
if (N("noun",3))
{
group(3,3,"_noun");
pncopyvars(L("tmp3"),N(3));
fixnounnonhead(N(3));
}
else
{
group(3,3,"_adj");
pncopyvars(L("tmp3"),N(3));
fixadj(N(3));
}
# Infinitive-form verb/noun ambiguity resolves to noun here.
if (N("verb",2) && N("noun",2)
&& vconjq(N(2),"inf"))
{
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
fixnounnonhead(N(2));
}
else if (N("adj",2))
{
group(2,2,"_adj");
pncopyvars(L("tmp2"),N(2));
fixadj(N(2));
}
else
{
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
fixnounnonhead(N(2));
}
# Element 4 is head only when no trailing _noun (element 5) follows.
if (N(4))
{
if (N(5))
fixnpnonhead(4);
else
fixnphead(4);
}
singler(1,5);
@RULES
_np <-
_xWILD [plus match=(_det _quan _num _xNUM _adj)]
_xALPHA
_xALPHA
_xALPHA [opt]
_noun [star]
_xWILD [one lookahead fail=(_xALPHA)]
@@
# prep/conj + alpha alpha + noun* -> _seg (typed "np").
# Same alpha-resolution logic as the _np rule above, but the result
# is a _seg excluding the leading prep/conj.
@CHECK
if (!N("noun",3) && !N("adj",3))
fail();
if (!N("noun",2) && !N("adj",2))
fail();
@POST
L("tmp3") = N(3);
L("tmp2") = N(2);
if (N("noun",3))
{
group(3,3,"_noun");
pncopyvars(L("tmp3"),N(3));
if (N(4)) # 04/21/07 AM.
fixnounnonhead(N(3));
else
fixnphead(3); # 04/21/07 AM.
}
else
{
group(3,3,"_adj");
pncopyvars(L("tmp3"),N(3));
fixadj(N(3));
}
if (N("verb",2) && N("noun",2)
&& vconjq(N(2),"inf"))
{
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
fixnounnonhead(N(2));
}
else if (N("adj",2))
{
group(2,2,"_adj");
pncopyvars(L("tmp2"),N(2));
fixadj(N(2));
}
else
{
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
fixnounnonhead(N(2));
}
S("seg type") = "np";
singler(2,4);
@RULES
_seg <-
_xWILD [plus match=(_prep _conj)]
_xALPHA
_xALPHA
_noun [star]
_xWILD [one lookahead match=(_prep _xPUNCT _xEND _dbldash)]
@@
# pro alpha+
# pro alpha alpha alpha ...
# Possessive pronoun followed by up to six alphas: scan left-to-right
# for the longest run of noun/adj-capable alphas ("o bad" = cutoff),
# then group pronoun + run (+ optional trailing _noun) into _np.
@CHECK
S("o start") = 2;
if (N(7))
S("o end") = 7;
else if (N(6))
S("o end") = 6;
else if (N(5))
S("o end") = 5;
else if (N(4))
S("o end") = 4;
else if (N(3))
S("o end") = 3;
else
S("o end") = 2;
S("ii") = S("o start");
S("o bad") = 2; # Cutoff point for np.
while (S("ii") <= S("o end"))
{
L("n") = eltnode(S("ii"));
if (pnvar(L("n"),"noun") || pnvar(L("n"),"adj"))
S("o bad") = ++S("ii");
else
S("ii") = S("o end") + 1; # terminate.
}
if (S("o bad") > S("o start"))
succeed();
else
fail(); # 04/24/10 AM.
@POST
if (!N("mypos",1))
chpos(N(1),"PP$"); # pro/PP$
# Figure out the np head.
if (S("o bad") > S("o end"))
{
if (!N(8))
{
fixnphead(S("o end"));
L("ii") = S("o end") - 1;
}
else
{
L("include nouns") = 1;
L("ii") = S("o end");
}
}
else
{
S("o end") = S("o bad") - 1;
fixnphead(S("o end"));
L("ii") = S("o end") - 1;
}
# All elements before the head become non-head np constituents.
while (L("ii") >= S("o start"))
{
fixnpnonhead(L("ii"));
--L("ii");
}
# Now group a noun phrase.
if (L("include nouns"))
{
# setlookahead(9);
group(S("o start"),8,"_np");
}
else
{
# setlookahead(S("o end") + 1);
group(S("o start"),S("o end"),"_np");
}
@RULES
_xNIL <-
_xWILD [s one match=(_proPoss)] ### (1)
_xALPHA ### (2)
_xALPHA [opt] ### (3)
_xALPHA [opt] ### (4)
_xALPHA [opt] ### (5)
_xALPHA [opt] ### (6)
_xALPHA [opt] ### (7)
_noun [star] ### (8)
_xWILD [one lookahead match=(
_prep _xPUNCT _xEND _dbldash _fnword)] ### (9)
@@
# alpha alpha alpha
# Two or three noun/adj-capable alphas sandwiched between clause-level
# anchors (conj/prep/verb): resolve heads and group as _np.
@CHECK
if (!N("noun",3) && !N("adj",3))
fail();
if (!N("noun",2) && !N("adj",2))
fail();
if (N(4))
{
if (!N("noun",4) && !N("adj",4))
fail();
}
@POST
if (N(4)) # 04/23/07 AM.
{
fixnphead(4); # 04/23/07 AM.
fixnpnonhead(3); # 04/23/07 AM.
}
else
fixnpnonhead(3); # 04/23/07 AM.
fixnpnonhead(2); # 04/23/07 AM.
# group(2,4,"_seg");
# N("seg type",2) = "np";
group(2,4,"_np");
@RULES
_xNIL <-
_xWILD [one match=(_conj _prep _verb _vg)]
_xALPHA
_xALPHA
_xALPHA [opt]
_xWILD [one lookahead match=(_prep _conj _verb _vg)]
@@
# det/quan/adj/noun prefix + alpha (+ noun*) -> _seg typed "np".
# Requires a non-empty prefix and a noun/adj-capable, non-verb alpha;
# records which category started the segment in "seg first".
@CHECK
if (!N(1) && !N(2) && !N(3) && !N(4))
fail();
if (!N("noun",5) && !N("adj",5))
fail();
if (N(4) && !N("noun",5))
fail();
if (N("verb",5))
fail();
@POST
S("seg type") = "np";
if (N(1))
S("seg first") = "det";
else if (N(2))
S("seg first") = "quan";
else if (N(3))
S("seg first") = "adj";
else if (N(4))
S("seg first") = "noun";
L("tmp5") = N(5);
if (N(6))
fixnpnonhead(5);
else
fixnphead(5);
singler(1,6);
@RULES
_seg <-
_det [star]
_xWILD [star match=(_quan _num _xNUM)]
_adj [star]
_noun [star]
_xALPHA
_noun [star]
_xWILD [one lookahead fail=(_xALPHA _aposS)]
@@
# Possessive-pronoun variant of the rule above.
@CHECK
if (!N("noun",5) && !N("adj",5))
fail();
if (N(4) && !N("noun",5))
fail();
if (N("verb",5))
fail();
@POST
S("seg type") = "np";
S("seg first") = "pro";
singler(1,6);
@RULES
_seg <-
_proPoss [s]
_xWILD [star match=(_quan _num _xNUM)]
_adj [star]
_noun [star]
_xALPHA
_noun [star]
_xWILD [one lookahead fail=(_xALPHA)]
@@
# Some random stuff.
# np np , alpha
# Sentence-final "np np , alpha" where one np is an apposition:
# resolve the alpha as a noun np and delete the appositive comma.
@CHECK
if (!N("apposition",1) && !N("apposition",2))
fail();
if (!N("noun",4))
fail();
@POST
L("tmp4") = N(4);
group(4,4,"_noun");
pncopyvars(L("tmp4"),N(4));
fixnoun(N(4));
nountonp(4,1);
listadd(2,3,"false"); # Get rid of appositive comma.
@RULES
_xNIL <-
_np
_np
\,
_xALPHA
_xEND
@@
|
@CODE
# # G("eui_to_codes_root") = getconcept(findroot(), "eui_to_codes");
# # G("eui_to_codes") = getconcept(G("eui_to_codes_root"), "top");
# # G("parse") = getconcept(findroot(),"topconcept");
# # G("code_to_eui") = makeconcept(findroot(), "code_to_eui");
# # G("top") = makeconcept(G("code_to_eui"), "top");
# # G("rare") = makeconcept(G("code_to_eui"), "rare");
# # G("1") = makeconcept(G("top"), "V1.06");
# # G("2") = makeconcept(G("top"), "13.06");
# # G("3") = makeconcept(G("top"), "14.50");
# # # addnumval(G("1"), "one", 1);
# # # addstrval(G("1"), "two", "this is the number two, including special characters, like |");
# # # addnumval(G("1"), "three", 3);
# # addstrval(G("1"), "euis", "V1.06");
# # addstrval(G("1"), "euis", "13.06");
# # addstrval(G("1"), "euis", "14.50");
# # addstrval(G("2"), "euis", "V1.06");
# # addstrval(G("2"), "euis", "13.06");
# # addstrval(G("2"), "euis", "14.50");
# # addstrval(G("3"), "euis", "V1.06");
# # addstrval(G("3"), "euis", "13.06");
# # addstrval(G("3"), "euis", "14.50");
# # G("one") = makeconcept(G("rare"), "1");
# # G("two") = makeconcept(G("rare"), "2");
# # G("three") = makeconcept(G("rare"), "3");
# # addstrval(G("one"), "euis", "E
# # addstrval(G("one"), "euis", "E
# # addstrval(G("one"), "euis", "E
# # addstrval(G("two"), "euis", "14.50");
# # addstrval(G("two"), "euis", "14.50");
# # addstrval(G("two"), "euis", "14.50");
# # addstrval(G("three"), "euis", "14.50");
# # # addstrval(G("three"), "euis", "E
# # # addstrval(G("three"), "euis", "E
# # "debug.txt" << "Should be top: " << conceptname(G("top")) << "\n";
# # L("word") = findconcept(G("eui_to_codes"), "E
# # "debug.txt" << "Should be E
# # L("vals") = findattr(L("word"), "codes");
# # "debug.txt" << "attr is " << attrname(L("vals")) << "\n";
# # L("val") = attrvals(L("vals"));
# # "debug.txt" << "Val is " << getstrval(L("val")) << "\n";
# # "debug.txt" << "Val2 is " << getstrval(nextval(L("val"))) << "\n";
# # G("test_kb") = makeconcept(findroot(), "testkb");
# # G("child_con_one") = makeconcept(G("test_kb"), "child_one");
# # G("child_con") = makeconcept(G("child_con_one"), "child");
# # addnumval(G("child_con"), "codes", 3);
# # addstrval(G("child_con"), "p", "0.
# # L("codes") = numval(G("child_con"), "codes");
# # "nums.txt" << "Orig " << L("codes") << "\n";
# # # addnumval(G("child_con"), "samecodes", L("codes"));
# # # G("test_kb") = makeconcept(findroot(), "testkb");
# # # G("child_con_one") = makeconcept(G("test_kb"), "child_two");
# # G("child_two") = makeconcept(G("child_con_one"), "E
# # addnumval(G("child_two"), "codes", 1);
# # addstrval(G("child_two"), "p", "0.
# # L("codes") = numval(G("child_two"), "codes");
# # "nums.txt" << "Origv2 " << L("codes") << "\n";
# # G("child_three") = makeconcept(G("child_con_one"), "child_three");
# # addnumval(G("child_three"), "codes", 2);
# # addstrval(G("child_three"), "p", "0.
# # L("codes") = numval(G("child_three"), "codes");
# # "nums.txt" << "Origv2 " << L("codes") << "\n";
# "test.txt" << "init" << "\n";
# G("euis") = findconcept(findroot(), "euiprobs");
# # DisplayKB(G("euis"), 1);
# G("split_con") = findconcept(G("euis"), "top");
# L("parent_attrs") = findattrs(G("split_con"));
# if L("parent_attrs") {
# L("con_attr_name") = attrname(L("parent_attrs"));
# "test.txt" << "top attrs [0]: " << L("con_attr_name") << "\n";
# }
# else {
# "test.txt" << "no top attrs\n";
# }
# G("eui_con") = findconcept(G("split_con"), "E
# L("con_attrs") = findattrs(G("eui_con"));
# L("attr_name_0") = attrname(L("con_attrs"));
# "test.txt" << "E
# "test.txt" << "\t" << getsval(attrvals(L("con_attrs"))) << "\n";
# L("new_codes") = numval(G("eui_con"), "codes");
# "nums.txt" << conceptname(G("eui_con")) << ": " << L("new_codes") << "\n";
# # findattrs(L("eui_con"))
@@CODE |
# Fetch the next value in a list of values.
L("return_val") = nextval(L("val")); |
# Add the string valueString as a string value to the concept concept's attribute that has name nameString
G("Malibu") = makeconcept(findroot(), "Malibu");
addstrval(G("Malibu"),"Latitude Direction","North");
addstrval(G("Malibu"),"Longitude Direction","West");
addnumval(G("Malibu"),"Latitude value",33);
addnumval(G("Malibu"),"Longitude value",118);
addsval(G("Malibu"),"Route",1+2); |
@CODE
# Emit the opening <History> tag to output.xml before any rules run.
prlit("output.xml","<History>\n");
@@CODE
@RULES
_xNIL <- _xNIL @@
# Create a knowledge base with concept apple, adding attributes have=color, color=red, color=green, yellow, and weight=3
if (findconcept(findroot(),"apple"))
# kill them (to start fresh)
rmconcept(findconcept(findroot(),"apple"));
# Create the apple concept
G("apple") = makeconcept(findroot(),"apple");
# Apples have color
addstrval(G("apple"),"have","color");
# Apple's color is red
addstrval(G("apple"),"color","red");
# Apple's weigh 3 something or others
addnumval(G("apple"),"weight",3);
# Apple's color is also green and yellow
addstrval(G("apple"),"color","green and yellow");
The code creates a KB like this:
|
# Recognize job/agency titles: two or more consecutive words from the
# vocabulary below (plus _agency/_USA nodes) reduce to _title.
# "plural" is set when the matched text ends in "s".
@PATH _ROOT _paragraph _sentence
@POST
L("text") = phrasetext();
if (strendswith(L("text"),"s")) {
S("plural") = 1;
}
single();
@RULES
_title <- _xWILD [min=2 match=(
acting
administration
affairs
air
allen
analyst
army
asset
assets
assistance
assistant
associate
attaché
attorney
bis
border
business
capital
case
casualty
ccips
chief
ci
city
civil
commerce
commission
communications
control
counterintelligence
counterterrorism
country
county
court
crime
crimes
criminal
cyber
dangerous
dea
defense
department
deputy
development
director
district
division
dod
drug
enforcement
environmental
export
fbi
federal
field
food
force
foreign
fraud
front
gang
general
homeland
hsi
human
inspection
inspector
integrity
intellectual
interim
international
investigation
investigations
investigative
judge
justice
labor
laundering
legal
litigation
magistrate
marshals
metropolitan
money
national
natsec
natural
ndca
obscenity
office
operations
police
postal
property
prosecutions
prosecutor
prosecutorial
protection
public
recovery
resources
revenue
rights
safety
salt
sdny
secret
section
securities
security
senior
service
services
small
social
special
state
states
tax
trade
treasury
trial
work
_agency
_USA
)]
@@
|
@CODE
# Bind the KB "format" concept (created if absent) for later passes.
G("format") = getconcept(findroot(),"format");
@@CODE
# Group an HTML table body (<tbody> ... </tbody>) into _bodyZone,
# logging the interior text to zone.txt.
@NODES _ROOT
@POST
"zone.txt" << N("$text",2) << "\n";
single();
@RULES
_bodyZone <-
_tbody ### (1)
_xWILD [fail=(_tbodyClose)] ### (2)
_tbodyClose ### (3)
@@
|
####################
####################
## NODE VARIABLES
####################
####################
# stem = The stemmed text, eg, of head of a phrase.
# sem = A semantic category.
# Not just some default stem of a word.
# ne text = Text for a named entity.
# ne = Flag that node is a named entity.
# name = Flag a named entity.
# nopos = (POS tagging) Things like punctuation, that don't
# really need a part of speech. (Not worrying about things
# like period as eos vs decimal, as far as pos tagging, though
# this could/should certainly be done.)
# posarr = (POS tagging) In PRETAGGED text, as in Penn Treebank
# text, an array of answer-key values for pos.
# number = singular, plural, any, or some special values.
####################
## _TEXTZONE
####################
# func words = Count English functional words (e.g., "the").
####################
## _sent
####################
# clauses = Count clauses in sentence.
# vg count = Count verb-group nodes in sentence.
# (Can flag that a verb is "needed" in a clause.)
####################
## _clause
####################
# clause num = ordinal count of clause in its sentence.
# pattern = Clausal pattern. Values:
# nvnp = np-vg-np-prep ("John held Mary up")
# nvn = np-vg-np, monotransitive verb. ("John likes Mary")
# nvj = np-copula-adj ("John is happy")
# nvp = phrasal/prepositional ("John works out")
# nv = intransitive verb. ("John walks")
# (Passive voice falls in here now.)
# vnp, vn, vj, vp, v = ellipted subject. ("likes Mary")
# n = lone np.
# etc.
# vg node = Pointer to main vg node in clause.
# vg count = Count verb-group nodes in clause.
# voice = Active/passive voice.
# last chunk = last main node name (v, n) in clause
# first = first node name in clause
# last = last node name in clause
###################
## POS TAGS
###################
# WordNet part-of-speech (POS) tags. 36 tags.
# May be copyright of Penn Treebank Project.
# https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
# Same tagset used in NLTK
# https://pythonprogramming.net/part-of-speech-tagging-nltk-tutorial
#
# -----------------------------------------
# CC Coordinating conjunction
# CD Cardinal number
# DT Determiner
# EX Existential 'there'
# FW Foreign word
# IN Preposition or subordinating conjunction
# JJ Adjective
# JJR Adjective, comparative
# JJS Adjective, superlative
# LS List item marker
# MD Modal
# NN Noun, singular or mass
# NNS Noun, plural
# NNP Proper noun, singular
# NNPS Proper noun, plural
# PDT Predeterminer
# POS Possessive ending
# PRP Personal pronoun
# PRP$ Possessive pronoun
# RB Adverb
# RBR Adverb, comparative
# RBS Adverb, superlative
# RP Particle
# SYM Symbol
# TO 'to'
# UH Interjection
# VB Verb, base form
# VBD Verb, past tense
# VBG Verb, gerund or present participle
# VBN Verb, past participle
# VBP Verb, non-3rd person singular present
# VBZ Verb, 3rd person singular present
# WDT Wh-determiner
# WP Wh-pronoun
# WP$ Possessive wh-pronoun
# WRB Wh-adverb
# -----------------------------------------
@CODE
# Placeholder body: this pass exists only for the documentation above.
L("hello") = 0;
@@CODE
|
# Part-of-speech resolution for bare alphas: multi-POS words become
# _ambig (keeping their variables); single-POS words are grouped
# directly into their category node.
@NODES _ROOT
@POST
if (N("pos num") > 1) {
pncopyvars(N(1));
single();
} else if (N("noun")) {
group(1,1,"_noun");
} else if (N("verb")) {
group(1,1,"_verb");
} else if (N("det")) {
group(1,1,"_det");
} else if (N("adj")) {
group(1,1,"_adj");
} else if (N("prep")) {
group(1,1,"_prep");
} else if (N("pro")) {
group(1,1,"_pro");
} else if (N("adv")) {
group(1,1,"_adv");
}
@RULES
_ambig <-
_xALPHA ### (1)
@@
|
# Extract the quoted term field from CSV-like lines
# (<num>,"<term>"<newline>) and append it to the terms file.
@NODES _LINE
@POST
"complete_mimic_icd_terms.txt" << N("$text", 4) << "\n";
noop();
@RULES
_xNIL <-
_xNUM ### (1)
\, ### (2)
_xWILD [opt matches=(\")] ### (3)
_xWILD [fail=(\n \")] ### (4)
_xWILD [one matches=(\n)] ### (5)
@@
|
# Remove whitespace tokens inside table cells.
@NODES _td
@POST
excise(1,1);
noop();
@RULES
_xNIL <-
_xWHITE [s] ### (1)
@@
|
# Strip whitespace within lines, then flag lines containing a
# _language node by setting "up" on the line's third element context.
@NODES _LINE
@POST
excise(1,1);
noop();
@RULES
_xNIL <-
_xWHITE [s] ### (1)
@@
@POST
X("up",3) = 1;
"found.txt" << "this\n";
@RULES
_xNIL <-
_language [s] ### (1)
@@
|
# Classify each numeric token via NumberType (defined elsewhere).
@NODES _ROOT
@POST
NumberType(N(1),num(N("$text")));
@RULES
_xNIL <-
_xNUM ### (1)
@@
|
# Emit a dictionary entry "word country=1" for the tab-separated
# country-name field at the start of each line (<num>\t<name>...).
@NODES _LINE
@POST
"country.dict" << strtolower(N("$text",4)) << " country=1\n";
@RULES
_xNIL <-
_xSTART ### (1)
_xNUM ### (2)
\t ### (3)
_xWILD [fail=(\t)] ### (4)
@@
|
@DECL
# Build the path of the dictionary file for "word": a sibling "dict"
# directory of the app path, with the word's first letter as filename
# and "ext" as extension. Logs the result to path.txt.
# NOTE(review): strpiece(...,strlength(L("last"))) mixes the LENGTH of
# the last path component with a position index — verify this yields
# the intended parent directory on all inputs.
DictionaryFilename(L("word"),L("ext")) {
L("path") = G("$apppath");
L("path") = strpiece(L("path"),0,strlength(L("path"))-2);
L("last") = strrchr(L("path"),"\\");
L("parent") = strpiece(L("path"),0,strlength(L("last"))) + "dict\\";
L("filename") = L("parent") + strpiece(L("word"),0,0) + "." + L("ext");
"path.txt" << L("filename") << "\n";
return L("filename");
}
@@DECL
# Get the entire path of dictionary concept wordString
@CODE
"output.txt" << "1 " << conceptname(addword("hello")) << "\n";
"output.txt" << "2 " << conceptname(wordindex("hello")) << "\n";
"output.txt" << "3 " << findwordpath("hello") << "\n";
"output.txt" << "4 " << findwordpath("olleh") << "\n";
"output.txt" << "5 " << wordpath("foobaz") << "\n";
"output.txt" << "6 " << conceptname(dictfindword("hello")) << "\n";
rmword("foobaz");
Prints out:
1 hello
2 he
3 "concept" "sys" "dict" "a" "h" "he" "hello"
4
5 "concept" "sys" "dict" "a" "f" "fo" "foobaz"
6 hello |
# "The <SchoolNamePhrase>" reduces to _CompleteSchoolName; the []
# attribute marks the reduction as a base (non-decomposable) node.
@NODES _LINE
@RULES
_CompleteSchoolName [] <-
The
_xWHITE [star s]
_SchoolNamePhrase
@@
|
@PATH _ROOT _LINE
# Moving company pattern here. #
# If cap phrase is well formed, could suggest _company here.
# (May still need to extend, eg, "Ford Motor Co. of America").
@POST
# Fold the company-root suffix into the preceding _Caps phrase,
# boosting its company confidence and bookkeeping counters.
N("glommed companyroot",1) = "true";
N("company conf",1) = N("company conf",1) %% 95;
++N("companyroots",1);
++N("end companyroot",1);
++N("len",1);
listadd(1,4);
@RULES
_xNIL <- _Caps \, [s opt] _xWHITE [s star] _companyRoot [s] @@
|
# Tokenize HTML row/cell tags inside a table body: "</tr>"/"</td>"
# become _trClose/_tdClose; "<tr ...>"/"<td ...>" become _tr/_td.
@NODES _bodyZone
@POST
L("name") = "_" + N("$text",3) + "Close";
group(1,4,L("name"));
@RULES
_xNIL <-
\< ### (1)
\/ ### (2)
_xWILD [match=(tr td)] ### (3)
\> ### (4)
@@
@POST
L("name") = "_" + N("$text",2);
group(1,4,L("name"));
@RULES
_xNIL <-
\< ### (1)
_xWILD [one match=(tr td)] ### (2)
_xWILD [fail=(\>)] ### (3)
\> ### (4)
@@
|
# Fetch the word after the given word in the KB dictionary hierarchy
@CODE
L("con") = dictfirst();
while (L("con"))
{
"output.txt" << conceptname(L("con")) << "\n";
L("con") = dictnext(L("con"));
}
Prints a list of the dictionary entries, one per line. |
@CODE
L("hello") = 0;
@@CODE
#@PATH _ROOT _TEXTZONE _sent _clause
@NODES _clause
# Validate well-formed clause.
# [np] vg (adjc|particle): copular/phrasal pattern. Sets the clause
# pattern (nvj/vj/nvp/vp), records the vg node and last element, and
# forces active voice on the verb when a subject or object is present
# (or the verb is the sentence's only vg).
@POST
L("x3") = pnparent(X()); # 07/13/12 AM.
if (!N(3))
X("no subj") = 1;
X("vg node") = N(5);
L("nm") = pnname(N(7));
if (L("nm") == "_adjc")
{
if (N(3))
X("pattern") = "nvj";
else
X("pattern") = "vj";
}
else
{
if (N(3))
X("pattern") = "nvp";
else
X("pattern") = "vp";
}
X("vg") = 1;
if (N(8))
L("n") = lasteltnode(8);
else if (N(7))
L("n") = lasteltnode(7);
else if (N(6))
L("n") = lasteltnode(6);
else
L("n") = lasteltnode(5);
X("last name") = pnname(L("n"));
L("v") = N("first verb",5); # 06/05/06 AM.
if (N(3) || N(7)
|| pnvar(L("x3"),"vg count") == 1) # Lone verb in sent.
{
# Ambiguous VB or VBP.
fixverb(L("v"),"active",0);
X("voice") = N("voice",5) = "active";
}
# Other verb fixup: _aposS. #
fixaposs(X("vg node"),L("v")); # 9/18/05 AM.
X("id") = "qclause100 wfj";
# semclausenvj(X(),2,3,4,5,6,7,8); # Domain semantics for clause.
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv _xPUNCT)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_adjc _particle)]
_xWILD [star match=(_advl _adv)]
@@
# Ellipted copula.
# np + adjc/particle with no verb: treat as a copular clause whose
# verb was ellipted ("[is]"), pattern nvj or nvp.
@POST
X("ellipted-copula") = 1;
L("nm") = pnname(N(5));
if (L("nm") == "_adjc")
X("pattern") = "nvj";
else
X("pattern") = "nvp";
X("id") = "qclause100 n-j";
X("voice") = "active";
# semclausenj(X(),2,3,4,5,6); # Domain semantics for clause.
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_np _pro)] # 3
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_adjc _particle)] # 5
_xWILD [star match=(_advl _adv)]
@@
# Validate well-formed clause.
# [np] vg [np] prep: clause ending in a preposition. Sets pattern
# (nvnp/vnp/nvp/vp) and resolves ambiguous verb forms to active VBD
# when an object or subject supports it.
@POST
L("x3") = pnparent(X()); # 07/13/12 AM.
if (!N(3))
X("no subj") = 1;
X("vg node") = N(5);
if (N(3) && N(7))
X("pattern") = "nvnp";
else if (N(7))
X("pattern") = "vnp";
else if (N(3))
X("pattern") = "nvp";
else
X("pattern") = "vp";
X("vg") = 1;
if (N(8))
L("n") = lasteltnode(8);
else if (N(7))
L("n") = lasteltnode(7);
else if (N(6))
L("n") = lasteltnode(6);
else
L("n") = lasteltnode(5);
X("last name") = pnname(L("n"));
if (N(7))
{
# Ambiguous -edn or inf.
fixvg(N(5),"active","VBD");
X("voice") = "active";
}
else if (N(3)
|| pnvar(L("x3"),"vg count") == 1) # Only 1 verb in sentence.
fixvg(N(5),"active","VBD");
X("id") = "qclause100 wf";
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_prep
@@
# Validate well-formed clause.
# Core [np] vg [np] pattern (nvn/vn/nv/v). Resolves verb voice/tense
# using subject agreement, passive detection, lone-clause and
# first-verb heuristics; records pattern and last-element bookkeeping.
@POST
L("x3") = pnparent(X()); # 07/13/12 AM.
# Characterize verb a bit. #
qclausevg(N(5),X()); # 05/09/07 AM.
# Determine if lone clause. #
if (pnvar(L("x3"),"clauses") == 1
&& X("clause num") == 1)
L("lone clause") = 1;
if (!N(3))
X("no subj") = 1;
X("vg node") = N(5);
if (N(3) && N(7) && !N("prep/phrasal",5))
X("pattern") = "nvn";
else if (N(7) && !N("prep/phrasal",5))
X("pattern") = "vn";
else if (N(3))
X("pattern") = "nv";
else
X("pattern") = "v";
X("vg") = 1;
if (N(8))
L("n") = lasteltnode(8);
else if (N(7))
L("n") = lasteltnode(7);
else if (N(6))
L("n") = lasteltnode(6);
else
L("n") = lasteltnode(5);
X("last name") = pnname(L("n"));
L("v") = N("verb node",5);
L("firstv") = N("first verb",5);
if (L("firstv"))
L("v") = L("firstv"); # 01/08/05 AM.
if (N(7) && !N("prep/phrasal",5))
{
# Ambiguous -edn or inf.
if (singular(N(3)) && vconjq(L("v"),"-ed"))
fixvg(N(5),"active","VBD");
else
fixvg(N(5),"active",0);
}
if (pnvar(L("x3"),"vg count") == 1) # Lone verb in sent.
{
if (N("voice",5) != "passive")
{
fixvg(N(5),"active",0); # 01/07/05 AM.
X("voice") = "active";
}
}
else if (N(3))
{
if (N("voice",5) != "passive" &&
(L("lone clause")
|| N("voice",5) == "active" # Not necess resolved.
|| X("first v in sent") # First verb in sentence.
|| verbfeat(L("v"),"T5") # MBUILD type verb.
))
{
fixvg(N(5),"active",0);
X("voice") = "active";
}
# if (L("v") && !pnvar(L("v"),"mypos"))
# {
# if (pnvar(L("v"),"inf"))
# chpos(L("v"),"VBP");
# }
}
if (N("voice",5))
X("voice") = N("voice",5);
# domnvn(N(2),N(3),N(4),N(5),N(6),N(7),N(8));
# semclausenvn(X(),2,3,4,5,6,7,8,9); # Domain semantics for clause.
X("id") = "qclause100 wf1";
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_xEND
@@
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_xWILD [plus fail=(_prep _np _vg)]
@@
# np vg np np (ditransitive shape): just record the pattern.
@POST
X("pattern") = "nvnn";
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_np _nps)]
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_np _nps)]
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_np _nps)]
_xWILD [star match=(_advl _adv)]
_xEND
@@
# Query patterns.
# (Could be ditransitive also...)
# Verb-initial vg np np: flag as a query and hand off to domain
# semantics via domvnn.
@POST
X("pattern") = "vnn";
X("query") = 1;
domvnn(X(),N(3),N(5),N(7));
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_np _nps)]
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_np _nps)]
_xWILD [star match=(_advl _adv)]
_xEND
@@
# Singleton uncharacterized alpha.
# A clause that is just one alpha (+ adverbials): if verb-capable,
# convert it to a vg and mark pattern "v"; otherwise pattern "alpha".
@POST
if (N("verb",2))
{
alphatovg(2,0,0);
X("pattern") = "v";
X("vg node") = N(2);
}
else
X("pattern") = "alpha";
# pncopyvars(N(2),X()); #
@RULES
_xNIL <-
_xSTART
_xALPHA
_xWILD [star match=(_adv _advl)]
_xEND
@@
# Patterns with segments.
# np vg np prep seg
# Like the wf-prep rule but with a trailing _seg; assume a single
# verb in the clause and default it to active voice when supported.
@POST
L("x3") = pnparent(X()); # 07/13/12 AM.
X("vg count") = 1; # 1 verb in clause.
if (N(3)
|| pnvar(L("x3"),"vg count") == 1)
{
if (!N("voice",5))
X("voice") = N("voice",5) = "active";
}
else
X("no subj") = 1;
X("vg node") = N(5);
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_prep
_xWILD [star match=(_advl _adv)]
_seg
@@
# Component level segments.
# det alpha
# Whole clause is det-prefix + one alpha: make the alpha a noun and
# group the span as _np, clearing token info on the new node.
@POST
L("tmp3") = N(3);
group(3,3,"_noun");
pncopyvars(L("tmp3"),N(3));
group(2,3,"_np");
pncopyvars(L("tmp3"),N(2));
clearpos(N(2),1,1); # Zero out token info.
@RULES
_xNIL <-
_xSTART
_xWILD [plus match=(_det _quan _num _xNUM _adj)]
_xALPHA
_xEND
@@
@POST
if (N(3))
{
if (!N("voice",5))
X("voice") = N("voice",5) = "active";
}
else
X("no subj") = 1;
X("vg node") = N(5);
noop();
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [opt match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xEND
@@
# Decide on some active voice.
@POST
if (!vgassigned(N(5)))
fixvg(N(5),"active",0);
if (!N("voice",5))
X("voice") = N("voice",5) = "active";
if (!N(3))
X("no subj") = 1;
X("vg node") = N(5);
X("id") = "qclause100 nv";
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_np _pro)]
_xWILD [star match=(_advl _adv)]
_vg
@@
# lone vg at clause start.
@POST
if (!vgassigned(N(3)))
fixvg(N(3),X("voice"),0);
if (N("voice",3))
X("voice") = N("voice",3);
X("no subj") = 1;
X("vg node") = N(3);
X("id") = "qclause100 v";
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_vg
_xWILD [star match=(_advl _adv)]
_xWILD [one match=(_xEND _prep _qEOS)]
@@
# prep np conj alpha alpha noun
# Resolve two ambiguous alphas (each must have an adj or noun reading)
# and group them with the trailing noun into an _np.
@CHECK
if (!N("adj",4) && !N("noun",4))
fail();
if (!N("adj",5) && !N("noun",5))
fail();
@POST
L("tmp4") = N(4);
L("tmp5") = N(5);
if (N("adj",4))
{
group(4,4,"_adj");
pncopyvars(L("tmp4"),N(4));
fixadj(N(4));
}
else
{
group(4,4,"_noun");
pncopyvars(L("tmp4"),N(4));
fixnoun(N(4));
}
if (N("adj",5))
{
group(5,5,"_adj");
pncopyvars(L("tmp5"),N(5));
fixadj(N(5));
}
else
{
group(5,5,"_noun");
pncopyvars(L("tmp5"),N(5));
fixnoun(N(5));
}
L("tmp6") = N(6);
group(4,6,"_np");
pncopyvars(L("tmp6"),N(6));
clearpos(N(4),1,1);
@RULES
_xNIL <-
_xWILD [one match=(_xSTART _prep)]
_np
_conj
_xALPHA
_xALPHA
_noun
_xWILD [one lookahead match=(_xEND)]
@@
# np
# Lone noun phrase: pattern "n" (only if no pattern set yet).
@CHECK
if (X("pattern"))
fail();
@POST
X("pattern") = "n";
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_np
_xWILD [star match=(_advl _adv)]
_xEND
@@
# Pure adverbial.
@POST
X("pattern") = "x";
# semclauseadvls(X(),2);
@RULES
_xNIL <-
_xSTART
_xWILD [plus match=(_advl _adv)]
_xEND
@@
# DEFAULT TO GET RID OF SEG.
# Rename leftover _seg nodes to their recorded type, defaulting to _np.
@POST
if (N("seg type"))
pnrename(N(1),"_" + N("seg type"));
else
pnrename(N(1),"_np"); # Default.
@RULES
_xNIL <-
_seg
@@
|
@PATH _ROOT _derivedTerms _headerZone _LINE
# Collect a run of words on a derived-terms line into a _derivedTerm
# node and create a counted "derived" concept under the zone's concept.
@POST
S("con") = MakeCountCon(X("derivedTerms",2),"derived");
single();
@RULES
_derivedTerm <-
_xWILD [plus match=(_xALPHA)] ### (1)
@@
|
@PATH _ROOT _LINE _language
# Mark a language node as "spoken" when followed by the literal word.
@POST
N("spoken",1) = 1;
@RULES
_xNIL <-
_lang
spoken
@@ |
@NODES _ROOT
# Treat a decimal measurement ("3.5 cm"/"3.5 mm") as a single noun.
@RULES
_noun <-
_xNUM ### (1)
\. ### (2)
_xNUM ### (3)
_xWHITE ### (4)
_xWILD [one match=(cm mm)] ### (5)
@@
|
# Replace named attribute's value(s) with concept con_val
# NOTE(review): snippet assumes L("con"), L("name"), L("con_val") are
# bound by surrounding code not visible here.
replaceval(L("con"), L("name"), L("con_val")); |
@CODE
# Save the emoji KB and emit a dictionary file: one line per emoji
# with its description, grouped under GROUP/SUBGROUP comment headers.
SaveKB("emojis.kbb",G("emojis"),2);
L("dict") = "emojis.dict";
L("group") = down(G("emojis"));
while (L("group")) {
L("dict") << "# GROUP: " << strval(L("group"),"description") << "\n";
L("subgroup") = down(L("group"));
while (L("subgroup")) {
L("dict") << "# SUBGROUP: " << strval(L("subgroup"),"description") << "\n";
L("emoji") = down(L("subgroup"));
while (L("emoji")) {
L("dict") << conceptname(L("emoji")) << " emoji=1 des=\"" << strval(L("emoji"),"description") << "\"";
# L("dict") << " g=\"" << strval(L("group"),"description") << "\"";
# L("dict") << " sg=\"" << strval(L("subgroup"),"description") << "\"";
L("dict") << "\n";
L("emoji") = next(L("emoji"));
}
L("subgroup") = next(L("subgroup"));
}
L("group") = next(L("group"));
}
@@CODE |
@NODES _ROOT
# For each line's concept, extract a trailing "specifier" from a
# comma-separated name: the last comma-split piece, unless it starts
# with a conjunction ("and"/"or").
@POST
if (N("con", 1)) {
L("text") = conceptname(N("con",1));
if (strcontains(",", L("text"))) {
# Split on comma, last split after comma should be specifier
# unless it starts with conjunction
L("specifier") = split(L("text"), ",");
L("specifier") = strclean(L("specifier")[arraylength(L("specifier"))-1]);
# FIX: "if !(...)" is not valid NLP++ syntax; the if-condition
# must be fully parenthesized.
if (!(strstartswith(L("specifier"), "and") || strstartswith(L("specifier"), "or"))) {
addstrval(N("con",1), "specifier", L("specifier"));
}
}
}
noop();
@RULES
_xNIL <-
_LINE ### (1)
@@
|
# Check if first char of string1 is lowercase
# Only treat "will" as a _name when it is NOT lowercase (i.e. "Will").
# NOTE(review): strislower is applied to the whole matched text here,
# not just the first character — verify intended semantics.
@CHECK
if (strislower(N("$text",1)))
fail();
@RULES
_name <- will @@ |
@NODES _LINE
# Group a balanced parenthesized span on a line into _parens.
@RULES
_parens <-
\( ### (1)
_xWILD [fail=(\))] ### (2)
\) ### (3)
@@
|
@PATH _ROOT _posZone _defZone _example _headerZone _LINE
# Record each example item as a counted "example" concept with its text.
@POST
L("con") = MakeCountCon(X("con",3),"example");
addstrval(L("con"),"text",N("text",1));
@RULES
_xNIL <-
_item ### (1)
@@
|
@NODES _ROOT _addendum
# Exactly three consecutive underscores form an _identifier marker.
@RULES
_identifier <-
_xWILD [min=3 max=3 matches=(\_)] ### (1)
@@
|
@CODE
# Find-or-create the "words" concept under the KB root.
# getconcept() performs the find-or-make in one call, replacing the
# original findconcept()+makeconcept() pair (same semantics; this
# idiom is used elsewhere in the project, e.g. SavePOS).
G("words") = getconcept(findroot(),"words");
#rmchildren(G("words"));
@@CODE
|
@CODE
G("hello") = 0;
@@CODE
#@PATH _ROOT _TEXTZONE _sent _clause _seg
@NODES _seg
# dqan
# Inside an np-typed segment (only when the enclosing clause has no
# verb groups), resolve ambiguous alphas to noun/adj, pick a head
# noun, and rename the whole segment to _np.
@CHECK
L("x4") = pnparent(X()); # _clause # 07/10/12 AM.
# If verbs in clause, want a better look at parallel
# construction patterns , etc.
if (pnvar(L("x4"),"vg count")) # 07/10/12 AM.
fail();
if (X("seg type") != "np")
fail();
@POST
# Get a head noun.
L("tmp6") = N(6);
if (N(7))
{
L("head") = lasteltnode(7);
fixnpnonhead(6);
# if (N("noun",6))
# {
# group(6,6,"_noun");
# pncopyvars(L("tmp6"),N(6));
# fixnouns(N(6),N(6));
# }
# else
# {
# group(6,6,"_adj");
# pncopyvars(L("tmp6"),N(6));
# fixadjs(N(6),N(6));
# }
}
else
{
if (N("noun",6))
{
group(6,6,"_noun");
pncopyvars(L("tmp6"),N(6));
fixnoun(N(6));
}
else
{
group(6,6,"_adj");
pncopyvars(L("tmp6"),N(6));
fixadj(N(6));
}
L("head") = N(6);
}
if (N(4))
{
L("tmp4") = N(4);
if (N("noun",4))
{
group(4,4,"_noun");
pncopyvars(L("tmp4"),N(4));
fixnoun(N(4));
}
else
{
group(4,4,"_adj");
pncopyvars(L("tmp4"),N(4));
fixadj(N(4));
}
}
# Now fix up noun phrase.
xrename("_np");
pncopyvars(L("head"),X());
clearpos(X(),1,1);
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_det _pro)]
_xWILD [star match=(_quan _num _xNUM)]
_xALPHA [opt]
_adj [star]
_xALPHA
_noun [star]
_xEND
@@
# dqan
# det alpha alpha alpha noun
# Three ambiguous alphas before a noun: demote all to non-heads and
# rename the segment to _np with the last noun as head.
@POST
fixnpnonhead(4);
fixnpnonhead(5);
fixnpnonhead(6);
xrename("_np");
L("head") = lasteltnode(7);
pncopyvars(L("head"),X());
clearpos(X(),1,1);
@RULES
_xNIL <-
_xSTART
_xWILD [plus match=(_det _pro)]
_xWILD [star match=(_quan _num _xNUM)]
_xALPHA
_xALPHA
_xALPHA
_noun [plus]
_xEND
@@
# Enable C++ source code debugging for VisualText developers
# (matches any node once; debug() drops into the engine debugger).
@POST
debug();
@RULES
_xNIL <- _xNIL @@ |
@PATH _ROOT _paragraph _sentence
# "of/for/the <region> <residentOf>": capture region, and city/state
# from the residence node when present.
@POST
S("region") = N("$text",2);
if (N("city",3)) S("city") = N("city",3);
if (N("state",3)) S("state") = N("state",3);
single();
@RULES
_ofRegion <-
_xWILD [match=(of for the)] ### (1)
_region ### (2)
_residentOf ### (3)
@@
|
@NODES _LINE
@RULES
# Ex: {
# Any single opening bracket character becomes _openPunct.
_openPunct <- _xWILD [min=1 max=1 s match=("\{" "\[" "\<" "\(")] @@
|
# Find the root concept of the knowledge base (named concept).
# FIX: findroot() takes no arguments; the spurious L("") argument
# (and a stray trailing artifact) were removed.
L("return_con") = findroot();
@NODES _section
# Remove whitespace tokens inside sections.
@POST
excise(1,1);
@RULES
_xNIL <-
_xWHITE ### (1)
@@
|
@CODE
# Skip this pass entirely unless HTML detection is enabled.
if (!G("find html"))
exitpass();
@@CODE
@NODES _ROOT
# Recognize HTML comment delimiters: "<!--" and "-->".
@RULES
_commentTag <-
\<
\!
\-
\-
@@
_EcommentTag <-
\-
\-
\>
@@
|
@PATH _ROOT _paragraph _sentence
# "for [operator] <money>" — e.g. "for over $500".
@RULES
_for <-
for ### (1)
_operator [opt] ### (2)
_money ### (3)
@@
|
@NODES _ROOT
# Group everything up to the next _headerZone into a _rest node.
@RULES
_rest <-
_xWILD [plus fail=(_headerZone)] ### (1)
@@
|
@DECL
# Write the Penn Treebank / TAI copyright banner to the given stream.
printblurb(L("out"))
{
L("out") << "*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*\n";
L("out") << "*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*\n";
L("out") << "*x* *x*\n";
L("out") << "*x* Copyright (C) 1990 University of Pennsylvania *x*\n";
L("out") << "*x* *x*\n";
L("out") << "*x* The data in this file are part of a preliminary version of the *x*\n";
L("out") << "*x* Penn Treebank Corpus. Any research using this corpus or based *x*\n";
L("out") << "*x* on it should acknowledge that fact, as well as the preliminary *x*\n";
L("out") << "*x* nature of the corpus. *x*\n";
L("out") << "*x* *x*\n";
L("out") << "*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*\n";
L("out") << "*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*\n";
L("out") << "*x* Copyright (C) 2005 Text Analysis International, Inc. *x*\n";
L("out") << "*x* All rights reserved. *x*\n";
L("out") << "*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*\n";
}
@CODE
# Iterative (non-recursive) traversal of the parse tree that writes
# word/POS pairs to tags.txt, optionally with bracketing.  The path
# array G("path") plus G("depth") play the role of the call stack.
# If not outputting tags, exit.
if (!G("verbose")) # Todo: another flag here.
exitpass();
if (G("pretagged"))
exitpass();
G("para") = "======================================";
if (G("treebank"))
printblurb("tags.txt");
# Initialize traversal data.
G("parseRoot") = pnroot();
G("node") = G("parseRoot");
G("depth") = 0;
G("path") = 0; # Store the current path in the parse tree.
L("newline") = 1; # If on a new empty line.
L("newpar") = 0; # If at new paragraph.
while (G("node")) # Traverse the parse tree.
{
G("path")[G("depth")] = G("node"); # Update path in the parse tree.
G("childs") = pndown(G("node"));
G("next") = pnnext(G("node"));
# If there are prior deleted tokens, print those.
# If bracketing this node, do the start.
if (G("bracket") && pnvar(G("node"),"bracket"))
{
if (!L("newline") && !L("newpar"))
"tags.txt" << "\n";
"tags.txt" << "[ ";
L("newline") = 0;
L("newpar") = 0;
}
# Stop at a node with a mypos variable.
# Need some other way for default pos tags...
L("nm") = pnname(G("node"));
L("ln") = strlength(L("nm")) - 1;
L("ch") = strpiece(L("nm"),0,0);
if (L("ch") != "_") # Literal.
{
# Literal tokens: digits get CD, alphabetics default to UNKNOWN
# if untagged, punctuation is flagged "nopos", whitespace skipped.
if (strisdigit(L("nm")))
pnreplaceval(G("node"),"mypos","CD");
else if (L("ch") == " " || L("ch") == "\t"
|| L("ch") == "\n" || L("ch") == "\r")
; # Ignore whitespace.
else if (strisalpha(L("ch")))
{
if (!pnvar(G("node"),"mypos"))
pnreplaceval(G("node"),"mypos","UNKNOWN");
}
else # Punctuation.
pnreplaceval(G("node"),"nopos",1);
L("nm") = "NIL";
}
else
{
# Nonliteral nodes: emit sentence/zone separators, then strip the
# leading underscore from the node name.
if (L("nm") == "_sent")
{
if (!G("bracket"))
"tags.txt" << "\n";
else
{
if (!L("newline") && !L("newpar"))
"tags.txt" << "\n";
if (!L("newpar"))
"tags.txt" << "\n";
}
L("newline") = 1;
}
else if (L("nm") == "_TEXTZONE")
{
if (!L("newline") && !L("newpar"))
"tags.txt" << "\n";
if (!L("newpar"))
"tags.txt" << "\n";
"tags.txt" << G("para") << "\n\n";
L("newline") = 1;
L("newpar") = 1;
}
if (L("ln") >= 1)
L("nm") = strpiece(L("nm"),1,L("ln"));
}
L("npos") = nodepos(G("node"),L("nm"));
L("ignorepos") = pnvar(G("node"),"ignorepos");
L("nopos") = pnvar(G("node"),"nopos");
if (L("npos") && !L("ignorepos"))
{
# Node has a POS: print "text/POS" and do not descend further.
L("newpar") = 0;
G("gochilds") = 0; # Don't traverse children.
L("txt") = prosify(G("node"));
"tags.txt" << L("txt")
<< "/"
<< L("npos")
;
if (!G("bracket"))
{
"tags.txt" << "\n";
L("newline") = 1;
}
else
{
"tags.txt" << " ";
L("newline") = 0;
}
if (G("bracket") && pnvar(G("node"),"bracket"))
{
"tags.txt" << "]\n"; # End bracket.
L("newline") = 1;
}
}
else if (L("nopos")) # Things like punctuation...
{
L("newpar") = 0;
G("gochilds") = 0; # Don't traverse children.
L("txt") = pnvar(G("node"),"$text");
"tags.txt" << L("txt")
<< "/"
<< L("txt"); # Just echo it.
if (!G("bracket"))
{
"tags.txt" << "\n";
L("newline") = 1;
}
else
{
"tags.txt" << " ";
L("newline") = 0;
}
if (G("bracket") && pnvar(G("node"),"bracket"))
{
"tags.txt" << "]\n"; # End bracket.
L("newline") = 1;
}
}
else
G("gochilds") = 1; # Traverse children.
if (G("childs") # If node has children, traverse them.
&& G("gochilds"))
{
G("node") = G("childs");
++G("depth");
}
else if (G("next")) # If node has right sibling, get it.
{
G("node") = G("next");
}
else # Try continuing from a parent.
{
G("done") = 0;
G("node") = 0;
while (!G("done")) # Find a continuation or quit.
{
if (--G("depth") > 0)
{
G("node") = G("path")[G("depth")]; # Move up to parent.
if (G("bracket") && pnvar(G("node"),"bracket"))
{
"tags.txt" << "]\n"; # End bracket.
L("newline") = 1;
}
# NOTE: assignment-in-condition is intentional here — advance to
# the parent's right sibling and stop climbing if one exists.
if (G("node") = pnnext(G("node"))) # Try to move right.
G("done") = 1;
}
else
G("done") = 1;
}
}
}
@@CODE
|
@CODE
L("hello") = 0;
@@CODE
@NODES _sent
# Noun-phrase and verb-group grouping within sentences.  Relies on
# helpers defined elsewhere: dqaninfo() (collects det/quan/adj/noun
# span info into S-vars), groupnp() (builds the _np), numbersagree(),
# mhbvfix() (modal-have-be-verb voice fixing).
# particle adj noun alpha
@CHECK
if (!N("noun",6))
fail();
if (N("verb",6))
fail();
dqaninfo(2,3,4,5);
@POST
L("tmp6") = N(6);
group(6,6,"_noun");
pncopyvars(L("tmp6"),N(6));
S("olast") = 6;
S("last") = S("lastn") = S("lastan") = N(6);
groupnp();
@RULES
_xNIL <-
_xWILD [one match=(_particle _prep _conj _vg _verb)]
_det [opt]
_xWILD [star match=(_quan _num _xNUM)]
_adj [star]
_noun [plus]
_xALPHA
_xWILD [one lookahead match=(_xPUNCT _qEOS _dbldash _xEND)]
@@
# particle adj noun
@CHECK
dqaninfo(2,3,4,5);
if (!numbersagree(S("first"),S("last")))
fail();
@POST
groupnp();
@RULES
_xNIL <-
_xWILD [one match=(_particle _prep _conj _vg _verb _adv _advl \,)]
_det [opt]
_quan [opt]
_adj [star]
_noun [plus]
_xWILD [one lookahead fail=(_xALPHA _aposS _conj)]
@@
# dqan
@CHECK
dqaninfo(1,2,3,4);
if (!numbersagree(S("first"),S("last")))
fail();
@POST
groupnp();
@RULES
_xNIL <-
_det [star]
_xWILD [plus match=(_quan _num _xNUM)]
_adj [star]
_noun [star]
_xWILD [one lookahead fail=(_xALPHA)]
@@
# Verb group: modal/do + have + be + verb, with voice assignment.
@POST
L("tmp") = lasteltnode(4);
L("m") = N(1);
L("h") = N(2);
L("b") = N(3);
group(1,4,"_vg");
N("stem",1) = pnvar(L("tmp"),"stem");
L("voice") = mhbvfix(N(1),L("m"),L("h"),L("b"),0,L("tmp"));
pncopyvars(L("tmp"),N(1));
if (L("voice"))
N("voice",1) = L("voice");
clearpos(N(1),1,0); # Zero out token info.
@RULES
_xNIL <-
_modal [star]
_have [star]
_be [star]
_verb [plus]
@@
_xNIL <-
_do [s]
_have [star]
_be [star]
_verb [plus]
@@
# dqan
@CHECK
dqaninfo(2,3,4,5);
@POST
groupnp();
@RULES
_xNIL <-
_xWILD [one match=(\, )]
_det [star]
_xWILD [star match=(_quan _num _xNUM)]
_adj [star]
_noun [plus]
_xEND
@@
# as np
# "as" before an np not already tagged IN: treat it as a preposition.
@CHECK
if (N("mypos",1) == "IN")
fail();
@POST
pnrename(N(1),"_prep");
posacct(N(1));
@RULES
_xNIL <-
as [s]
_np
_xWILD [s star lookahead match=(_date _adv _advl _qEOS _xEND)]
@@
# some of np
@POST
listadd(3,1,"true");
@RULES
_xNIL <-
some [s]
of [s]
_np
@@
# nounless.
# Adjective run with no noun head: promote to _np.
@POST
pncopyvars(lasteltnode(2));
sclearpos(1,0);
singler(2,2);
@RULES
_np <-
_xWILD [one match=(_prep _vg)]
_adj [plus]
_xWILD [lookahead one match=(_prep _vg _qEOS \. _np _xEND)]
@@
# dqan at arbitrary left context; literal numbers get regrouped _num.
@CHECK
dqaninfo(2,3,4,5);
if (!numbersagree(S("first"),S("last")))
fail();
@POST
if (N(3))
if (!nonliteral(N(3)))
{
L("tmp3") = N(3);
group(3,3,"_num");
pncopyvars(L("tmp3"),N(3));
}
groupnp();
@RULES
_xNIL <-
_xWILD [one fail=(_xALPHA _det _quan _num _xNUM _adj _noun _aposS)]
_det [star]
_xWILD [star match=(_quan _num _xNUM)]
_adj [star]
_noun [plus]
_xWILD [one lookahead fail=(_noun _adj _xALPHA _aposS)]
@@
_xNIL <-
_xWILD [one fail=(_xALPHA _det _quan _num _xNUM _adj _noun)]
_det [star]
_xWILD [star match=(_quan _num _xNUM)]
_adj [star]
_noun [plus]
_xEND
@@
|
@NODES _posZone
# Create a counted "definition" concept for each definition zone.
@POST
N("con",1) = MakeCountCon(X("con"),"definition");
@RULES
_xNIL <-
_defZone ### (1)
@@
|
@PATH _ROOT _experienceZone
# Segment an experience zone into instances: each starts at the zone
# boundary or at an _expStart line and runs until the next _expStart.
@RULES
_experienceInstance <-
_xWILD [s one match=( _xSTART _experienceBoundary )]
_xWILD [s star match=(_LINE _BLANKLINE) except=(_expStart)]
@@
_experienceInstance <-
_expStart [s]
_xWILD [s star match=(_LINE _BLANKLINE) except=(_expStart)]
@@
|
@NODES _NLPPP
# @RECURSE listarg
# @POST
# rfbarg(1) # Note this difference.
# single()
# @RULES
# _ARG [base] <- _NONLIT @@
# _ARG [base] <- _LIT @@
# _ARG [base] <- _STR @@
# _ARG [base] <- _NUM @@
# @@RECURSE listarg
# @POST
# rfalist(2)
# single()
# @RULES
# _LIST [base] <- \( _xWILD [match=(_LIT _NONLIT _STR _NUM) recurse=(listarg)] \) @@
# Recognize a "<N,N>" pair only after a disambiguating left context
# (a semicolon or start of input).
@POST
rfarange(3, 5)
singler(2,6)
@RULES
_PREPAIR [base] <-
\; # Disambiguating context.
\< _NUM \, _NUM \> @@
_PREPAIR [base] <-
_xSTART # Disambiguating context.
\< _NUM \, _NUM \> @@
|
@DECL
# Emit the parse tree as <vertice>/<edge> XML elements (graph form).
# Each nonliteral node gets a vertice with a fresh numeric id; an edge
# links it to its parent's id.
eclXMLTree(
L("out"),
L("n"),
L("parent id")
)
{
L("childs") = pndown(L("n"));
L("leaf") = 0;
while (L("childs")) {
L("name") = pnname(L("childs"));
if (strlength(L("name")) > 1) {
L("id") = G("id")++;
# NOTE(review): strpiece from 0 keeps the leading underscore here,
# whereas xmlrecurseall below strips it (strpiece from 1) — confirm
# whether the difference is intentional.
L("tag") = strtrim(strpiece(L("name"),0,strlength(L("name"))-1));
L("out") << "<vertice";
getAttributes(L("out"),L("childs"));
L("out") << ">\n";
L("out") << " <id>" << L("id") << "</id>\n";
L("out") << " <label>" << L("tag") << "</label>\n";
L("out") << "</vertice>\n";
if (L("parent id")) {
L("out") << "<edge>\n";
L("out") << " <source>" << L("parent id") << "</source>\n";
L("out") << " <target>" << L("id") << "</target>\n";
L("out") << "</edge>\n";
}
if (pndown(L("childs"))) {
eclXMLTree(L("out"),L("childs"),L("id"));
}
}
L("childs") = pnnext(L("childs"));
}
return L("leaf");
}
# Emit the parse tree as nested XML tags; returns 1 if the last thing
# written was leaf text (so the caller knows not to add a newline).
xmlrecurseall(
L("out"),
L("n"), # Current node.
L("level") # level of recursion
)
{
L("childs") = pndown(L("n"));
L("leaf") = 0;
while (L("childs")) {
L("name") = pnname(L("childs"));
if (strlength(L("name")) > 1) {
L("tag") = strpiece(L("name"),1,strlength(L("name"))-1);
if (pndown(L("childs"))) {
G("id")++;
L("out") << "\n" << spaces(L("level")) << "<" << L("tag");
getAttributes(L("out"),L("childs"));
L("out") << ">";
if (!xmlrecurseall(L("out"),L("childs"),L("level")+1))
L("out") << "\n" << spaces(L("level"));
L("out") << "</" << L("tag") << ">";
} else {
L("out") << pnvar(L("childs"),"$text");
L("leaf") = 1;
}
}
L("childs") = pnnext(L("childs"));
}
return L("leaf");
}
# Write the standard linguistic attributes of a node as XML attributes.
getAttributes(L("out"),L("n")) {
addAttribute(L("out"),L("n"),"stem");
addAttribute(L("out"),L("n"),"voice");
addAttribute(L("out"),L("n"),"tense");
addAttribute(L("out"),L("n"),"aspect");
}
# Write one name="value" XML attribute if the node variable is set.
addAttribute(L("out"),L("n"),L("name")) {
L("value") = pnvar(L("n"),L("name"));
if (L("value"))
L("out") << " " << L("name") << "=\"" << L("value") << "\"";
}
# Return a string of (roughly) num spaces for indentation.
spaces(L("num")) {
L("i") = 1;
L("spaces") = " ";
while (L("i")++ < L("num")) {
L("spaces") = L("spaces") + " ";
}
return L("spaces");
}
@CODE
if (interactive())
G("out") = "out.xml";
else
G("out") = cbuf();
G("id") = 1;
xmlheader(G("out"));
#xmlrecurseall(G("out"),pnroot(),0);
eclXMLTree(G("out"),pnroot(),0);
@@CODE
|
@NODES _ROOT
# Split the input into _BLANKLINE and _LINE nodes (newline-terminated).
@RULES
_BLANKLINE <-
_xWILD [min=0 max=0 matches=(\  \t \r)] ### (1)
\n ### (2)
@@
_LINE <-
_xWILD [min=0 max=0 fails=(\r \n)] ### (1)
_xWILD [one match=(\n _xEND)] ### (2)
@@
|
# Add numeric value num as a string to concept con's attribute called name.
# NOTE(review): assumes L("con"), L("name"), L("num") are bound by
# surrounding code not visible here.
addsval(L("con"), L("name"), L("num")); |
@PATH _ROOT _bodyZone
# For each table-row zone, add its two cell values via AddWord
# (helper defined elsewhere).
@POST
AddWord(N("value1"),N("value2"));
@RULES
_xNIL <-
_trZone
@@
|
@NODES _ROOT
# Split the input into _BLANKLINE and _LINE nodes (newline-terminated).
# (Same tokenizer pass as used elsewhere in this project.)
@RULES
_BLANKLINE <-
_xWILD [min=0 max=0 matches=(\  \t \r)] ### (1)
\n ### (2)
@@
_LINE <-
_xWILD [min=0 max=0 fails=(\r \n)] ### (1)
_xWILD [one match=(\n _xEND)] ### (2)
@@
|
@NODES _LINE
# Recognize a Nepali dictionary header line of the form
# "==== अर्थ <num> ====", storing the header text and its number
# (converted via NepaliNum, defined elsewhere) on the _LINE parent.
@POST
X("header",2) = N("$text",3);
X("number",2) = NepaliNum(N("$text",4));
single();
@RULES
_header <-
_xSTART ### (1)
_xWILD [plus match=(\=)] ### (2)
अर्थ ### (3)
_xNUM ### (4)
_xWILD [plus match=(\=)] ### (5)
_xEND ### (6)
@@
|
@CODE
G("hello") = 0;
@@CODE
@NODES _sent
# Moved here from sent
# Splice away clause separators, then look at clause pairs connected
# by a split "vg to vg" construct.
@POST
splice(1,1);
@RULES
_xNIL <-
_clausesep
@@
# Merging clauses with split construct:
# vg to vg
@CHECK
if (!N("start to-vg",2))
fail();
if (N("last chunk",1) != "v")
fail();
if (N("voice",1))
fail();
@POST
if (X("vg count") == 2) # These are the only verbs.
{
L("vg1") = N("vg node",1);
fixvg(L("vg1"),"active","VBP");
N("voice",1) = "active";
}
# Probably best to just note that they are bound up
# together...
# Combine clauses.
# (Perhaps need to renumber clauses...)
# S("vg count") = N("vg count",1) + N("vg count",2);
# S("voice") = "active";
# S("vg node") = N("vg node",1);
# S("clause num") = N("clause num",1);
# S("last chunk") = N("last chunk",2);
# merge();
noop(); # 01/31/05 AM.
@RULES
_clause [unsealed] <-
_clause
_clause
@@
|
@PATH _ROOT _paragraph _sentence
# @RULES
# _this <- _xSTART _person @@
# Extract law-enforcement events (charged/arrested/indicted/assigned)
# and attach the named people via AddPeople/AddPeopleAttr (helpers
# defined elsewhere).
###############################################
# Isaiah Worlow, Steven Fluharty, and Tiffany Kerekes are charged
###############################################
@POST
AddPeople(N(1),"event",N("$text",3));
single();
@RULES
_event <-
_xWILD [plus match=(_titleCaps _person \, of and all _residentOf)] ### (1)
_xWILD [plus match=(was is are were each)] ### (2)
_xWILD [one match=(charged arrested indicted)] ### (3)
@@
###############################################
# charging Ronald DiPietro, Thomas Helmick, and Jason and Rebecca Kachner.
###############################################
@POST
AddPeople(N(2),"event","charged");
single();
@RULES
_event <-
charging ### (1)
_xWILD [plus match=(_titleCaps _person \, and)] ### (2)
with ### (3)
@@
###############################################
# One indictment charges that Stephanie Condric of Canton, Ohio
###############################################
@POST
AddPeople(N(4),"event","charged");
single();
@RULES
_event <-
indictment ### (1)
charges ### (2)
that ### (3)
_xWILD [plus match=(_titleCaps _person \, and)] ### (4)
@@
###############################################
# People were previously assigned to case as TITLE
###############################################
@POST
AddPeople(N(1),"title",N("$text",4));
AddPeopleAttr(N(1),"event","assigned");
single();
@RULES
_event <-
_xWILD [plus match=(_titleCaps _person \, and)] ### (1)
_assigned ### (2)
as ### (3)
_title ### (4)
@@ |
@NODES _LINE
# Promote a school-name phrase to _CompleteSchoolName, optionally
# anchored after line start / "The" / punctuation.
@POST
singler(3,3)
@RULES
_CompleteSchoolName [sealed] <-
_xWILD [s one match=( _xSTART The _xPUNCT)]
_xWHITE [star s]
_SchoolNamePhrase [s t]
@@
@RULES
# Adding this to finish the job.
_CompleteSchoolName [sealed] <- _SchoolNamePhrase [s] @@
|
@PATH _ROOT _translations _headerZone _LINE
# "Language:" at line start — create a concept for the language under
# the translation concept and mark the span as _language.
@POST
X("language",3) = makeconcept(X("translation",2),N("$text",2));
single();
@RULES
_language <-
_xSTART ### (1)
_xWILD [plus match=(_xALPHA)] ### (2)
\: [trig] ### (3)
@@
|
@NODES _ROOT
# Two adjacent enclosers before a separator/terminator form an
# empty item (only the encloser pair is reduced; see singler(1,2)).
@POST
singler(1,2);
@RULES
_emptyItem [base] <-
_enclosedBy ### (1)
_enclosedBy ### (2)
_xWILD [match=(_separator _lineTerminator _xEND)] ### (3)
@@
|
@CODE
# Sort
# Sort each POS group's children, then write a dictionary file:
# section headers per POS, any stored comments, then one
# "word functword=1 pos=..." line per word (apostrophes and hyphens
# escaped for the dictionary tokenizer).  Finally save the KBs.
L("con") = down(G("funcs"));
while (L("con")) {
sortchilds(L("con"));
L("con") = next(L("con"));
}
L("con") = down(G("funcs"));
while (L("con")) {
L("pos") = conceptname(L("con"));
L("comments") = AttrValues(L("con"),"comment");
if (L("pos") != G("pos")) {
L("title") = SplitWord(L("pos"));
G("dictname") << "\n### " << L("title");
}
while (L("comments")) {
G("dictname") << "\n# " << getsval(L("comments"));
L("comments") = nextval(L("comments"));
}
G("pos") = L("pos");
G("dictname") << "\n#--------------------------------------\n";
L("word con") = down(L("con"));
while (L("word con")) {
L("word") = strsubst(conceptname(L("word con")),"\'"," \\' ");
L("word") = strsubst(L("word"),"-"," \\- ");
G("dictname") << L("word") << " functword=1 pos=" << L("pos") << "\n";
L("word con") = next(L("word con"));
}
L("con") = next(L("con"));
}
SaveKB("funcs.kbb",G("funcs"),2);
SaveKB("langs.kbb",getconcept(findroot(),"langs"),2);
@@CODE |
# Create "words", "noun", and "noun_book" concepts under kb root
# ("book" is created under "words", not under the root).
G("words") = makeconcept(findroot(), "words");
G("noun") = makeconcept(findroot(),"noun");
G("noun_book") = makeconcept(G("words"),"book"); |
#@NODES _ROOT
@PATH _ROOT
#@POST
#singlex(2,2)
# NOTE: DON'T SEAL THESE, BECAUSE WE WANT TO SEARCH FOR
# PATTERNS IN THEM.
# Pair each resume section boundary with its following _REZZONE to
# form the corresponding unsealed zone node.
@RULES
_contactZone [unsealed] <- _contactBoundary [s] _REZZONE @@
_objectiveZone [unsealed] <- _objectiveBoundary [s] _REZZONE @@
_educationZone [unsealed] <- _educationBoundary [s] _REZZONE @@
_experienceZone [unsealed] <- _experienceBoundary [s] _REZZONE @@
_skillsZone [unsealed] <- _skillsBoundary [s] _REZZONE @@
_presentationsZone [unsealed] <- _presentationsBoundary [s] _REZZONE @@
_publicationsZone [unsealed] <- _publicationsBoundary [s] _REZZONE @@
_referencesZone [unsealed] <- _referencesBoundary [s] _REZZONE @@
_otherZone [unsealed] <- _otherBoundary [s] _REZZONE @@
# Actually, need to see if allcaps boundary at start of line,
# followed by text on line.
# Fallback: boundary with no following zone still makes a zone node.
@RULES
_objectiveZone [unsealed] <- _objectiveBoundary [s] @@
_referencesZone [unsealed] <- _referencesBoundary [s] @@
|
@CODE
# Resolve the current language code to its full name via the "langs"
# KB (falls back to the raw code) and write the dictionary header.
L("lang") = G("lang");
L("iso") = findconcept(G("langs"),L("lang") );
if(L("iso")) {
L("lang") = strval(L("iso"),"language");
}
G("dictname") << "# " << L("lang") << " function words\n";
@@CODE
|
@NODES _contactZone _objectiveZone _educationZone _experienceZone _skillsZone _presentationsZone _publicationsZone _referencesZone _otherZone
# Lift the REZZONE wrapper's children up into the zone.
@POST
splice(1,1); # Zap the REZZONE node!
@RULES
_xNIL <- _REZZONE [s] @@
|
# Sort concept's immediate children alphabetically.
# NOTE(review): assumes L("con") is bound by surrounding code.
sortchilds(L("con")); |
@DECL
###############################################
# General functions
###############################################
# Find-or-create a child concept by name; returns the concept.
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
# Add a string value to an attribute only if not already present.
# Returns 1 if added, 0 if duplicate or value empty.
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("str") = getstrval(L("val"));
if (L("str") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addstrval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Add a numeric value to an attribute only if not already present.
# Returns 1 if added, 0 if duplicate or value is 0/empty.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Add a concept-valued attribute only if no existing value has the
# same concept path.  Returns 1 if added, 0 if duplicate.
# NOTE: writes diagnostic traces to unique.txt.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
# Walk/create a concept path given as a space-separated name list
# (quotes stripped, "concept" segments skipped).  Returns the leaf.
PathToConcept(L("parent"),L("hier")) {
L("cons") = split(L("hier")," ");
L("i") = 0;
L("con") = L("parent");
while (L("cons")[L("i")]) {
L("c") = L("cons")[L("i")];
L("name") = strsubst(L("c"),"\"",0);
if (L("name") != "concept")
L("con") = AddUniqueCon(L("con"),L("name"));
L("i")++;
}
return L("con");
}
# Copy a string attribute from one concept to another, but only when
# the destination does not already have a value for it.
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
# Same as CopyAttr but the source and destination attribute names differ.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
# Copy a concept-valued attribute, only if the destination lacks one.
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
# Return the value list of a named attribute, or 0 if absent.
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
# Count the values in a value list (0 for an empty/absent list).
ValCount(L("vals")) {
while (L("vals")) {
L("count")++;
L("vals") = nextval(L("vals"));
}
return L("count");
}
# Return the last immediate child of a concept (0 if it has none).
LastChild(L("parent")) {
L("result") = 0;
L("cur") = down(L("parent"));
while (L("cur")) {
L("result") = L("cur");
L("cur") = next(L("cur"));
}
return L("result");
}
# Create a child concept with an auto-numbered name ("<base>1",
# "<base>2", ...), keeping the running count on the parent.
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# Increment (or initialize to 1) a numeric counter attribute on a
# concept; returns the new count.
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
# Build the next numbered name, e.g. "example" -> "example3".
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip trailing digits from a name ("word12" -> "word").
# Returns 0 when the whole name is digits.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functins
###############################################
# Dump a concept subtree to <app>/kb/<file>.kb, logging to kb.txt.
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# Load (take) a KB file from <app>/kb/<filename>.kb, logging to kb.txt.
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << " Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << " Taken FAILED: " << L("path") << "\n";
}
}
# Count a concept's immediate children.
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
###############################################
# display type:
# 0 compact with ellipses on long attr values
# 1 full, more spread out
# 2 compact without ellipses on long attr values
###############################################
# Write a concept subtree to this pass's anaNNN.kbb display file.
DisplayKB(L("top con"),L("display type")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("display type"));
L("file") << "\n";
return L("top con");
}
# Write a banner comment to the display file.
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# Build "anaNNN.kbb" from the current pass number, zero-padded to 3.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
# Recursively print a concept hierarchy, one concept per line,
# indenting by depth and appending attributes via DisplayAttributes.
DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) {
if (L("level") == 0) {
L("file") << conceptname(L("parent")) << "\n";
}
L("con") = down(L("parent"));
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("display type"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type"));
}
L("con") = next(L("con"));
}
}
# Print a concept's attributes as "name=value" (multi-values in
# brackets).  Value rendering depends on attrtype: 1=num, 2=concept
# path, 3=float, else string (truncated with "..." in display type 0).
DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("display type") == 1 && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
L("count") = ValCount(L("vals"));
if (L("display type") != 1 && !L("first attr")) {
L("file") << ", ";
}
if (L("display type") == 1) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("name") = attrname(L("attrs"));
L("file") << QuoteIfNeeded(L("name")) << "=";
L("first") = 1;
L("type") = attrtype(L("con"),L("name"));
while (L("vals")) {
if (!L("first"))
L("file") << ",";
else if (L("count") > 1)
L("file") << "[";
if (L("type") == 1) {
L("num") = getnumval(L("vals"));
L("file") << str(L("num"));
} else if (L("type") == 2) {
if (L("first"))
L("file") << "[";
# NOTE(review): L("con") is reused here for the value concept,
# shadowing the function's concept argument from this point on.
L("con") = getconval(L("vals"));
L("file") << conceptpath(L("con"));
} else if (L("type") == 3) {
L("flt") = getfltval(L("vals"));
L("file") << str(L("flt"));
} else {
L("val") = getstrval(L("vals"));
if (L("display type") == 0 && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("val") = L("shorty") + "...";
}
L("file") << QuoteIfNeeded(str(L("val")));
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
if (L("type") == 2 || L("count") > 1)
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Wrap a string in double quotes when it contains spaces or brackets.
QuoteIfNeeded(L("str")) {
if (!L("str"))
return 0;
L("new") = L("str");
if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str")))
L("new") = "\"" + L("new") + "\"";
return L("new");
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
# Left-pad a numeric string with pad str up to pad len characters.
# NOTE(review): L("pad") starts as numeric 0; when no padding is
# needed, the returned value is 0 + the string — confirm the engine's
# num+str coercion gives the intended result.
PadStr(L("num str"),L("pad str"),L("pad len")) {
L("len") = strlength(L("num str"));
L("pad") = 0;
L("to pad") = L("pad len") - L("len");
while (L("i")++ < L("to pad")) {
L("pad") = L("pad") + L("pad str");
}
L("padded") = L("pad") + L("num str");
return L("padded");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
# Open the user attrs.kb file for writing dictionary attributes.
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
# Append one word-attribute record in the KB "ind attr" file format.
# attrType selects the value encoding: "str", "num", or "con".
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
addword(L("word"));
addword(L("attrName"));
G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
G("attrs") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
G("attrs") << "pst\n" << "\"" << L("value") << "\"";
else if (L("attrType") == "num")
G("attrs") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
G("attrs") << "pcon\n" << conceptpath(L("value"));
G("attrs") << "\nend ind\n\n";
}
# Terminate and close the attrs.kb file.
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
@@DECL
|
@NODES _split
# For dotted codes ("A12.3"): when the sub-code after the dot is a
# single character, add the entry into the hierarchy under the
# context concept (addEntryToHier defined elsewhere).
@POST
L("code") = N("code", 1);
L("term") = N("term", 1);
if (strcontains(".", L("code"))) {
L("codes") = split(L("code"), ".");
L("code_2") = L("codes")[1];
if (strlength(L("code_2")) == 1) {
addEntryToHier(X("con"), L("code"), L("term"));
}
}
noop();
@RULES
_xNIL <-
_entry ### (1)
@@
|
@DECL
# Heuristic POS tagging by suffix: adjective-like endings ("genic",
# "itic", "nal") vs noun-like endings ("itis", "tions", "tion").
WordPOS(L("word")) {
L("word") = strtolower(L("word"));
if (
strendswith(L("word"),"genic") ||
strendswith(L("word"),"itic") ||
strendswith(L("word"),"nal")
) {
SavePOS(L("word"),"adj");
}
else if (
strendswith(L("word"),"itis") ||
strendswith(L("word"),"tions") ||
strendswith(L("word"),"tion")
) {
SavePOS(L("word"),"n");
}
}
# Record word -> pos in the G("pos") KB subtree (word is a concept,
# pos a child concept under it).
SavePOS(L("word"),L("pos")) {
L("con") = getconcept(G("pos"),L("word"));
getconcept(L("con"),L("pos"));
}
@@DECL |
# Delete literal space, newline, and tab tokens.
@POST
excise(1, 1)
@RULES
_xNIL <- \  @@
_xNIL <- \n @@
_xNIL <- \t @@
|
@NODES _LINE
# A capitalized month name, or a 1-2 digit month number, is a _Month.
@PRE
<1,1> cap()
@RULES
# Ex: Jul
_Month [] <-
_xWILD [min=1 max=1 s match=(Jul Jan February Feb March Mar April Apr May June Jun July January August Aug September Sept Sep October Oct November Nov December Dec)]
@@
@RULES
# Ex: 02
_Month [] <-
_xWILD [min=1 max=1 s match=(02 2 3 4 5 6 7 8 9 01 1 03 04 05 06 07 08 09 10 11 12)]
@@
|
@NODES _LINE
# Recognize "NN/NN" as a _date (e.g. month/year).
@RULES
_date <-
_xNUM ### (1)
\/ ### (2)
_xNUM ### (3)
@@
|
@CODE
# Diagnostic pass: dump every _Caps node found on experience-instance
# lines to zexp.txt.
fileout("zexp.txt");
prlit("zexp.txt", "\nCharacterizing General Cap Phrases\n");
prlit("zexp.txt", "----------------------------------\n");
@@CODE
@PATH _ROOT _experienceZone _experienceInstance _LINE
@POST
ndump("zexp.txt",1);
prlit("zexp.txt", "-------\n");
@RULES
_xNIL <- _Caps @@
|
Subsets and Splits