text
stringlengths 22
301k
|
---|
# Replace the values of an attribute. There are three variants of replaceval, one each for replacing the value of a concept's attribute named attrNameStr with either integer valueInt, string valueString or concept values valueConcept
@CODE
if(findconcept(findroot(),"apples"))
rmconcept(findconcept(findroot(),"apples"));
G("apples") = makeconcept(findroot(),"apples");
if(findconcept(findroot(),"color"))
rmconcept(findconcept(findroot(),"color"));
G("color") = makeconcept(findroot(),"color");
G("red") = makeconcept(G("color"),"red");
G("blue") = makeconcept(G("color"),"blue");
addnumval(G("apples"),"weight", 3);
addstrval(G("apples"),"name", "MacIntosh");
addconval(G("apples"),"color", G("red"));
"output.txt" << "apples weight = " << numval(G("apples"),"weight") << "\n";
"output.txt" << "apples name = " << strval(G("apples"),"name") << "\n";
"output.txt" << "apples color = " << conceptname(conval(G("apples"),"color")) << "\n";
"output.txt" << "Replacing apple's attr vals:\n";
replaceval(G("apples"),"weight", 4);
replaceval(G("apples"),"name", "Granny Smith");
replaceval(G("apples"),"color", G("blue"));
"output.txt" << "apples weight = " << numval(G("apples"),"weight") << "\n";
"output.txt" << "apples name = " << strval(G("apples"),"name") << "\n";
"output.txt" << "apples color = " << conceptname(conval(G("apples"),"color")) << "\n";
The code above should print out:
apples weight = 3
apples name = MacIntosh
apples color = red
Replacing apple's attr vals:
apples weight = 4
apples name = Granny Smith
apples color = blue |
@NODES _ROOT
@RULES
_BLANKLINE <-
\n ### (1)
\n ### (2)
@@
@RULES
_NEWLINE <-
\n ### (1)
@@
|
@NODES _NLPPP
@RULES
# NLP++ KEYWORDS. #
_IF [base] <- if [s] @@
_ELSE [base] <- else [s] @@
_WHILE [base] <- while [s] @@ # 08/31/00 AM.
_RETURN [base] <- return [s] @@ # 03/07/02 AM.
# @POST
# rfavar(1, 2)
# single()
# @RULES
# _VAR [layer=(_EXPR)] <- _xWILD [s one match=( s G N X P )]
# _LIST
# @@
# FUNCTION CALLS AND ACTIONS. #
# Note: argument list is not yet NLP++ expr. Still old style.
# Renaming ACTION to FNCALL as part of moving into NLP++.
# @POST
# rfaaction(0, 1, 2)
# single()
# @RULES
# _FNCALL [base layer=(_EXPR)] <- _LIT _LIST @@
# Binary ops.
@POST
movesem(1)
single()
@RULES
_OP <- _xWILD [s one match=( _opAND _opOR
_opEQ _opNEQ _opGE _opLE
_opCONF # 12/17/99 AM.
_opOUT # 12/31/99 AM.
)]
@@
|
@CODE
prlit("zdump.txt", "\nCharacterizing General Cap Phrases\n");
prlit("zdump.txt", "----------------------------------\n");
@@CODE
@PATH _ROOT _LINE
@POST
ndump("zdump.txt",1);
prlit("zdump.txt", "-------\n");
@RULES
_xNIL <- _Caps @@
|
@CODE
if (!G("unhandled periods"))
exitpass();
@@CODE
@NODES _TEXTZONE
@POST
listadd(5,6,"false");
@RULES
_xNIL <-
_qEOS
_xWHITE [star]
_xCAP [s]
_xWHITE [star]
_letabbr
\.
@@
# Rearguard.
@POST
++G("unhandled periods");
if (G("error"))
"err.txt" << "[Unhandled period]" << "\n";
L("n") = N(1);
L("prev") = pnprev(L("n"));
if (L("prev")) L("prev") = pnprev(L("prev"));
L("next") = pnnext(L("n"));
if (L("next")) L("next") = pnnext(L("next"));
if (L("next")) L("next") = pnnext(L("next"));
if (L("prev"))
L("s") = pnvar(L("prev"),"$ostart");
else
L("s") = N("$ostart");
if (L("next"))
L("e") = pnvar(L("next"),"$oend");
else
L("e") = N("$oend");
if (G("error"))
"err1.txt" << " " << inputrange(L("s"),L("e"));
@RULES
_xNIL <-
\. [plus]
@@
|
# Move a concept childConcept after next sibling (Moves the concept to the 'right' or 'lower' in the pecking order.)
@CODE
"output.txt" << "move\n";
# Create three sibling concepts under the KB root: first, second, third.
G("alpha") = makeconcept(findroot(),"first");
G("beta") = makeconcept(findroot(),"second");
G("gamma") = makeconcept(findroot(),"third");
# Demonstrate reordering: move "third" one slot earlier,
# then move "first" one slot later in the sibling order.
movecleft(G("gamma"));
movecright(G("alpha")); |
@CODE
if (!G("pretagged"))
exitpass();
@@CODE
@PATH _ROOT _LINE
# NON-special chars.
@POST
excise(1,1);
@RULES
_xNIL <-
\\
\/
@@
_xNIL <-
\\
\|
@@
# Else, special chars.
@POST
singlex(2,2);
@RULES
_slash <-
\/
_xWILD [plus fail=(_xWHITE)]
@@
@POST
excise(1,1);
@RULES
_xNIL <-
\[
@@
_xNIL <-
\]
@@
|
# Get each component directory of the current input file path
# unpackdirs() breaks the input file's path into its directory components;
# split() on "\\" shows the raw backslash-separated pieces for comparison.
G("array") = unpackdirs(G("$input"));
"output.txt" << "file=" << G("$input") << "\n";
"output.txt" << "dirs=" << G("array") << "\n";
"output.txt" << "split=" << split(G("$input"),"\\") << "\n";
|
# Send output to user-supplied buffer
@CODE
# In VisualText, output to a file. Outside VisualText, output to user-supplied buffer.
if (interactive())
G("out") = "buf.txt";
else
G("out") = cbuf();
G("out") << "Hello output buffer!" << "\n";
@@CODE |
@NODES _ROOT
@POST
addstrval(N("con",3),"comment",N("text",1));
if (N(2))
addstrval(N("con",3),"comment",N("text",2));
@RULES
_xNIL <-
_comment ### (1)
_comment [opt] ### (2)
_pos ### (3)
@@
|
@NODES _ROOT
@RULES
_BLANKLINE <-
_xWILD [min=0 max=0 matches=(\ \t \r)] ### (1)
\n ### (2)
@@
@POST
S("num") = G("line")++;
single();
@RULES
_LINE <-
_xWILD [min=0 max=0 fails=(\r \n)] ### (1)
_xWILD [one match=(\n _xEND)] ### (2)
@@
|
@NODES _ROOT
@RULES
_paragraph <-
_xWILD [plus match=(_LINE)] ### (1)
@@
|
@NODES _LINE
# ex. English Major
@POST
singlex(1,1);
@RULES
_RealMajor <- _major
_xWHITE [s]
_xWILD [s one matches = ( major ) ]@@
@POST
singlex(3,3);
# ex. "major English"
@RULES
_RealMajor <- _xWILD [s one matches = ( majored major ) ]
_xWHITE [s]
_major @@
# ex. "major in English"
@POST
singlex(5,5);
@RULES
_RealMajor <- _xWILD [s one matches = ( major majored ) ]
_xWHITE [s]
_xWILD [ s one matches = ( in ) ]
_xWHITE [s]
_major @@
# ex. "majored: English
@POST
singlex(4,4);
@RULES
_RealMajor <- _xWILD [s one matches = ( major majored) ]
_xWILD [ s one matches = ( \: \- ) ]
_xWHITE [s opt]
_major @@
@POST
singlex(5,5);
@RULES
# ex. "major concentration English
_RealMajor <- _xWILD [s one matches = ( majored major) ]
_xWHITE [s]
concentration [s]
_xWHITE [s]
_major @@
@POST
singlex(7,7);
@RULES
# ex. "major concentration in English
_RealMajor <- _xWILD [s one matches = ( major majored) ]
_xWHITE [s]
concentration [s]
_xWHITE [s]
in [s]
_xWHITE [s]
_major @@
|
@NODES _ROOT
@PRE
<3,3> var("lang");
<5,5> var("ps");
@RULES
_lang <-
\{ ### (1)
\{ ### (2)
_xALPHA ### (3)
\- ### (4)
_xALPHA ### (5)
_xWILD [fail=(\})] ### (6)
\} ### (7)
\} ### (8)
@@
@RULES
_curly <-
\{ ### (1)
\{ ### (2)
_xWILD [fail=(\})] ### (3)
\} ### (4)
\} ### (5)
@@
# Excise doubled wiki-style delimiters ([[ ]] {{ }} (( )) << >>) and
# triple quotes. Each rule deletes the matched delimiter characters.
@POST excise(1,2); @RULES _xNIL <- \[ \[ @@
@POST excise(1,2); @RULES _xNIL <- \] \] @@
@POST excise(1,2); @RULES _xNIL <- \{ \{ @@
@POST excise(1,2); @RULES _xNIL <- \} \} @@
@POST excise(1,2); @RULES _xNIL <- \( \( @@
@POST excise(1,2); @RULES _xNIL <- \) \) @@
@POST excise(1,2); @RULES _xNIL <- \< \< @@
# NOTE: an exact duplicate of the "\> \>" rule was removed here; the
# first rule always consumes the pair, so the duplicate could never fire.
@POST excise(1,2); @RULES _xNIL <- \> \> @@
@POST excise(1,3); @RULES _xNIL <- \' \' \' [opt] @@
|
@CODE
prlit("output.txt","\n");
prlit("output.txt","EDUCATION:\n\n");
@@CODE
@PATH _ROOT _educationZone
# Not sure what to require yet. #
# @CHECK
# Require the instance to have a daterange.
# N("date range",1);
@POST
if (N("date"))
{
"output.txt"
<< LJ("Date:",G("indent"))
<< N("date",1)
<< "\n";
}
if (N("school"))
{
"output.txt"
<< LJ("School Name:",G("indent"))
<< N("school",1)
<< "\n";
}
# DOING STRING CATENATION!!
if (!N("school location"))
{
if (N("city") && N("state"))
N("school location") = N("city") + ", " + N("state");
else if (N("city") && N("country"))
N("school location") = N("city") + ", " + N("country");
else if (N("city"))
N("school location") = N("city");
else if (N("state"))
N("school location") = N("state");
else if (N("country"))
N("school location") = N("country");
}
if (N("school location"))
{
"output.txt"
<< LJ("School Location:",G("indent"))
<< N("school location",1)
<< "\n";
}
if (N("degree"))
{
"output.txt"
<< LJ("Degree:",G("indent"))
<< N("degree",1)
<< "\n";
}
if (N("major"))
{
"output.txt"
<< LJ("Major:",G("indent"))
<< N("major",1)
<< "\n";
}
if (N("minor"))
{
"output.txt"
<< LJ("Minor:",G("indent"))
<< N("minor",1)
<< "\n";
}
if (N("grade"))
{
"output.txt"
<< LJ("GPA:",G("indent"))
<< N("grade",1)
<< "\n";
}
if (N("major grade"))
"output.txt" << "Major GPA: " << N("major grade") << "\n";
if (N("minor grade"))
"output.txt" << "Minor GPA: " << N("minor grade") << "\n";
if (N("max grade"))
"output.txt" << "Max GPA: " << N("max grade") << "\n";
prlit("output.txt","\n");
@RULES
_xNIL <- _educationInstance @@
|
@NODES _LINE
@RULES
# Ex: what
_posDET [layer=(_funWORD )] <- _xWILD [min=1 max=1 s match=("what" "an" "the" "my" "our" "your" "his" "her" "its"
"their" "no" "whose" "which" "whichever" "a" "whatever" "some" "any" "enough"
"this" "that" "these" "those" "every" "each" "either" "neither" "much")] @@
|
@PATH _ROOT _posZone _partofspeech _headerZone
@POST
addstrval(X("con",2),"pos",N("$text",3));
@RULES
_xNIL <-
_xSTART ### (1)
_LINE ### (2)
_LINE ### (3)
@@
|
@PATH _ROOT _pronunciations _headerZone _LINE
@POST
addstrval(X("pronunciation",2),"phonemic",N("$text",3));
"debug.txt" << N("$text",1) << "\n";
@RULES
_xNIL <-
_phonetic ### (1)
_xWILD [fail=(_phonemic)] ### (2)
_phonemic ### (3)
@@
|
@CODE
if (G("$islastfile")) {
KBDump();
DisplayKB(G("stats"),1);
DisplayKB(G("words"),1);
G("debug") << "Conjugations: " << str(G("conju")) << "\n";
}
closefile(G("debug"));
@@CODE |
@CODE
G("filepath") = G("$kbpath") + "\\" + "en-surnames.dict";
L("type") = "app";
if (!G("$isdirrun") || G("$isfirstfile"))
G("file") = openfile(G("filepath"));
else
G("file") = openfile(G("filepath"),"app");
@@CODE |
@CODE
if (!G("pretagged"))
exitpass();
posacctdump();
if (G("mismatch out"))
closefile(G("mismatch out"));
if (G("zero out"))
closefile(G("zero out"));
# Get current doc score.
L("currgood") = numval(G("scorepos"),"currgood");
L("currtot") = numval(G("scorepos"),"currtot");
if (!L("currtot"))
{
"err.txt" << "Currtot is zero." << "\n";
exitpass();
}
# Accumulate overall scores.
L("allgood") = numval(G("scorepos"),"allgood") + L("currgood");
L("alltot") = numval(G("scorepos"),"alltot") + L("currtot");
# Printout scores.
L("fname") = G("$apppath") + "\\data\\score.txt";
L("out") = openfile(L("fname"),"app");
L("out") << G("$inputhead") << " "
<< rightjustifynum(L("currgood"),4)
<< "/"
<< rightjustifynum(L("currtot"),4)
<< " = "
<< rightjustifynum(100 * L("currgood") / L("currtot"),3)
<< " "
<< rightjustifynum(L("allgood"),6)
<< "/"
<< rightjustifynum(L("alltot"),6)
<< " = "
<< rightjustifynum(100 * L("allgood") / L("alltot"),3)
;
if (G("score date"))
L("out") << "\t[" << today() << "]";
L("out") << "\n";
# Update overall scores.
replaceval(G("scorepos"),"allgood",L("allgood"));
replaceval(G("scorepos"),"alltot",L("alltot"));
@@CODE
|
@DECL
###############################################
# General functions
###############################################
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value") && strval(L("concept"),L("attr")) != L("value"))
addstrval(L("concept"),L("attr"),L("value"));
}
AddUniqueNum(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << str(L("value")) << " " << conceptpath(L("concept")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
"unique.txt" << " value: " << str(L("num")) << "\n";
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip trailing digits from a name, e.g. "count12" -> "count".
# Returns 0 when the entire name is digits.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
# NOTE(review): for a 1-character name L("i") starts at -1, so strpiece()
# is called with a negative start index -- confirm this is handled upstream.
L("str") = strpiece(L("name"),L("i"),L("len"));
# Walk backwards while the tail is all digits; stop at the last
# non-digit character (or at index 0).
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functions
###############################################
# Dump the KB subtree rooted at L("con") to <apppath>\kb\<file>.kb,
# logging success or failure to kb.txt.
DumpKB(L("con"),L("file")) {
# Was: G("$apppath") + "kb\\" -- missing the separator after $apppath.
# Every other path built in this file inserts one (e.g. "\\kb\\user\\",
# "\\data\\score.txt"), so the dump landed next to, not under, the app dir.
L("dir") = G("$apppath") + "\\kb\\";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# Load (take) a previously dumped KB file <apppath>\kb\<filename>.kb,
# logging the attempt and its outcome to kb.txt.
TakeKB(L("filename")) {
# Was: G("$apppath") + "kb\\" -- missing the separator after $apppath;
# the other copy of this library (and all other paths here) use "\\kb\\".
L("path") = G("$apppath") + "\\kb\\" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << "  Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << "  Taken FAILED: " << L("path") << "\n";
}
}
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) {
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full"));
}
if (L("level") == 0)
return 0;
L("con") = next(L("con"));
}
}
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
DictionaryClear() {
G("dictionary path") = G("$apppath") + "\\kb\\user\\dictionary.kb";
G("dictionary") = openfile(G("dictionary path"));
}
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
L("file") = G("dictionary");
if (!dictfindword(L("word")))
L("file") << "add word \"" + L("word") + "\"\n";
L("file") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
L("file") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
L("file") << "pst\n" << L("value");
else if (L("attrType") == "num")
L("file") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
L("file") << "pcon\n" << conceptpath(L("value"));
L("file") << "\nend ind\n\n";
}
DictionaryEnd() {
G("dictionary") << "\nquit\n\n";
closefile(G("dictionary"));
}
@@DECL
|
@NODES _LINE
@RULES
_conj <-
_xWILD [one match=(\,)]
@@
|
@DECL
###############################################
# General functions
###############################################
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value") && strval(L("concept"),L("attr")) != L("value"))
addstrval(L("concept"),L("attr"),L("value"));
}
AddUniqueNum(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << str(L("value")) << " " << conceptpath(L("concept")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
"unique.txt" << " value: " << str(L("num")) << "\n";
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functions
###############################################
# Dump the KB subtree rooted at L("con") to <apppath>\kb\<file>.kb,
# logging success or failure to kb.txt.
DumpKB(L("con"),L("file")) {
# Was: G("$apppath") + "kb\\" -- missing the separator after $apppath,
# and inconsistent with TakeKB() in this same library, which reads
# from G("$apppath") + "\\kb\\". Dump and take now use the same directory.
L("dir") = G("$apppath") + "\\kb\\";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
TakeKB(L("filename")) {
L("path") = G("$apppath") + "\\kb\\" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << " Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << " Taken FAILED: " << L("path") << "\n";
}
}
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) {
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full"));
}
if (L("level") == 0)
return 0;
L("con") = next(L("con"));
}
}
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
DictionaryClear() {
G("dictionary path") = G("$apppath") + "\\kb\\user\\dictionary.kb";
G("dictionary") = openfile(G("dictionary path"));
}
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
L("file") = G("dictionary");
if (!dictfindword(L("word")))
L("file") << "add word \"" + L("word") + "\"\n";
L("file") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
L("file") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
L("file") << "pst\n" << L("value");
else if (L("attrType") == "num")
L("file") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
L("file") << "pcon\n" << conceptpath(L("value"));
L("file") << "\nend ind\n\n";
}
DictionaryEnd() {
G("dictionary") << "\nquit\n\n";
closefile(G("dictionary"));
}
@@DECL
|
@NODES _ROOT
@POST
excise(1,3)
@RULES
_xNIL <-
_xNUM
,
_xWILD [opt matches=(\")]
@@
|
# Fetch the first node in phrase.
L("return_con") = firstnode(L("phrase")); |
@DECL
###############################################
# General functions
###############################################
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value") && strval(L("concept"),L("attr")) != L("value"))
addstrval(L("concept"),L("attr"),L("value"));
}
AddUniqueNum(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << str(L("value")) << " " << conceptpath(L("concept")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
"unique.txt" << " value: " << str(L("num")) << "\n";
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functions
###############################################
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << " Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << " Taken FAILED: " << L("path") << "\n";
}
}
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# Build the per-pass KBB dump filename, zero-padded to three digits:
# ana007.kbb, ana042.kbb, ana123.kbb.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
# Was "ana0" + str(...) here, which produced 7-character names like
# ana0123 for pass >= 100 and collided with the <100 naming scheme.
# The other copies of this library use "ana" in this branch.
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
DisplayKBRecurse(L("file"),L("top"),L("level"),L("full")) {
if (L("level") == 0) {
L("file") << conceptname(L("top")) << "\n";
}
L("con") = down(L("top"));
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("full"));
}
L("con") = next(L("con"));
}
}
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
DictionaryClear() {
G("dictionary path") = G("$apppath") + "\\kb\\user\\dictionary.kb";
G("dictionary") = openfile(G("dictionary path"));
}
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
L("file") = G("dictionary");
if (!dictfindword(L("word")))
L("file") << "add word \"" + L("word") + "\"\n";
L("file") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
L("file") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
L("file") << "pst\n" << L("value");
else if (L("attrType") == "num")
L("file") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
L("file") << "pcon\n" << conceptpath(L("value"));
L("file") << "\nend ind\n\n";
}
DictionaryEnd() {
G("dictionary") << "\nquit\n\n";
closefile(G("dictionary"));
}
@@DECL |
# Parse the interior of an XML ATTLIST declaration: default declarations
# (#REQUIRED, #IMPLIED, #FIXED + value), attribute types, and the
# elements of enumerated attribute values.
@PATH _ROOT _AttlistDecl
@RULES
_DefaultDecl <-
	\# [one]	### (1)
	_xWILD [s one matches=("REQUIRED" "IMPLIED")]	### (2)
	@@
# NOTE(review): "_PERefence" below looks like a misspelling of
# "_PEReference"; it must match the node name produced by the earlier
# pass that builds parameter-entity references — confirm.
_DefaultDecl <-
	\# [one]	### (1)
	_xWILD [s one matches=("FIXED")]	### (2)
	_whiteSpace [opt]	### (3)
	_PERefence [one]	### (4)
	@@
_DefaultDecl <-
	\# [one]	### (1)
	_xWILD [s one matches=("FIXED")]	### (2)
	_whiteSpace [opt]	### (3)
	_PubidLiteral [one]	### (4)
	@@
_DefaultDecl <-
	\# [one]	### (1)
	_xWILD [s one matches=("FIXED")]	### (2)
	_whiteSpace [opt]	### (3)
	_SystemLiteral [one]	### (4)
	@@
_DefaultDecl <-
	_PubidLiteral [one]	### (1)
	@@
_DefaultDecl <-
	_SystemLiteral [one]	### (1)
	@@
# The built-in XML attribute type keywords.
_AttType <-
	_xWILD [s one matches=("CDATA" "ID" "IDREF" "IDREFS" "ENTITY" "ENTITIES" "NMTOKEN" "NMTOKENS")]	### (1)
	@@
# One "| Name" alternative inside a NOTATION/enumeration list
# (first token must start like an XML Name).
_EnumNameElement <-
	_whiteSpace [opt]	### (1)
	\| [one]	### (2)
	_whiteSpace [opt]	### (3)
	_xWILD [s one matches=("_xALPHA" "_" ":")]	### (4)
	_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]	### (5)
	@@
# One "| Nmtoken" alternative inside an enumeration list.
_EnumElement <-
	_whiteSpace [opt]	### (1)
	\| [one]	### (2)
	_whiteSpace [opt]	### (3)
	_xWILD [s plus matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]	### (4)
	@@
|
# End-of-pass cleanup: write the dictionary KB trailer and close the file.
@CODE
DictionaryEnd();
@@CODE |
###############################################
# FILE: XML SubSchema.pat			#
# SUBJ: Put together the major blocks of an	#
#	XML Document				#
# AUTH: Paul Deane				#
# CREATED: 14/Jan/01
# DATE OF THIS VERSION: 31/Aug/01		#
# Copyright
###############################################
@NODES _ROOT
# Assemble the XML prolog: optional XML declaration plus DOCTYPE,
# interleaved with comments/PIs/whitespace. At least one of _XMLDecl
# or _doctypedecl must be present (first rule requires the DOCTYPE,
# second requires the declaration).
# NOTE(review): the wildcard lists match "ProcessingInstruction" with no
# leading underscore, unlike the other nonterminals — confirm the actual
# node name produced by earlier passes.
@RULES
_Prolog [unsealed] <-
	_declSep [opt]
	_XMLDecl [opt]	### (1)
	_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")]	### (2)
	_doctypedecl [one]	### (3)
	_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")]	### (4)
	@@
_Prolog [unsealed] <-
	_declSep [opt]
	_XMLDecl [one]	### (1)
	_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")]	### (2)
	_doctypedecl [opt]	### (3)
	_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")]	### (4)
	@@
@@RULES
# Miscellaneous inter-element material (comments, PIs, whitespace).
@RULES
_Misc <-
	_xWILD [plus matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")]	### (1)
	@@
@@RULES
|
# Reduce a line of the form <alpha> <alpha> <number> <end> to a _codes
# node, recording the lowercased pieces as attributes.
# NOTE(review): S("two")/S("three") hold rule elements 1 and 2 — the
# naming looks off by one relative to the element numbers; confirm intent.
@NODES _LINE
@POST
S("two") = strtolower(N("$text",1));
S("three") = strtolower(N("$text",2));
S("num") = strtolower(N("$text",3));
single();
@RULES
_codes <-
	_xALPHA	### (1)
	_xALPHA	### (2)
	_xNUM	### (3)
	_xEND	### (4)
	@@
|
# Delete every whitespace token anywhere in the tree (@MULTI walks all
# levels under _ROOT). noop() prevents any reduction so matching simply
# continues after each excision.
@MULTI _ROOT
@POST
excise(1,1);
noop();
@RULES
_xNIL <-
	_xWHITE [s]	### (1)
	@@
|
# When a preposition (per IsPOS, defined elsewhere) is followed by more
# text on the line, record the following text as a "prep_phrase" attr on
# the context concept X("con"), and collect the run of noun/adj/conj
# tokens immediately BEFORE the preposition into an "obj" attribute
# (e.g. "[Malignant neoplasm] of splenic fixture").
# NOTE(review): the @CHECK writes to debug.txt unconditionally — looks
# like leftover debug output.
@NODES _LINE
@CHECK
"debug.txt" << "Checking on " << N("$text", 1) << "\n";
if (IsPOS(N(1), "prep") && N("$text", 2)) {
succeed();
}
fail();
@@CHECK
@POST
# Add prepositional object as attribute
L("text") = N("$text",2);
addstrval(X("con"), "prep_phrase", L("text"));
# Add noun/adjs/conjs before preposition
L("prev") = pnprev(N(1));
L("check_prev") = 1;
L("obj");
while (L("check_prev") && L("prev")){
# Check if it's a noun, adjective, or conjunction
# E.G. [Malignant neoplasm] of splenic fixture
L("is_adj_noun_conj") = IsPOS(L("prev"), "noun");
L("is_adj_noun_conj") = L("is_adj_noun_conj") + IsPOS(L("prev"), "adj");
L("is_adj_noun_conj") = L("is_adj_noun_conj") + IsPOS(L("prev"), "conj");
if (L("is_adj_noun_conj") > 0) {
# Skip filler words like "other"/"unspecified" when prepending.
if (L("obj")) {
if (!(strtolower(pnname(L("prev"))) == "other" || strtolower(pnname(L("prev"))) == "unspecified")) {
L("obj") = pnname(L("prev")) + " " + L("obj");
}
}
else {
L("obj") = pnname(L("prev"));
}
}
# Word is not adj/noun/conj, break.
else {
L("check_prev") = 0;
}
L("prev") = pnprev(L("prev"));
}
if (L("obj")) {
addstrval(X("con"), "obj", L("obj"));
}
noop();
@@POST
@RULES
_xNIL <-
	_xALPHA [one]
	_xWILD [fails=(_xEND _xPUNCT)]
@@ |
# Recognize multi-word academic major names within a line, reducing each
# to a _major node. Literal tokens are separated by _xWHITE.
# NOTE(review): "AFRO-AMERICAN" is written as a single literal token; if
# the tokenizer splits on "-", that rule can never match — confirm.
@NODES _LINE
@RULES
_major <- ADULT _xWHITE EDUCATION @@
_major <- AERONAUTICAL _xWHITE ENGINEERING @@
_major <- AEROSPACE _xWHITE ENGINEERING @@
_major <- AFRICAN _xWHITE STUDIES @@
_major <- AFRO-AMERICAN _xWHITE STUDIES @@
_major <- AGRICULTURAL _xWHITE COMMUNICATIONS @@
_major <- AGRICULTURAL _xWHITE ECONOMICS @@
_major <- AGRICULTURAL _xWHITE EDUCATION @@
_major <- AGRICULTURAL _xWHITE ENGINEERING @@
_major <- AGRICULTURAL _xWHITE OPERATIONS @@
_major <- AGRICULTURAL _xWHITE TECHNOLOGY @@
_major <- AMERICAN _xWHITE ETHNIC @@
_major <- AMERICAN _xWHITE HISTORY @@
_major <- AMERICAN _xWHITE STUDIES @@
_major <- ANIMAL _xWHITE HUSBANDRY @@
_major <- ANIMAL _xWHITE MEDICINE @@
_major <- ANIMAL _xWHITE NUTRITION @@
_major <- ANIMAL _xWHITE SCIENCE @@
_major <- ANIMAL _xWHITE SCIENCES @@
_major <- APPAREL _xWHITE DESIGN @@
_major <- APPAREL _xWHITE TEXTILES @@
_major <- APPLIED _xWHITE MATHEMATICS @@
_major <- APPLIED _xWHITE MECHANICS @@
_major <- ARCHITECTURAL _xWHITE ENGINEERING @@
_major <- ARCHITECTURAL _xWHITE HISTORY @@
_major <- ARCHITECTURE _xWHITE SCHOOL @@
_major <- ARMY _xWHITE ROTC @@
_major <- ART _xWHITE EDUCATION @@
_major <- ART _xWHITE HISTORY @@
_major <- ASIAN _xWHITE HISTORY @@
_major <- ASIAN _xWHITE LANGUAGES @@
_major <- ASIAN _xWHITE STUDIES @@
_major <- ATHLETIC _xWHITE TRAINING @@
_major <- ATMOSPHERIC _xWHITE SCIENCES @@
_major <- AUTOMOTIVE _xWHITE TECHNOLOGY @@
_major <- AVIATION _xWHITE TECHNOLOGY @@
_major <- BILINGUAL _xWHITE EDUCATION @@
_major <- BIOLOGICAL _xWHITE CHEMISTRY @@
_major <- BIOLOGICAL _xWHITE SCIENCES @@
_major <- BIOMEDICAL _xWHITE ENGINEERING @@
_major <- BUILDING _xWHITE CONSTRUCTION @@
_major <- BUSINESS _xWHITE ADMINISTRATION @@
_major <- BUSINESS _xWHITE LAW @@
_major <- BUSINESS _xWHITE LOGISTICS @@
_major <- CERAMIC _xWHITE SCIENCE @@
_major <- CHEMICAL _xWHITE ENGINEERING @@
_major <- CITY _xWHITE PLANNING @@
_major <- CIVIL _xWHITE ENGINEERING @@
_major <- CLINCH _xWHITE VALLEY @@
_major <- CLINICAL _xWHITE ETHICS @@
_major <- CLINICAL _xWHITE SCIENCES @@
_major <- COGNITIVE _xWHITE SCIENCE @@
_major <- CLINICAL _xWHITE PSYCHOLOGY @@
_major <- COGNITIVE _xWHITE SCIENCES @@
_major <- COMMUNICATION _xWHITE DISORDERS @@
_major <- COMMUNICATION _xWHITE SCIENCES @@
_major <- COMMUNICATIVE _xWHITE DISORDERS @@
_major <- COMMUNITY _xWHITE EDUCATION @@
_major <- COMMUNITY _xWHITE STUDIES @@
_major <- COMPARATIVE _xWHITE LAW @@
_major <- COMPARATIVE _xWHITE LITERATURE @@
_major <- COMPUTER _xWHITE SCIENCE @@
_major <- COMPUTER _xWHITE TECHNOLOGY @@
_major <- COMPUTER _xWHITE ENGINEERING @@
_major <- CONTINUING _xWHITE EDUCATION @@
_major <- COOPERATIVE _xWHITE EDUCATION @@
_major <- COUNSELOR _xWHITE EDUCATION @@
_major <- CREATIVE _xWHITE WRITING @@
_major <- CRIMINAL _xWHITE JUSTICE @@
_major <- DENTAL _xWHITE SCIENCES @@
_major <- DEVELOPMENTAL _xWHITE BIOLOGY @@
_major <- EARTH _xWHITE SCIENCES @@
_major <- EDUCATIONAL _xWHITE ADMINISTRATION @@
_major <- EDUCATIONAL _xWHITE EVALUATION @@
_major <- EDUCATIONAL _xWHITE LEADERSHIP @@
_major <- EDUCATIONAL _xWHITE POLICY @@
_major <- EDUCATIONAL _xWHITE PSYCHOLOGY @@
_major <- EDUCATIONAL _xWHITE RESEARCH @@
_major <- ELECTRICAL _xWHITE ENGINEERING @@
_major <- ELECTRONICS _xWHITE TECHNOLOGY @@
_major <- ELEMENTARY _xWHITE EDUCATION @@
_major <- ENERGY _xWHITE ENGINEERING @@
_major <- ENGINEERING _xWHITE MANAGEMENT @@
_major <- ENGINEERING _xWHITE MECHANICS @@
_major <- ENGINEERING _xWHITE PHYSICS @@
_major <- ENGINEERING _xWHITE SCIENCE @@
_major <- ENGINEERING _xWHITE TECHNOLOGY @@
_major <- ENGLISH _xWHITE EDUCATION @@
_major <- ENVIRONMENTAL _xWHITE ENGINEERING @@
_major <- ENVIRONMENTAL _xWHITE HEALTH @@
_major <- ENVIRONMENTAL _xWHITE PLANNING @@
_major <- ENVIRONMENTAL _xWHITE SCIENCE @@
_major <- ENVIRONMENTAL _xWHITE SCIENCES @@
_major <- ENVIRONMENTAL _xWHITE STUDIES @@
_major <- ETHNIC _xWHITE STUDIES @@
_major <- EUROPEAN _xWHITE HISTORY @@
_major <- EXERCISE _xWHITE PHYSIOLOGY @@
_major <- FAMILY _xWHITE STUDIES @@
_major <- FASHION _xWHITE DESIGN @@
_major <- FILM _xWHITE STUDIES @@
_major <- FINE _xWHITE ARTS @@
_major <- FISHERIES _xWHITE SCIENCE @@
_major <- FOOD _xWHITE SCIENCE @@
_major <- FOOD _xWHITE SERVICE @@
_major <- FOREIGN _xWHITE AFFAIRS @@
_major <- FOREIGN _xWHITE LANGUAGES @@
_major <- FOREST _xWHITE RESOURCES @@
_major <- GENERAL _xWHITE STUDIES @@
_major <- GERMANIC _xWHITE STUDIES @@
_major <- GERONTOLOGICAL _xWHITE STUDIES @@
_major <- GRAIN _xWHITE SCIENCE @@
_major <- HEALTH _xWHITE ADMINISTRATION @@
_major <- HEALTH _xWHITE EDUCATION @@
_major <- HEALTH _xWHITE INFORMATION @@
_major <- HEALTH _xWHITE POLICY @@
_major <- HEALTH _xWHITE SCIENCE @@
_major <- HEALTH _xWHITE SCIENCES @@
_major <- HEALTH _xWHITE SERVICES @@
_major <- HEBREW _xWHITE STUDIES @@
_major <- HORTICULTURAL _xWHITE SCIENCE @@
_major <- HORTICULTURAL _xWHITE SCIENCES @@
_major <- HOSPITALITY _xWHITE MANAGEMENT @@
_major <- HOTEL _xWHITE MANAGEMENT @@
_major <- HUMAN _xWHITE DEVELOPMENT @@
_major <- HUMAN _xWHITE ECOLOGY @@
_major <- HUMAN _xWHITE NUTRITION @@
_major <- HUMAN _xWHITE RESOURCES @@
_major <- INDIVIDUAL _xWHITE STUDIES @@
_major <- INDUSTRIAL _xWHITE ENGINEERING @@
_major <- INDUSTRIAL _xWHITE RELATIONS @@
_major <- INFORMATION _xWHITE MANAGEMENT @@
_major <- INFORMATION _xWHITE SCIENCE @@
_major <- INFORMATION _xWHITE SYSTEMS @@
_major <- INSTITUTIONAL _xWHITE MANAGEMENT @@
_major <- INSTRUCTIONAL _xWHITE TECHNOLOGY @@
_major <- INTERDISCIPLINARY _xWHITE STUDIES @@
_major <- INTERIOR _xWHITE ARCHITECTURE @@
_major <- INTERIOR _xWHITE DESIGN @@
_major <- INTERNATIONAL _xWHITE BUSINESS @@
_major <- INTERNATIONAL _xWHITE STUDIES @@
_major <- JEWISH _xWHITE STUDIES @@
_major <- LABOR _xWHITE RELATIONS @@
_major <- LABOR _xWHITE STUDIES @@
_major <- LANDSCAPE _xWHITE ARCHITECTURE @@
_major <- LAW _xWHITE ENFORCEMENT @@
_major <- LEGAL _xWHITE ASSISTANCE @@
_major <- LEISURE _xWHITE STUDIES @@
_major <- LIBERAL _xWHITE ARTS @@
_major <- LITERARY _xWHITE CRITICISM @@
_major <- LITERARY _xWHITE THEORY @@
_major <- MANAGEMENT _xWHITE INFORMATION @@
_major <- MANAGEMENT _xWHITE SCIENCE @@
_major <- MANAGEMENT _xWHITE TECHNOLOGY @@
_major <- MANUFACTURING _xWHITE ENGINEERING @@
_major <- MASS _xWHITE COMMUNICATION @@
_major <- MASS _xWHITE COMMUNICATIONS @@
_major <- MATERIALS _xWHITE ENGINEERING @@
_major <- MATERIALS _xWHITE SCIENCE @@
_major <- MATHEMATICS _xWHITE EDUCATION @@
_major <- MECHANICAL _xWHITE ENGINEERING @@
_major <- MEDIA _xWHITE ARTS @@
_major <- MEDIA _xWHITE STUDIES @@
_major <- MEDICAL _xWHITE ASSISTANCE @@
_major <- MEDICAL _xWHITE SCHOOL @@
_major <- MEDICAL _xWHITE SCIENCES @@
_major <- MEDICINAL _xWHITE CHEMISTRY @@
_major <- METALS _xWHITE SCIENCE @@
_major <- MILITARY _xWHITE SCIENCE @@
_major <- MINERAL _xWHITE ECONOMICS @@
_major <- MINERAL _xWHITE ENGINEERING @@
_major <- MINERAL _xWHITE PROCESSING @@
_major <- MINING _xWHITE ENGINEERING @@
_major <- MODERN _xWHITE LANGUAGES @@
_major <- MOLECULAR _xWHITE BIOLOGY @@
_major <- MOLECULAR _xWHITE GENETICS @@
_major <- MOTOR _xWHITE LEARNING @@
_major <- NAVY _xWHITE ROTC @@
_major <- NUCLEAR _xWHITE ENGINEERING @@
_major <- OCCUPATIONAL _xWHITE DEVELOPMENT @@
_major <- OCCUPATIONAL _xWHITE THERAPY @@
_major <- OCEAN _xWHITE ENGINEERING @@
_major <- OFFICE _xWHITE TECHNOLOGY @@
_major <- OPERATIONS _xWHITE MANAGEMENT @@
_major <- OPERATIONS _xWHITE RESEARCH @@
_major <- ORAL _xWHITE BIOLOGY @@
_major <- PARK _xWHITE MANAGEMENT @@
_major <- PEST _xWHITE MANAGEMENT @@
_major <- PHARMACEUTICAL _xWHITE SCIENCES @@
_major <- PHARMACY _xWHITE HEALTH @@
_major <- PHYSICAL _xWHITE EDUCATION @@
_major <- PHYSICAL _xWHITE THERAPIST @@
_major <- PHYSICAL _xWHITE THERAPY @@
_major <- PLANETARY _xWHITE SCIENCES @@
_major <- PLANT _xWHITE PATHOLOGY @@
_major <- PLANT _xWHITE PHYSIOLOGY @@
_major <- PLANT _xWHITE SCIENCES @@
_major <- POLICE _xWHITE SCIENCE @@
_major <- POLITICAL _xWHITE SCIENCE @@
_major <- POLYMER _xWHITE SCIENCE @@
_major <- PHYSICAL _xWHITE THERAPY @@
_major <- PUBLIC _xWHITE ADMINISTRATION @@
_major <- PUBLIC _xWHITE HEALTH @@
_major <- PUBLIC _xWHITE POLICY @@
_major <- PUBLIC _xWHITE RELATIONS @@
_major <- QUANTITATIVE _xWHITE ANALYSIS @@
_major <- RADIOLOGIC _xWHITE TECHNOLOGY @@
_major <- RADIOLOGICAL _xWHITE ENGINEERING @@
_major <- READING _xWHITE EDUCATION @@
_major <- REAL _xWHITE ESTATE @@
_major <- RECREATION _xWHITE MANAGEMENT @@
_major <- RECREATION _xWHITE RESOURCES @@
_major <- REHABILITATION _xWHITE COUNSELING @@
_major <- REHABILITATION _xWHITE SCIENCE @@
_major <- RELIGIOUS _xWHITE STUDIES @@
_major <- RESPIRATORY _xWHITE TECHNOLOGY @@
_major <- RESTAURANT _xWHITE MANAGEMENT @@
_major <- ROMANCE _xWHITE LANGUAGES @@
_major <- ROTC _xWHITE PROGRAMS @@
_major <- RURAL _xWHITE SOCIOLOGY @@
_major <- RUSSIAN _xWHITE STUDIES @@
_major <- SCHOOL _xWHITE PSYCHOLOGY @@
_major <- SCIENCE _xWHITE EDUCATION @@
_major <- SLAVIC _xWHITE LANGUAGES @@
_major <- SOCIAL _xWHITE STUDIES @@
_major <- SOCIAL _xWHITE WORK @@
_major <- SOFTWARE _xWHITE ENGINEERING @@
_major <- SOIL _xWHITE SCIENCE @@
_major <- SOLID _xWHITE STATE @@
_major <- SPECIAL _xWHITE EDUCATION @@
_major <- SPEECH _xWHITE COMMUNICATION @@
_major <- SPEECH _xWHITE PATHOLOGY @@
_major <- SPORT _xWHITE PSYCHOLOGY @@
_major <- SPORTS _xWHITE MEDICINE @@
_major <- STRUCTURAL _xWHITE MECHANICS @@
_major <- STUDENT _xWHITE COUNSELING @@
_major <- SYSTEMS _xWHITE ENGINEERING @@
_major <- TEACHER _xWHITE EDUCATION @@
_major <- TELECOMMUNICATIONS _xWHITE STUDIES @@
_major <- TEXTILE _xWHITE DESIGN @@
_major <- THEATRE _xWHITE ARTS @@
_major <- TOURISM _xWHITE ADMINISTRATION @@
_major <- TRANSPORTATION _xWHITE SYSTEMS @@
_major <- URBAN _xWHITE DEVELOPMENT @@
_major <- URBAN _xWHITE PLANNING @@
_major <- URBAN _xWHITE STUDIES @@
_major <- VETERINARY _xWHITE MEDICINE @@
_major <- VETERINARY _xWHITE SCIENCE @@
_major <- VISUAL _xWHITE ARTS @@
_major <- VOICE _xWHITE PERFORMANCE @@
_major <- WILDLIFE _xWHITE ECOLOGY @@
_major <- WILDLIFE _xWHITE SCIENCE @@
_major <- WOMEN \' S _xWHITE STUDIES @@
_major <- WORKFORCE _xWHITE EDUCATION @@
|
# Highlighting pass: skipped entirely when input is pretagged or
# highlighting is disabled. Otherwise visit every node in the tree and
# match (without reducing) each node that has an "ne" variable; matching
# alone is what highlights the text in the GUI.
@CODE
if (G("pretagged"))
	exitpass();
if (!G("hilite"))	# 10/25/10 AM.
	exitpass();	# 10/25/10 AM.
G("hello") = 0;
@@CODE
# Traverse the whole tree.
@MULTI _ROOT
@PRE
<1,1> var("ne");	# FEATURE-BASED MATCH IN NLP++ !	6/16/05 AM.
@POST
if (G("verbose"))
	"multi.txt" << pnname(N(1)) << "\t" << phrasetext() << "\n";
noop();	# Merely matching the rule will set text to green.
@RULES
_xNIL <-
	_xANY
	@@
|
# Low-confidence fallbacks inside an education instance line:
# (1) glom "Caps and Caps" into a _school when a good candidate exists,
# (2) use a Caps phrase after "degree," as the major,
# (3) rename a standalone high-confidence Caps to _school.
@PATH _ROOT _educationZone _educationInstance _LINE
# If higher conf stuff hasn't filled education instance yet, try
# some stuff in here.
# If there's an unclaimed turd to the left, glom it in!
# Should have constraints on that turd. If end of it can relate
# to start of head phrase, so much the better. etc. etc.
@CHECK
	if (
		 !X("school",3)	# School not filled in yet,
		&& (N("school conf",5) > 80)	# and a good candidate is around.
		)
		succeed();
	fail();
#@POST
#	X("school",3) = S("$text");	# Fill instance with school name.
#	single()
@RULES
_school <-
	_Caps	# The aforementioned turd.
	_xWHITE [s star]
	_xWILD [s one match=(and \&)]
	_xWHITE [s star]
	_Caps
	@@
# If I can glom degree with a subsequent turd w separation, use it.
# Should check that Caps isn't anything else useful...
@CHECK
	if (X("major",3)) fail();	# No major yet.
@POST
	X("major",3) = N("$text",5);
@RULES
_xNIL <-
	_degree [s]
	_xWHITE [s star]
	\, [s]
	_xWHITE [s star]
	_Caps [layer=(_major)]
	@@
# Standalone good school. Use it!
@CHECK
	if (
		 !X("school",3)	# School not filled in yet,
		&& (N("school conf") > 80)	# and a good candidate is around.
		)
		succeed();
	fail();
@POST
#	X("school",3) = N("$text");	# Could do it this way.
	noop();	# Uncommented.	# 11/22/00 AM.
@RULES
#_school <- _Caps @@	#
_xNIL <- _Caps [rename=(_school)] @@
|
# Assign a part of speech to each still-uncharacterized alpha token:
# - if the token has exactly one candidate POS, group it under that POS
#   node, carry over its variables, and fill in "sem" (stem or lowercased
#   text) and, for nouns, grammatical "number";
# - otherwise fall back on confidence scores: verb >= 90 or adv >= 90.
@CODE
L("hello") = 0;
@@CODE
@NODES _TEXTZONE
@POST
	if (N("pos num") == 1)	# UNAMBIGUOUS PART OF SPEECH.
		{
		if (N("pos"))
			{
			G("node") = N(1);
			group(1,1,N("pos"));
			pncopyvars(G("node"),N(1));	# 12/14/01 AM.
			if (!N("sem"))	# 01/01/02 AM.
				{
				if (N("text"))
					S("tmp") = N("text");
				else
					S("tmp") = strtolower(N("$text"));
				S("stem") = nvstem(S("tmp"));
				if (S("stem"))
					N("sem") = S("stem");
				else
					N("sem") = S("tmp");
				}
			if (N("pos") == "_noun")	# 09/04/04 AM.
				{
				L("num") = number(N(1));
				if (L("num"))
					N("number") = L("num");
				}
			# Doing this in nodepos().	#
			#	if (N("pos") == "_adj")	#
			#		{
			#		L("txt") = strtolower(N("$text"));
			#		if (L("myp") = adjconj(L("txt")))
			#			N("mypos") = L("myp");
			#		}
			}
		}
	# STARTING TO USE CONFIDENCE, FREQUENCY.	#
	else if (N("verb") >= 90)
		{
		S("tmp") = N(1);
		group(1,1,"_verb");
		pncopyvars(S("tmp"),N(1));
		}
	else if (N("adv") >= 90)
		{
		S("tmp") = N(1);
		group(1,1,"_adv");
		pncopyvars(S("tmp"),N(1));
		}
@RULES
_xNIL <-
	_xALPHA	# Uncharacterized, unreduced.
	@@
|
# For each _company node in a sentence, create an object concept under
# the sentence's X("object") concept, typed "company", with the node's
# normalized name (defaulting to its literal text).
@PATH _ROOT _paragraph _sentence
@POST
if (!N("normal"))
	N("normal") = N("$text");
N("object") = makeconcept(X("object"),N("$text"));
addstrval(N("object"),"type","company");
addstrval(N("object"),"normal",N("normal"));
@RULES
_xNIL <-
	_company [s]	### (1)
	@@
|
# Create a new directory
# NOTE(review): the path's backslashes are not doubled, unlike other
# strings in this codebase ("\\kb\\user\\...") — confirm NLP++ treats
# "\a"/"\m" literally here.
@CODE
mkdir("c:\abc\myfolder");	# This will work on Windows OS even if abc doesn't exist.
@@CODE |
# Recognize well-known school acronyms/names (capitalized token only,
# per the @PRE) and reduce each to _CompleteSchoolName.
@NODES _LINE
@PRE
<1,1> cap()
@RULES
_CompleteSchoolName <- _xWILD [s one matches=(
	MIT
	SJSU
	UCLA
	USC
	UCSB
	UCSD
	UCSC
	URI
	UMASS
	SUNY
	CALTECH
	Citadel
	USNA
	USMA
	USAFA
	SMU
	LACC
	CSU
	UNSW
	)] @@
|
# Inside _term nodes: delete newlines, carriage returns, and double
# quotes, then collapse each run of whitespace to a single token by
# excising everything after the first whitespace token.
@NODES _term
@POST
excise(1,1);
@RULES
_xNIL <-
	_xWILD [matches=(\n \r \")]
	@@
@POST
excise(2,2);
@RULES
_xNIL <-
	_xWILD [one matches=(_xWHITE)]
	_xWILD [plus matches=(_xWHITE)]
@@ |
# Recognize XML tags at the top level:
# - _EmptyTag:    <name ... />
# - _StartingTag: <name ... >   (records the element name in the KB)
# - _ClosingTag:  </name ... >  (records the element name in the KB)
# Element names are XML Names: a leading letter/_/: followed by
# name characters. Each discovered name is added (once) as a concept
# under G("Elements").
@NODES _ROOT
@RULES
_EmptyTag <-
	\< [one]	### (1)
	_xWILD [s one matches=("_xALPHA" "_" ":")]	### (2)
	_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]	### (3)
	_whiteSpace [opt]	### (4)
	_xWILD [star fails=("_EndEmptyTag" "_EndTag")]	### (5)
	_EndEmptyTag [one]	### (6)
	@@
@@RULES
@POST
	# Concatenate the name-start and name-rest pieces into the tag name.
	S("buffer1") = str(N("$text",2)) ;
	S("buffer2") = str(N("$text",3)) ;
	if (N("$text",2) && N("$text",3)) {
		S("tagName") = S("buffer1") + S("buffer2") ;
	}
	else if (N("$text",2))
		S("tagName") = S("buffer1") ;
	else if (N("$text",3))
		S("tagName") = S("buffer2") ;
	# Register the element name in the KB if it is new.
	G("ReferenceIDforConcept") = findconcept(G("Elements"),S("tagName")) ;
	if (G("ReferenceIDforConcept") == 0 )
		makeconcept(G("Elements"),S("tagName")) ;
	single() ;
@@POST
@RULES
_StartingTag <-
	\< [one]	### (1)
	_xWILD [s one matches=("_xALPHA" "_" ":")]	### (2)
	_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]	### (3)
	_whiteSpace [opt]	### (4)
	_xWILD [star fails=("_EndTag")]	### (5)
	_EndTag [one]	### (6)
	@@
@@RULES
@POST
	# Same name assembly as above, shifted one element for the "/".
	S("buffer1") = str(N("$text",3)) ;
	S("buffer2") = str(N("$text",4)) ;
	if (N("$text",3) && N("$text",4)) {
		S("tagName") = S("buffer1") + S("buffer2") ;
	}
	else if (N("$text",3))
		S("tagName") = S("buffer1") ;
	else if (N("$text",4))
		S("tagName") = S("buffer2") ;
	G("ReferenceIDforConcept") = findconcept(G("Elements"),S("tagName")) ;
	if (G("ReferenceIDforConcept") == 0 )
		makeconcept(G("Elements"),S("tagName")) ;
	single() ;
@@POST
@RULES
_ClosingTag <-
	\< [one]	### (1)
	\/ [one]	### (2)
	_xWILD [s one matches=("_xALPHA" "_" ":")]	### (3)
	_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]	### (4)
	_whiteSpace [opt]	### (5)
	_xWILD [star fails=("_EndTag")]	### (6)
	_EndTag [one]	### (7)
	@@
@@RULES
|
# Split the header line into _columnName nodes (text up to a tab or
# newline), and register each column name as an attribute on every
# child concept of the RadLex KB hierarchy. The delimiter itself is
# excised so matching can continue on the next column.
@PATH _ROOT _columnHeaders
@POST
	# Add column attributes for each concept in KB here
	L("tempCon") = down(findconcept(findroot(),"RadLex"));
	while ( L("tempCon") ) {
		addattr(L("tempCon"), N("$text", 1));
		L("tempCon") = next(L("tempCon"));
	}
	excise(2,2);
	singler(1,1);
@RULES
_columnName <-
	_xWILD [fails=(\t \n)]	### (1)
	_xWILD [one matches=(\t \n)]	### (2)
	@@
|
# Reduce an all-uppercase run of words (excluding a few stop tokens) to
# a _zoneHeader node.
# NOTE(review): S("header") references element 2 via N("$treetext", 2),
# but the rule has only one element — that term is presumably always
# empty; confirm intent.
@NODES _ROOT
@PRE
<1,1> uppercase();
@POST
S("header") = N("$text", 1) + N("$treetext", 2);
single();
@RULES
_zoneHeader <-
	_xALPHA [plus fails=(REFERRING CM MM _xCAPLET)] ### (1)
@@ |
# Recognize thesis/dissertation phrases (capitalized, per the @PRE
# constraints) and reduce to _Thesis layered over _Caps.
@NODES _LINE
@PRE
<1,1> cap();
<5,5> cap();
@RULES
# Ex: Master's\_Project
_Thesis [layer=(_Caps )] <- Master [s] \' [s] s [s] _xWHITE [star s] Project [s] @@
# Ex: Master's\_Thesis
_Thesis [layer=(_Caps )] <- Master [s] \' [s] s [s] _xWHITE [star s] Thesis [s] @@
# Ex: Master's\_Work
_Thesis [layer=(_Caps )] <- Master [s] \' [s] s [s] _xWHITE [star s] Work [s] @@
@PRE
<1,1> cap();
<3,3> cap();
@RULES
# Ex: Master\_Project
_Thesis [layer=(_Caps )] <- Master [s] _xWHITE [star s] Project [s] @@
# Ex: Master\_Thesis
_Thesis [layer=(_Caps )] <- Master [s] _xWHITE [star s] Thesis [s] @@
# Ex: Master\_Work
_Thesis [layer=(_Caps )] <- Master [s] _xWHITE [star s] Work [s] @@
# Ex: Doctoral\_Dissertation
_Thesis [layer=(_Caps )] <- Doctoral [s] _xWHITE [star s] Dissertation [s] @@
@PRE
<1,1> cap();
@RULES
# Ex: Thesis
_Thesis [layer=(_Caps )] <- Thesis [s] @@
# Ex: Dissertation
_Thesis [layer=(_Caps )] <- Dissertation [s] @@
|
# Cache each _item node's full subtree text in its "text" variable.
@NODES _LINE
@POST
N("text") = N("$treetext");
@RULES
_xNIL <-
	_item ### (1)
	@@
|
# Within header zones:
# - reduce <i ...>...</i> spans whose open tag had attr "class" to
#   _iGroup, keeping the inner text;
# - reduce <p>...</p> spans to _paraGroup, storing the inner text as the
#   group's (and the zone's) "title".
@PATH _ROOT _headerZone
@POST
if (N("attr",1) == "class") {
	S("text") = N("$text",2);
	single();
}
@RULES
_iGroup <-
	_iOpen ### (1)
	_xWILD [fail=(_iOpen _iClose)] ### (2)
	_iClose ### (3)
	@@
#@POST
#S("text") = N("$text",3);
#singler(2,4);
#@RULES
#_iGroup <-
#	_liOpen ### (1)
#	_iOpen ### (2)
#	_xWILD [fail=(_iOpen _iClose)] ### (3)
#	_iClose ### (4)
#	@@
@POST
X("title") = N("$text",2);
S("title") = N("$text",2);
single();
@RULES
_paraGroup <-
	_paraOpen ### (1)
	_xWILD [fail=(_paraClose)] ### (2)
	_paraClose ### (3)
	@@
|
# Placeholder pass: the _xNIL <- _xNIL rule can never match, so this
# pass performs no work (kept as a template/stub).
@CODE
L("hello") = 0;
@@CODE
@NODES _ROOT
@RULES
_xNIL <-
	_xNIL ### (1)
	@@
|
# show how we can create a concept like noun and assign it as the value of a word's attribute. We create the concepts named words and noun as children to the root of the KB (concept), and then make the concept book a child of words
G("words") = makeconcept(findroot(), "words");
G("noun") = makeconcept(findroot(),"noun");
G("noun_book") = makeconcept(G("words"),"book"); |
# Inside a _section, recognize a subsection header: an optional leading
# punct/start anchor, 1-4 capitalized/numeric tokens, then a colon.
# The header text (element 2) is stored, the colon is excised, and only
# elements 2-3 are reduced so the anchor stays for further matching.
@NODES _section
@POST
S("subsection") = N("$text", 2);
excise(4,4);
singler(2,3);
@RULES
_subsection <-
	_xWILD [one matches=(_xPUNCT _xSTART)] ### (1)
	_xWILD [one matches=(_patientID _xCAP)]
	_xWILD [min=0 max=3 match=(_xCAP _xNUM)]
	\:
	@@
|
# Append a MediaWiki "References" section footer to the output file.
@CODE
"output.txt" << "\n==References==\n<references />\n";
@@CODE
|
# Match a _verb node optionally preceded by _modal, _have, or _be nodes, reducing to _vgroup
# ([o] marks an element as optional).
@RULES
_vgroup <- _modal [o]
	_have [o] _be [o]
_verb @@ |
# Wrap any single date or date range in a _dateBoundary node.
@NODES _LINE
@RULES
_dateBoundary <-
	_xWILD [s one matches=(_SingleDate _DateRange)] @@
#_dateBoundary <- _xWILD [s one matches=(_date _dateRange)] @@
|
# Recognize company-designator suffixes (Inc., Ltd., S.A. de C.V., etc.)
# and reduce each to _companyRoot. Multi-token abbreviated forms come
# first; bare single-word forms follow.
@NODES _LINE
@RULES
# Ex: a.\_s.
_companyRoot <- a [s] \. [s] _xWHITE [star s] s [s] \. [s] @@
# Ex: a/s
_companyRoot <- a [s] \/ [s] s [s] @@
# Ex: co.
_companyRoot <- co [s] \. [s] @@
# Ex: corp.
_companyRoot <- corp [s] \. [s] @@
# Ex: cos.
_companyRoot <- cos [s] \. [s] @@
# Ex: inc.
_companyRoot <- inc [s] \. [s] @@
# Ex: ltd.
_companyRoot <- ltd [s] \. [s] @@
# Ex: llc.
_companyRoot <- llc [s] \. [s] @@
# Ex: l.\_p.
_companyRoot <- l [s] \. [s] _xWHITE [star s] p [s] \. [s] @@
# Ex: n.\_l.
_companyRoot <- n [s] \. [s] _xWHITE [star s] l [s] \. [s] @@
# Ex: n.\_v.
_companyRoot <- n [s] \. [s] _xWHITE [star s] v [s] \. [s] @@
# Ex: plc.
_companyRoot <- plc [s] \. [s] @@
# Ex: p.\_t.
_companyRoot <- p [s] \. [s] _xWHITE [star s] t [s] \. [s] @@
# Ex: s.\_a.\_de\_c.\_v.
_companyRoot <- s [s] \. [s] _xWHITE [star s] a [s] \. [s] _xWHITE [star s] de [s] _xWHITE [star s] c [s] \. [s] _xWHITE [star s] v [s] \. [s] @@
# Ex: s.\_a.
_companyRoot <- s [s] \. [s] _xWHITE [star s] a [s] \. [s] @@
# Ex: s/a
_companyRoot <- s [s] \/ [s] a [s] @@
# Ex: sa\_de\_cv
_companyRoot <- sa [s] _xWHITE [star s] de [s] _xWHITE [star s] cv [s] @@
# Ex: ab
_companyRoot <- ab [s] @@
# Ex: ag
_companyRoot <- ag [s] @@
# Ex: asa
_companyRoot <- asa [s] @@
# Ex: co
_companyRoot <- co [s] @@
# Ex: companies
_companyRoot <- companies [s] @@
# Ex: company
_companyRoot <- company [s] @@
# Ex: corp
_companyRoot <- corp [s] @@
# Ex: corporation
_companyRoot <- corporation [s] @@
# Ex: cos
_companyRoot <- cos [s] @@
# Ex: inc
_companyRoot <- inc [s] @@
# Ex: incorporated
_companyRoot <- incorporated [s] @@
# Ex: llc
_companyRoot <- llc [s] @@
# Ex: limited
_companyRoot <- limited [s] @@
# Ex: ltd
_companyRoot <- ltd [s] @@
# Ex: nv
_companyRoot <- nv [s] @@
# Ex: plc
_companyRoot <- plc [s] @@
# Ex: sa
_companyRoot <- sa [s] @@
# Ex: sca
_companyRoot <- sca [s] @@
# Ex: spa
_companyRoot <- spa [s] @@
|
# Initialize an empty "icd_hier" concept under the KB root, removing any
# children left from a previous run.
# NOTE(review): if getconcept() finds-or-creates, the if/makeconcept
# branch below is redundant — confirm getconcept semantics.
@CODE
G("icd_hier") = getconcept(findroot(),"icd_hier");
if (! G("icd_hier")) {
	G("icd_hier") = makeconcept(findroot(),"icd_hier");
}
rmchildren(G("icd_hier"));
@@CODE |
@DECL
# Return 1 if every character of L("string") is a hexadecimal digit
# (letters checked case-insensitively), else 0. An empty string returns
# 1, since the loop body never runs. Writes progress to letters.txt.
# NOTE(review): assumes strcontainsnocase(a,b) tests whether a occurs
# within b — confirm the argument order.
IsHex(L("string")) {
	L("len") = strlength(L("string"));
	L("i") = 0;
	while (L("i") < L("len")) {
		L("c") = strpiece(L("string"),L("i"),L("i"));
		if (!strcontainsnocase(L("c"),"abcdef") && !strcontainsnocase(L("c"),"1234567890")) {
			"letters.txt" << " NOT HEX\n";
			return 0;
		} else {
			"letters.txt" << " ";
		}
		L("i")++;
	}
	return 1;
}
@@DECL |
# Inside education-zone lines, commit a _Caps phrase to a specific class
# when its best classification meets the global threshold:
# - "school": rename the node in place (noop keeps matching going);
# - "minor": reduce via the default action (no @POST).
@PATH _ROOT _educationZone _LINE
@CHECK
	if (
		(N("hi class") == "school")
	 && (N("hi conf") >= G("threshold"))
		)
		succeed();
	fail();
@POST
	noop();
@RULES
#_school <- _Caps @@	#
_xNIL <- _Caps [rename=(_school)] @@
@CHECK
	if (
		(N("hi class") == "minor")
	 && (N("hi conf") >= G("threshold"))
		)
		succeed();
	fail();
@RULES
_minor <- _Caps @@
|
# For every top-level text unit that carries a "bases" array, append each
# base (deduplicated via AddUniqueStr, defined elsewhere) onto the
# "bases" attribute of the KB's note concept. Progress is logged to
# extract_codes.txt.
@NODES _ROOT
@PRE
<1,1> var("bases");
@POST
	# Check for section or subsection title.
	"extract_codes.txt" << "Beginning code extraction step." << ":\n";
	L("section_title") = N("section_title", 1);
	L("subsection_title") = N("subsection", 1);
	G("line_count") = G("line_count") + 1;
	L("section") = getconcept(G("note_words"), "note");
	L("all_bases") = pnvar(N(1), "bases");
	L("bases_len") = arraylength(L("all_bases"));
	"extract_codes.txt" << "\t" << "Adding " << L("bases_len") << " total bases." << "\n";
	L("i") = 0;
	while(L("i") < L("bases_len")) {
		"extract_codes.txt" << "\t" << "Adding " << L("all_bases")[L("i")] << "\n";
		AddUniqueStr(L("section"),"bases", L("all_bases")[L("i")]);
		L("i")++;
	}
@RULES
_xNIL <-
	_xWILD [one matches=( _section _subsection _sentence _looseText _item)]
@@ |
# Recognize single-date expressions in many surface forms (day-first,
# month-first, month/year, season/year, numeric, bare 4-digit year) and
# reduce each to a _SingleDate node whose sub-parts are layered as
# _daySD/_monthSD/_yearSD (and _seasonSD/_numA/_numB).
# NOTE(review): many @POST lines end with ";;" — an extra empty
# statement; harmless if the compiler accepts it, but worth cleaning up.
@NODES _LINE
@PRE
<3,3> cap();
@POST
S("daySD") = N("$text", 1);;
S("monthSD") = N("$text", 3);;
S("yearSD") = N("$text", 7);;
single();
@RULES
# Ex: 3\_Sept.,\_99
_SingleDate <- _xNUM [s layer=("_daySD")] _xWHITE [star s] _monthWord [trig s layer=("_monthSD")] \. [s] \, [s] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<3,3> cap();
@POST
S("daySD") = N("$text", 1);;
S("monthSD") = N("$text", 3);;
S("yearSD") = N("$text", 6);;
single();
@RULES
# Ex: 31\_Sept.\_
_SingleDate <- _xNUM [s layer=("_daySD")] _xWHITE [star s] _monthWord [trig s layer=("_monthSD")] _xPUNCT [s] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<3,3> cap();
@POST
S("daySD") = N("$text", 1);;
S("monthSD") = N("$text", 3);;
S("yearSD") = N("$text", 5);;
single();
@RULES
# Ex: 17\_September\_90
_SingleDate <- _xNUM [s layer=("_daySD")] _xWHITE [star s] _monthWord [trig s layer=("_monthSD")] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<1,1> cap();
@POST
S("monthSD") = N("$text", 1);;
S("daySD") = N("$text", 4);;
S("yearSD") = N("$text", 7);;
single();
@RULES
# Ex: Oct.\_9,\_'51
_SingleDate <- _monthWord [s layer=("_monthSD")] \. [s] _xWHITE [star s] _xNUM [s layer=("_daySD")] \, [s] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<1,1> cap();
@POST
S("monthSD") = N("$text", 1);;
S("daySD") = N("$text", 3);;
S("yearSD") = N("$text", 6);;
single();
@RULES
# Ex: Sept\_3,\_
_SingleDate <- _monthWord [s layer=("_monthSD")] _xWHITE [star s] _xNUM [s layer=("_daySD")] \, [s] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<1,1> cap();
@POST
S("monthSD") = N("$text", 1);;
S("yearSD") = N("$text", 5);;
single();
@RULES
# Ex: Jan.,\_94
_SingleDate <- _monthWord [s layer=("_monthSD")] \. [s] \, [s] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<1,1> cap();
@POST
S("monthSD") = N("$text", 1);;
S("yearSD") = N("$text", 4);;
single();
@RULES
# Ex: Oct.\_'93
_SingleDate <- _monthWord [s layer=("_monthSD")] \. [s] _xWHITE [star s] _year [s layer=("_yearSD")] @@
# Ex: Sept.\_
_SingleDate <- _monthWord [s layer=("_monthSD")] _xPUNCT [s] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<1,1> cap();
@POST
S("monthSD") = N("$text", 1);;
S("yearSD") = N("$text", 3);;
single();
@RULES
# Ex: Jun/88
_SingleDate <- _monthWord [s layer=("_monthSD")] \/ [s] _year [s layer=("_yearSD")] @@
# Ex: Aug\_'89
_SingleDate <- _monthWord [s layer=("_monthSD")] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@PRE
<1,1> cap();
@POST
S("seasonSD") = N("$text", 1);;
S("yearSD") = N("$text", 3);;
single();
@RULES
# Ex: Winter\_'88
_SingleDate <- _Season [s layer=("_seasonSD")] _xWHITE [star s] _year [s layer=("_yearSD")] @@
@POST
S("numA") = N("$text", 1);;
S("numB") = N("$text", 3);;
S("yearSD") = N("$text", 5);;
single();
@RULES
# Ex: 31-31-49
_SingleDate <- _xNUM [s layer=("_numA")] _xPUNCT [trig s] _xNUM [s layer=("_numB")] _xPUNCT [s] _xNUM [s layer=("_yearSD")] @@
@POST
S("monthSD") = N("$text", 1);;
S("yearSD") = N("$text", 3);;
single();
@RULES
_SingleDate <- _monthNum [s layer=("_monthSD")] _xPUNCT [s] _year [s layer=("_yearSD")] @@
@PRE
<1,1> length(4);
@POST
S("yearSD") = N("$text", 1);;
single();
@RULES
_SingleDate <- _year [s layer=("_yearSD")] @@
|
# Fetch the first 10 characters of the input text as a string
G("str") = inputrange(0,9); # offsets 0 through 9, inclusive
# Fetch the first 10 characters of the input text as a string.
# Open output.xml and write the fixed XML/BizTalk/HR-XML header for a
# CandidateProfile document. The commented code below shows an earlier
# per-node approach to emitting the file name.
@CODE
fileout("output.xml");
prlit("output.xml", "<?xml version=\"1.0\" ?>\n");
prlit("output.xml", "<BizTalk xmlns=\"urn:schemas-biztalk-org/biztalk-0.81.xml\">\n");
prlit("output.xml", "<Body>\n");
prlit("output.xml", "<CandidateProfile xmlns=\"urn:schemas-biztalk-org:HR-XML-org/CandidateProfile\">\n");
@@CODE
#N("xmlfile") = G("$inputhead");
#N("xmlfile") = N("xmlfile")+ ".xml";
#fprintnvar("output.xml","xmlfile",1);
#@@POST
#	_xNIL <- _xNIL @@	#
|
# Count the occurrences of a character in a string
G("count") = strchrcount("abcabc","b");
# This should evaluate to 2, for the two occurrences of "b"
# in "abcabc".
# Fetch number from numeric VAL.
L("return_int") = getnumval(L("val")); |
# Recognize a capitalized organizational-unit word and reduce to _subOrg.
@NODES _LINE
@PRE
<1,1> cap();
@RULES
# Ex: Department
_subOrg <- _xWILD [min=1 max=1 s match=("Department" "Section" "Project" "Office" "Branch" "Division")] @@
|
# For each _term, feed the first alpha/numeric token (and, via
# AddPhrase's own traversal, the tokens after it) into the phrase KB.
# The "debug.txt" line looks like leftover debug output.
@NODES _term
@POST
AddPhrase(N(2));
"debug.txt" << N("$text", 2) << "\n";
@RULES
_xNIL <-
	_xSTART ### (1)
	_xWILD [one match=(_xALPHA _xNUM)] ### (2)
	@@
|
@DECL
# Walk the node list starting at L("node"), building a path of word
# concepts under G("phrases") (one level per token, deduplicated via
# AddUniqueCon, defined elsewhere). A child concept named "00000" marks
# the end of the phrase.
AddPhrase(L("node")) {
	L("con") = G("phrases");
	while (L("node")) {
		L("text") = pnvar(L("node"), "$text");
		L("con") = AddUniqueCon(L("con"), L("text"));
		L("node") = pnnext(L("node"));
		if (!L("node")) {
			makeconcept(L("con"), "00000");
		}
	}
}
# Add the node's text to the user dictionary with numeric attribute
# Radlex = 1 (see DictionaryWord).
AddDictionaryWord(L("node")) {
	DictionaryWord(pnvar(L("node"), "$text"), "Radlex", 1, "num");
}
@@DECL |
# Absorb a following noun phrase into the preceding _prep node,
# forming the prepositional phrase (result keeps the _prep name).
@NODES _ROOT
@RULES
_prep <-
	_prep ### (1)
	_np ### (2)
	@@
|
# Within a dictionary-definition item, each ";"-introduced segment (up
# to the next ";", Devanagari danda "।", or end) becomes a _variation:
# a counted "variation" concept is created on the definition's concept
# (MakeCountCon, defined elsewhere) with the segment text attached.
@PATH _ROOT _posZone _defZone _definition _headerZone _LINE _item
@POST
L("con") = MakeCountCon(X("con",3),"variation");
addstrval(L("con"),"text",N("$text",2));
singler(2,2);
@RULES
_variation <-
	\; ### (1)
	_xWILD [plus fail=(\; । _xEND)] ### (2)
	@@
# Capitalization / named-entity fixups within text zones:
# - glom capitalized tokens after a noun that carries an "ne type" into
#   a _caps/_noun phrase, propagating NE info and tagging NP;
# - quoted Title Capitalization -> _caps with ne type "title";
# - semi-capitalized "det cap noun 's" -> _caps;
# - neither/nor and either/or conjunction POS tagging;
# - sentence-initial and post-comma double-quote handling;
# - hard-wired word lists forcing NP on capitalized tokens adjacent to
#   another capitalized token, and "American Stock Exchange" -> _noun.
@CODE
G("hello") = 0;
@@CODE
@NODES _TEXTZONE
@CHECK
	if (!N("ne type",1))
		fail();
@POST
	L("tmp2") = N(2);
	if (!N(3))
		L("one") = 1;
	group(2,3,"_caps");
	group(2,2,"_noun");
	if (L("one"))
		pncopyvars(L("tmp2"),N(2));
	N("sem",2) = "name";
	N("ne",2) = 1;
	N("ne type",2) = N("ne type",1);
	N("stem",2) = N("$text",2);
	N("posarr",2) = pnvar(L("tmp2"),"posarr");	# 06/14/06 AM.
	N("posarr len",2) = pnvar(L("tmp2"),"posarr len");
	chpos(N(2),"NP");	# 06/14/06 AM.
	if (N("ne type",1) == "person")
		# Some name semantics here.
		registerpersnames(N("$text",2));
@RULES
_xNIL <-
	_noun
	_xCAP
	_xWILD [star match=(_xWHITE _xCAP _letabbr)]
	@@
# Title capitalization...
@POST
	group(2,4,"_caps");
	group(2,2,"_noun");
	N("ne",2) = 1;
	N("ne type",2) = "title";
@RULES
_xNIL <-
	\"
	_xWILD [s plus match=(_xCAP _det _prep _fnword)]
	_xCAP [s trigger]
	_xWILD [s star match=(_xCAP _det _prep _fnword)]
	_qEOS [opt]
	\"
	@@
# Semi-capitalized phrase...
# dqan cap noun 's
#@PRE
#<3,3> lowercase();
@POST
	L("tmp2") = N(2);
	group(2,2,"_noun");
	pncopyvars(L("tmp2"),N(2));
	chpos(N(2),"NP");
	group(2,3,"_caps");
	group(2,2,"_noun");
@RULES
_xNIL <-
	_xWILD [plus match=(_det _pro _adj _prep)]
	_xCAP
	_noun
	_aposS [lookahead]
	@@
# neither cap nor
# neither alpha nor
@POST
	L("tmp2") = N(2);
	group(2,2,"_noun");
	pncopyvars(L("tmp2"),N(2));
	chpos(N(1),"CC");	# neither/CC.
	chpos(N(3),"CC");	# nor/CC.
@RULES
_xNIL <-
	neither [s]
	_xCAP
	nor [s]
	@@
@POST
	pnrename(N(1),"_adv");	# either -> adv
	chpos(N(1),"CC");	# either/CC
	chpos(N(3),"CC");	# or/CC
@RULES
_xNIL <-
	either [s]
	_xALPHA
	or [s]
	@@
# Miscellany.
# ^ "
@POST
	++X("dblquote");
	group(2,2,"_qEOS");
@RULES
_xNIL <-
	_xSTART
	_dblquote
	@@
# , "
@POST
	++X("dblquote");
	N("double quote",1) = 1;
#	group(1,2,"_qEOS");	#
	excise(2,2);	# Try this.	# 05/29/07 AM.
@RULES
_xNIL <-
	\,
	_dblquote
	@@
# Hard-wired or "lexicalized" for now.
@PRE
<1,1> cap();
@CHECK
	if (N("mypos",1))
		fail();
@POST
#	if (G("conform treebank"))
		chpos(N(1),"NP");
@RULES
_xNIL <-
	_xWILD [s one match=(
	American
	British
	Congress
	Continental
	Economic
	Environmental
	Mercantile
	Data
	Digital
	Eastern
	Financial
	Ford
	Foreign General Great
	Industrial
	Jaguar
	Japanese
	Minister
	Monetary
	Moody
	Paramount
	Philippines
	Poor President Prime
	SEC
	Secretary
	Southern
	State
	Supreme
	TV
	Warner
	Airlines Americans
	Associates
	Brothers
	Communications
	Containers
	Futures
	Gardens
	Holdings
	Industries
	Investors
	Machines
	Manufacturers
	Markets
	Motors
	Resources
	Savings
	Securities
	Services
	Stores
	Systems
	United
	)]
	_xCAP [s lookahead]
	@@
@PRE
<2,2> cap();
@CHECK
	if (N("mypos",2))
		fail();
@POST
#	if (G("conform treebank"))
		chpos(N(2),"NP");
@RULES
_xNIL <- # 8
	_xCAP [s]
	_xWILD [s one lookahead match=(
	American
	British
	Congress
	Continental
	Economic
	Environmental
	Mercantile
	Data
	Digital
	Eastern
	Financial
	Ford
	Foreign General Great
	Industrial
	Jaguar
	Japanese
	Minister
	Monetary
	Moody
	Paramount
	Philippines
	Poor President Prime
	SEC
	Secretary
	Southern
	State
	Supreme
	TV
	Warner
	Airlines Americans
	Associates
	Brothers
	Communications
	Containers
	Futures
	Gardens
	Holdings
	Industries
	Investors
	Machines
	Manufacturers
	Markets
	Motors
	Resources
	Savings
	Securities
	Services
	Stores
	Systems
	)]
	@@
@POST
	if (G("conform treebank"))
		N("mypos",1) = "NP";
	chpos(N(2),"NP");
	if (N(3))
		chpos(N(3),"NP");
	group(1,3,"_noun");
	N("ignorepos",1) = 1;
@RULES
_xNIL <-
	American [s]
	Stock [s]
	Exchange [s]
	@@
@PRE
<1,1> cap();
@POST
pnrename(N(1),"_noun"); # -> noun
chpos(N(1),"NP");
if (N(2))
chpos(N(2),"NP");
if (N(3))
{
if (G("conform treebank"))
chpos(N(3),"NP");
else
chpos(N(3),"JJ");
}
if (N(4))
chpos(N(4),"NP");
group(1,4,"_noun");
N("ignorepos",1) = 1;
@RULES
_xNIL <-
Dow [s]
Jones [s opt]
Industrial [s opt]
Average [s opt]
@@
# money
# yen
@POST
L("tmp2") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
if (N("num",1) != 1) # NUMERIC VALUE of number.
chpos(N(2),"NNS");
else
chpos(N(2),"NN");
@RULES
_xNIL <-
_xWILD [one match=(_num _quan _xNUM)]
yen
@@
# idiom: data base
@POST
L("tmp2") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
@RULES
_xNIL <-
data [s]
_xWILD [one match=(base bases)]
@@
# much noun
@POST
L("tmp1") = N(1);
group(1,1,"_adj");
pncopyvars(L("tmp1"),N(1));
if (!N("mypos",1))
chpos(N(1),"JJ");
@RULES
_xNIL <-
much
_xWILD [one lookahead match=(_noun)]
@@
# much adv
@POST
L("tmp1") = N(1);
group(1,1,"_adv");
pncopyvars(L("tmp1"),N(1));
if (!N("mypos",1))
chpos(N(1),"RB");
@RULES
_xNIL <-
much
_xWILD [one lookahead match=(_adv)]
@@
# savings and loan
@POST
if (!N("mypos",3))
chpos(N(3),"NN");
group(1,3,"_noun");
clearpos(N(1),1,0);
@RULES
_xNIL <-
savings [s]
and [s]
loan [s]
@@
# Standalone cap words are not necessarily NP.
@POST
if (!N("mypos",2))
chpos(N(2),"NN");
@RULES
_xNIL <-
_xWILD [s one fail=(_xCAP)]
_xWILD [s one match=(
tv
)]
_xWILD [s one lookahead fail=(_xCAP)]
@@
# adj 's
# Fix some bad assignments.
@POST
pnrename(N(1),"_noun"); # adj -> noun
@RULES
_xNIL <-
_adj
_aposS [lookahead]
@@
# someone else
# noun else
@PRE
<1,1> var("nounpro");
@POST
L("tmp2") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
chpos(N(2),"RB"); # Conform treebank.
@RULES
_xNIL <-
_noun
else
@@
# which means
# that means
@POST
L("tmp2") = N(2);
group(2,2,"_verb");
pncopyvars(L("tmp2"),N(2));
@RULES
_xNIL <-
_xWILD [s one match=(that which)]
means
@@
# money num
@POST
group(1,2,"_num");
N("number") = "any";
@RULES
_xNIL <-
_money # eg, US$
_num
@@
# human being
# idiom. (not foolproof, of course.)
@POST
L("tmp2") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
N("sem",2) = N("stem",2) = "being";
chpos(N(2),"NN"); # being/NN.
@RULES
_xNIL <-
human [s]
being
@@
# being
# DEFAULT (96% in Penn Treebank.)
@POST
L("tmp1") = N(1);
group(1,1,"_verb");
pncopyvars(L("tmp1"),N(1));
N("sem",1) = N("stem",1) = "be";
chpos(N(1),"VBG"); # being/VBG.
@RULES
_xNIL <-
being
@@
# look like
# sound like
@POST
alphatoverb(1,"active","VBD");
if (!N("mypos",2))
{
chpos(N(2),"IN"); # like/IN.
pnrename(N(2),"_prep");
}
@RULES
_xNIL <-
_xWILD [one match=(
look sound
looks sounds
looking sounding
looked sounded
seem seems seeming seemed
feel feels feeling felt
)]
like [s lookahead]
@@
# like pro
@CHECK
if (N("mypos",1))
fail();
if (pnname(N(1)) != "_fnword")
fail();
@POST
chpos(N(1),"IN"); # like/IN.
pnrename(N(1),"_prep");
@RULES
_xNIL <-
_xWILD [s one match=(like)]
_proSubj [s lookahead]
@@
# like to
@CHECK
if (N("mypos",1))
fail();
if (pnname(N(1)) != "_fnword")
fail();
@POST
pnrename(N(1),"_verb");
@RULES
_xNIL <-
like [s]
_adv [star lookahead]
to [s]
@@
# det num [det year]
# prep num [prep year]
@CHECK
if (pnname(N(1)) == "_pro" && !N("proposs",1))
fail();
N("num") = num(N("$text",2));
if (N("num") > 1900 && N("num") < 2015)
succeed();
fail();
@POST
group(2,2,"_adj");
chpos(N(2),"CD");
@RULES
_xNIL <-
_xWILD [one match=(_det _pro)]
_xNUM
@@
# num th street
@PRE
<1,1> var("ordinal");
<2,2> var("cap");
@POST
chpos(N(1),"NP");
chpos(N(2),"NP");
@RULES
_xNIL <-
_num
_xWILD [s one match=(street avenue boulevard)]
@@
# Note: cap not sentence starter.
@PRE
<2,2> cap();
@POST
N("mypos",2) = "NP";
@RULES
_xNIL <-
_xWILD [one fail=( \. _qEOS)]
_det [trigger]
_xCAP [s]
@@
# adj cap
@PRE
<1,1> vareq("number","plural");
@CHECK
L("t") = strtolower(N("$text",2));
if (strendswith(L("t"),"ss"))
fail();
if (!strendswith(L("t"),"s"))
fail();
@POST
N("mypos",2) = "NPS";
@RULES
_xNIL <-
_adj
_xCAP
@@
#Get rid of quotes inside a cap phrase.
# Ex: Toys "R" Us
@POST
excise(4,4);
excise(2,2);
@RULES
_xNIL <-
_xCAP [s]
_dblquote
_xCAP [s plus]
_dblquote
@@
##########################
### NONCAP ISSUES
##########################
# verb ok
@POST
alphatoadj(2);
@RULES
_xNIL <-
_verb
ok
@@
|
@NODES _LINE
# Join two or three whitespace-separated field names (or already-built
# fields) on a line into a single _field node.
@RULES
_field <-
	_xWILD [s matches=(_fieldName _field)]
	_xWHITE [s]
	_xWILD [s matches=(_fieldName _field)]
	_xWHITE [s star]
	_xWILD [s opt matches=(_fieldName _field)]@@
@NODES _LINE
# Recognize a season word on a line and wrap it as _Season.
@RULES
# Ex: summer
_Season <- _xWILD [min=1 max=1 s match=("summer" "winter" "fall" "spring")] @@
@CODE
# Reset the dictionary KB state before this analyzer run
# (DictionaryClear is defined in the shared @DECL library).
DictionaryClear();
@@CODE
@CODE
if (!G("verbose"))
exitpass();
#L("dir") = G("$apppath") + "\\data\\ne";
#mkdir(L("dir"));
#L("fname") = L("dir") + "\\" + G("$inputname");
#G("kout") = openfile(L("fname"));
G("kout") = "clause_ne.txt";
G("kout") << "PART SEM STEM NE_TEXT ALL_TEXT" << "\n"
<< "========================================================" << "\n"
;
@@CODE
#@PATH _ROOT _TEXTZONE _sent _clause
@NODES _clause
@POST
L("x3") = pnparent(X()); # 07/13/12 AM.
if (!pnvar(L("x3"),"printed"))
{
G("kout") << "\n" << "sentence: "
<< pnvar(L("x3"),"$text")
<< "\n";
# X("printed",3) = 1; #
pnreplaceval(L("x3"),"printed",1); # 07/13/12 AM.
}
if (!X("printed"))
{
G("kout") << "\n" << "clause: "
<< X("$text")
<< "\n";
if (X("voice"))
G("kout") << "voice:\t" << X("voice") << "\n";
else
G("kout") << "voice:\tnil\n";
X("printed") = 1;
}
if (!X("stuff-in-clause"))
{
L("nm") = pnname(N(1));
L("len") = strlength(L("nm"));
if (strpiece(L("nm"),0,0) == "_"
&& L("len") > 1)
L("nm") = strpiece(L("nm"),1,L("len")-1);
G("kout") << L("nm");
indent(5-L("len"),G("kout"));
L("sem") = N("sem");
if (!L("sem"))
L("sem") = N("stem");
if (!L("sem"))
L("sem") = "nil";
G("kout") << "\t"
<< L("sem")
;
L("len") = strlength(L("sem"));
indent(8-L("len"),G("kout"));
# NP head, etc.
L("stem") = N("stem");
if (!L("stem"))
L("stem") = "nil";
G("kout") << "\t"
<< L("stem")
;
L("len") = strlength(L("stem"));
indent(8-L("len"),G("kout"));
L("ne text") = N("ne text");
if (!L("ne text"))
L("ne text") = "nil";
G("kout") << "\t"
<< L("ne text")
;
L("len") = strlength(L("ne text"));
indent(12-L("len"),G("kout"));
G("kout") << "\t" << N("$text") << "\n";
}
@RULES
_xNIL <-
_xANY
@@
|
@NODES _section
# Recognize dotted abbreviations of 2-4 single letters (e.g. "U.S.",
# "U.S.A.", "A.B.C.D.").  The @PRE restricts each alpha element to
# length 1 so ordinary words are not swallowed.
@PRE
<1,7> length(1);
@RULES
_abbr <-
	_xALPHA	### (1)
	\.	### (2)
	_xALPHA	### (3)
	\.	### (4)
	_xALPHA	[opt]	### (5)
	\.	[opt]	### (6)
	_xALPHA	[opt]	### (7)
	\.	[opt]	### (8)
	@@
@NODES _td
# Each rule matches a label inside a table cell (_td) and renames the
# NEXT sibling cell — pnnext(pnup(N(1))) — to a semantic node type,
# e.g. the cell after "state abbreviation" becomes _abbrev.
@POST L("node") = pnnext(pnup(N(1)));pnrename(L("node"),"_abbrev");@RULES _info <- state abbreviation @@
@POST L("node") = pnnext(pnup(N(1)));pnrename(L("node"),"_capital");@RULES _xNIL <- state capital @@
@POST L("node") = pnnext(pnup(N(1)));pnrename(L("node"),"_numCounties");@RULES _xNIL <- number of counties @@
@POST L("node") = pnnext(pnup(N(1)));pnrename(L("node"),"_timeZone");@RULES _xNIL <- time zone @@
@POST L("node") = pnnext(pnup(N(1)));pnrename(L("node"),"_areaCodes");@RULES _xNIL <- area codes @@
@POST L("node") = pnnext(pnup(N(1)));pnrename(L("node"),"_cities");@RULES _xNIL <- top _xNUM @@
@PATH _ROOT _LINE
# Adding some semantics here, etc. #
# Seen this a few times even in Dev1. #
# Cumulative GPA 4.7 on a weighted 4.0 scale.
# Cumulative GPA 4.01 on 4.0 scale.
@POST
if (N("major gpa",1))
S("major grade") = N("$text",6);
else if (N("minor gpa",1))
S("minor grade") = N("$text",6);
else
S("grade") = N("$text",6);
S("max grade") = N("$text",14);
S("school type") = N("school type",1);
++X("gpa"); # Flag that line has GPA.
single();
@RULES
_Grade [base] <-
_GPA [s]
\. [s opt] # Took period out of abbrev.
_xWHITE [s star]
_xWILD [s opt match= ( \: of was \= is )]
_xWHITE [s star]
_numDecimal [s layer=(_gradeValue)]
_xWHITE [s star]
on [s]
_xWHITE [s star]
a [s opt]
_xWHITE [s star]
weighted [s opt]
_xWHITE [s star]
_numDecimal [s layer=(_maxGrade)]
_xWHITE [s star]
scale [s]
@@
# 3.8 (4.0) Major GPA
# 3.8 (4.0) overall GPA
@POST
if (N("major gpa",7))
S("major grade") = N("$text",1);
else if (N("minor gpa",7))
S("minor grade") = N("$text",1);
else
S("grade") = N("$text",1);
S("max grade") = N("$text",4);
S("school type") = N("school type",7);
++X("gpa"); # Flag that line has GPA.
single();
@RULES
_Grade [] <-
_numDecimal [s layer=(_gradeValue)]
_xWHITE [star s]
\( [s]
_numDecimal [s layer=(_maxGrade)]
\) [s]
_xWHITE [s star]
_GPA [s]
@@
# Major G.P.A.: 3.5 Overall: 3.0
# Minor GPA: 3.
# G.P.A. = 3.1; In Major = 3.4
# GPA: still awaiting final grades
# Computer Science Departmental GPA 4.17.
# Computer Science GPA: 3.12 / 4.00
# Heur: Watch out for GPA 4.0 vs NT 4.0!
# gpa computer
# (4.0, 5.0), PowerBuilder 3.0-5.0
# Other product versions...
# Heur: So many of these standalone ranges should be done in edu
# section only.
# 3.85 GPA (of 4.0)
@POST
if (N("major gpa",3))
S("major grade") = N("$text",1);
else if (N("minor gpa",3))
S("minor grade") = N("$text",1);
else
S("grade") = N("$text",1);
S("max grade") = N("$text",9);
S("school type") = N("school type",3);
++X("gpa"); # Flag that line has GPA.
single();
@RULES
_Grade <-
_numDecimal [s layer=(_gradeValue)]
_xWHITE [star s]
_GPA [s]
\. [s opt]
_xWHITE [s star]
\( [s]
of [s opt]
_xWHITE [s star]
_numDecimal [s layer=(_maxGrade)]
\) [s]
@@
@POST
if (N("major gpa",3))
S("major grade") = N("$text",1);
else if (N("minor gpa",3))
S("minor grade") = N("$text",1);
else
S("grade") = N("$text",1);
S("school type") = N("school type",3);
++X("gpa"); # Flag that line has GPA.
single();
@RULES
# Ex: 3.85 GPA
_Grade <-
_numDecimal [s layer=(_gradeValue)]
_xWHITE [star s]
_GPA [s]
@@
# GPA of 3.
@POST
if (N("minor gpa",1))
S("minor grade") = N("$text",6);
else
S("grade") = N("$text",6);
S("major grade") = N("$text",9);
S("school type") = N("school type",1);
++X("gpa"); # Flag that line has GPA.
single();
@RULES
_Grade <-
_GPA [s]
\. [s opt] # Took out of abbrev.
_xWHITE [s star]
_xWILD [s opt match= ( \: of was \= is )]
_xWHITE [s star]
_numDecimal [s layer=(_gradeValue)]
_xWHITE [s star]
\( [s]
_numDecimal [s]
_xWHITE [s star]
in [s]
_xWHITE [s star]
major [s]
\) [s]
@@
# GPA: 3.68 / 4.0
@POST
if (N("major gpa",1))
S("major grade") = N("$text",6);
else if (N("minor gpa",1))
S("minor grade") = N("$text",6);
else
S("grade") = N("$text",6);
S("max grade") = N("$text",10);
S("school type") = N("school type",1);
++X("gpa"); # Flag that line has GPA.
single();
@RULES
_Grade <-
_GPA [s]
\. [s opt] # Took out of abbrev.
_xWHITE [s star]
_xWILD [s opt match= ( \: of was \= is )]
_xWHITE [s star]
_numDecimal [s layer=(_gradeValue)]
_xWHITE [s star]
\/ [s]
_xWHITE [s star]
_numDecimal [s layer=(_maxGrade)]
@@
# GPA: 3.68 (4.0 scale)
@POST
if (N("major gpa",1))
S("major grade") = N("$text",6);
else if (N("minor gpa",1))
S("minor grade") = N("$text",6);
else
S("grade") = N("$text",6);
S("max grade") = N("$text",9);
S("school type") = N("school type",1);
++X("gpa"); # Flag that line has GPA.
single();
@RULES
_Grade <-
_GPA [s]
\. [s opt] # Took out of abbrev.
_xWHITE [s star]
_xWILD [s opt match= ( \: of was \= is )]
_xWHITE [s star]
_numDecimal [s layer=(_gradeValue)]
_xWHITE [s star]
\( [s]
_numDecimal [s layer=(_maxGrade)]
_xWHITE [s star]
scale [s opt]
\) [s]
@@
@POST
if (N("major gpa",1))
S("major grade") = N("$text",6);
else if (N("minor gpa",1))
S("minor grade") = N("$text",6);
else
S("grade") = N("$text",6);
S("school type") = N("school type",1);
++X("gpa"); # Flag that line has GPA.
single();
# Ex: GPA of 3.6
# GPA: 3.6
# GPA 3.6
# G.P.A. in Major: 3.08
@RULES
_Grade <-
_GPA [s]
\. [s opt] # Took out of abbrev.
_xWHITE [s star]
_xWILD [s opt match= ( \: of was \= is )]
_xWHITE [s star]
_numDecimal [s layer=(_gradeValue)]
@@
|
@NODES _LINE
# A '#'-prefixed line segment becomes an _item; keep the raw text on
# the new node and reduce only the text portion (the '#' stays).
@POST
	S("text",2) = N("$text",2);
	singler(2,2);
@RULES
_item <-
	\#	### (1)
	_xWILD	[plus fail=(_xEND)]	### (2)
	@@
@CODE
L("hello") = 0;
@@CODE
#@PATH _ROOT _TEXTZONE _sent _clause
@NODES _clause
@POST
L("x3") = pnparent(X()); # _sent # 07/10/12 AM.
X("last chunk") = "v";
++X("vg count");
++X("vg count",3); # Count in sentence also. # 01/07/05 AM.
L("cc") = num(pnvar(L("x3"),"vg count")); # 07/07/12 AM.
pnreplaceval(L("x3"),"vg count",++L("cc")); # 07/07/12 AM.
if (N("voice"))
X("voice") = N("voice"); # Copy active/passive to clause.
if (N("last vg"))
X("last vg") = N("last vg");
if (N("first vg"))
X("first vg") = N("first vg");
if (!X("vg node"))
X("vg node") = N(1);
if (!X("voice") && N("voice"))
X("voice") = N("voice");
X("last") = N(1);
if (!X("first name"))
X("first name") = "_vg";
if (!pnvar(L("x3"),"first vclause")) # 07/10/12 AM.
{
# First clause with vg.
pnreplaceval(L("x3"),"first vclause",X()); # 07/10/12 AM.
X("first v in sent") = 1;
}
if (N("ellipted-that"))
X("ellipted-that") = 1;
# Todo: check for multiple vgs...
@RULES
_xNIL <-
_vg
@@
# np
@CHECK
if (X("pattern"))
fail();
@POST
X("last chunk") = "n";
X("pattern") = "n";
X("np") = N(3);
if (N(4))
{
L("tmp") = lasteltnode(4);
X("last") = L("tmp");
}
else
X("last") = N(3);
if (!X("first name"))
{
if (N(2))
X("first name") = pnname(N(2));
else
X("first name") = "_np";
}
if (N("ellipted-that"))
X("ellipted-that") = 1;
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
_np
_xWILD [star match=(_advl _adv)]
_xEND
@@
@POST
X("last chunk") = "n";
X("last") = N(1);
if (!X("first name"))
X("first name") = "_np";
if (N("ellipted-that"))
X("ellipted-that") = 1;
@RULES
_xNIL <-
_np
@@
@POST
L("x3") = pnparent(X()); # _sent # 07/10/12 AM.
X("last chunk") = "v";
X("start to-vg") = 1;
++X("vg count");
# Count in sentence also. #
L("cc") = num(pnvar(L("x3"),"vg count")); # 07/10/12 AM.
pnreplaceval(L("x3"),"vg count",++L("cc")); # 07/10/12 AM.
if (N("voice"))
X("voice") = N("voice"); # Copy active/passive to clause.
if (N("last vg"))
X("last vg") = N("last vg");
if (N("first vg"))
X("first vg") = N("first vg");
if (!X("vg node"))
X("vg node") = N(5);
X("last") = N(5);
if (!X("first name"))
{
if (N(2))
X("first name") = pnname(N(2));
else
X("first name") = "_prep";
}
# Todo: check for multiple vgs...
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_advl _adv)]
to [s]
_xWILD [star match=(_advl _adv)]
_vg
@@
@POST
X("last chunk") = "p";
@RULES
_xNIL <-
_prep
@@
@POST
X("last") = N(1);
if (!X("first name"))
X("first name") = "_advl";
if (N("by-actor")) # 12/07/05 AM.
{
if (!X("by-actor"))
X("by-actor") = N("by-actor");
++X("num by-actor");
if (!X("voice"))
X("voice") = "passive"; # Fix voice up...
}
@RULES
_xNIL <-
_advl
@@
@POST
X("last") = N(1);
if (!X("first name"))
X("first name") = "_adjc";
X("last chunk") = "j";
@RULES
_xNIL <-
_adjc
@@
@POST
X("last") = N(1);
if (!X("first name"))
X("first name") = "_adv";
@RULES
_xNIL <-
_adv
@@
|
@PATH _ROOT _headerZone
# Rename generic zone/header nodes to semantic names derived from their
# "header" variable (e.g. header "Education" -> node _Education).
@POST
	if (N("header")) {
		L("node name") = "_" + N("header");
		pnrename(N(1),L("node name"));
	}
@RULES
_xNIL <-
	_headerZone	### (1)
	@@
# A _header node with a "header" variable marks a person block.
@POST
	if (N("header")) {
		pnrename(N(1),"_Person");
	}
@RULES
_xNIL <-
	_header	### (1)
	@@
@NODES _LINE
# Extend saint names with a "St." or "Saint" prefix, layering as _Caps.
# S("len") records the resulting two-word length on the reduced node.
@POST
	S("len") = 2;
	single();
@RULES
_saintName [layer=(_Caps)] <-
	St [s]
	\.
	_xWHITE
	_saintName [s]
	@@
_saintName [layer=(_Caps)] <-
	Saint
	_xWHITE
	_saintName [s]
	@@
@PATH _ROOT _AttlistDecl
@RULES
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("REQUIRED" "IMPLIED")] ### (2)
@@
# #FIXED default declaration whose value is a parameter-entity reference.
# FIX: element (4) was "_PERefence" (typo) — the node built by the
# baseElements pass in this file is "_PEReference" (cf. the _PEReference
# rule), so this alternative could never match.
_DefaultDecl <-
	\# [one]	### (1)
	_xWILD [s one matches=("FIXED")]	### (2)
	_whiteSpace [opt]	### (3)
	_PEReference [one]	### (4)
	@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_PubidLiteral [one] ### (4)
@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_SystemLiteral [one] ### (4)
@@
_DefaultDecl <-
_PubidLiteral [one] ### (1)
@@
_DefaultDecl <-
_SystemLiteral [one] ### (1)
@@
_AttType <-
_xWILD [s one matches=("CDATA" "ID" "IDREF" "IDREFS" "ENTITY" "ENTITIES" "NMTOKEN" "NMTOKENS")] ### (1)
@@
_EnumNameElement <-
_whiteSpace [opt] ### (1)
\| [one] ### (2)
_whiteSpace [opt] ### (3)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (4)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (5)
@@
_EnumElement <-
_whiteSpace [opt] ### (1)
\| [one] ### (2)
_whiteSpace [opt] ### (3)
_xWILD [s plus matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (4)
@@
|
@CODE
# Dump the "terms" KB subtree to the per-pass .kbb display file
# (display type 1 = full/spread-out format; see DisplayKB in @DECL).
DisplayKB(G("terms"), 1);
#DumpKB(G("terms"), "terms");
@@CODE
@MULTI _ROOT _section _looseText _sentence _subsection _item
@PRE
<1,1> var("keyword");
@POST
L("parent_keys") = pnvar(X(), "key_words");
if (L("parent_keys")) {
pnrpushval(X(), "key_words", N("$text", 1));
}
else {
X("key_words") = N("$text", 1);
}
# pnmakevars()
# # X("keyword") = varinlist("keyword", 2);
# noop();
@RULES
_xNIL <-
# Stop list includes non-leaf nodes and common medical terms
# We exclude _subsection since this may give us relevant title
_xWILD [one fails=(
_xEND
_section
_subsection
_sentence
_item
patient
)]
@@
@PRE
<1,1> var("key_words");
@POST
if (N("section_title", 1)) {
pnrpushval(N(1), "key_words", N("section_title", 1));
}
if (N("subsection", 1)) {
pnrpushval(N(1), "key_words", N("subsection", 1));
}
if (X("section_title")) {
pnrpushval(N(1), "key_words", X("section_title"));
}
if (X("subsection")) {
pnrpushval(N(1), "key_words", X("subsection"));
}
@RULES
_xNIL <-
_xWILD [one matches=(
_section
_sentence
_subsection
_item
_looseText
)]
@@
|
@DECL
###############################################
# General functions
###############################################
# Return the child concept of L("concept") named L("name"), creating it
# if it does not already exist (find-or-create).
AddUniqueCon(L("concept"),L("name")) {
	L("con") = findconcept(L("concept"),L("name"));
	if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
	return L("con");
}
# Add string L("value") to attribute L("attr") of L("concept") only if
# that exact string is not already among the attribute's values.
# Returns 1 if added, 0 if the value was empty or already present.
AddUniqueStr(L("concept"),L("attr"),L("value")) {
	if (L("value")) {
		L("val") = AttrValues(L("concept"),L("attr"));
		while (L("val")) {
			L("str") = getstrval(L("val"));
			if (L("str") == L("value"))
				return 0;	# duplicate -- nothing added
			L("val") = nextval(L("val"));
		}
		addstrval(L("concept"),L("attr"),L("value"));
		return 1;
	}
	return 0;
}
AddUniqueNum(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Add concept L("value") to attribute L("attr") of L("concept") only if
# no existing value has the same concept path.  Returns 1 if added, 0 if
# already present.  Writes debug trace to unique.txt.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
	# NOTE(review): L("attr") is logged twice in this line -- the first
	# occurrence was probably meant to be something else; confirm.
	"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
	L("val") = AttrValues(L("concept"),L("attr"));
	while (L("val")) {
		L("con") = getconval(L("val"));
		"unique.txt" << conceptname(L("con")) << "\n";
		if (conceptpath(L("con")) == conceptpath(L("value")))
			return 0;	# same concept already stored
		L("val") = nextval(L("val"));
	}
	addconval(L("concept"),L("attr"),L("value"));
	return 1;
}
PathToConcept(L("parent"),L("hier")) {
L("cons") = split(L("hier")," ");
L("i") = 0;
L("con") = L("parent");
while (L("cons")[L("i")]) {
L("c") = L("cons")[L("i")];
L("name") = strsubst(L("c"),"\"",0);
if (L("name") != "concept")
L("con") = AddUniqueCon(L("con"),L("name"));
L("i")++;
}
return L("con");
}
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
CopyNodeAttrs(L("node"),L("con")) {
L("vars") = pnvarnames(L("node"));
L("len") = arraylength(L("vars"));
while (L("vars")[L("i")]) {
L("attr") = L("vars")[L("i")];
"copy.txt" << L("attr");
L("val") = pnvar(L("node"),L("attr"));
if (findhierconcept(L("val"),G("caps"))) {
"copy.txt" << " is a concept";
}
"copy.txt" << "\n";
L("i")++;
}
}
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
ValCount(L("vals")) {
while (L("vals")) {
L("count")++;
L("vals") = nextval(L("vals"));
}
return L("count");
}
# Return the last child concept of L("parent"), or 0 (unset) if the
# parent has no children.
LastChild(L("parent")) {
	L("child") = down(L("parent"));
	while (L("child")) {
		L("last") = L("child");
		L("child") = next(L("child"));
	}
	return L("last");
}
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# Increment the numeric attribute L("countname") on L("con"), creating
# it with value 1 on first use.  Returns the new count.
IncrementCount(L("con"),L("countname")) {
	L("count") = numval(L("con"),L("countname"));
	if (L("count")) {
		L("count") = L("count") + 1;
		replaceval(L("con"),L("countname"),L("count"));
	} else {
		addnumval(L("con"),L("countname"),1);
		L("count") = 1;
	}
	return L("count");
}
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip trailing digits from L("name"), e.g. "variation12" -> "variation".
# Returns 0 if the name is all digits.
# FIX: the original returned strpiece(0,i) even when the name had NO
# trailing digits, silently dropping the last character (e.g. "abc" ->
# "ab"); it also called strpiece with a negative index on 1-char input.
# A guard now returns the name unchanged when the last char is not a digit.
StripEndDigits(L("name")) {
	if (strisdigit(L("name"))) return 0;	# all digits -> nothing left
	L("len") = strlength(L("name")) - 1;	# index of last char
	if (!strisdigit(strpiece(L("name"),L("len"),L("len"))))
		return L("name");	# no trailing digits -- unchanged
	# Walk backward until the suffix strpiece(i,len) is no longer all digits;
	# the digits then start at i+1.
	L("i") = L("len") - 1;
	L("str") = strpiece(L("name"),L("i"),L("len"));
	while (strisdigit(L("str")) && L("i")) {
		L("i")--;
		L("str") = strpiece(L("name"),L("i"),L("len"));
	}
	return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functins
###############################################
# Dump the KB subtree rooted at L("con") to <apppath>/kb/<file>.kb,
# logging success or failure to kb.txt.
DumpKB(L("con"),L("file")) {
	L("dir") = G("$apppath") + "/kb/";
	L("filename") = L("dir") + L("file") + ".kb";
	if (!kbdumptree(L("con"),L("filename"))) {
		"kb.txt" << "FAILED dump: " << L("filename") << "\n";
	} else {
		"kb.txt" << "DUMPED: " << L("filename") << "\n";
	}
}
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << " Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << " Taken FAILED: " << L("path") << "\n";
}
}
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
###############################################
# display type:
# 0 compact with ellipses on long attr values
# 1 full, more spread out
# 2 compact without ellipses on long attr values
###############################################
DisplayKB(L("top con"),L("display type")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("display type"));
L("file") << "\n";
return L("top con");
}
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# Build the per-pass display filename "anaNNN.kbb", zero-padding the
# current pass number to three digits (e.g. pass 7 -> "ana007.kbb").
DisplayFileName() {
	if (num(G("$passnum")) < 10) {
		L("file") = "ana00" + str(G("$passnum"));
	}else if (num(G("$passnum")) < 100) {
		L("file") = "ana0" + str(G("$passnum"));
	} else {
		L("file") = "ana" + str(G("$passnum"));
	}
	L("file") = L("file") + ".kbb";
	return L("file");
}
DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) {
if (L("level") == 0) {
L("file") << conceptname(L("parent")) << "\n";
}
L("con") = down(L("parent"));
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("display type"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type"));
}
L("con") = next(L("con"));
}
}
# Print the attributes of L("con") to the L("file") stream.
# display type: 0 = compact, string values longer than 20 chars are
#                   truncated with "..."
#               1 = spread out, one attribute per line
#               2 = compact, no truncation
# FIX: in the string-value branch the full value was printed
# unconditionally AFTER the truncated "shorty..." form, so compact mode
# emitted every long value twice.  The full value is now in an else.
DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) {
	L("attrs") = findattrs(L("con"));
	if (L("attrs")) L("file") << ": ";
	if (L("display type") == 1 && L("attrs")) L("file") << "\n";
	L("first attr") = 1;
	while (L("attrs")) {
		L("vals") = attrvals(L("attrs"));
		L("count") = ValCount(L("vals"));
		if (L("display type") != 1 && !L("first attr")) {
			L("file") << ", ";
		}
		if (L("display type") == 1) {
			if (!L("first attr")) L("file") << "\n";
			L("file") << SpacesStr(L("level")+2);
		}
		L("name") = attrname(L("attrs"));
		if (DisplayValNeedsQuote(L("name")))
			L("file") << "\"";
		L("file") << L("name");
		if (DisplayValNeedsQuote(L("name")))
			L("file") << "\"";
		L("file") << "=";
		L("first") = 1;
		L("type") = attrtype(L("con"),L("name"));
		while (L("vals")) {
			if (!L("first"))
				L("file") << ",";
			# NOTE(review): !L("con") is always false here (L("con") is the
			# function argument, later clobbered by getconval below), so the
			# opening "[" for multi-value non-concept attrs never prints,
			# while the closing "]" below does.  Left as-is -- confirm intent.
			else if (L("count") > 1 && !L("con"))
				L("file") << "[";
			if (L("type") == 1) {
				L("num") = getnumval(L("vals"));
				L("file") << str(L("num"));
			} else if (L("type") == 2) {
				if (L("first"))
					L("file") << "[";
				L("con") = getconval(L("vals"));
				L("file") << conceptpath(L("con"));
			} else if (L("type") == 3) {
				L("flt") = getfltval(L("vals"));
				L("file") << str(L("flt"));
			} else {
				L("val") = getstrval(L("vals"));
				if (DisplayValNeedsQuote(L("val")))
					L("file") << "\"";
				if (L("display type") == 0 && strlength(L("val")) > 20) {
					L("shorty") = strpiece(L("val"),0,20);
					L("file") << L("shorty");
					L("file") << "...";
					if (strendswith(L("val"),"\""))
						L("file") << "\"";
				}
				else
					L("file") << str(L("val"));	# FIX: was unconditional
				if (DisplayValNeedsQuote(L("val")))
					L("file") << "\"";
			}
			L("first") = 0;
			L("vals") = nextval(L("vals"));
		}
		if (L("type") == 2 || L("count") > 1)
			L("file") << "]";
		L("first attr") = 0;
		L("attrs") = nextattr(L("attrs"));
	}
}
QuoteIt(L("str")) {
L("new") = 0;
if (!L("str"))
return 0;
if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str"))) {
L("new") = "\"" + L("str") + "\"";
}
return L("new");
}
DisplayValNeedsQuote(L("str")) {
if (!L("str"))
return 0;
if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str"))) {
return 1;
}
return 0;
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
# Return a string of L("num") spaces.  Because NLP++ has no empty
# string, L("num") must be >= 1 (returns one space for num <= 1).
SpacesStr(L("num")) {
	L("n") = 1;
	L("spaces") = " ";
	while (L("n") < L("num")) {
		L("spaces") = L("spaces") + " ";
		L("n")++;
	}
	return L("spaces");
}
PadStr(L("num str"),L("pad str"),L("pad len")) {
L("len") = strlength(L("num str"));
L("pad") = 0;
L("to pad") = L("pad len") - L("len");
while (L("i")++ < L("to pad")) {
L("pad") = L("pad") + L("pad str");
}
L("padded") = L("pad") + L("num str");
return L("padded");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
# Emit a KB-format dictionary attribute record to the G("attrs") stream
# opened by DictionaryStart.  Adds both the word and the attribute name
# to the dictionary, then writes the value typed per L("attrType"):
# "str" -> pst (quoted), "num" -> pnum, "con" -> pcon (concept path).
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
	addword(L("word"));
	addword(L("attrName"));
	G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
	G("attrs") << findwordpath(L("attrName")) << "\n";
	if (L("attrType") == "str")
		G("attrs") << "pst\n" << "\"" << L("value") << "\"";
	else if (L("attrType") == "num")
		G("attrs") << "pnum\n" << str(L("value"));
	else if (L("attrType") == "con")
		G("attrs") << "pcon\n" << conceptpath(L("value"));
	G("attrs") << "\nend ind\n\n";
}
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
@@DECL
|
###############################################
# FILE: XML baseElements.pat #
# SUBJ: Collect the smallest syntactic pieces #
# of an XML file ... starts and ends of #
# tags, entity references, and the like #
# AUTH: Paul Deane #
# CREATED: 11/Jan/01
# DATE OF CURRENT VERSION: 31/Aug/01 #
###############################################
###############################################
# CONTENT INDEX #
# 1. Rules for special items like ampersands #
# greater than etc. plus tag elements #
# 2. Doctype declaration #
# 3. Signals for special tag types including #
# comments and entity references #
###############################################
@CODE
G("root") = findroot() ;
G("tmp") = getconcept(G("root"),"tmp");
G("gramtab") = getconcept(G("tmp"),"gram") ;
#in case someone is so thoughtless as not to specify a doc type
G("DocTypeName") = "XML";
G("EntityName") = "Entities";
G("ElementName") = "Elements";
G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ;
if (G("CurrentDocType") == 0 ) {
makeconcept(G("gramtab"),G("DocTypeName")) ;
G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ;
}
G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ;
if (G("Entities") == 0 ) {
makeconcept(G("CurrentDocType"),G("EntityName")) ;
G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ;
}
G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ;
if (G("Elements") == 0 ) {
makeconcept(G("CurrentDocType"),G("ElementName")) ;
G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ;
}
@@CODE
@PATH _ROOT
###################################
# Rule set 1 #
# Special syntactic elements #
###################################
@RULES
_Ampersand <-
\& [one] ###(1)
_xWILD [one matches=("amp")] ###(2)
\; [one] ###(3)
@@
_LessThan <-
\& [one] ###(1)
_xWILD [one matches=("lt")] ###(1)
\; [one] ###(3)
@@
_GreaterThan <-
\& [one] ###(1)
_xWILD [one matches=("gt")] ###(2)
\; [one] ###(3)
@@
_APos <-
\& [one] ###(1)
_xWILD [one matches=("apos")] ###(2)
\; [one] ###(3)
@@
_Quote <-
\& [one] ###(1)
_xWILD [one matches=("quot")] ###(2)
\; [one] ###(3)
@@
_CommentStart <-
\< [one] ### (1)
\! [one] ### (2)
\- [one] ### (3)
\- [one] ### (4)
@@
_CommentEnd <-
\- [one] ### (1)
\- [one] ### (2)
\> [one] ### (3)
@@
_DoubleHyphen <-
\- [one] ### (1)
\- [one] ### (2)
@@
_StartXML <-
\< [one] ### (1)
\? [one] ### (2)
_xALPHA [s one matches=("xml")] ### (3)
@@
@@RULES
##############################################
# Rule set 2 -- Doc Type Declaration #
##############################################
@POST
#get the name of the document type we're working on here
#and attach that to the tag we're building for the doctype
#statement
# Buffers default to the string "0" when a name part is absent.
G("buffer1") = str(0) ;
G("buffer2") = str(0) ;
G("ElementName") = "Elements" ;
G("EntityName") = "Entities" ;
if (N("$text",5))
G("buffer1") = str(N("$text",5)) ;
if (N("$text",6))
G("buffer2") = str(N("$text",6)) ;
# Doctype name = required first name char (elt 5) + optional rest (elt 6).
if (N("$text",5) && N("$text",6)) {
G("DocTypeName") = G("buffer1") + G("buffer2") ;
}
else if (N("$text",5))
G("DocTypeName") = G("buffer1") ;
else if (N("$text",6))
G("DocTypeName") = G("buffer2") ;
S("DocTypeName") = G("DocTypeName");
# Ensure a KB concept for this doctype exists under the grammar table,
# with "Entities" and "Elements" child containers for later rule sets.
G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ;
if (G("CurrentDocType") == 0 ) {
makeconcept(G("gramtab"),G("DocTypeName")) ;
G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ;
}
G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ;
if (G("Entities") == 0 ) {
makeconcept(G("CurrentDocType"),G("EntityName")) ;
G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ;
}
G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ;
if (G("Elements") == 0 ) {
makeconcept(G("CurrentDocType"),G("ElementName")) ;
G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ;
}
single() ;
@@POST
@RULES
# "<!DOCTYPE name" — elements 5/6 spell the doctype name per XML Name rules.
_StartDocType <-
\< [one] ### (1)
\! [one trig] ### (2)
_xWILD [s one match=("DOCTYPE")] ### (3)
_xWHITE [plus] ### (4)
_xWILD [one matches=("_xALPHA" "_" ":")] ### (5)
_xWILD [star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6)
@@
@@RULES
##############################################
# Rule set 3 -- Signals for specially tagged #
# items like processing instructions and #
# comments #
##############################################
@RULES
# "<?" processing-instruction opener.
_StartProcessingInstruction <- ### (5)
\< [one] ### (1)
\? [one trig] ### (2)
@@
# "?>" processing-instruction closer.
_EndProcessingInstruction <- ### (10)
\? [one] ### (1)
\> [one] ### (2)
@@
# "<![CDATA[" section opener.
_CDStart <-
\< [one] ### (1)
\! [one] ### (2)
\[ [one] ### (3)
_xALPHA [s one matches=("CDATA")] ### (4)
\[ ### (5)
@@
# "]]>" CDATA closer.
_CDEnd <-
\] [one] ### (1)
\] [one] ### (2)
\> [one] ### (3)
@@
# "] >" closing an internal DTD subset.
_EndDocType <-
\] [one] ### (1)
_xWHITE [star] ### (2)
\> [one] ### (3)
@@
# "/>" empty-element tag closer.
_EndEmptyTag <-
\/ [one] ### (1)
\> [one] ### (2)
@@
# ">" generic tag closer.
_EndTag <-
\> [one] ### (1)
@@
# Decimal character reference "&#123;".
_CharRef <-
\& [one] ### (1)
\# [one] ### (2)
_xNUM [one] ### (3)
\; [one] ### (4)
@@
# Hexadecimal character reference "&#x...;".
# NOTE(review): this matches only ONE token after "x", and the match list
# uses "xNUM" without the leading underscore (cf. "_xNUM" elsewhere) —
# likely should be "_xNUM" and allow multiple hex digits; confirm against
# the XML 1.0 CharRef production.
_CharRef <-
\& [one] ### (1)
\# [one] ### (2)
x [one] ### (3)
_xWILD [one matches=("xNUM" "A" "a" "B" "b" "C" "c" "D" "d" "E" "e" "F" "f")] ### (4)
\; [one] ### (5)
@@
# General entity reference "&name;".
_EntityRef <-
\& [one] ### (1)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (2)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (3)
\; [one] ### (4)
@@
# Parameter entity reference "%name;".
_PEReference <-
\% [one] ### (1)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (2)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (3)
\; [one] ### (4)
@@
@@RULES
@POST
#Get the name of the element we are declaring here
S("buffer1") = N("$text",5) ;
S("buffer2") = N("$text",6) ;
# Element name = first name char (elt 5) + optional remainder (elt 6).
if (S("buffer1") != 0 && S("buffer2") != 0 ) {
S("ElementName") = S("buffer1") + S("buffer2") ;
}
else if (S("buffer1") !=0)
S("ElementName") = S("buffer1") ;
else
S("ElementName") = S("buffer2") ;
#record the elements we've identified as part of the DTD
# Idempotent: only create the concept when it is not already present.
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
if (S("CurrentElement") == 0) {
makeconcept(G("Elements"),S("ElementName")) ;
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
}
single() ;
@@POST
@RULES
# "<!ELEMENT name" declaration opener.
_ElementDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("ELEMENT")] ### (3)
_xWHITE [plus] ### (4)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (5)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6)
@@
@@RULES
@RULES
# "<!NOTATION name" declaration opener (no @POST: name is not recorded here).
_NotationDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("NOTATION")] ### (3)
_xWHITE [plus] ### (4)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (5)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6)
@@
@@RULES
@POST
# Rebuild the target element's name from the matched name characters
# and make sure it is registered under the doctype's Elements container.
S("buffer1") = str(N("$text",5)) ;
S("buffer2") = str(N("$text",6)) ;
if (N("$text",5) && N("$text",6)) {
S("ElementName") = S("buffer1") + S("buffer2") ;
}
else if (N("$text",5)) {
S("ElementName") = N("$text",5) ;
}
else if (N("$text",6)) {
S("ElementName") = N("$text",6) ;
}
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
if (S("CurrentElement") == 0) {
makeconcept(G("Elements"),S("ElementName")) ;
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
}
single() ;
@@POST
@RULES
# "<!ATTLIST element" declaration opener.
_AttlistDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("ATTLIST")] ### (3)
_xWHITE [plus] ### (4)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (5)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6)
@@
@@RULES
@RULES
# "<!ENTITY " declaration opener; the entity name itself is parsed later.
_EntityDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("ENTITY")] ### (3)
_xWHITE [plus] ### (4)
@@
@@RULES
|
@NODES _LINE
# Match a specific person name within a line.
# @PRE guards: both name tokens must be capitalized, 5 characters long
# ("Brent" and "Biggs" are each 5 letters).
@PRE
<1,1> cap();
<1,1> length(5);
<3,3> cap();
<3,3> length(5);
@RULES
# Ex: Brent\_Biggs
_humanname <- Brent [s layer=("_firstname")] _xWHITE [star s] Biggs [s layer=("_lastname")] @@
|
@CODE
L("hello") = 0;
@@CODE
@NODES _LINE
# Looking for numbered lines.
# Track a running counter G("num lines"): a line numbered "1" starts the
# sequence; later lines are accepted when they repeat or increment it.
# Matching lines are renamed to _TEXTZONE.
@POST
N("nopos",3) = 1;
L("num") = num(N("$text",2));
if (!G("num lines"))
{
# No sequence yet: start it only on "1.", but rename regardless.
if (L("num") == 1)
G("num lines") = 1;
xrename("_TEXTZONE");
}
else if (
(L("num") == G("num lines"))
|| (L("num") == G("num lines") + 1) )
{
G("num lines") = L("num");
xrename("_TEXTZONE");
}
@RULES
# A line starting with "N." — the period is grouped as a sentence-end candidate.
_xNIL <-
_xSTART
_xNUM
\. [gp=_qEOS]
@@
# A line that is nothing but hyphens (possibly padded) is a horizontal rule.
@POST
xrename("_HORIZLINE");
@RULES
_xNIL <-
_xSTART
_xWHITE [star]
\- [plus]
_xWHITE [star]
_xEND
@@
|
@CODE
# This pass applies only to pre-tagged input (word/TAG pairs).
if (!G("pretagged"))
exitpass();
@@CODE
@PATH _ROOT _LINE
# Strip leading whitespace from the line.
@POST
excise(1,1);
@RULES
_xNIL <-
_xWHITE [plus]
@@
# Group everything up to a _slash as the word text of a word/TAG pair.
@RULES
_pos <-
_xWILD [fail=(_slash _xWHITE) gp=_text]
_slash
@@
# Anything left over did not fit the word/TAG shape; log it when the
# error channel is enabled.
@POST
if (G("error"))
"err.txt" << "[Bad tagging: " << phrasetext() << "\n";
@RULES
_xNIL <-
_xANY [plus]
@@
|
@NODES _LINE
# Recognize pager phone-number phrases. The length(5) @PRE on each rule
# pins the literal "pager"/"Pager" token (5 characters) at the indicated
# element position.
@PRE
<1,1> length(5);
@RULES
# Ex: pager:\_(
_phonePagerPhrase <- _xWILD [min=1 max=1 s layer=("_Pager") match=("pager" "Pager")] \: [s] _xWHITE [star s] _phoneNumber [s] @@
@PRE
<2,2> length(5);
@RULES
# Ex: (pager)\_(
_phonePagerPhrase <- _xWILD [min=1 max=1 s match=("_openPunct" "\(")] _xWILD [min=1 max=1 trig s layer=("_Pager") match=("pager" "Pager")] _xWILD [min=1 max=1 s match=("_closePunct" "\)")] _xWHITE [star s] _phoneNumber [s] @@
@PRE
<1,1> length(5);
@RULES
# Ex: pager\_(
_phonePagerPhrase <- _xWILD [min=1 max=1 s layer=("_Pager") match=("pager" "Pager")] _xWHITE [star s] _phoneNumber [s] @@
@PRE
<4,4> length(5);
@RULES
# Ex: number (pager)
_phonePagerPhrase <- _phoneNumber [s] _xWHITE [star s] _xWILD [min=1 max=1 s match=("_openPunct" "\(")] _xWILD [min=1 max=1 trig s layer=("_Pager") match=("pager" "Pager")] _xWILD [min=1 max=1 s match=("_closePunct" "\)")] @@
@PRE
<3,3> length(5);
@RULES
# Ex: number pager
_phonePagerPhrase <- _phoneNumber [s] _xWHITE [star s] _xWILD [min=1 max=1 trig s layer=("_Pager") match=("pager" "Pager")] @@
|
@CODE
# Skip for pre-tagged input and when highlighting is off.
if (G("pretagged"))
exitpass();
if (!G("hilite")) # 10/25/10 AM.
exitpass(); # 10/25/10 AM.
G("hello") = 0;
@@CODE
# Traverse the whole tree.
@MULTI _ROOT
@POST
# Active/passive output with clause. #
# When verbose, log each clause's voice ("active"/"passive"/"novoice",
# or "noverb" when the clause has no verb group) to clause.txt.
if (G("verbose"))
{
if (N("vg")) # If there are verbs in clause.
{
if (N("voice") == "active")
"clause.txt" << "active: ";
else if (N("voice") == "passive")
"clause.txt" << "passive: ";
else
"clause.txt" << "novoice: ";
}
else
"clause.txt" << "noverb: ";
"clause.txt" << N("$text") << "\n";
}
noop(); # Merely matching the rule will set text to green.
@RULES
_xNIL <- _xWILD [one match=(
_clause
)] @@
|
@CODE
prlit("zdump.txt", "Unreduced unknown words in Caps\n");
prlit("zdump.txt", "-------------------------------\n");
@@CODE
@PATH _ROOT _LINE _Caps
# Preceding passes should look for some capitalized phrase types:
# In general, look for head word at the end, as main thing.
# People names.
# Areas of study, endeavor.
# School designator.
# Company.
# Job title.
# Geo location.
# (May need a pass for each of these!)
# This pass does some general characterizations.
# Accumulates counts on the enclosing _Caps node: caplen, letters,
# unknowns, unreduced, allcaps. Rules are tried in order, so each cap
# word is counted by exactly one of the four cases below.
# Single Letters. An important category. #
@PRE
<1,1> length(1)
@POST
++X("caplen");
++X("letters");
++X("unreduced"); # Not really, but keeps subsequent accounting ok.
@RULES
_xNIL <- _xCAP @@
# UNREDUCED UNKNOWN WORDS. #
#@PRE
#<1,1> unknown()
@CHECK # 09/02/01 AM.
# Only count words the spell dictionary does not know.
if (spellword(N("$text",1)))
fail();
@POST
++X("caplen");
++X("unknowns");
if (N("$allcaps"))
++X("allcaps"); # 01/10/00 AM.
N("txt",1) = N("$text", 1); # Workaround to print text!
#fprintnvar("zdump.txt", "txt", 1)
"zdump.txt" << N("txt",1);
prlit("zdump.txt", "\n");
@RULES
_xNIL <- _xCAP @@
# UNREDUCED KNOWN WORDS. #
@POST
++X("caplen");
++X("unreduced");
if (N("$allcaps"))
++X("allcaps"); # 01/10/00 AM.
@RULES
_xNIL <- _xCAP @@
# REDUCED WORDS. ("Known" either from dict lookup or from
# being reduced in the resume analyzer grammar.)
@POST
++X("caplen");
if (N("$allcaps"))
++X("allcaps"); # 01/10/00 AM.
# noop() # Implicit.
@RULES
_xNIL <- _xCAP [s] @@
|
@DECL
###############################################
# General functions
###############################################
# Return the child concept of "concept" named "name",
# creating it first if it does not already exist.
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
# Add string value "value" to attribute "attr" unless it is already there.
# NOTE(review): strval() inspects only the first value of the attribute,
# so a match against a later value would still add a duplicate — confirm
# whether multi-valued attrs are expected here.
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value") && strval(L("concept"),L("attr")) != L("value"))
addstrval(L("concept"),L("attr"),L("value"));
}
# Add numeric value "value" to attribute "attr" unless already present.
# Scans all existing values. Returns 1 if added, 0 if it was a duplicate.
# Logs activity to unique.txt.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << str(L("value")) << " " << conceptpath(L("concept")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
"unique.txt" << " value: " << str(L("num")) << "\n";
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
# Add concept value "value" to attribute "attr" unless a value with the
# same concept path is already present. Returns 1 if added, 0 otherwise.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
# Copy string attribute "attr" from one concept to another, but only
# when the source has a value and the target does not (no overwrite).
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
# Like CopyAttr, but the source and target attribute names may differ.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
# Concept-valued variant of CopyAttr (same no-overwrite behavior).
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
# Return the value list of attribute "attr" on "con", or 0 when the
# attribute does not exist.
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
# Walk the child list of "parent" and return its last child.
# NOTE(review): when "parent" has no children, L("last") is never set
# and the (presumably 0/empty) unset value is returned — confirm callers
# handle that.
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
# Create a new child of "con" with a uniquely numbered name derived
# from "count name" (e.g. "item1", "item2", ...).
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# Increment (or initialize to 1) the numeric attribute "countname" on
# "con" and return the new count.
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
# Bump the counter attribute named "root" and return "root<count>".
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip any trailing digits from "name" (e.g. "item12" -> "item").
# Returns 0 when the whole name is digits.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functions
###############################################
# Serialize the KB subtree rooted at "con" to <app>/kb/<file>.kb,
# logging success or failure to kb.txt.
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# Load <app>/kb/<filename>.kb into the KB, logging the outcome to kb.txt.
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << " Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << " Taken FAILED: " << L("path") << "\n";
}
}
# Count the immediate children of concept "con".
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
# Pretty-print the KB subtree under "top con" to the per-pass .kbb file.
# "full" selects the verbose one-attribute-per-line format.
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
# Write a banner comment to the per-pass .kbb file.
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# Build the per-pass output name "anaNNN.kbb", zero-padding the pass
# number to three digits.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
# Depth-first print of "con" and its siblings, indenting by "level".
# At level 0 only the starting concept is printed (returns before
# visiting its siblings).
DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) {
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full"));
}
if (L("level") == 0)
return 0;
L("con") = next(L("con"));
}
}
# Print "attr=[v1,v2,...]" pairs for "con". Concept values print as
# paths; long strings are truncated to 20 chars with "..."; numeric
# values are printed when no string/concept value applies.
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
# (always returns at least one space).
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
# Open (and truncate) the user dictionary KB command file for writing.
DictionaryClear() {
G("dictionary path") = G("$apppath") + "\\kb\\user\\dictionary.kb";
G("dictionary") = openfile(G("dictionary path"));
}
# Emit KB commands that add "word" (if new) and attach an attribute to
# it. "attrType" selects the value command: "str", "num", or "con".
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
L("file") = G("dictionary");
if (!dictfindword(L("word")))
L("file") << "add word \"" + L("word") + "\"\n";
L("file") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
L("file") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
L("file") << "pst\n" << L("value");
else if (L("attrType") == "num")
L("file") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
L("file") << "pcon\n" << conceptpath(L("value"));
L("file") << "\nend ind\n\n";
}
# Terminate the command stream and close the dictionary file.
DictionaryEnd() {
G("dictionary") << "\nquit\n\n";
closefile(G("dictionary"));
}
@@DECL
|
@CODE
# (Re)initialize the top-level icd_terms concept, discarding any
# children left from a previous run.
# NOTE(review): getconcept() typically creates the concept when absent,
# which would make the makeconcept branch redundant — confirm.
G("icd_terms") = getconcept(findroot(),"icd_terms");
if (! G("icd_terms")) {
G("icd_terms") = makeconcept(findroot(),"icd_terms");
}
rmchildren(G("icd_terms"));
@@CODE |
@CODE
L("hello") = 0;
@@CODE
@NODES _sent
# np that clause clause
# that
# Need a verb in last clause.
# "The snail that ate Manhattan, our favorite town, is green."
# that
# When the final clause (elt 7) has no verb group yet, mark it as
# needing a verb and record the expected voice/inflection defaults.
@CHECK
if (N("vg count",7))
fail();
@POST
N("need verb",7) = 1;
N("need voice",7) = "active"; # If ambiguous.
N("need inf",7) = "VBP";
N("need edn",7) = "VBD";
@RULES
_xNIL <-
_clause
that [s]
_clause
_xWILD [one match=( \, _dbldash)]
_clause # Embedded clause.
_xWILD [one match=( \, _dbldash)]
_clause # Needs verb.
@@
# OBSOLETE. #
# clause clause
#@PRE
#<2,2> varz("ellipted-that");
#@CHECK
# if (N("voice",2))
# fail();
# if (N("voice",1) != "passive")
# fail();
# S("vg") = N("vg node",2);
# if (!S("vg"))
# {
# "err.txt" << "Clause has no vg=" << N("$text",2) << "\n";
# fail();
# }
# S("v") = pnvar(S("vg"),"verb node");
# if (!S("v"))
# {
# if (G("error"))
# "err.txt" << "Vg has no verb=" << N("$text",2) << "\n";
# fail();
# }
#@POST
# N("qsent75 c-c",1) = 1;
# if (!pnvar(S("v"),"mypos"))
# {
# L("vc") = vconj(S("v"));
# "vc.txt" << pnvar(S("v"),"$text") << ","<< L("vc") << "\n";
# if (L("vc") == "-edn")
# {
# fixvg(S("vg"),"passive","VBN");
# N("voice",2) = "passive";
# }
# }
#@RULES
#_xNIL <-
# _clause
# _clause [lookahead]
# @@
# This rule is too loose. Need constraints before and after.
# "clause CONJ clause": when the first clause's verb already has a POS
# assignment and the third clause's verb does not, propagate the first
# verb's conjugation/voice onto the third (parallel-structure repair).
@CHECK
S("vg1") = N("vg node",1);
S("vg3") = N("vg node",3);
if (!S("vg1") || !S("vg3"))
{
# "err.txt" << "no vg: " << phrasetext() << "\n";
fail();
}
S("v1") = pnvar(S("vg1"),"verb node");
S("v3") = pnvar(S("vg3"),"verb node");
if (!S("v1") || !S("v3"))
{
if (G("error"))
"err.txt" << "no v: " << phrasetext() << "\n";
fail();
}
# Require: third verb still untagged, first verb already tagged.
if (pnvar(S("v3"),"mypos"))
fail();
if (!pnvar(S("v1"),"mypos"))
fail();
@POST
L("pos") = pnvar(S("v1"),"mypos");
L("voice") = pnvar(S("vg1"),"voice");
# Bare infinitive next to an np subject reads as present tense.
if (L("pos") == "VB" && N("first name",3) == "_np")
L("pos") = "VBP";
if (samevconj(S("v1"),S("v3")))
{
fixvg(S("vg3"),L("voice"),L("pos"));
N("voice",3) = pnvar(S("vg3"),"voice");
}
N("qsent75 c-and-c",1) = N("qsent75 c-and-c",3) = 1;
@RULES
_xNIL <-
_clause
_conj
_clause
_xWILD [one lookahead fail=(_dbldash)]
@@
# clause , clause , clause
# Sentence-initial three-clause series: when the middle clause is a bare
# noun pattern and clause 4 looks ellipted, mark the final clause as
# needing a verb.
@CHECK
if (N("vg count",6))
fail();
if (N("vg count",2))
fail();
if (N("pattern",2) != "n")
fail();
if (!elliptedpassive(N(4))
&& !elliptedactive(N(4)) )
fail();
@POST
N("need verb",6) = 1;
N("need voice",6) = "active"; # If ambiguous.
N("need inf",6) = "VBP";
N("need edn",6) = "VBD";
@RULES
_xNIL <-
_xSTART
_clause
\,
_clause
\,
_clause
@@
# Try simple stupid.
# Fallback: a verbless clause with a "ving" pattern still needs a verb.
@CHECK
if (N("vg count"))
fail();
if (N("pattern") != "ving")
fail();
@POST
N("need verb") = 1;
@RULES
_xNIL <-
_clause
@@
|
@PATH _ROOT _EntityDecl
# PUBLIC external IDs: flag as public and record a URI.
# The two rules differ only in whether the second literal was parsed as
# a _PubidLiteral or a _SystemLiteral.
# NOTE(review): "URI" is taken from element 3 (the public id literal),
# not element 5 (the system literal) — confirm this is intended.
@POST
S("public")=1 ;
S("URI") = N("textValue",3) ;
single() ;
@@POST
@RULES
_ExternalID <-
_xWILD [one matches=("PUBLIC")] ### (1)
_whiteSpace [one] ### (2)
_PubidLiteral [one] ### (3)
_whiteSpace [one] ### (4)
_PubidLiteral [one] ### (5)
@@
_ExternalID <-
_xWILD [one matches=("PUBLIC")] ### (1)
_whiteSpace [one] ### (2)
_PubidLiteral [one] ### (3)
_whiteSpace [one] ### (4)
_SystemLiteral [one] ### (5)
@@
@@RULES
# SYSTEM external IDs: non-public; the single literal is the URI.
@POST
S("public")=0 ;
S("URI") = N("textValue",3) ;
single() ;
@@POST
@RULES
_ExternalID <-
_xWILD [min=1 max=1 matches=("SYSTEM")] ### (1)
_whiteSpace [opt] ### (2)
_PubidLiteral [one] ### (3)
@@
_ExternalID <-
_xWILD [min=1 max=1 matches=("SYSTEM")] ### (1)
_whiteSpace [opt] ### (2)
_SystemLiteral [one] ### (3)
@@
@@RULES
|
@NODES _ROOT
# Minimal example rule: match the literal token "Hello" (no reduction).
@RULES
_xNIL <-
Hello ### (1)
@@
|
@NODES _ROOT
# Mark English stopwords: set the "stopword" variable on each matched
# token (no reduction). The word list matches the classic NLTK English
# stopword list.
@POST
N("stopword", 1) = 1;
@RULES
_xNIL <-
_xWILD [one matches=(
i
me
my
myself
we
our
ours
ourselves
you
your
yours
yourself
yourselves
he
him
his
himself
she
her
hers
herself
it
its
itself
they
them
their
theirs
themselves
what
which
who
whom
this
that
these
those
am
is
are
was
were
be
been
being
have
has
had
having
do
does
did
doing
a
an
the
and
but
if
or
because
as
until
while
of
at
by
for
with
about
against
between
into
through
during
before
after
above
below
to
from
up
down
in
out
on
off
over
under
again
further
then
once
here
there
when
where
why
how
all
any
both
each
few
more
most
other
some
such
no
nor
not
only
own
same
so
than
too
very
s
t
can
will
just
don
should
now
)]
@@
|
@PATH _ROOT _bodyZone _trZone
# Copy the "value" of the first two table cells up to the enclosing
# _trZone as value1/value2 (rows with more cells only capture the first
# two matched).
@POST
X("value1") = N("value",1);
X("value2") = N("value",2);
@RULES
_xNIL <-
_tdZone
_tdZone
@@
|
@PATH _ROOT _experienceZone _experienceInstance _LINE _experiencePart _Caps
# Lift the matched job-title text onto the enclosing _Caps node.
# NOTE(review): the empty @RULES marker directly before @POST looks like
# a leftover; it appears harmless but confirm it is intentional.
@RULES
@POST
X("job title") = N("$text");
@RULES
_xNIL <- _jobTitle [s] @@
|
@NODES _ROOT
# Absorb trailing loose text into the preceding _section, preserving the
# section's title on the merged node.
@POST
S("section_title") = N("section_title", 1);
merge();
@RULES
_section <-
_section [one]
_looseText [plus]
@@
|
Subsets and Splits