@PATH _ROOT _LINE
@POST
# Want to make elt 3 optional and say:
# if (ispresent(elt 3))
N("school type") = "undergraduate";
listadd(5,1,"true");
@RULES
_xNIL <-
_xWILD [s one match=(overall cumulative)]
_xWHITE [s star]
undergraduate [s]
_xWHITE [s star]
_GPA [s]
@@
@POST
listadd(3,1,"true");
@RULES
_xNIL <-
_xWILD [s one match=(overall cumulative)]
_xWHITE [s star]
_GPA [s]
@@
@POST
N("major gpa",3) = 1; # GPA for academic major.
listadd(3,1,"true");
@RULES
_xNIL <-
_xWILD [s one match=(major departmental)]
_xWHITE [s star]
_GPA [s]
@@
# GPA in major
@POST
N("major gpa",1) = 1; # GPA for academic major.
listadd(1,6,"true");
@RULES
_xNIL <-
_GPA [s]
\. [s opt]
_xWHITE [s star]
in [s]
_xWHITE [s star]
major [s]
@@
@POST
N("minor gpa",3) = 1; # GPA for academic minor.
listadd(3,1,"true");
@RULES
_xNIL <-
minor [s]
_xWHITE [s star]
_GPA [s]
@@
|
@NODES _ROOT
# LINK TO PRE ACTIONS:
# https://visualtext.org/help/NLP_PP_Stuff/AT-PRE_Actions.htm
@PRE
<4,4> length(4);
<4,4> numrange(1700,2100);
@RULES
_date <-
_month ### (1)
_xNUM ### (2)
\, [opt] ### (3)
_xNUM ### (4)
@@
|
@PATH _ROOT _areaCodes
@POST
L("type") = pnvar(pnup(N(1)),"type");
if (L("type") == "areaCodes") {
"codes.txt" << N("$text",1) << "\n";
X("codes")[X("c")++] = N("$text",1);
}
@RULES
_xNIL <-
_xNUM ### (1)
@@
|
@CODE
if (G("pretagged"))
exitpass();
if (!G("hilite")) # 10/25/10 AM.
exitpass(); # 10/25/10 AM.
G("hello") = 0;
@@CODE
@PATH _ROOT _TEXTZONE _sent _clause
@CHECK
if (N("sem") != "location"
&& !N("sem location"))
fail();
@POST
if (G("verbose"))
"location.txt" << N("$text") << "\n";
# Registering in clause and sent.
registerx(X(4),"location",N("$text"));
registerx(X(3),"location",N("$text"));
noop(); # Merely matching the rule will set text to green.
@RULES
_xNIL <- _xWILD [one match=(
_advl
_np
)] @@
|
@PATH _ROOT _LINE _language
@RULES
_lang <-
_xALPHA ### (1)
\- ### (2)
_xALPHA ### (3)
@@
@POST
noop();
@RULES
_xNIL <-
_xALPHA [gp=(_lang)] ### (1)
\, ### (2)
_xALPHA [gp=(_lang)] ### (3)
@@
_xNIL <-
_xSTART ### (1)
_xALPHA [gp=(_lang)] ### (2)
@@
|
@NODES _ROOT
@RULES
_documentation <-
_xSTART ### (1)
_xWILD [fail=(_group)] ### (2)
@@
|
@DECL
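# SplitWord: split a camelCase-style word by inserting a space before each
# uppercase letter and capitalizing the first letter.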
SplitWord(L("word")) {
L("len") = strlength(L("word"));
L("i") = 0;
L("split") = 0;
while (L("i") < L("len")) {
L("c") = strpiece(L("word"),L("i"),L("i"));
if (strisupper(L("c")))
L("split") = L("split") + " ";
if (!L("i"))
L("c") = strtoupper(L("c"));
L("split") = L("split") + L("c");
L("i")++;
}
return L("split");
}
@@DECL |
@NODES _termEntry
@POST
S("base") = N("$text", 3);
X("base") = N("$text", 3);
excise(4,4);
excise(1,2);
single();
@RULES
_base <-
base ### (1)
\= ### (2)
_xWILD [fails=(\n \r)] ### (3)
_xWILD [one matches=(\n \r)] ### (4)
@@ |
@NODES _LINE
@RULES
_degree <- _xWILD [s one match=( _degreePhrase _degreeWord )] @@
_city <- _xWILD [s one match=( _cityPhrase _cityWord )] @@
|
@NODES _ROOT
@RULES
_header <-
_xWILD [plus match=(\# \-)] ### (1)
_xALPHA ### (2)
@@
@RULES
_np <-
_det ### (1)
_xWILD [match=(_adj)] ### (2)
_xWILD [trig plus match=(_noun)] ### (3)
@@
@POST
singler(2,2);
@RULES
_endSent <-
_xWILD [s one match=(_xALPHA _xNUM)] ### (1)
_xWILD [one trig match=(\. \? \!)] ### (2)
_xWILD [s one match=(_xWHITE _xALPHA _xNUM _xEND _xCTRL)] ### (3)
@@
|
@NODES _LINE
#@CHECK
# Look in merged lines only. (Would like a more efficient
# way, rather than traversing the whole unmerged line.)
# X("merged");
@POST
excise(2,2); # Zap the indent.
@RULES
_xNIL <- \n [s] _whtINDENT [s] @@
_xNIL <- \n [s] _xWHITE [s star] @@ # 12/03/99 AM.
@RULES
_whtSEP [base] <- \t [s plus] @@
|
# Find a string attribute attrNameStr in the hierarchy starting at the concept childConcept, searching upwards. The search continues until the attribute is found or the concept named topConcept is reached.
@CODE
if (findconcept(findroot(),"ontology"))
rmconcept(findconcept(findroot(),"ontology"));
G("ontology") = makeconcept(findroot(), "ontology");
G("animal") = makeconcept(G("ontology"),"animal");
G("human") = makeconcept(G("animal"),"human");
G("man") = makeconcept(G("human"),"man");
addstrval(G("man"),"gender", "male");
G("bachelor") = makeconcept(G("man"),"bachelor");
G("result") = inheritval(G("bachelor"), "gender", G("man"));
"output.txt" << "bachelor's gender: " << G("result") << "\n"; |
@PATH _ROOT _LINE
#@PRE
#<1,1> unknown()
@CHECK # 09/02/01 AM.
if (spellword(N("$text",1)))
fail();
@POST
++X("nunknowns");
# noop()
@RULES
_xNIL <- _xALPHA [s] @@
|
@CODE
G("out") = cbuf();
G("sentence") = down(G("parse"));
while (G("sentence"))
{
G("object") = down(G("sentence"));
G("out") << "-------------------------------------------------------------\n";
G("out") << "\"" << strwrap(strval(G("sentence"),"text"),60) << "\"\n\n";
while (G("object"))
{
G("printed") = 0;
G("subobject") = down(G("object"));
if (strval(G("object"),"type") == "company")
{
if (G("subobject"))
G("out") << "Company: " << conceptname(G("object")) << "\n";
}
else
G("out") << "Action: " << conceptname(G("object")) << "\n";
G("last subobject") = " ";
while (G("subobject"))
{
G("attributes") = findattrs(G("subobject"));
while (G("attributes"))
{
G("values") = attrvals(G("attributes"));
while (G("values"))
{
G("out") << " " << conceptname(G("subobject")) << ": ";
G("out") << "(" << attrname(G("attributes")) << ") ";
if (getstrval(G("values")))
G("out") << getstrval(G("values")) << "\n";
else
G("out") << getnumval(G("values")) << "\n";
G("printed") = 1;
G("values") = nextval(G("values"));
}
G("attributes") = nextattr(G("attributes"));
}
if (G("last subobject") != conceptname(G("subobject")))
{
G("out") << "\n";
G("printed") = 0;
}
G("last subobject") = conceptname(G("subobject"));
G("subobject") = next(G("subobject"));
}
if (G("printed"))
G("out") << "\n";
G("object") = next(G("object"));
}
G("sentence") = next(G("sentence"));
}
@@CODE |
@PATH _ROOT _LINE _countryText
@POST
L("country") = N("$text");
L("country") = strsubst(L("country"),"[",0);
L("country") = strsubst(L("country"),"]",0);
X("country",2) = L("country");
X("con",2) = getconcept(G("countries"),L("country"));
if (X("full",2))
addstrval(X("con",2),"full",X("full",2));
single();
@RULES
_country <-
_xWILD [plus fail=(_parens)] ### (1)
@@
|
@NODES _LINE
@RULES
_saintName <-
_xWILD [s min=1 max=1 matches=(
Agnes
Albert
Ambrose
Andrew
Andrews
Anselm
Anthony
Aquinas
Augustine
Bonaventure
Benedict
Boniface
Catherine
Clare
Cloud
Edward
Elizabeth
Francis
John
Joseph
Lawrence
Leo
Louis
Martin
Mary
Michael
Norbert
Olaf
Patrick
Paul
Peter
Pierre
Rose
Scholastica
Thomas
Vincent
Virgil
Xavier
)]
@@
_humanName <-
_humanNamePart [s layer = (_firstName)]
_xWHITE [s]
_jobTitleRoot [s layer = (_lastName)]
@@
|
@DECL
###############################################
# General functions
###############################################
AddUniqueCon(L("concept"),L("name")) {
"debug.txt" << conceptpath(L("concept")) << "\n";
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("str") = getstrval(L("val"));
if (L("str") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addstrval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
AddUniqueNum(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
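# StripEndDigits: remove trailing digits from a count-style name
# (e.g. "etymology3" -> "etymology"); returns 0 if the name is all digits.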
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functions
###############################################
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << " Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << " Taken FAILED: " << L("path") << "\n";
}
}
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
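# DisplayKB(con, full): write the concept subtree rooted at con to the
# per-pass anaNNN.kbb file; full = 1 prints each attribute on its own line.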
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) {
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full"));
}
if (L("level") == 0)
return 0;
L("con") = next(L("con"));
}
}
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
addword(L("word"));
addword(L("attrName"));
G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
G("attrs") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
G("attrs") << "pst\n" << "\"" << L("value") << "\"";
else if (L("attrType") == "num")
G("attrs") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
G("attrs") << "pcon\n" << conceptpath(L("value"));
G("attrs") << "\nend ind\n\n";
}
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
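# Usage sketch (placeholder word/attribute names):
#   DictionaryStart();
#   DictionaryWord("dog", "animal", 1, "num");
#   DictionaryWord("dog", "sound", "bark", "str");
#   DictionaryEnd();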
@@DECL
|
@PATH _ROOT _labelEntry
@RULES
_xNIL <-
	_xALPHA	### (1)
@@
|
@PATH _ROOT _report _headerZone
@POST
L("radLexTerms") = GetRadLexTerms(N(2));
"radLexTerms.txt" << N("$text", 1) << "\n";
if(L("radLexTerms")) {
L("count") = 0;
while (L("count") < arraylength(L("radLexTerms"))) {
"radLexTerms.txt" << "\t" << L("radLexTerms")[L("count")] << "\n";
L("count") = L("count") + 1;
}
}
@RULES
_xNIL <-
_header
_xWILD [one match=(_xALPHA _xNUM)] ### (2)
@@
|
@CODE
G("ontology") = findconcept(findroot(),"ontology");
if (G("ontology"))
exitpass(); # One-time load per VisualText session.
# While developing, wipe it out each time.
#if (G("ontology"))
# rmconcept(G("ontology"));
G("ontology") = makeconcept(findroot(),"ontology");
###### ONTOLOGY TOP LEVELS ###########
L("eventive") = makeconcept(G("ontology"),"eventive");
L("geo") = makeconcept(G("ontology"),"geo");
L("body_of_water") = makeconcept(L("geo"),"body_of_water");
L("geo_political") = makeconcept(L("geo"),"geo_political");
###### DETAILS #######################
L("con") = makeconcept(L("body_of_water"),"ocean");
L("con") = makeconcept(L("geo_political"),"city");
loadattr("resort","nounsem",L("con"));
#######EVENTIVE
loadattr("aftermath","nounsem",L("eventive"));
@@CODE
|
@NODES _LINE
@POST
X("up") = 1;
single();
@RULES
_list <-
\# ### (1)
_xWILD [fail=(_xEND)]
_xEND
@@
|
@PATH _ROOT _paragraph _sentence
@POST
N("possessive",1) = 1;
excise(2,3);
@RULES
_xNIL <-
_xWILD [one match=(_title _agency)] ### (1)
_quote ### (2)
s ### (3)
@@
|
@CODE
# Sentence counter. #
G("sentence count") = 0;
@NODES _ROOT
@POST
++G("sentence count"); # 05/17/01 AM.
S("name") = "sentence" + str(G("sentence count")); # 05/17/01 AM.
S("object") = makeconcept(G("parse"),S("name")); # 05/17/01 AM.
addstrval(S("object"),"text",N("$text",1));
single();
@RULES
_sentence [unsealed] <-
_xWILD [s plus fails=(\. \? \! _paragraphSeparator)] ### (1)
_xWILD [s one matches=(\. \? \!)] ### (2)
@@
_sentence [unsealed] <-
_xWILD [s plus fails=(_paragraphSeparator)] ### (1)
@@
|
@NODES _LINE
@PRE
<1,1> var("noun");
<1,1> varz("stop");
@POST
L("text") = N("$text",1);
addstrval(X("con"), "nouns", L("text"));
L("word_exists") = findconcept(findconcept(G("words"), X("type")), L("text"));
if (!L("word_exists")) {
"icd_keywords.dict" << L("text") << " key_word=1\n";
}
L("con") = AddUniqueCon(findconcept(G("words"), X("type")), L("text"));
addstrval(L("con"), "code", X("code"));
noop();
@RULES
_xNIL <-
_xALPHA [one] ### (1)
@@
@PRE
<1,1> var("adj,noun");
@POST
L("text") = N("$text",1);
addstrval(X("con"), "nouns", L("text"));
L("word_exists") = findconcept(findconcept(G("words"), X("type")), L("text"));
if (!L("word_exists")) {
"icd_keywords.dict" << L("text") << " key_word=1\n";
}
L("con") = AddUniqueCon(findconcept(G("words"), X("type")), L("text"));
addstrval(L("con"), "code", X("code"));
noop();
@RULES
_xNIL <-
_xALPHA [one] ### (1)
@@
@PRE
<1,1> var("adj,noun,verb");
@POST
L("text") = N("$text", 1);
addstrval(X("con"), "nouns", L("text"));
L("d_word_exists") = findconcept(findconcept(G("words"), "diagnosis"), L("text"));
L("p_word_exists") = findconcept(findconcept(G("words"), "procedure"), L("text"));
if (!L("dword_exists") &) {
"icd_keywords.dict" << L("text") << " key_word=1\n";
}
L("con") = AddUniqueCon(findconcept(G("words"), X("type")), L("text"));
addstrval(L("con"), "code", X("code"));
noop();
@RULES
_xNIL <-
_xALPHA [one] ### (1)
@@ |
@CODE
# SaveKB("mykb.kbb",G("codes"),2);
DisplayKB(G("codes"), 1);
DisplayKB(G("words"), 1);
kbdumptree(findroot(), "icd_9.kbb");
@@CODE |
@NODES _LINE
@POST
singler(3,3)
@RULES
_CompleteSchoolName [base] <-
_xWILD [s one match=( _xSTART The at from in _xPUNCT )]
_xWHITE [star s]
_SchoolNamePhrase [t]
@@
# Joe (SchoolNamePhrase University) -> ModSchoolPhrase(Joe University)
@PRE
<1,1> cap()
@RULES
_SchoolNamePhrase [base] <-
_xALPHA [one s except=(
_jobTitleRoot # 11/30/99 AM.
_degree _degreePhrase _major _minor _gpa
_DateRange _SingleDate at from in)] # 10/09/99 PS
_xWHITE [one s]
_SchoolNamePhrase [t]
@@
|
@NODES _LINE
@PRE
<3,3> lowercase();
@POST
X("word") = N("$text",3);
"words.txt" << N("$text",3) << "\n";
@RULES
_xNIL <-
_xSTART ### (1)
_xWILD [match=(\{ \[ _xWHITE)] ### (2)
_xWILD [plus match=(_xALPHA \_ \-)] ### (3)
@@
|
# Remove the two _adjs nodes from the parse tree, merging their children under a new _adjs node
@POST
merge();
@RULES
_adjs <- _adjs _adjs @@ |
@CODE
G("codes") = getconcept(findroot(),"codes");
G("words") = getconcept(findroot(),"words");
G("matches") = getconcept(findroot(),"matches");
G("order") = getconcept(findroot(),"order");
G("icd11") = getconcept(findroot(),"ICD11");
@@CODE |
@NODES _NLPPP
# Catch the start of a function call here, so it won't be grabbed by
# expression grammar. #
# Added L local var reference. #
@POST
fncallstart()
single()
@RULES
_VARLIST [base] <-
_xWILD [s one match=( s G N X P L ) layer=(_VARNAME)]
\( @@
# Eg, user::func() #
@POST
scopefncallstart(1,4)
single()
@RULES
_FNCALLLIST [base] <- _LIT \: \: _LIT [layer=(_FNNAME)] \( @@
@POST
fncallstart()
single()
@RULES
_FNCALLLIST [base] <- _LIT [layer=(_FNNAME)] \( @@
@POST
movesem(2) # Move expr semantic object up the tree.
single()
@RULES
# NOTE: Need a better action to grab the num, str.
_EXPR <- \( _xWILD [s one match=( _EXPR _NUM _FLOAT _STR )] \) @@
# NOTE: Ambiguity with _LIST must be resolved.
@POST
rfaexpr(1,2,3)
# single() #
singler(1,3) # 08/01/00 AM.
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( \* \/ \%
_opCONF # 12/17/99 AM.
)]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one fail=(_opINC _opDEC)] # 08/01/00 AM.
@@
# Handling precedence. That's why these rules look funny.
@POST
rfaexpr(1,2,3)
singler(1,3)
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( \+ \- )]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF ) except=( \/ \* \%
_opCONF # 12/17/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
@POST
rfaexpr(1,2,3)
singler(1,3)
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( \< \> _opLE _opGE _opEQ _opNEQ )]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF ) except=( \/ \* \% \+ \-
_opCONF # 12/17/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
@POST
rfaexpr(1,2,3)
singler(1,3)
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( _opAND _opOR )]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF )
except=( \/ \* \% \+ \- \< \> _opLE _opGE _opEQ _opNEQ
_opCONF # 12/17/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
# LOWEST PRECEDENCE of any operator except output op (<<).
_EXPR <-
_VAR [s]
\= [s]
_xWILD [s one match=( _EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF )
except=( \/ \* \% \+ \- \< \> _opLE _opGE _opEQ _opNEQ
_opAND _opOR
_opCONF # 12/17/99 AM.
\= # To associate right to left. # 12/31/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
# Output operator! #
# LOWEST PRECEDENCE of any operator.
_EXPR <-
_xWILD [s one match=(_STR _EXPR)]
_opOUT [s]
_xWILD [s one match=( _EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF )
except=( \/ \* \% \+ \- \< \> _opLE _opGE _opEQ _opNEQ
_opAND _opOR
_opCONF
\=
_opINC _opDEC # 08/01/00 AM.
)]
@@
@POST
rfaunary(1,2)
singler(1,2)
@RULES
# Unary operators.
# Highest precedence, apart from post operators.
_EXPR <- _xWILD [s one match=( _opINC _opDEC )]
_VAR [s]
_xWILD [s one match=( _xANY _xEND _xEOF) except=( _opINC _opDEC)]
@@
_EXPR <- \! [s]
_xWILD [s one match=( _EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF) except=( _opINC _opDEC)]
@@
# Highest precedence operators.
@POST
rfapostunary(1,2)
single()
@RULES
_EXPR <-
_VAR [s]
_xWILD [s one match=( _opINC _opDEC )]
@@
# Post unary ops have precedence.
@POST
rfaunary(2,3)
singler(2,3)
@RULES
# Only do this if you're at the start of something or there's an
# operator to the left.
_EXPR <-
_xWILD [s one match=( _xSTART \< \> \+ \- \* \/ \% \! \=
_opINC _opDEC _opLE _opGE _opEQ _opNE _opAND _opOR
_opCONF # 12/17/99 AM.
_opOUT # 12/31/99 AM.
_RETURN \( \[ \{ # 08/15/08 AM.
)]
_xWILD [s t one match=( \- \+ )]
_xWILD [s one match=( _EXPR _NUM _FLOAT )]
_xWILD [s one match=( _xANY _xEND _xEOF ) except=( _opINC _opDEC)]
@@
###################################
# GENERALIZED FUNCTION CALL GRAMMAR. #
###################################
# LIST GRAMMAR.
# FUNCTION CALL GRAMMAR.
@POST
addarg(1,0,3) # 05/26/02 AM.
listadd(1,3)
@RULES
_VARLIST <- _VARLIST
\, [opt]
_xWILD [one match=(_EXPR _NUM _STR)]
_xWILD [one match=( \, \) )] # lookahead.
@@
@POST
addarg(1,3,4) # 05/26/02 AM.
listadd(1,4)
@RULES
_xNIL <-
_FNCALLLIST
\, [opt]
\& [opt] # Call by reference! # 05/26/02 AM.
_xWILD [one match=(_EXPR _NUM _FLOAT _STR)]
_xWILD [one match=( \, \) )] # lookahead.
@@
#PUT ARRAY GRAMMAR IN HERE. #
@POST
varfnarray(1,4) # 10/13/00 AM.
single() # 10/13/00 AM.
@RULES
_VAR [layer=(_EXPR)] <- # 10/13/00 AM.
_VARLIST
\)
\[
_xWILD [opt match=(_EXPR _NUM)] # 10/13/00 AM.
# Making this OPTIONAL to catch ARRAY ASSIGNMENT. #
\]
@@
@POST
varfn()
singler(1,2) # 10/15/00 AM.
@RULES
_VAR [layer=(_EXPR)] <- _VARLIST \)
_xWILD [one fail=( \[ )] # lookahead. # 10/15/00 AM.
@@
@POST
movesem(1)
singler(1,2)
@RULES
_FNCALL <- _FNCALLLIST \) \{ @@
@POST
movesem(1)
single()
@RULES
_FNCALL [layer=(_EXPR)] <- _FNCALLLIST \) @@
###############################################
# FILE: preaction.pat
# SUBJ: Keeping pre actions the old way, for now.
# AUTH: Amnon Meyers
# CREATED: 13/Jan/00
###############################################
#@NODES _NLPPP
@POST
preaction()
single()
@RULES
_ACTION [base] <- _PREPAIR _FNCALL [s] \; [s opt] @@
###############################################
# FILE: gram4.pat
# SUBJ: NLP++ syntax.
# AUTH: Amnon Meyers
# CREATED: 06/Nov/99
# MODIFIED:
# NOTE: Some of this could merge with gram5.
###############################################
#@NODES _NLPPP
@POST
movesem(2)
single()
@RULES
_IFPART <- _IF _xWILD [s one match=( _EXPR _NUM _FLOAT _STR )] @@
# Simple statements.
@POST
# movesem(1) #
makestmt(1) # 05/11/00 AM.
single()
@RULES
# NOTE: num and str aren't handled as statements, for now.
_STMT <- _xWILD [s one match=( _EXPR _NUM _FLOAT _STR )] \; [s] @@
# FUNCTION RETURN STATEMENT. #
@POST
returnstmt(1,2) # 03/12/02 AM.
single()
@RULES
_STMT <- # 03/07/02 AM.
_RETURN
_xWILD [s opt match=(_EXPR _NUM _FLOAT _STR)]
\; [s]
@@
# EMPTY STATEMENT. #
@RULES
_STMT [base] <- \; [s] @@ # Keep from infinite loop. # 07/01/02 AM.
# SOME SYNTAX ERROR DETECTION. #
#@POST
# rfbunmatchedpunct(1) #
# single(); # To avoid infinite looping. #
#@RULES
#_err [base] <- _xWILD [one match=( #
# \( \) \[ \] \< \>
# )] @@
###############################################
# FILE: gram5.pat
# SUBJ: NLP++ syntax.
# AUTH: Amnon Meyers
# CREATED: 07/Nov/99
# MODIFIED:
# NOTE: NEED A RECURSIVE ALGORITHM HERE!!
# Not doing lists of blocks. Not doing functions.
# For now, ignoring some degenerate statements like "1".
###############################################
# Todo: DROP NLP++ regions into their own segments, to
# be able to say @PATH _ROOT _NLPPP
#@NODES _NLPPP
@POST
# movesem(1) #
makestmts(1) # Renamed. # 05/29/01 AM.
single()
@RULES
# NEED THE BASE, OR INFINITE LOOP!
_STMTS [base] <- _xWILD [s one match=(_STMT
_BLOCK # 12/15/99 AM.
)] @@
# For RFA parsing of RFB, need this. #
@POST
makestmts(1)
singler(1,1)
@RULES
_STMTS [base] <-
_EXPR
_xWILD [one lookahead match=( _EXPR _STMT \{ \} _startMark _endMark _ELSE _WHILE _xEND )]
@@
@POST
addstmt(1, 2)
single()
@RULES
_STMTS [base] <- _STMTS _xWILD [s one match=(_STMT
_BLOCK # 12/15/99 AM.
)] @@
# For RFA parsing of RFB, need this. #
@POST
addstmt(1,2)
singler(1,2)
@RULES
_STMTS [base] <-
_STMTS
_EXPR
_xWILD [one lookahead match=( _EXPR _STMT \{ \} _startMark _endMark _ELSE _WHILE _xEND )]
@@
@POST
movesem(2)
single()
@RULES
_BLOCK <- \{ [s] _STMTS \} [s] @@
# EMPTY BLOCK. #
@RULES
_BLOCK <- \{ [s] \} [s] @@ # 08/01/00 AM.
@POST
ifstmt(1, 2)
single()
@RULES
_IFSTMT <-
_IFPART
_xWILD [s one match=(_BLOCK _STMT
)]
@@
@POST
ifstmt(1,2)
singler(1,2)
@RULES
_IFSTMT <-
_IFPART
_EXPR
_xWILD [one lookahead match=( _EXPR _STMT \{ \} _startMark _endMark _ELSE _WHILE _xEND )]
@@
# WHILE STATEMENT #
@POST
movesem(2)
single()
@RULES
_WHILECOND <- _WHILE _EXPR @@
# Should make sure expr is parenthesized.
@POST
whilestmt(1, 2)
single()
@RULES
_STMT <- _WHILECOND _xWILD [s one match=(_BLOCK _STMT
)]
@@
@POST
whilestmt(1,2)
singler(1,2)
@RULES
_STMT <-
_WHILECOND
_EXPR
_xWILD [one lookahead match=( _EXPR _STMT \{ \} _startMark _endMark _ELSE _WHILE _xEND )]
@@
@POST
# movesem(2) #
makestmt(2) # 05/11/00 AM.
single()
@RULES
# Need better action to handle _NUM, _STR.
_ELSEPART <-
_ELSE
_xWILD [s one match=(_BLOCK _STMT
)]
@@
@POST
makestmt(2)
singler(1,2)
@RULES
_ELSEPART <-
_ELSE
_EXPR
_xWILD [one lookahead match=( _EXPR _STMT \{ \} _startMark _endMark _ELSE _WHILE _xEND )]
@@
@POST
ifelse(1, 2)
single()
@RULES
_STMT <-
_IFSTMT _ELSEPART
@@
# Probably need to check for no "else" as next elt.
@POST
movesem(1)
singler(1, 1)
@RULES
_STMT [base] <- _IFSTMT
_xWILD [s one match=( _xANY _xEND _xEOF ) except=( _ELSE )]
@@
|
@DECL
# Check whether value is in list. List must be iterable
# Push unique value onto var of pnnode
# Optimize later with searching algorithm. Not important for 2-3 elmnts.
# Args
# pnode: Parse tree node with <var>
# var: variable of <pnode> to push to
# val: value to push onto <pnode>'s <var>
# Returns
# integer: 1 if added, 0 if already present and not pushed.
pnrpushuniqueval(L("pnode"), L("var"), L("val")) {
L("current_vals") = pnvarnames(L("pnode"), L("var"));
L("num_vals") = arraylength(L("current_vals"));
L("i") = 0;
L("first_val") = L("current_vals")[0];
if (!L("first_val")) {
pnrpushval(L("pnode"), L("var"), L("val"));
return 1;
}
if (L("num_vals") == 1) {
if (strequal(L("val"), L("first_val"))) {
return 0;
}
else {
pnrpushval(L("pnode"), L("var"), L("val"));
return 1;
}
}
L("i")++;
while (L("i") < L("arraylength")) {
}
}
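# Example (sketch, assuming a node N(1) carrying an array variable "langs"):
#   pnrpushuniqueval(N(1), "langs", "en");  # pushes "en" only if not already present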
@@DECL |
@NODES _ROOT
@POST
if (N("header",1) == "नेपाली")
group(1,1,"_nepali");
else if (N("header",1) == "उच्चारण")
group(1,1,"_pronunciations");
else if (N("header",1) == "पदवर्ग")
group(1,1,"_partofspeech");
else if (N("header",1) == "अर्थ")
group(1,1,"_definition");
else if (N("header",1) == "उदाहरण")
group(1,1,"_example");
else if (N("header",1) == "समानार्थी शब्द")
group(1,1,"_synonyms");
else if (N("header",1) == "व्युत्पन्न सर्तहरू")
group(1,1,"_derivedTerms");
else if (N("header",1) == "अनुवाद")
group(1,1,"_translations");
@RULES
_xNIL <-
_headerZone ### (1)
@@
|
# Convert string or num to float type, if possible
@CODE
G("val") = flt("345.67");
@@CODE |
# Print the text under a range of rule elements number1 to number2 to fileName
# (sketch only: "range.txt" and the two-element rule below are placeholder
#  assumptions, using the N("$text",n) idiom and the << output operator)
@POST
	"range.txt" << N("$text",1) << N("$text",2) << "\n";
@@POST
@RULES
_xNIL <- _xALPHA _xNUM @@
@@RULES |
@CODE
L("hello") = 0;
@@CODE
@NODES _TEXTZONE
# New tokenization handlers.
@CHECK
if (!N("dissolve"))
fail();
@POST
splice(1,1);
@RULES
_xNIL <-
_tok
@@
# Looks like a lousy rule...
# prep alpha adj
@CHECK
if (!N("adj",2))
fail();
if (N("verb",2))
fail();
@POST
L("tmp2") = N(2);
group(2,2,"_adj");
pncopyvars(L("tmp2"),N(2));
fixadj(N(2));
@RULES
_xNIL <-
_xWILD [one match=(_prep _conj)]
_xALPHA
_xWILD [one lookahead match=(_adj)]
@@
# Some post-token handling...
@POST
singler(2,2);
@RULES
_qEOS <-
_aposS
\.
@@
# Any leftover periods as end of sentence...
@RULES
_qEOS <-
\.
@@
@POST
S("sem") = "money";
S("sem val") = phrasetext();
S("ignorepos") = 1; # 04/21/07 AM.
# Changing from _num to _noun.
single();
@RULES
_money [layer=_noun] <-
_xWILD [one match=(\$ \#)]
_xWILD [one match=(_num _xNUM)]
_xWHITE [star]
_xWILD [s opt match=(thousand million billion trillion)]
@@
# $ num-a-share
@PRE
<2,2> var("num-dash-dash");
@POST
N("sem",2) = "money";
listadd(2,1,"false");
@RULES
_xNIL <-
\$
_adj
@@
# noun - noun
@POST
group(2,4,"_caps");
group(2,2,"_adj");
N("cap",2) = 1;
N("glom",2) = 1;
@RULES
_xNIL <-
_xWILD [one fail=( \- )]
_noun
\-
_noun
_xWILD [one fail=( \- )]
@@
# Flag unhandled tokens. #
@POST
if (G("verbose"))
"tok.txt" << phrasetext() << "\n";
@RULES
_xNIL <-
_tok
@@
# noun num
@PRE
<1,1> var("cap");
@POST
group(1,2,"_noun");
N("cap",1) = 1;
clearpos(N(1),1,0);
@RULES
_xNIL <-
_noun
_xNUM
_xWILD [one lookahead match=(_det _quan _num _adj _noun
_prep _adv _verb
)]
@@
# Note: Handle slangy stuff like thinkin'
# Need to account for single quotes also.
@CHECK
L("txt") = strtolower(N("$text",1));
if (!strendswith(L("txt"),"in"))
fail();
@POST
N("apos",1) = 1;
excise(2,2);
@RULES
_xNIL <-
_xALPHA
\'
@@
# Some nouns that must be capitalized.
@PRE
<1,1> lowercase();
@POST
--N("pos num",1);
N("noun",1) = 0;
alphaunambigred(1);
@RULES
_xNIL <- _xWILD [one match=(
lent
)] @@
|
@NODES _ROOT
@RULES
_Comment <-
_CommentStart [one] ### (1)
	_xWILD [star fail=("_CommentEnd" "_DoubleHyphen")]	### (2)
_CommentEnd [one] ### (3)
@@
@POST
S("textValue") = N("$text",2) ;
single() ;
@@POST
@RULES
_PubidLiteral <-
\" [one] ### (1)
_xWILD [star matches=( "_xALPHA" "_xNUM" \ \- \' \( \) \+ \, \. \/ \: \= \? \; \! \* \# \@ \$ \_ \% )] ### (2)
\" [one] ### (3)
@@
_PubidLiteral <-
\' [one] ### (1)
_xWILD [star matches=( "_xALPHA" _xNUM \ \- \( \) \+ \, \. \/ \: \= \? \; \! \* \# \@ \$ \_ \% )] ### (2)
\' [one] ### (3)
@@
_SystemLiteral <-
\" [one] ### (1)
_xWILD [star fails=("\"")] ### (2)
\" [one] ### (3)
@@
_SystemLiteral <-
\' [one] ### (1)
_xWILD [star fails=("'")] ### (2)
\' [one] ### (3)
@@
@@RULES
@RULES
_whiteSpace <-
_xWHITE [plus] ### (1)
@@
@@RULES
|
@PATH _ROOT _educationZone _educationInstance
@POST
splice(1,1); # Zap the _expStart node
@RULES
_xNIL <- _eduStart @@
_xNIL <- _educationBoundary @@
|
@CODE
G("labels") = findconcept(findroot(),"labels");
if (!G("labels")) G("labels") = makeconcept(findroot(),"labels");
rmchildren(G("labels"));
G("phrases") = findconcept(findroot(),"phrases");
if (!G("phrases")) G("phrases") = makeconcept(findroot(),"phrases");
rmchildren(G("phrases"));
DictionaryClear();
@@CODE |
@PATH _ROOT _paragraph _sentence
@POST
L("text") = strtolower(N("$text"));
if (strlength(L("text")) > 1 && !N("functword")) {
single();
}
@RULES
_titleCaps <-
_xWILD [plus match=(_xCAP _nameAbbrevCandidate _nameSuffix \-)] ### (1)
@@
|
@NODES _ROOT
@POST
S("header") = N("header",1);
S("count") = 4;
single();
@RULES
_headerZone <-
_headerFour ### (1)
_xWILD [plus fails=(_headerFour _headerThree _headerTwo _xEND)] ### (2)
@@
|
# Reduce _det _quan _adj _noun nodes to _np, triggering _noun first
@RULES
_np <- _det _quan _adj _noun [trig] @@ |
# Replace all matching attribute-value pairs in the given hierarchy to have the new string value, new_s.
attrchange(L("hier"), L("attr_s"), L("val_s"), L("new_s")); |
@PATH _ROOT _language _headerZone
@PRE
<1,1> vareq("header", "etymology");
@POST
S("con") = MakeCountCon(X("con", 2), "etymology");
S("pos") = makeconcept(S("con"), "pos");
single();
@RULES
_etymologyZone <-
_headerZone ### (1)
@@
|
@CODE
G("emojis") = getconcept(findroot(),"emojis");
@@CODE |
@PATH _ROOT _section
@POST
L("icdDiagnoses") = GetICDTerms(N(2), G("split"));
@RULES
_xNIL <-
_xWILD [one matches=(_xSTART _patientID _time)] ### (1)
_xWILD [one match=(_xALPHA _xNUM)] ### (2)
@@ |
@CODE
G("database con") = findconcept(findroot(),"database");
if (G("database con"))
rmconcept(G("database con"));
G("database con") = makeconcept(findroot(),"database");
G("fields con") = makeconcept(G("database con"),"fields");
G("records con") = makeconcept(G("database con"),"records");
G("field count") = 0;
@@CODE
|
@POST
rfaselect(2)
single()
@RULES
_SELECT [base] <- _soSELECT [opt] _NODES _eoSELECT [opt] @@
_SELECT [base] <- _soSELECT [opt] _MULTI _eoSELECT [opt] @@
_SELECT [base] <- _soSELECT [opt] _PATH _eoSELECT [opt] @@
|
@PATH _ROOT _experienceZone _LINE
# Looking for unassigned caps.
@CHECK
if (
N("len",1) <= 2
&& !N("capofcap",1)
&& !N("capandcap",1)
&& N("hi conf",1) < 70 # Hasn't been assigned to anything.
)
succeed();
fail();
@POST
S("city") = N("$text",1);
S("state") = N("$text",4);
single();
@RULES
_cityState <-
_Caps [rename=(_city)]
\, [s]
_xWHITE [s star]
_state [s]
@@
# eg, "Flibble and Gribble".
@CHECK
if (
N("hi conf",1) < G("threshold")
&& N("hi conf",5) < G("threshold")
&& N("unknowns",1)
&& N("unknowns",5)
)
succeed();
fail();
@POST
merge();
@RULES
_company <-
_Caps
_xWHITE [s star]
_xWILD [s one match=(and \& )]
_xWHITE [s star]
_Caps
@@
|
@PATH _ROOT _textZone
@POST
S("header") = N("header",1);
single();
@RULES
_headerZone <-
_header ### (1)
_xWILD [fail=(_header _language _xEND)] ### (2)
@@ ### (3)
|
@CODE
# Uncomment passes 3 and 8 to recreate/add to kbbs.
G("icd9_codes") = getconcept(findroot(), "icd9_codes");
G("mimic_splits") = getconcept(findroot(), "mimic_splits");
G("top") = getconcept(G("mimic_splits"), "top");
G("rare") = getconcept(G("mimic_splits"), "rare");
@@CODE |
@NODES _LINE
@POST
group(2,2,"_item");
@RULES
_xNIL <-
_xSTART ### (1)
_comma ### (2)
@@
@POST
group(1,1,"_item");
@RULES
_xNIL <-
_comma ### (1)
_comma ### (2)
@@
|
@CODE
L("hello") = 0;
# Remove spurious words from the dictionary.
L("c") = dictfindword("nondurables");
if (L("c"))
rmconcept(L("c"));
@@CODE
@NODES _TEXTZONE
#####
## CORRECTIONS TO DICTIONARY HERE.
#####
@POST
N("pos num") = 2;
N("noun") = 1; # MISSING.
N("verb") = 1;
N("stem") = "affiliate";
N("pos") = "_noun";
@RULES
_xNIL <- affiliate @@
_xNIL <- affiliates @@
# bar - listed as preposition, which is ok, but
# extremely rare.
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1;
N("stem") = "bar";
N("pos") = "_noun";
@RULES
_xNIL <- bar @@
@POST
N("pos num") = 2;
N("adj") = 1; # MISSING.
N("noun") = 1;
N("stem") = "giant";
N("pos") = "_noun";
@RULES
_xNIL <- giant @@
# own - pronoun pos useless (Conform treebank and otherwise).
# Only adj and verb.
@POST
N("adj") = 1;
N("verb") = 1;
N("sem") = N("stem") = "own";
N("pos num") = 2;
N("pos") = "_adj";
@RULES
_xNIL <- own @@
# even - getting rid of rare/archaic noun sense. (evening).
@POST
N("adv") = 97; # Conform treebank.
N("adj") = 1;
N("verb") = 1;
N("noun") = 0; # Effectively zero except in poetry.
N("sem") = N("stem") = "even";
N("pos num") = 3;
N("pos") = "_adv";
@RULES
_xNIL <- even @@
# disparate - Noun sense looks erroneous.
@POST
N("adj") = 1;
N("noun") = 0;
N("sem") = N("stem") = "disparate";
N("pos num") = 1;
N("pos") = "_adj";
@RULES
_xNIL <- disparate @@
# goes - Erroneously listed as noun.
@POST
N("verb") = 1;
N("noun") = 0;
N("sem") = N("stem") = "go";
N("pos num") = 1;
N("pos") = "_verb";
@RULES
_xNIL <- goes @@
# Note: exclusively in "single-handedly".
@POST
N("adv") = 1;
N("sem") = N("stem") = "handed";
N("pos num") = 1;
N("pos") = "_adv";
@RULES
_xNIL <- handedly @@
# protecting
# dict lists as noun, adj.
@POST
N("pos num") = 1;
N("verb") = 1;
N("sem") = N("stem") = "protect";
N("-ing") = 1;
N("pos") = "_verb";
@RULES
_xNIL <- protecting @@
# provided - special word.
# dict lists as conj. (handle in grammar).
@POST
N("pos num") = 1;
N("verb") = 1;
N("sem") = N("stem") = "provided";
N("-edn") = 1;
N("pos") = "_verb";
@RULES
_xNIL <- provided @@
# providing - special word.
# dict lists as conj. (handle in grammar).
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1; # 10/09/06 AM.
N("sem") = N("stem") = "provide";
N("-ing") = 1;
N("pos") = "_verb";
@RULES
_xNIL <- providing @@
# pending listed as prep, which messes up lookup.
# (MW lists it as prep.)
# pending is a special word. Treebank lists it as VBG
# usually, which is wrong.
@POST
N("pos num") = 2;
N("adj") = 1;
N("verb") = 1; # WRONG. Conforming to Treebank.
N("stem") = N("sem") = "pending"; # !
N("pos") = "_adj";
chpos(N(1),"VBG"); # WRONG. Conforming to Treebank.
@RULES
_xNIL <- pending @@
# failing listed as prep, which messes up lookup.
@POST
N("pos num") = 3;
N("adj") = 1;
N("verb") = 1;
N("noun") = 1;
N("stem") = N("sem") = "fail"; # AMBIG, could have failings...
N("pos") = "_verb";
N("pos_np") = "VBG";
@RULES
_xNIL <- failing @@ # 10
# directly as conj is chiefly British, not quite a conj.
@POST
N("pos num") = 1;
N("adv") = 1;
N("stem") = N("sem") = "direct";
N("pos") = "_adv";
@RULES
_xNIL <- directly @@
# 'plus' is a prep.
# plus
@POST
N("pos num") = 5;
N("noun") = 1;
N("conj") = 1;
N("adj") = 1;
N("adv") = 1;
N("prep") = 1;
N("stem") = "plus";
N("pos") = "_adj";
@RULES
_xNIL <- plus @@
# 'plus' is a prep.
# past
@POST
N("pos num") = 4;
N("noun") = 24;
N("adj") = 71; # Conform Treebank.
N("adv") = 1;
N("prep") = 4;
N("stem") = "past";
N("pos") = "_adj";
N("sem") = "date"; # 07/13/06 AM.
++X("date ref");
++X("date=past");
@RULES
_xNIL <- past @@
# recessionary listed only as noun.
@POST
N("pos num") = 1;
N("adj") = 1;
N("stem") = N("sem") = "recession";
N("pos") = "_adj";
@RULES
_xNIL <- recessionary @@
@POST
N("pos num") = 5;
N("noun") = 1;
N("verb") = 1;
N("adj") = 1;
N("adv") = 1;
N("prep") = 1;
N("stem") = "round";
N("pos") = "_adj";
@RULES
_xNIL <- round @@
@POST
N("pos num") = 2;
N("adj") = 86; # Conform Treebank.
N("adv") = 14;
# N("prep") = 5;
N("stem") = "next";
N("pos") = "_adj";
@RULES
_xNIL <- next @@
# 'go' missing from dict!
@POST
N("pos num") = 3;
N("verb") = 95; # Putting in some preferences!
N("noun") = 4;
N("adj") = 1;
N("stem") = "go";
N("pos") = "_verb";
@RULES
_xNIL <- go @@
# 'offer' listed as adj in dict, but can't corroborate.
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1;
N("stem") = "offer";
N("pos") = "_noun";
@RULES
_xNIL <- offer @@
# used
@POST
N("pos num") = 2;
N("verb") = 1;
N("adj") = 1;
N("sem") = N("stem") = "use";
N("-edn") = 1;
N("pos") = "_verb";
@RULES
_xNIL <- used @@
# 'seek' listed as noun in dict, but can't corroborate.
@POST
N("pos num") = 1;
N("verb") = 100;
N("stem") = "seek";
N("pos") = "_verb";
@RULES
_xNIL <- seek @@
_xNIL <- seeks @@
# resolving listed as noun, can't corroborate.
@POST
N("pos num") = 1;
N("verb") = 1;
N("stem") = "resolve";
N("pos") = "_verb";
@RULES
_xNIL <- resolving @@ # 20
# listed as noun, can't corroborate.
@POST
N("pos num") = 1;
N("verb") = 1;
N("stem") = "comprise";
N("pos") = "_verb";
@RULES
_xNIL <- comprising @@
@POST
N("pos num") = 2;
N("verb") = 1;
N("adv") = 1; # MISSING.
N("stem") = "please";
N("pos") = "_verb";
@RULES
_xNIL <- please @@
# To avoid mis-stemming.
@POST
N("pos num") = 1;
N("noun") = 1;
N("stem") = "thing";
N("sem") = "thing";
N("pos") = "_noun";
@RULES
_xNIL <- thing @@
@POST
N("pos num") = 1;
N("adj") = 1;
N("stem") = "unirradiated";
N("pos") = "_adj";
@RULES
_xNIL <- unirradiated @@
# Consecutive is ONLY an adj.
@POST
N("pos num") = 1;
N("adj") = 1;
N("stem") = "consecutive";
N("pos") = "_adj";
@RULES
_xNIL <- consecutive @@
@POST
N("pos num") = 2;
N("adj") = 1; # MISSING.
N("noun") = 1;
N("stem") = "miotic";
N("pos") = "_adj";
@RULES
_xNIL <- miotic @@
@POST
# BUG: "do" is listed only as a noun in the dictionary!
N("pos num") = 2;
N("verb") = 1; # MISSING.
N("noun") = 1;
N("stem") = "do";
N("pos") = "_verb";
@RULES
_xNIL <- do @@
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1; # MISSING.
N("stem") = "whale";
N("pos") = "_noun";
@RULES
_xNIL <- whaling @@
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1;
N("stem") = "reprocess";
N("pos") = "_noun";
@RULES
_xNIL <- reprocessing @@
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1;
N("stem") = "restructure";
N("eventive") = 1;
N("pos") = "_noun";
N("pos_np") = "NN";
@RULES
_xNIL <- restructuring @@
@POST
N("pos num") = 1;
N("noun") = 1;
N("stem") = "pc";
N("sem") = "pc";
N("pos") = "_noun";
N("number") = "singular";
N("mypos") = "NN";
N("acronym") = 1;
@RULES
_xNIL <- pc @@
@POST
N("pos num") = 1;
N("noun") = 1;
N("stem") = "pc";
N("sem") = "pc";
N("pos") = "_noun";
N("number") = "plural";
N("mypos") = "NNS";
N("acronym") = 1;
@RULES
_xNIL <- pcs @@
@POST
S("pos num") = 2;
S("verb") = 1;
S("noun") = 1;
S("text") = "cochair";
S("stem") = "cochair";
S("pos") = "_noun";
single();
@RULES
cochair <- # 30
co [s]
\- [s]
chair [s]
@@
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1;
N("stem") = "cochair";
N("pos") = "_noun";
@RULES
_xNIL <- cochair @@
# Stemmer incorrectly does improving => improv!
@POST
N("pos num") = 2;
N("verb") = 1;
N("adj") = 1;
N("stem") = "improve";
N("pos") = "_verb";
@RULES
_xNIL <- improving @@
_xNIL <- improved @@
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1; # Add this definition.
N("stem") = "compute";
N("eventive") = 1;
N("pos") = "_noun";
N("pos_np") = "NN";
@RULES
_xNIL <- computing @@ # 04/23/07 AM.
# Stemmer incorrectly does pointed => pointe
@POST
N("pos num") = 2;
N("verb") = 1;
N("adj") = 1;
N("stem") = "point";
N("pos") = "_verb";
@RULES
_xNIL <- pointed @@
@POST
N("pos num") = 1;
N("verb") = 1;
N("stem") = "locate";
N("pos") = "_verb";
@RULES
_xNIL <- locating @@
# Dict had this as adj also. #
@POST
N("pos num") = 1;
N("verb") = 1;
N("stem") = "hear";
N("pos") = "_verb";
N("-edn") = 1;
@RULES
_xNIL <- heard @@
@POST
N("pos num") = 2;
N("verb") = 50;
N("noun") = 50;
N("stem") = "interest";
N("pos") = "_noun";
@RULES
_xNIL <- interest @@ # 07/17/03 AM.
@POST
N("pos num") = 2;
N("verb") = 1;
N("noun") = 1;
@RULES
_xNIL <- sideline @@
_xNIL <- sidelines @@ # 40
@RULES
_verb <- sidelining @@
_verb <- sidelined @@
@POST
N("pos num") = 1; # Dict has verb also.
N("noun") = 1;
N("stem") = "aide";
pncopyvars();
single();
@RULES
_noun <- aides @@
# modest is not superlative!
@POST
N("stem") = N("sem") = "modest";
N("pos num") = 1;
N("adj") = 1;
chpos(N(1),"JJ");
pncopyvars();
single();
@RULES
_adj <- modest @@
@POST
N("pos num") = 1;
N("noun") = 1;
N("stem") = "man";
N("number") = "plural";
chpos(N(1),"NNS");
pncopyvars();
single();
@RULES
_noun <- men @@
@POST
N("pos num") = 1;
N("noun") = 1;
N("stem") = "penny";
N("number") = "plural";
chpos(N(1),"NNS");
pncopyvars();
single();
@RULES
_noun <- pence @@ # 46
@POST
N("pos num") = 1;
N("noun") = 1;
N("stem") = "known";
N("number") = "plural";
chpos(N(1),"NNS");
pncopyvars();
single();
@RULES
_noun <- knowns @@
# news is singular only.
@POST
N("pos num") = 1;
N("noun") = 1;
N("stem") = "news";
N("number") = "singular";
# chpos(N(1),"NN");
@RULES
_noun <- news @@
# noun in dict is spurious.
@POST
N("pos num") = 1;
N("adj") = 1;
pncopyvars();
single();
@RULES
_adj <-
nondurable
@@
@POST
N("pos num") = 1;
N("adj") = 1;
pncopyvars();
single();
@RULES
_adj <-
nonexecutive
@@
# listed as conj in dictionary.
@POST
N("pos num") = 1;
N("adv") = 1;
pncopyvars();
single();
@RULES
_adv <- immediately @@
@POST
# Don't want this to automatically go to preposition.
N("prep") = 0;
pncopyvars();
single();
@RULES
_noun <- vice @@
# Setting a preference for noun/adj/vbg, eg,
# if this ends up as non head noun in noun phrase.
@POST
N("pos_np") = "JJ";
@RULES
_xNIL <- _xWILD [one match=(
accelerated
annual
blue # Colors...
common
composite
computerized
confused
differential
disaffected
disembodied
distorted
disturbing
domestic
executive # About 665/510 !
facial
first
free
further # Treebank 329/356.
general
good
human
initial
intellectual
last
lead # ambig.
local
matching
major
member #
monthly
open
past
personal
private
senior # 06/04/06 AM.
shaded
simple
specific
standardized
standing
striking
troubled
worthy
)] @@
@POST
N("pos_np") = "JJS";
@RULES
_xNIL <- _xWILD [one match=(
best
)] @@
@POST
N("pos_np") = "JJR";
@RULES
_xNIL <- _xWILD [one match=(
lesser
lower
)] @@
@POST
N("pos_np") = "NNS";
N("-s") = 1;
@RULES
_xNIL <- _xWILD [one match=(
alumni
data
)] @@
@POST
N("pos_np") = "NN";
@RULES
_xNIL <- _xWILD [one match=(
adult
attack
baby
bond
business
capital
cardboard
centennial
cheerleading # new noun.
compound
copyright
country
crack # usually.
desktop
district
engineering
guest
holding
home
household
managing
manufacturing
market
material
mathematics # want singular...
morning
motor
operating
patent
power
radio
rate
state
stock
summer
test
textile
trade
trading
vice
winter
work
world
)] @@
# See treebank tagguide for vbg/jj, vbn/jj.
@POST
N("pos_np") = "VBN"; # 04/20/07 AM.
@RULES
_xNIL <- _xWILD [one match=(
accused
announced
expected
imported
)] @@
@POST
N("pos_np") = "VBG";
@RULES
_xNIL <- _xWILD [one match=(
amusing
building
competing
developing
failing
growing # 04/23/07 AM.
remaining # 04/20/07 AM.
receiving
slowing
)] @@
@POST
N("pos num") = 3;
N("noun") = 1;
N("adj") = 1;
# N("sem") = "direction";
N("stem") = strtolower(N("$text"));
# if (N("$length") == 1)
# N("adv") = 50; # confidence.
# else
# N("adv") = 95; # Ballworld domain.
N("adv") = 1;
@RULES
_xNIL <- _xWILD [one match=(
n s e w
ne se nw sw
nne ene sse ese nnw wnw ssw wsw
)] @@
|
@NODES _ROOT
@RULES
_xNIL <- _xALPHA [s min=1 max=0 matches=()] @@
|
@PATH _ROOT _headerZone
@POST
if (strstartswith(N("value"),"verbtxt")) {
N("type") == "verbtxt";
}
@RULES
_xNIL <-
_iOpen ### (1)
@@
|
@CODE
#G("out") = cbuf();
G("out") = "words.xml";
L("word") = down(G("words"));
while (L("word")) {
L("wordid")++;
G("out") << "<word>\n";
G("out") << "\t<wordid>" << L("wordid") << "</wordid>\n";
G("out") << "\t<word>" << conceptname(L("word")) << "</word>\n";
G("out") << "</word>\n";
# pos1, pronunciation, synonym, derivedTerm, translation
L("child") = down(L("word"));
while (L("child")) {
L("name") = conceptname(L("child"));
# pos1
if (strstartswith(L("name"),"pos")) {
L("posid")++;
L("pos") = strval(L("child"),"pos");
G("out") << "<pos>\n";
G("out") << "\t<wordid>" << L("wordid") << "</wordid>\n";
G("out") << "\t<posid>" << L("posid") << "</posid>\n";
G("out") << "\t<pos>" << L("pos") << "</pos>\n";
G("out") << "</pos>\n";
# definition
L("grand") = down(L("child"));
while (L("grand")) {
L("gname") = conceptname(L("grand"));
if (strstartswith(L("gname"), "definition")) {
L("defid")++;
G("out") << "<definition>\n";
G("out") << "\t<posid>" << L("posid") << "</posid>\n";
G("out") << "\t<defid>" << L("defid") << "</defid>\n";
G("out") << "</definition>\n";
#explanation, variation, example
L("great") = down(L("grand"));
while (L("great")) {
L("ggname") = conceptname(L("great"));
if (strstartswith(L("ggname"), "explanation")) {
L("expid")++;
L("text") = strval(L("great"),"text");
G("out") << "<explanation>\n";
G("out") << "\t<defid>" << L("defid") << "</defid>\n";
G("out") << "\t<expid>" << L("expid") << "</expid>\n";
G("out") << "\t<text>" << L("text") << "</text>\n";
G("out") << "</explanation>\n";
}
else if (strstartswith(L("ggname"), "variation")) {
L("varid")++;
L("text") = strval(L("great"),"text");
G("out") << "<variation>\n";
G("out") << "\t<defid>" << L("defid") << "</defid>\n";
G("out") << "\t<varid>" << L("varid") << "</varid>\n";
G("out") << "\t<text>" << L("text") << "</text>\n";
G("out") << "</variation>\n";
}
else if (strstartswith(L("ggname"), "example")) {
L("exampid")++;
L("text") = strval(L("great"),"text");
G("out") << "<example>\n";
G("out") << "\t<defid>" << L("defid") << "</defid>\n";
G("out") << "\t<exampid>" << L("exampid") << "</exampid>\n";
G("out") << "\t<text>" << L("text") << "</text>\n";
G("out") << "</example>\n";
}
L("great") = next(L("great"));
}
}
L("grand") = next(L("grand"));
}
}
# pronunciation
else if (strstartswith(L("name"),"pronunciation")) {
L("attrList") = findattr(L("child"),"phonetic");
L("pht_valueList") = attrvals(L("attrList"));
while (L("pht_valueList")) {
L("phoneticid")++;
G("out") << "<pronunciation>\n";
G("out") << "\t<wordid>" << L("wordid") << "</wordid>\n";
G("out") << "\t<phoneticid>" << L("phoneticid") << "</phoneticid>\n";
G("out") << "\t<pttext>" << getstrval(L("pht_valueList")) << "</pttext>\n";
L("pht_valueList") = nextval(L("pht_valueList"));
}
L("m_attrList") = findattr(L("child"),"phonemic");
L("phm_valueList") = attrvals(L("m_attrList"));
while (L("phm_valueList")) {
L("phonemicid")++;
G("out") << "<pronunciation>\n";
G("out") << "\t<wordid>" << L("wordid") << "</wordid>\n";
G("out") << "\t<phonemicid>" << L("phonemicid") << "</phonemicid>\n";
G("out") << "\t<pttext>" << getstrval(L("phm_valueList")) << "</pttext>\n";
L("phm_valueList") = nextval(L("phm_valueList"));
}
G("out") << "</pronunciation>\n";
}
# synonym
else if (strstartswith(L("name"),"synonym")) {
L("grand") = down(L("child"));
while (L("grand")) {
L("gname") = conceptname(L("grand"));
if (strstartswith(L("gname"), "synonym")) {
L("synid")++;
L("text") = strval(L("grand"), "text");
G("out") << "<synonym>\n";
G("out") << "\t<wordid>" << L("wordid") << "</wordid>\n";
G("out") << "\t<synid>" << L("synid") << "</synid>\n";
G("out") << "\t<text>" << L("text") << "</text>\n";
G("out") << "</synonym>\n";
}
L("grand") = next(L("grand"));
}
}
# derivedTerms
else if (strstartswith(L("name"),"derived")) {
L("grand") = down(L("child"));
while (L("grand")) {
L("gname") = conceptname(L("grand"));
if (strstartswith(L("gname"), "derived")){
L("termid")++;
L("term") = strval(L("grand"), "derivedTerms");
G("out") << "<term>\n";
G("out") << "\t<wordid>" << L("wordid") << "</wordid>\n";
G("out") << "\t<termid>" << L("termid") << "</termid>\n";
G("out") << "\t<text>" << L("term") << "</text>\n";
G("out") << "</term>\n";
}
L("grand") = next(L("grand"));
}
}
#translation
else if (strstartswith(L("name"),"translation")) {
	# अङ्ग्रेजी ("English")
L("grand") = down(L("child"));
while (L("grand")) {
L("gname") = conceptname(L("grand"));
if (strstartswith(L("gname"), "अङ्ग्रेजी")) {
L("langid")++;
G("out") << "<language>\n";
G("out") << "\t<wordid>" << L("wordid") << "</wordid>\n";
G("out") << "\t<langid>" << L("langid") << "</langid>\n";
G("out") << "\t<text>" << L("gname") << "</text>\n";
G("out") << "</language>\n";
}
L("great") = down(L("grand"));
while (L("great")) {
L("ggname") = conceptname(L("great"));
if (strstartswith(L("ggname"), "translation")) {
L("transid")++;
L("text") = strval(L("great"), "text");
G("out") << "<translation>\n";
G("out") << "\t<langid>" << L("langid") << "</langid>\n";
G("out") << "\t<transid>" << L("transid") << "</transid>\n";
G("out") << "\t<text>" << L("text") << "</text>\n";
G("out") << "</translation>\n";
}
L("great") = next(L("great"));
}
L("grand") = next(L("grand"));
}
}
L("child") = next(L("child"));
}
L("word") = next(L("word"));
}
@@CODE
|
@PATH _ROOT _textZone _LINE
@POST X("root",3) = N("$text",3); X("numero",3) = N("$text",1); single();
@RULES _numero <- _numero de _xALPHA @@
@POST X("pessoa",3) = N("$text",1); single();
@RULES _pessoa <- _pessoa pessoa @@
@POST X("numero",3) = N("$text",2); single();
@RULES _numero <- do _numero @@
@POST X("tempo",3) = N("$text",2); single();
@RULES _tempo <- do _tempo @@
@POST X("root",3) = N("$text",3); single();
@RULES _root <- do verbo _xALPHA @@
|
@CODE
fileout("caps.txt");
prlit("caps.txt", "CAPITALIZED PHRASE CONFIDENCES\n");
prlit("caps.txt", " (RESUME-WIDE)\n");
prlit("caps.txt", "=======================================================\n");
prlit("caps.txt", " HUM CITY FLD SCH JOB CO. TEXT\n");
prlit("caps.txt", " ------------------------------------------------------\n");
# NAM LOC MAJ SUBORG...
# Count caps printed out.
# But need conditional printout to do this.
#var("prcount",0)
G("prcount") = 0;
@@CODE
@PATH _ROOT _LINE
@POST
# For fun, fetch text into a variable.
N("text",1) = N("$text",1); # Special $var fetches node text.
"caps.txt" << " ";
"caps.txt" << rightjustifynum(N("humanname conf",1),3) << " ";
"caps.txt" << rightjustifynum(N("city conf",1),3) << " ";
"caps.txt" << rightjustifynum(N("field conf",1),3) << " ";
"caps.txt" << rightjustifynum(N("school conf",1),3) << " ";
"caps.txt" << rightjustifynum(N("job conf",1),3) << " ";
"caps.txt" << rightjustifynum(N("company conf",1),3) << " ";
"caps.txt" << N("text",1);
prlit("caps.txt", "\n");
#noop()
@RULES
_xNIL <- _Caps [s] @@
|
@NODES _ROOT
@RULES
_xNIL <-
_phrase ### (1)
@@
|
@PATH _ROOT _paragraph _LINE
#if (pnvar)
@RULES
_xNIL <-
_xWILD [one] ### (1)
@@
|
@NODES _ROOT
@CHECK
if (N("level",1) == G("max header"))
succeed();
fail();
@POST
"nesting.txt" << "header: " << N("level",1) << " " << N("header",1) << "\n";
S("level") = N("level",1);
S("header") = N("header",1);
single();
@RULES
_headerZone <-
_header ### (1)
_xWILD [fail=(_header _xEND)] ### (2)
@@
@POST
G("max header")--;
if (G("max header") < 1)
exitpass();
"nesting.txt" << "max: " << G("max header") << "\n";
@RULES
_xNIL <-
_xWILD [match=(_LINE _headerZone)]
_xEND
@@
|
@NODES _section
@POST
S("subsection") = N("subsection", 1);
merge(1,2);
@RULES
_subsection <-
_subsection [one]
_xWILD [plus fails=(_xEND _subsection)]
@@ |
@CODE
if (!G("find html"))
exitpass();
@@CODE
@NODES _ROOT
# Some html headers, h1 h2 etc.
@POST
L("num") = N("$text",4);
if (L("num") == "1")
L("txt") = "ha";
else if (L("num") == "2")
L("txt") = "hb";
else if (L("num") == "3")
L("txt") = "hc";
else if (L("num") == "4")
L("txt") = "hd";
else if (L("num") == "5")
L("txt") = "he";
else if (L("num") == "6")
L("txt") = "hf";
else
L("txt") = "hx";
if (N(2))
{
L("name") = "_E" + L("txt");
}
else
L("name") = "_" + L("txt");
group(1,5,L("name"));
N("tag",1) = L("name");
@RULES
_xNIL <-
\<
\/ [opt]
H
_xNUM
\>
@@
# Generic html tag.
@POST
L("txt") = strtolower(N("$text",3));
if (N(2))
{
L("name") = "_E" + L("txt");
}
else
L("name") = "_" + L("txt");
group(1,5,L("name"));
N("tag",1) = L("name");
@RULES
_xNIL <-
\<
\/ [opt]
_xALPHA
_xWILD
\>
@@
@POST
group(1,3,"_comment");
N("tag",1) = "comment";
@RULES
_xNIL <-
_commentTag
_xWILD
_EcommentTag
@@
# Some erroneous html.
@POST
group(1,3,"<");
@RULES
_xNIL <-
\&
lt
\;
_xALPHA
\>
@@
@RULES
_char <-
\&
_xALPHA
\;
@@
|
@CODE
L("arr");
L("i") = 21;
L("count") = 0;
L("arr_2") = 0;
"testfile.txt" << "Init: " << L("arr") << "\n";
"testfile.txt" << "Empty len: " << arraylength(L("arr")) << "\n";
if (L("arr_2")) {
"testfile.txt" << "arr_2: " << L("arr") << "\n";
}
"testfile.txt" << "Empty len: " << arraylength(L("arr")) << "\n";
while (L("count") < 12) {
L("arr")[arraylength(L("arr"))] = L("i")--;
L("count")++;
}
L("arr")[arraylength(L("arr"))] = 0;
L("i") = 21;
L("count") = 0;
while (L("count") < 12) {
L("arr")[arraylength(L("arr"))] = L("i")--;
L("count")++;
}
L("arr")[arraylength(L("arr"))] = 0;
# L("arr") = "ac";
# L("arr")[arraylength(L("arr"))] = "aa";
# L("arr")[arraylength(L("arr"))] = "ba";
# L("arr")[arraylength(L("arr"))] = "ab";
# L("arr")[arraylength(L("arr"))] = "bb";
# L("arr")[arraylength(L("arr"))] = "ba";
# L("arr")[arraylength(L("arr"))] = "ab";
# L("arr")[arraylength(L("arr"))] = "ab";
# L("arr")[arraylength(L("arr"))] = "ba";
"testfile.txt" << "Original: " << L("arr") << "\n";
L("sorted") = QuickSort(L("arr"));#, 0, arraylength(L("arr"))-1);
"testfile.txt" << "Sorted: " << L("sorted") << "\n";
# L("search_term") = "ab";
# "testfile.txt" << "Searching for: " << L("search_term") << "\n";
# L("idx") = BinarySearch(L("sorted"), L("search_term"));
# "testfile.txt" << "Search result for " << L("search_term") << ":\t"<< L("idx") << "\n";
# L("search_term") = "ab";
# "testfile.txt" << "Searching for: ab" << "\n";
# L("idx") = BinarySearchPartition(L("sorted"), 0, 1, "ab");
# "testfile.txt" << "Search result for " << L("search_term") << ":\t"<< L("idx") << "\n";
L("search_term") = "ab";
L("unique") = FilterDuplicates(L("arr"), 0);
"testfile.txt" << "Set:\t" << L("unique") << "\n";
L("str") = "Contrary to popular belief, Lorem Ipsum is not simply random text. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of 'de Finibus Bonorum et Malorum' (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, 'Lorem ipsum dolor sit amet..', comes from a line in section 1.10.32.";
L("x") = GetTokens(L("str"));
"tokens.txt" << "Tokens: " << L("x") << "\n";
@@CODE |
@NODES _ROOT
@RULES
_ABSTRACT <-
_beginAbs ### (1)
_xWILD ### (2)
_endAbs ### (3)
@@
|
@CODE
L("hello") = 0;
@@CODE
@PATH _ROOT _TEXTZONE _tok
# Artifact of pretagged texts.
# --
# - -
@POST
pncopyvars(X(3),N(4));
X("dissolve") = 1;
clearpos(N(2),1,0);
clearpos(N(3),1,0);
group(2,3,"_dbldash");
N("nopos",2) = 1;
@RULES
_xNIL <-
_xSTART
\-
\-
_xWILD [min=2 match=(_xANY) gp=_tok]
_xEND
@@
# Artifact of pretagged texts.
# --
# - -
@POST
pncopyvars(X(3),N(4));
X("dissolve") = 1;
clearpos(N(2),1,0);
clearpos(N(3),1,0);
group(2,3,"_dbldash");
N("nopos",2) = 1;
@RULES
_xNIL <-
_xSTART
\-
\-
_xANY
_xEND
@@
@POST
X("dissolve") = 1;
clearpos(N(2),1,0);
clearpos(N(3),1,0);
group(2,3,"_dbldash");
N("nopos",2) = 1;
@RULES
_xNIL <-
_xSTART
\-
\-
_xEND
@@
# Artifact of pretagged texts.
# ...
@POST
pncopyvars(X(3),N(2));
X("dissolve") = 1;
clearpos(N(3),1,0);
clearpos(N(4),1,0);
clearpos(N(5),1,0);
group(3,5,"_qEOS");
N("nopos",3) = 1;
@RULES
_xNIL <-
_xSTART
_xANY
\.
\.
\.
_xEND
@@
|
@POST
rfanodes(2, multi)
single()
@RULES
_MULTI [base] <- _soMULTI _NONLIT [star] _eoMULTI [opt] @@
|
@NODES _LINE
@RULES
_major <- BIOLOGICAL _xWHITE AND _xWHITE AGRICULTURAL _xWHITE ENGINEERING @@
_major <- COMPUTER _xWHITE SCIENCE _xWHITE AND _xWHITE ENGINEERING @@
_major <- AGRICULTURAL _xWHITE AND _xWHITE BIOLOGICAL _xWHITE ENGINEERING @@
_major <- ELECTRICAL _xWHITE AND _xWHITE COMPUTER _xWHITE ENGINEERING @@
_major <- CIVIL _xWHITE AND _xWHITE ENVIRONMENTAL _xWHITE ENGINEERING @@
_major <- INDUSTRIAL _xWHITE AND _xWHITE MANUFACTURING _xWHITE ENGINEERING @@
_major <- COASTAL _xWHITE AND _xWHITE OCEANOGRAPHIC _xWHITE ENGINEERING @@
_major <- NUCLEAR _xWHITE AND _xWHITE RADIOLOGICAL _xWHITE ENGINEERING @@
_major <- INDUSTRIAL _xWHITE AND _xWHITE SYSTEMS _xWHITE ENGINEERING @@
_major <- INSURANCE _xWHITE AND _xWHITE REAL _xWHITE ESTATE @@
_major <- HUMAN _xWHITE DEVELOPMENT @@
_major <- PETROLEUM _xWHITE AND _xWHITE NATURAL _xWHITE GAS @@
_major <- MOLECULAR _xWHITE GENETICS @@
_major <- APPAREL _xWHITE TEXTILES @@
_major <- LAW _xWHITE ENFORCEMENT @@
_major <- FOREIGN _xWHITE LANGUAGES _xWHITE AND _xWHITE LITERATURES @@
_major <- ROMANCE _xWHITE LANGUAGES _xWHITE AND _xWHITE LITERATURES @@
_major <- SLAVIC _xWHITE LANGUAGES _xWHITE AND _xWHITE LITERATURES @@
_major <- ENVIRONMENTAL _xWHITE PLANNING _xWHITE AND _xWHITE MANAGEMENT @@
_major <- TRANSPORTATION _xWHITE SYSTEMS _xWHITE AND _xWHITE MANAGEMENT @@
_major <- QUALITY _xWHITE AND _xWHITE MANUFACTURING _xWHITE MANAGEMENT @@
_major <- RECREATION _xWHITE AND _xWHITE PARK _xWHITE MANAGEMENT @@
_major <- MICROBIOLOGY @@
_major <- VOICE _xWHITE PERFORMANCE _xWHITE AND _xWHITE PEDAGOGY @@
_major <- STUDENT _xWHITE COUNSELING _xWHITE AND _xWHITE PERSONNEL @@
_major <- URBAN _xWHITE STUDIES _xWHITE AND _xWHITE PLANNING @@
_major <- URBAN _xWHITE AND _xWHITE ENVIRONMENTAL _xWHITE PLANNING @@
_major <- URBAN _xWHITE AND _xWHITE REGIONAL _xWHITE PLANNING @@
_major <- HEALTH _xWHITE EDUCATION _xWHITE AND _xWHITE PROMOTION @@
_major <- CLINICAL _xWHITE AND _xWHITE HEALTH _xWHITE PSYCHOLOGY @@
_major <- CLINICAL _xWHITE AND _xWHITE SCHOOL _xWHITE PSYCHOLOGY @@
_major <- FINANCE \, _xWHITE INSURANCE _xWHITE AND _xWHITE REAL _xWHITE ESTATE @@
_major <- ENGINEERING _xWHITE AND _xWHITE APPLIED _xWHITE SCIENCE @@
_major <- MICROBIOLOGY _xWHITE AND _xWHITE CELL _xWHITE SCIENCE @@
_major <- WILDLIFE _xWHITE AND _xWHITE FISHERIES _xWHITE SCIENCE @@
_major <- COMPUTER _xWHITE AND _xWHITE INFORMATION _xWHITE SCIENCE @@
_major <- LIBRARY _xWHITE AND _xWHITE INFORMATION _xWHITE SCIENCE @@
_major <- SOIL _xWHITE AND _xWHITE WATER _xWHITE SCIENCE @@
_major <- FISHERIES _xWHITE AND _xWHITE AQUATIC _xWHITE SCIENCES @@
_major <- BRAIN _xWHITE AND _xWHITE COGNITIVE _xWHITE SCIENCES @@
_major <- FAMILY _xWHITE AND _xWHITE CONSUMER _xWHITE SCIENCES @@
_major <- COMPUTING _xWHITE AND _xWHITE INFORMATION _xWHITE SCIENCES @@
_major <- DECISION _xWHITE AND _xWHITE INFORMATION _xWHITE SCIENCES @@
_major <- ATMOSPHERIC \, _xWHITE AND _xWHITE PLANETARY _xWHITE SCIENCES @@
_major <- DAIRY _xWHITE AND _xWHITE POULTRY _xWHITE SCIENCES @@
_major <- EXERCISE _xWHITE AND _xWHITE SPORT _xWHITE SCIENCES @@
_major <- AFRO \- AMERICAN _xWHITE AND _xWHITE AFRICAN _xWHITE STUDIES @@
_major <- AFRO \- AMERICAN _xWHITE STUDIES @@
_major <- AFRICAN _xWHITE STUDIES @@
_major <- WRITING _xWHITE AND _xWHITE HUMANISTIC _xWHITE STUDIES @@
_major <- FILM _xWHITE AND _xWHITE MEDIA _xWHITE STUDIES @@
_major <- INDUSTRIAL _xWHITE AND _xWHITE MANUFACTURING _xWHITE SYSTEMS @@
_major <- INSTRUCTIONAL _xWHITE TECHNOLOGY _xWHITE AND _xWHITE TELECOMMUNICATIONS @@
_major <- POLITICAL _xWHITE AND _xWHITE SOCIAL _xWHITE THOUGHT @@
|
@NODES _LINE
@POST
X("title") = N("$text",4);
singler(4,4);
@RULES
_title <-
title ### (1)
\= ### (2)
\" ### (3)
_xWILD [fail=(\")] ### (4)
\" ### (5)
@@
@POST
X("code") = N("$text",4);
singler(4,4);
@RULES
_code <-
_xSTART ### (1)
_xWILD [match=(_indent)] ### (2)
\" ### (3)
_xWILD [fail=(\")] ### (4)
\" ### (5)
@@ |
@NODES _ROOT
@RULES
_dash <-
\\ [s] ### (1)
textemdash ### (2)
@@
@RULES
_spacing <-
\\ [s] ### (1)
_xWILD [s match=(hspace vspace)] ### (2)
\{ [s] ### (3)
_xWILD [s fails=(\})] ### (4)
\} [s] ### (5)
@@
@POST
S("title") = N("$text",3);
G("title") = S("title");
single();
@RULES
_title <-
mytitle ### (1)
\{ ### (2)
_xWILD ### (3)
\} ### (4)
@@
@POST
S("authors") = N("$text",3);
G("authors") = S("authors");
single();
@RULES
_authors <-
myauthors ### (1)
\{ ### (2)
_xWILD ### (3)
\} ### (4)
@@
@POST
S("type") = N("$text",4);
S("title") = N("$text",6);
single();
@RULES
_section <-
\\ ### (1)
cnpsheading ### (2)
\\ ### (3)
_xWILD [plus match=(section subsection subsubsection)] ### (4)
\{ ### (5)
_xWILD ### (6)
\} ### (7)
@@
@RULES
_beginAbs <-
\\ [s] ### (1)
begin [s] ### (2)
\{ [s] ### (3)
abstract [s] ### (4)
\} [s] ### (5)
@@
@RULES
_endAbs <-
\\ [s] ### (1)
end [s] ### (2)
\{ [s] ### (3)
abstract [s] ### (4)
\} [s] ### (5)
@@
@RULES
_beginFigure <-
\\ [s] ### (1)
begin [s] ### (2)
\{ [s] ### (3)
figure [s] ### (4)
\} [s] ### (5)
@@
@RULES
_endFigure <-
\\ [s] ### (1)
end [s] ### (2)
\{ [s] ### (3)
figure [s] ### (4)
\} [s] ### (5)
@@
@RULES
_bibliography <-
\\ [s] ### (1)
bibliographystyle [s] ### (2)
\{ [s] ### (3)
_xWILD [s] ### (4)
\} [s] ### (5)
@@
@POST
S("cite") = N("$text",4);
addCite(N("$text",4));
single();
@RULES
_cite <-
\\ [s] ### (1)
cite [s] ### (2)
\{ [s] ### (3)
_xWILD [s] ### (4)
\} [s] ### (5)
@@
@POST
S("ref type") = N("$text",4);
S("ref") = N("$text",6);
L("full") = N("$text",4) + ":" + N("$text",6);
S("full") = L("full");
addReference(L("full"),S("ref type"),S("ref"));
single();
@RULES
_ref <-
\\ [s] ### (1)
ref [s] ### (2)
\{ [s] ### (3)
_xWILD [s one] ### (4)
\: [s] ### (5)
_xWILD [s] ### (6)
\} [s] ### (7)
@@
@RULES
_beginBib <-
\\ [s] ### (1)
begin [s] ### (2)
\{ [s] ### (3)
thebibliography [s] ### (4)
\} [s] ### (5)
\{ [s] ### (6)
_xNUM [s] ### (7)
\} [s] ### (8)
@@
@RULES
_endBib <-
\\ [s] ### (1)
end [s] ### (2)
\{ [s] ### (3)
thebibliography [s] ### (4)
\} [s] ### (5)
@@
@RULES
_beginEq <-
\\ [s] ### (1)
begin [s] ### (2)
\{ [s] ### (3)
equation [s] ### (4)
\} [s] ### (5)
@@
@RULES
_endEq <-
\\ [s] ### (1)
end [s] ### (2)
\{ [s] ### (3)
equation [s] ### (4)
\} [s] ### (5)
@@
@POST
S("label type") = N("$text",4);
S("label name") = N("$text",6);
S("label full") = N("$text",4) + ":" + N("$text",6);
single();
@RULES
_label <-
\\ [s] ### (1)
label [s] ### (2)
\{ [s] ### (3)
_xWILD [s] ### (4)
\: [s] ### (5)
_xWILD [s] ### (6)
\} [s] ### (7)
@@
@RULES
_beginEnum <-
\\ [s] ### (1)
begin [s] ### (2)
\{ [s] ### (3)
enumerate [s] ### (4)
\} [s] ### (5)
@@
@RULES
_endEnum <-
\\ [s] ### (1)
end [s] ### (2)
\{ [s] ### (3)
enumerate [s] ### (4)
\} [s] ### (5)
@@
@RULES
_beginSplit <-
\\ [s] ### (1)
begin [s] ### (2)
\{ [s] ### (3)
split [s] ### (4)
\} [s] ### (5)
@@
@RULES
_endSplit <-
\\ [s] ### (1)
end [s] ### (2)
\{ [s] ### (3)
split [s] ### (4)
\} [s] ### (5)
@@
|
# Fetch the right (next) sibling of a concept.
L("return_con") = next(L("con"));
@CODE
G("max header") = 0;
# Simplified since tutorials were recorded
G("resume") = getconcept(findroot(),"resume");
G("company") = getconcept(G("resume"),"company");
@@CODE |
@NODES _ROOT
@POST
excise(1,1);
noop();
@RULES
_xNIL <-
_xWHITE [s] ### (1)
@@
|
@NODES _attr
@POST
X("word") = N("$text",20);
X("type") = N("$text",8);
"words.txt" << N("$text",8) << " " << N("$text",20)<< "\n";
@RULES
_xNIL <-
\" ### (1)
concept ### (2)
\" ### (3)
\" ### (4)
sys ### (5)
\" ### (6)
\" ### (7)
_xALPHA ### (8)
\" ### (9)
\" ### (10)
_xALPHA ### (11)
\" ### (12)
\" ### (13)
_xALPHA ### (14)
\" ### (15)
\" ### (16)
_xALPHA ### (17)
\" ### (18)
\" ### (19)
_xALPHA ### (20)
\" ### (21)
@@
|
@NODES _LINE
@PRE
<1,1> cap();
@RULES
# Ex: Schooling
_EducationHeaderWord [layer=(_headerWord )] <- _xWILD [min=1 max=1 s match=("Schooling" "Qualifications" "Education")] @@
|
@CODE
# G("kb") = getconcept(findroot(),");
# SaveKB("mykb.kbb",G("pred_codes"),2);
DisplayKB(G("pred_codes"),1);
L("out") = cout();
L("out") << "This is a test.\n";
@@CODE |
@PATH _ROOT _group _subgroup _LINE
@POST
S("emoji") = N("$text",2);
S("description") = N("$text",3);
single();
@RULES
_emoji <-
\# ### (1)
_xEMOJI ### (2)
_xWILD [fail=(_xEND)] ### (3)
@@
|
@NODES _ROOT
@POST
N("field con") = next(N("field con"));
while (N("field con"))
{
addstrval(N("record con"),conceptname(N("field con"))," ");
N("field con") = next(N("field con"));
}
@RULES
_xNIL <-
_RECORD
@@ |
@CODE
if (!G("pretagged"))
exitpass();
@@CODE
@PATH _ROOT _LINE _pos _text
@POST
noop();
@RULES
_xNIL <-
_xWHITE [plus]
@@
@POST
if (X("posarr len",3))
{
N("posarr") = X("posarr",3);
N("posarr len") = X("posarr len",3);
}
else if (X("nopos",3))
N("nopos") = 1;
# Tokenize multi-element texts.
if (pnnext(N(1)))
{
xrename("_tok");
X("posarr",4) = X("posarr",3);
X("posarr len",4) = X("posarr len",3);
# Still scoring on tokens! #
# X("nopos",4) = 1;
}
@RULES
_xNIL <-
_xANY
@@
|
@NODES _sentence
@PRE
<1,1> cap();
@RULES
# Ex: California
_state <- California [s] @@
# Ex: Maryland
_state <- Maryland [s] @@
|
@PATH _ROOT _LINE _example
@POST
X("gender",2) = "male";
@RULES
_xNIL <-
_xWILD [matches=(man boy male father brother uncle)] ### (1)
@@
@POST
X("gender",2) = "female";
@RULES
_xNIL <-
_xWILD [matches=(woman girl female mother sister aunt)] ### (1)
@@ |
@PATH _ROOT _headerZone _headerZone
@POST
L("text") = strtrim(N("text"));
L("text") = strsubst(L("text"),"# ",0);
G("vocab") << " def=\"" << L("text") << "\"";
if (N("isa")) G("vocab") << " isa=" << N("isa");
if (N("is")) G("vocab") << " is=" << N("is");
@RULES
_xNIL <-
_list ### (1)
@@
|
@PATH _ROOT _attr _concept _LINE
@POST
X("word",2) = N("word",1);
@RULES
_xNIL <-
_string
_xEND
@@
|
@PATH _ROOT _LINE
@CHECK
# Only recompute for caps that were glommed in the g_caps1 pass.
if (!N("capofcap")) fail();
@POST
# if (!N("len"))
# print out no length.
# Treating confidences independently, but keep track of
# ambiguity in the phrase.
# Glommed, so must be a multi-node cap phrase.
### REJECT FIRST CAP AS HUMAN NAME.
# cap of cap is unlikely to hold a human name in a resume.
# Note: Things like "Resume of John Smith" handled separately,
# so far.
# If the unglommed cap phrase added to ambiguity, undo it.
if (N("humanname conf") >= G("threshold"))
--N("ambigs");
N("humanname conf") = 0;
if (N("hi class") == "humanname")
{
# Find runner up category here.
N("hi conf") = 0;
N("hi class") = "NULL";
if (N("school conf") > N("hi conf"))
{
N("hi class") = "school";
N("hi conf") = N("school conf");
}
if (N("company conf") > N("hi conf"))
{
N("hi class") = "company";
N("hi conf") = N("company conf");
}
if (N("job conf") > N("hi conf"))
{
N("hi class") = "job";
N("hi conf") = N("job conf");
}
if (N("field conf") > N("hi conf"))
{
N("hi class") = "field";
N("hi conf") = N("field conf");
}
}
@RULES
_xNIL <- _Caps @@
|
@CODE
sortchilds(G("languages"));
SaveKB("languages.kbb",G("languages"),2);
@@CODE |
# Build an apple kb, adding color attr with values red, yellow, and green, as well as a weight attr with numval of 3
@CODE
# if you find apples in the concept hierarchy
if (findconcept(findroot(),"apple"))
# kill them (to start fresh)
rmconcept(findconcept(findroot(),"apple"));
# Create the apple concept
G("apple") = makeconcept(findroot(),"apple");
# Apples have color
addstrval(G("apple"),"have","color");
# Apple's color is red
addstrval(G("apple"),"color","red");
# Apple's color is also green and yellow
addstrval(G("apple"),"color","green and yellow");
# Apples have a weight attribute with a numeric value of 3
addnumval(G("apple"),"weight",3);
# The code creates a KB like this:
#   apple
#     have = "color"
#     color = "red", "green and yellow"
#     weight = 3
# The following code accesses the KB's attributes and values:
# Find apple's attribute list
G("attrList") = findattrs(G("apple"));
# Find the list of values of the first attribute
G("valList") = attrvals(G("attrList"));
# print out the first attribute's name and value
if (attrname(G("attrList"))) {
    "output.txt" << "1) first attribute of apple is: " << attrname(G("attrList")) << "\n";
    "output.txt" << "2) first value of that attribute is: " << getstrval(G("valList")) << "\n";
}
# get the next attribute
G("nextAttr") = nextattr(G("attrList"));
if (attrname(G("nextAttr")))
    "output.txt" << "3) next attribute of apple is: " << attrname(G("nextAttr")) << "\n";
# get the list of values of the second attribute
G("valList") = attrvals(G("nextAttr"));
# print the first value's name
"output.txt" << "4) first value of that attribute is: " << getstrval(G("valList")) << "\n";
# print the second value of the second attribute
"output.txt" << "5) second value of that attribute is: " << getstrval(nextval(G("valList"))) << "\n";
The output looks like this:
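# (A sketch of the expected output.txt contents, assuming attributes and
# values come back in the order they were added above:)
#   1) first attribute of apple is: have
#   2) first value of that attribute is: color
#   3) next attribute of apple is: color
#   4) first value of that attribute is: red
#   5) second value of that attribute is: green and yellow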
|
# If anything follows a rulefile, error.
@POST
rfberror(2)
@RULES
_xNIL <- _RULESFILE _xANY @@
# If anything precedes a rulefile, error.
@POST
rfberror(1)
@RULES
_xNIL <-
_xWILD [plus fail=(_RULESFILE)]
_RULESFILE
@@
|
@CODE
if (!G("num lines"))
exitpass();
@@CODE
@NODES _ROOT
@POST
merge();
@RULES
_TEXTZONE <-
_TEXTZONE
_LINE [star]
@@
|
@NODES _LINE
#@CHECK
# or(1,3)  # Being applied to a non-optional here, which it shouldn't be...
# Useful if present. Should have a check here that says one of the
# wildcards ought to have some real education or experience stuff.
# Also, should treat education and experience separately.
@POST
single()
@RULES
# one line. Reasonable for many education zones, but not for
# experience.
# If date is last in a line, this fails.
# If date is not first, then this misses some information on the line.
# _dateGroup <-
# _dateBoundary [s]
# _xWILD [s plus fails=(_dateBoundary)]
# @@
_dateGroup <-
_xWILD [s star fails=(_dateBoundary)]
_dateBoundary [s trig]
_xWILD [s star fails=(_dateBoundary)]
@@
|
@PATH _ROOT _PRES _NLPPP
@POST
rfapres(1)
single()
@RULES
_PRES [base] <- _ACTION [plus] @@
|
@NODES _ROOT
@RULES
_termEntry <-
\{ ### (1)
_xWILD [fails=(\})] ### (2)
\} ### (3)
_xWILD [one matches=(\n \r)]
@@
|
@NODES _LINE
@PRE
<1,1> cap();
<3,3> cap();
@RULES
# Ex: Technical\_Skills
_SkillsHeaderPhrase [layer=(_header )] <- Technical [s] _xWHITE [star s] Skills [s] @@
|
# Check whether a concept's attribute carries a given value (multiple-value aware). Note that the second and third arguments must be strings.
L("has_attr_with_val") = attrwithval(L("con"), L("attr_str"), L("val_str"));
@PATH _ROOT _experienceZone _experienceInstance _LINE
@CHECK
if (
!X("company name",3)
&& X("lineno") == X("anchor lineno",3)
)
succeed();
fail();
@POST
X("company name",3) = N("$text");
@RULES
_xNIL <- _company [s] @@
@CHECK
if (
!X("job title",3)
&& X("lineno") == X("anchor lineno",3)
)
succeed();
fail();
@POST
X("job title",3) = N("$text");
@RULES
_xNIL <- _jobTitle [s] @@
_xNIL <- _jobPhrase [s] @@
@CHECK
if (
!X("country",3)
&& X("lineno") == X("anchor lineno",3)
)
succeed();
fail();
@POST
X("country",3) = N("$text");
@RULES
_xNIL <- _country [s] @@
|
@PATH _ROOT _doctypedecl
@RULES
_NotationDecl <-
_NotationDeclStart [one] ### (1)
_whiteSpace [opt] ### (2)
_xWILD [plus fail=("_EndTag")] ### (3)
_whiteSpace [opt] ### (4)
_EndTag [one] ### (5)
@@
_NDataDecl <-
_whiteSpace [opt] ### (1)
_xWILD [one matches=("NDATA")] ### (2)
_whiteSpace [one] ### (3)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (4)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (5)
@@
|
@PATH _ROOT _LINE
@POST
L("code") = N("$text", 2);
L("title") = N("$text", 4);
X("icd_code") = makeconcept(G("icd9_codes"), L("code"));
addstrval(X("icd_code"), "title", L("title"));
L("term_con") = makeconcept(G("icd9_terms"), L("title"));
addstrval(L("term_con"), "code", L("code"));
excise(1,3);
noop();
@RULES
_xNIL <-
_xSTART ### (1)
_xWILD [min=1 fails=(\,)] ### (2)
\, ### (3)
_xWILD [min=1 fails=(\n \r _xEND)] ### (4)
_xWILD [one matches=(\n \r _xEND)] ### (5)
@@
|
# Find 'mother' in the KB, print its parent concept's name to output.txt, then walk back down and across the siblings, printing 'mother', 'daughter', and 'mother' again
@CODE
# Find 'mother' in the KB
G("First") = findhierconcept("mother", findroot());
# goto parent concept (female)
G("Second") = up(G("First"));
"output.txt" << conceptname(G("Second")) << "\n";
# go back down to 'mother'
G("First") = down(G("Second"));
"output.txt" << conceptname(G("First")) << "\n";
# find mother's first sibling, which should be 'daughter'
G("Second") = next(G("First"));
"output.txt" << conceptname(G("Second")) << "\n";
# find daughter's previous sibling, which should be mother
G("First") = prev(G("Second"));
"output.txt" << conceptname(G("First")) << "\n";
|
@PATH _ROOT _labelEntry
#CREATE EMPTY ADJACENCY MATRIX
@CODE
L("len") = 100;
L("matrixSize") = L("len") * L("len");
L("i") = 1;
G("adjacencyMatrix") = 0;
while (L("i") < L("matrixSize")) {
G("adjacencyMatrix")[L("i")] = 0;
}
@@CODE
@POST
"test.txt" << N("$text", 1) << "\n";
single();
@RULES
_xNIL <-
_xALPHA [plus]
@@
|
@NODES _LINE
@PRE
<1,1> cap()
<3,3> cap()
@RULES
# Ex: New\_York
_NY <- New [s] _xWHITE [star s] York [s] @@
@PRE
<1,1> cap()
<4,4> cap()
@RULES
# Ex: N.\_Y.
_NY <- N [s] \. [s] _xWHITE [star s] Y [s] \. [s] @@
@PRE
<1,1> cap()
@RULES
# Ex: NY
_NY <- NY [s] @@
|
# Open a database connection, execute a statement on it, and close it
@CODE
dbopen("test","root","mypassword");
dbexec("INSERT INTO abc (name, number) VALUES('Joe','0013');");
dbclose();
@@CODE |
@DECL
AddPhrase(L("node")) {
L("con") = G("icd_p");
while (L("node")) {
L("text") = strtolower(pnvar(L("node"), "$text"));
L("con") = AddUniqueCon(L("con"), L("text"));
L("node") = pnnext(L("node"));
}
}
@@DECL
|