text
stringlengths 22
301k
|
---|
# Remove a concept's child concepts.
# NOTE(review): rmchildren() removes child concepts only; the original comment
# said "children and phrase", which looks overbroad — use rmcphrase() (see the
# companion snippet below) to remove a concept's phrase. Confirm against the
# NLP++ KB function reference.
rmchildren(L("con")); |
@CODE
# G("outfile") = G("$inputfilename"); # Todo!
fileout("xml.txt")
prlit("xml.txt", " <?xml version=\"1.0\" ?>\n")
prlit("xml.txt", "<BizTalk xmlns=\"urn:schemas-biztalk-org/biztalk-0.81.xml\">\n")
prlit("xml.txt", " <Body>\n")
prlit("xml.txt", " <Resume xmlns=\"urn:schemas-biztalk-org:HR-XML-org/Resume\">\n")
prlit("xml.txt", " <ResumeProlog>\n")
prlit("xml.txt", " </ResumeProlog>\n")
prlit("xml.txt", " <ResumeBody>\n")
#var("xml indent",2) # How much indentation for xml lines.
G("xml indent") = 2;
@@CODE
@RULES
_xNIL <- _xNIL @@
|
@NODES _ROOT
@RULES
_report <-
_xWILD[fails=(_addendum)]
@@
|
@CODE
# Locate the top-level "radlex" concept under the KB root, creating it if this
# is the first run, then clear any children left over from a previous analysis
# so the pass starts from an empty concept.
G("radlex") = findconcept(findroot(),"radlex");
if (!G("radlex")) G("radlex") = makeconcept(findroot(),"radlex");
rmchildren(G("radlex"));
@@CODE |
# Free the statement handle for the currently open database
@CODE
dbopen("test","root","mypassword");
dballocstmt();
dbexecstmt("INSERT INTO employee (name, age) VALUES('John Smith','32');");
dbfreestmt();
dbclose();
@@CODE |
# CSV field tokenization pass: layer empty fields as _item via noop() rules,
# then reduce quoted and unquoted non-empty fields to _item with a "value"
# variable. Relies on G("enclosed by")/G("escaped by") being set by an
# earlier pass — TODO confirm.
@NODES _ROOT
@POST
noop();
@RULES
#===================== Quoted Empty Items =====================
_xNIL <-
_emptyItem [layer=(_item)] ### (1)
_xWILD [one lookahead match=(_separator _lineTerminator)] ### (2)
@@
#================ First Non-Enclosed Empty Items ==============
_xNIL <-
_lineTerminator [s] ### (1)
_separator [layer=(_item)] ### (2)
@@
#============== Continuous Non-Enclosed Empty Items ===========
_xNIL <-
_separator ### (1)
_xWILD [plus match=(_separator) layer=(_item)] ### (2)
@@
#====================== Non-Empty Items =======================
@POST
# Unescape the quoted body (element 2) before storing it on the _item node.
S("value") = strunescape(N("$text",2),G("enclosed by"),G("escaped by"));
single();
@RULES
_item <-
_enclosedBy ### (1)
_xWILD [plus] ### (2)
_enclosedBy ### (3)
@@
#====================== Non-Empty Non-Quoted ================
@POST
S("value") = N("$text",1);
# singler(1,1): reduce only element 1, leaving the separator for later rules.
singler(1,1);
@RULES
_item <-
_xWILD [plus fail=("_separator" "_lineTerminator" "_enclosedBy")] ### (1)
_separator [one] ### (2)
@@
@@RULES
|
@NODES _LINE
@RULES
# Reduce a run of _space tokens at any position on the line to one _indent node.
_indent <-
_xWILD [match=(_space)] ### (1)
@@
|
# Remove a concept's phrase (its attached word list), leaving the concept
# and its child concepts intact.
rmcphrase(L("con")); |
@DECL
# indent
# Print L("n") tab characters to stream L("f").
# params
# f: file to which to print
# n: integer number of tabs to print
# return
# null
indent(L("f"), L("n")) {
L("i") = 0;
while (L("i") < L("n")) {
L("f") << "\t";
L("i")++;
}
}
# printQuotedStr: write L("str") to L("f") wrapped in double quotes.
printQuotedStr(L("f"), L("str")) {
L("f") << "\"" << L("str") << "\"";
}
# printJSONPreds
# Print results in JSON format
# Params:
# proc_list: type str array of procedure codes
# diag_list: type str array of diagnosis codes
# f: target output filename
# print_ranks: when truthy, append ": <rank>" (fltval "rank") after each code
# NOTE(review): despite "str array" above, elements are passed to
# conceptname()/fltval(), so these appear to be concept arrays — confirm.
# NOTE(review): if BOTH lists are empty/falsy, this still emits "\n}" without
# a matching "{" and decrements level below 0 — confirm callers never pass
# two empty lists.
printJSONPreds(L("proc_list"), L("diag_list"), L("f"), L("print_ranks")) {
L("first") = 1;
L("level") = 0;
if (L("proc_list")) {
# Line 1
L("first") = 0;
L("f") << "{\n";
L("level")++;
# Line 2
indent(L("f"), L("level"));
L("level")++;
printQuotedStr(L("f"), "procedures");
L("f") << ": ";
# Print codes in list
L("i") = 0;
L("f") << "[\n";
# L("level")++;
# All but the last element get a trailing comma; the loop exits with
# L("i") at the last index, which the code below prints comma-free.
while (L("i") < (arraylength(L("proc_list"))-1)) {
indent(L("f"), L("level"));
printQuotedStr(L("f"), conceptname(L("proc_list")[L("i")]));
if (L("print_ranks")) {
L("f") << ": " << fltval(L("proc_list")[L("i")], "rank");
}
L("f") << ",\n";
L("i")++;
}
indent(L("f"), L("level"));
printQuotedStr(L("f"), conceptname(L("proc_list")[L("i")]));
if (L("print_ranks")) {
L("f") << ": " << fltval(L("proc_list")[L("i")], "rank");
}
L("f") << "\n";
L("level")--;
indent(L("f"), L("level"));
L("f") << "]";
}
if (L("diag_list")) {
# Open the object here only if the procedures block did not.
if (L("first")) {
L("first") = 0;
L("f") << "{\n";
L("level")++;
}
else {
L("f") << ",\n";
}
indent(L("f"), L("level"));
L("level")++;
printQuotedStr(L("f"), "diagnoses");
L("f") << ": ";
# Print codes in list
L("i") = 0;
L("f") << "[\n";
while (L("i") < (arraylength(L("diag_list"))-1)) {
indent(L("f"), L("level"));
printQuotedStr(L("f"), conceptname(L("diag_list")[L("i")]));
if (L("print_ranks")) {
L("f") << ": " << fltval(L("diag_list")[L("i")], "rank");
}
L("f") << ",\n";
L("i")++;
}
indent(L("f"), L("level"));
printQuotedStr(L("f"), conceptname(L("diag_list")[L("i")]));
if (L("print_ranks")) {
L("f") << ": " << fltval(L("diag_list")[L("i")], "rank");
}
L("f") << "\n";
L("level")--;
indent(L("f"), L("level"));
L("f") << "]";
}
L("f") << "\n";
L("level")--;
L("f") << "}";
}
# printCSVpreds() {
# }
# conListToStrList: collect the names of L("con_list")[0] and all of its
# following siblings (via next()) into a string array.
# NOTE(review): assumes con_list is non-empty — confirm callers guarantee this.
conListToStrList(L("con_list")) {
L("iter") = L("con_list")[0];
L("str_list") = conceptname(L("iter"));
L("iter") = next(L("iter"));
while (L("iter")) {
L("str_list")[arraylength(L("str_list"))] = conceptname(L("iter"));
L("iter") = next(L("iter"));
}
return(L("str_list"));
}
@@DECL |
@NODES _ROOT
@POST
# Strip the braces and trailing newline, keeping only the entry body.
# excise(3,4) runs before excise(1,1) so the earlier element's index is
# still valid when it is removed.
excise(3,4);
excise(1,1);
single();
@RULES
_termEntry <-
\{ ### (1)
_xWILD [fails=(\})] ### (2)
\} ### (3)
_xWILD [opt matches=(\n \r)]
@@ |
@PATH _ROOT _textZone _LINE
@RULES _pessoa <- _xWILD [one match=(primeira segunda terceira)] @@
@RULES _numero <- _xWILD [one match=(singular plural)] @@
#@RULES _tempo <- pretérito _xWILD [one match=(imperfeito perfeito)] @@
#@RULES _tempo <- pretérito mais \- que \- perfeito @@
@RULES _tempo <- pretérito @@
@RULES _tempo <- perfeito @@
@RULES _tempo <- imperfeito @@
@RULES _tempo <- presente @@
@RULES _tempo <- futuro @@
@RULES _tempo <- afirmativo @@
@RULES _tempo <- negativo @@
@RULES _tempo <- futuro do presente @@
@RULES _tempo <- futuro do pretérito @@ |
@NODES _FIELDS
@POST
makeconcept(G("fields con"),N("value"));
@RULES
_xNIL <-
_item ### (1)
@@
|
@CODE
# Debug dump: write the "words" KB subtree, with full attribute detail
# (second arg = 1), to this pass's .kbb display file.
DisplayKB(G("words"),1);
@@CODE |
@NODES _paragraph
@RULES
_sentence <-
_xWILD [fail=(_endSent _BLANKLINE)] ### (1)
_xWILD [one match=(_endSent _BLANKLINE)] ### (2)
@@ |
# Specify the maximum number of nodes to match
# min=1 max=100 bounds the wildcard so an unclosed '<' cannot swallow the
# rest of the document looking for '>'.
@RULES
_htmltag <- \< _xWILD [min=1 max=100] \> @@ |
@DECL
##############
# LOOKUPWORD
# SUBJ: Lookup and record word information.
# Looks up the node's lowercased text in the dictionary. Flags initial
# capitalization ("cap"), marks words missing from the dictionary as
# "unknown" (logging them when G("verbose") is set), and records
# part-of-speech info via lookupalphadicttokz().
##############
lookupword(L("n"))
{
if (!L("n"))
return;
L("txt") = pnvar(L("n"),"$treetext");
L("lctxt") = strtolower(L("txt"));
if (strisupper(strpiece(L("txt"),0,0)))
pnreplaceval(L("n"),"cap",1); # 06/26/06 AM.
L("wcon") = dictfindword(L("lctxt"));
if (!L("wcon"))
{
pnreplaceval(L("n"),"unknown",1);
if (G("verbose"))
"unknown.txt" << L("lctxt") << "\n";
if (L("txt") == L("lctxt") && G("verbose"))
"lcunknown.txt" << L("lctxt") << "\n";
return;
}
#lookupalpha(L("lctxt"),L("n")); # [DICTZ_FIX] #
# NOTE(review): N(1) refers to the caller's rule-match context, not L("n") —
# confirm this was intended rather than L("n"). # [DICTZ_FIX] # 12/29/20 AM.
lookupalphadicttokz(L("lctxt"),N(1));
if (!pnvar(L("n"),"pos num")) # Not in dictionary.
{
if (G("verbose"))
"unknown.txt" << L("lctxt") << "\n"; # FIX: was L("lctext"), a never-assigned variable, so blanks were logged.
if (L("txt") == L("lctxt") && G("verbose"))
"lcunknown.txt" << L("lctxt") << "\n";
pnreplaceval(L("n"),"unknown",1);
pnreplaceval(L("n"),"stem",L("lctxt"));
# unknownword(N("lctext"),N(1));
}
}
@CODE
L("hello") = 0;
@@CODE
# For every alphabetic token in a text zone: look it up in the dictionary,
# then immediately regroup tokens that carry a prep/det/conj flag into the
# corresponding node type, copying node variables across the regroup.
# NOTE(review): no @@DECL precedes the @CODE above — confirm the dump did not
# drop a region terminator from the preceding @DECL block.
@NODES _TEXTZONE
@CHECK
# if (N("pos num")) # [DICTZ_FIX] #
if (N("pos")) # [DICTZ_FIX] # 12/29/20 AM.
fail(); # Fixed dict earlier.
@POST
lookupword(N(1));
if (N("prep"))
{
L("tmp") = N(1);
group(1,1,"_prep");
pncopyvars(L("tmp"),N(1));
}
else if (N("det"))
{
L("tmp") = N(1);
group(1,1,"_det");
pncopyvars(L("tmp"),N(1));
}
else if (N("conj"))
{
L("tmp") = N(1);
group(1,1,"_conj");
pncopyvars(L("tmp"),N(1));
}
@RULES
_xNIL <-
_xALPHA
@@
|
@NODES _ROOT
@POST
S("con") = N("con", 1);
excise(1,1);
single();
@RULES
_split <-
_split
_entry [plus] ### (1)
@@
|
@NODES _ROOT
@RULES
# Match comparative phrases: "more than", "less than", "greater than".
_compare <-
_xWILD [match=(more less greater)] ### (1)  # FIX: removed duplicate "less"; NOTE(review): "fewer" may have been intended — confirm
than ### (2)
@@
|
# Add concept value con_val to concept's name attribute.
addconval(L("con"), L("name"), L("con_val")); |
@NODES _paragraph
@POST
splice(1,1);
@RULES
_xNIL <-
_LINE ### (1)
@@
|
@PATH _ROOT _paragraph _sentence
@POST
N("date",5) = makeconcept(N("action",5),"date");
addstrval(N("date",5),"year",N("$text",2));
@RULES
_xNIL <-
_prep [s] ### (1)
_year [s] ### (2)
alone [s] ### (3)
_conj [s] ### (4)
_event [s] ### (5)
@@ |
@NODES _ROOT
@POST
# Strip the optional closing quote first (element 5), then the leading
# "<num>," plus optional opening quote (elements 1-3); excising the later
# span first keeps the earlier element indices valid.
excise(5,5);
S("code") = AddUniqueCon(G("icd_codes"), N("$text", 1));
# NOTE(review): "term" is stored from element 1 (the numeric code), not
# element 4 (the term text) — confirm whether N("$text",4) was intended.
addstrval(S("code"), "term", N("$text", 1));
excise(1,3);
single();
# Here we group individual icd entries (one per line),
# delete beginning and end quotes, if they exist,
# and extract codes and terms.
@RULES
_code <-
_xNUM ### (1)
\, ### (2)
\" [opt] ### (3)
_xWILD [fails=(\n \r \")] ### (4)
\" [opt] ### (5)
_xWILD [one matches=(\n \r)] ### (6)
@@
|
@NODES _LINE
@RULES
_CompleteSchoolName [] <-
The
_xWHITE [star s]
_SchoolNamePhrase
@@
|
# Find the child concept of L("con") named L("num").
# NOTE(review): findconcept() looks up a child by name; the original comment
# ("concept number num") suggests an ordinal lookup — confirm L("num") holds
# a concept name string here, not a position.
L("return_con") = findconcept(L("con"), L("num")); |
@NODES _LINE
@POST
singler(1,5); # Reduce all but the trailing whitespace.
@RULES
_url <-
_xALPHA [layer=(_protocol)]
\: [s trigger]
\/ [s]
\/ [s]
_xWILD [plus fails=(\, _xWHITE _whtSEP \[ \])]
_xWILD [opt s match=(_xWHITE _whtSEP)]
@@
|
@CODE
L("hello") = 0;
@@CODE
#@PATH _ROOT _TEXTZONE _sent _seg
@NODES _seg
# alpha alpha alpha.
# Could still want to allow first or last to be separated as
# a verb.
@CHECK
if (X("resolve") != "np")
fail();
@POST
fixnpnonhead(2);
fixnpnonhead(3);
xrename("_np");
# fixnphead
L("tmp4") = N(4);
group(4,4,"_noun");
pncopyvars(L("tmp4"),N(4));
fixnoun(N(4));
@RULES
_xNIL <-
_xSTART
_xALPHA
_xALPHA
_xALPHA
_xEND
@@
|
# Determine whether the apple concept can be yellow, red, or blue, printing
# each answer to output.txt as "N) are apples <color>? <result>".
# NOTE(review): attrwithval() is truthy when the "color" attribute carries the
# given value — confirm its printed form matches the expected true/false text.
"output.txt" << "1) are apples yellow? " << attrwithval(G("apple"), "color", "yellow") << "\n";
"output.txt" << "2) are apples red? " << attrwithval(G("apple"), "color", "red") << "\n";
"output.txt" << "3) are apples blue? " << attrwithval(G("apple"), "color", "blue") << "\n"; |
@NODES _ROOT
@POST
L("text") = N("$text",2);
if (strendswith(L("text"),"ing") && N("verb",2) && N("noun",2)) {
pnrename(N(2),"_noun");
}
@RULES
_xNIL <-
_noun ### (1)
_ambig ### (2)
@@
|
@DECL
###############################################
# General functions
###############################################
# AddUniqueCon: return the child of L("concept") named L("name"),
# creating it if it does not exist yet.
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
# AddUniqueStr: add a string attribute value unless the attribute already
# holds that exact value (or the value is falsy).
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value") && strval(L("concept"),L("attr")) != L("value"))
addstrval(L("concept"),L("attr"),L("value"));
}
# AddUniqueNum: add a numeric attribute value only if no existing value of
# the attribute equals it. Returns 1 if added, 0 if it was already present.
# Writes a trace to unique.txt.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << str(L("value")) << " " << conceptpath(L("concept")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
"unique.txt" << " value: " << str(L("num")) << "\n";
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
# AddUniqueConVal: add a concept-valued attribute only if no existing value
# has the same concept path. Returns 1 if added, 0 if already present.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
# CopyAttr: copy string attribute L("attr") from one concept to another,
# but only when the source has a value and the target does not.
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
# CopyAttrNew: like CopyAttr, but the source and target attribute names
# may differ.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
# CopyConAttr: concept-valued counterpart of CopyAttr.
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
# AttrValues: return the value list of attribute L("attr") on L("con"),
# or 0 if the attribute does not exist.
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
# LastChild: return the last child concept of L("parent").
# NOTE(review): if the parent has no children, L("last") is never assigned
# before being returned — confirm callers guarantee at least one child.
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
# MakeCountCon: create a child of L("con") with an auto-numbered name
# derived from L("count name") (e.g. "item1", "item2", ...).
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# IncrementCount: bump numeric attribute L("countname") on L("con"),
# creating it at 1 if missing. Returns the new count.
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
# CountName: produce the next auto-numbered name for root L("root"),
# e.g. "item" -> "item3" on the third call.
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# StripEndDigits: remove a trailing run of digits from L("name")
# ("abc123" -> "abc"). Returns 0 if the name is all digits.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functions
###############################################
# DumpKB: serialize the KB subtree under L("con") to <app>/kb/<file>.kb,
# logging success/failure to kb.txt.
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# TakeKB: load (take) a previously dumped KB file from <app>/kb/,
# logging the outcome to kb.txt.
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << " Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << " Taken FAILED: " << L("path") << "\n";
}
}
# ChildCount: number of immediate children of L("con").
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
# DisplayKB: pretty-print the KB subtree rooted at L("top con") to this
# pass's .kbb file. L("full") selects verbose (one attribute per line)
# versus compact attribute formatting.
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
# KBHeader: write a banner comment to the current .kbb display file.
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# DisplayFileName: build "anaNNN.kbb" from the current pass number,
# zero-padded to three digits so files sort in pass order.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
# DisplayKBRecurse: print L("con") and its following siblings at the given
# indent level, recursing into children. At level 0 only the top concept
# itself is printed (siblings of the root are deliberately skipped).
DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) {
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full"));
}
if (L("level") == 0)
return 0;
L("con") = next(L("con"));
}
}
# DisplayAttributes: print a concept's attributes as name=[v1,v2,...].
# Concept values print as paths; long strings are truncated to 20 chars in
# compact mode; numeric values print when > -1, otherwise string values.
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
# DictionaryStart: open the user attrs.kb file for appending word attributes.
# NOTE(review): uses Windows-style "\\" path separators, unlike DumpKB/TakeKB
# above which use "/" — confirm this is intentional.
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
# DictionaryWord: emit one "ind attr" record binding L("attrName") on
# L("word") to a string ("str"), number ("num"), or concept ("con") value,
# in the KB file format consumed at load time.
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
G("attrs") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
G("attrs") << "pst\n" << "\"" << L("value") << "\"";
else if (L("attrType") == "num")
G("attrs") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
G("attrs") << "pcon\n" << conceptpath(L("value"));
G("attrs") << "\nend ind\n\n";
}
# DictionaryEnd: terminate the attrs.kb stream and close the file.
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
@@DECL
|
###############################################
# FILE: XML SubSchema.pat #
# SUBJ: Put together the major blocks of an #
# XML Document #
# AUTH: Paul Deane #
# CREATED: 14/Jan/01
# DATE OF THIS VERSION: 31/Aug/01 #
# Copyright
###############################################
@NODES _ROOT
@RULES
_Prolog [unsealed] <-
_declSep [opt]
_XMLDecl [opt] ### (1)
_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")] ### (2)
_doctypedecl [one] ### (3)
_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")] ### (4)
@@
_Prolog [unsealed] <-
_declSep [opt]
_XMLDecl [one] ### (1)
_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")] ### (2)
_doctypedecl [opt] ### (3)
_xWILD [star matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")] ### (4)
@@
_Misc <-
_xWILD [plus matches=("_declSep" "_Comment" "ProcessingInstruction" "_whiteSpace")] ### (1)
@@
|
@CODE
prlit("edu.txt", "DUMP EDUCATION INSTANCES\n");
prlit("edu.txt", "------------------------\n");
@@CODE
@PATH _ROOT _educationZone _educationInstance _LINE _educationPart
@POST
ndump("edu.txt",1);
# prlit("edu.txt","-------------\n");
"edu.txt" << "--------------\n";
noop();
@RULES
_xNIL <- _Grade @@
|
@PATH _ROOT _LINE
@POST
if (N("fields",5))
N("minor conf",5) = 95;
else
N("minor conf",5) = 90;
# Override whatever was assigned to this cap phrase.
N("hi class",5) = "minor";
N("hi conf",5) = N("minor conf",5); # Still room to grow!
# noop()
@RULES
#example "Minored in English"
_xNIL <-
_minorKey [s]
_xWHITE [s star]
in [s]
_xWHITE [s star]
_Caps
@@
@POST
N("hi class",4) = "minor";
# if (N("$present",2)) bump the confidence.
if (N("fields",4))
N("hi conf",4) = 95;
else
N("hi conf",4) = 90;
# noop()
@RULES
# ex "Minor: Greek"
_xNIL <-
_minorKey [s]
# \: [s]
_xWILD [s one matches = ( \: \, ) ] # PS 01/11/00 for Mason
_xWHITE [s star]
_Caps
@@
@POST
N("hi class",3) = "minor";
if (N("fields",3))
N("hi conf",3) = 95;
else
N("hi conf",3) = 70;
# noop()
@RULES
_xNIL <-
_minorKey [s]
_xWHITE [s star]
_Caps
@@
@POST
N("hi class",1) = "minor";
N("hi conf",1) = 80;
# noop()
@RULES
_xNIL <-
_Caps
_xWHITE [s star]
_minorKey [s]
@@
|
##################################################
# FILE: SIMPLE_NUMERICS.pat #
# SUBJ: Analyse simple numeric words as numerals #
# AUTH: Paul Deane #
# CREATED: 04/Jan/01
# DATE OF THIS VERSION: 31/Aug/01 #
# Copyright
##################################################
# Recognize "<number> hundred/thousand/million/billion/trillion" and store
# the product on the reduced node as "Numeral Value".
@NODES _ROOT
@POST
G("stem") = strtolower(N("$text",5));
# NOTE(review): G("stem") is already lowercased above, so the strtolower()
# calls below are redundant (harmless).
if (strequal(strtolower(G("stem")),"hundred")) {
G("Numeral Value") = 100;
}
else if (strequal(strtolower(G("stem")),"thousand")) {
G("Numeral Value") = 1000;
}
else if (strequal(strtolower(G("stem")),"million")) {
G("Numeral Value") = 1000000;
}
else if (strequal(strtolower(G("stem")),"billion")) {
G("Numeral Value") = 1000000000;
}
else if (strequal(strtolower(G("stem")),"trillion")) {
G("Numeral Value") = 1000000000000;
}
# NOTE(review): only element 1 is converted, so the optional fraction in
# elements 2-3 ("3.5 million") is discarded — confirm whether that is
# acceptable or a float conversion was intended.
S("Numeral Value") = num(N("$text",1)) * G("Numeral Value");
single();
@@POST
@RULES
_cardinalNumeral <-
_xNUM [one] ### (1)
\. [opt] ### (2)
_xNUM [opt] ### (3)
_xWILD [star match=(_xWHITE "_whiteSpace")] ### (4)
_xWILD [one match=("hundred" "thousand" "million" "billion" "trillion")] ### (5)
@@
@@RULES
#######################
# Ordinal with integer#
#######################
@POST
G("numeralT") = N("$text",1);
S("Numeral Value") = num(G("numeralT"));
single();
@@POST
@RULES
_ordinalNumeral <-
_xNUM [s one] ### (1)
_xWILD [s min=1 max=1 match=("st" "nd" "rd" "th")] ### (2)
@@
@@RULES
#####################
# One word ordinals #
#####################
@POST
G("stem") = N("$text",1);
if (strequal(strtolower(G("stem")),"first")) {
S("Numeral Value") = 1;
}
else if (strequal(strtolower(G("stem")),"second")) {
S("Numeral Value") = 2;
}
else if (strequal(strtolower(G("stem")),"third")) {
S("Numeral Value") = 3;
}
else if (strequal(strtolower(G("stem")),"fourth")) {
S("Numeral Value") = 4;
}
else if (strequal(strtolower(G("stem")),"fifth")) {
S("Numeral Value") = 5;
}
else if (strequal(strtolower(G("stem")),"sixth")) {
S("Numeral Value") = 6;
}
else if (strequal(strtolower(G("stem")),"seventh")) {
S("Numeral Value") = 7;
}
else if (strequal(strtolower(G("stem")),"eighth")) {
S("Numeral Value") = 8;
}
else if (strequal(strtolower(G("stem")),"ninth")) {
S("Numeral Value") = 9;
}
else if (strequal(strtolower(G("stem")),"tenth")) {
S("Numeral Value") = 10;
}
else if (strequal(strtolower(G("stem")),"eleventh")) {
S("Numeral Value") = 11;
}
else if (strequal(strtolower(G("stem")),"twelfth")) {
S("Numeral Value") = 12;
}
else if (strequal(strtolower(G("stem")),"thirteenth")) {
S("Numeral Value") = 13;
}
else if (strequal(strtolower(G("stem")),"fourteenth")) {
S("Numeral Value") = 14;
}
else if (strequal(strtolower(G("stem")),"fifteenth")) {
S("Numeral Value") = 15;
}
else if (strequal(strtolower(G("stem")),"sixteenth")) {
S("Numeral Value") = 16;
}
else if (strequal(strtolower(G("stem")),"seventeenth")) {
S("Numeral Value") = 17;
}
else if (strequal(strtolower(G("stem")),"eighteenth")) {
S("Numeral Value") = 18;
}
else if (strequal(strtolower(G("stem")),"nineteenth")) {
S("Numeral Value") = 19;
}
else if (strequal(strtolower(G("stem")),"twentieth")) {
S("Numeral Value") = 20;
}
else if (strequal(strtolower(G("stem")),"thirtieth")) {
S("Numeral Value") = 30;
}
else if (strequal(strtolower(G("stem")),"fortieth")) {
S("Numeral Value") = 40;
}
else if (strequal(strtolower(G("stem")),"fiftieth")) {
S("Numeral Value") = 50;
}
else if (strequal(strtolower(G("stem")),"sixtieth")) {
S("Numeral Value") = 60;
}
else if (strequal(strtolower(G("stem")),"seventieth")) {
S("Numeral Value") = 70;
}
else if (strequal(strtolower(G("stem")),"eightieth")) {
S("Numeral Value") = 80;
}
else if (strequal(strtolower(G("stem")),"ninetieth")) {
S("Numeral Value") = 90;
}
else if (strequal(strtolower(G("stem")),"hundredth")) {
S("Numeral Value") = 100;
}
else if (strequal(strtolower(G("stem")),"thousandth")) {
S("Numeral Value") = 1000;
}
else if (strequal(strtolower(G("stem")),"millionth")) {
S("Numeral Value") = 1000000;
}
else if (strequal(strtolower(G("stem")),"billionth")) {
S("Numeral Value") = 1000000000;
}
else if (strequal(strtolower(G("stem")),"trillionth")) {
S("Numeral Value") = 1000000000000;
}
single();
@@POST
@RULES
_ordinalNumeral <-
_xWILD [s min=1 max=1 match=("first" "second" "third" "fourth" "fifth" "sixth" "seventh" "eighth" "ninth" "tenth" "eleventh" "twelfth" "thirteenth" "fourteenth" "fifteenth" "sixteenth" "seventeenth" "eighteenth" "nineteenth" "twentieth" "thirtieth" "fortieth" "fiftieth" "sixtieth" "seventieth" "eightieth" "ninetieth" "hundredth" "thousandth" "millionth" "billionth" "trillionth")] ### (1)
@@
@@RULES
#####################
# One word cardinals #
#####################
@POST
G("stem") = N("$text",1);
if (strequal(strtolower(G("stem")),"one")) {
S("Numeral Value") = 1;
}
else if (strequal(strtolower(G("stem")),"two")) {
S("Numeral Value") = 2;
}
else if (strequal(strtolower(G("stem")),"three")) {
S("Numeral Value") = 3;
}
else if (strequal(strtolower(G("stem")),"four")) {
S("Numeral Value") = 4;
}
else if (strequal(strtolower(G("stem")),"five")) {
S("Numeral Value") = 5;
}
else if (strequal(strtolower(G("stem")),"six")) {
S("Numeral Value") = 6;
}
else if (strequal(strtolower(G("stem")),"seven")) {
S("Numeral Value") = 7;
}
else if (strequal(strtolower(G("stem")),"eight")) {
S("Numeral Value") = 8;
}
else if (strequal(strtolower(G("stem")),"nine")) {
S("Numeral Value") = 9;
}
else if (strequal(strtolower(G("stem")),"ten")) {
S("Numeral Value") = 10;
}
else if (strequal(strtolower(G("stem")),"eleven")) {
S("Numeral Value") = 11;
}
else if (strequal(strtolower(G("stem")),"twelve")) {
S("Numeral Value") = 12;
}
else if (strequal(strtolower(G("stem")),"thirteen")) {
S("Numeral Value") = 13;
}
else if (strequal(strtolower(G("stem")),"fourteen")) {
S("Numeral Value") = 14;
}
else if (strequal(strtolower(G("stem")),"fifteen")) {
S("Numeral Value") = 15;
}
else if (strequal(strtolower(G("stem")),"sixteen")) {
S("Numeral Value") = 16;
}
else if (strequal(strtolower(G("stem")),"seventeen")) {
S("Numeral Value") = 17;
}
else if (strequal(strtolower(G("stem")),"eighteen")) {
S("Numeral Value") = 18;
}
else if (strequal(strtolower(G("stem")),"nineteen")) {
S("Numeral Value") = 19;
}
else if (strequal(strtolower(G("stem")),"twenty")) {
S("Numeral Value") = 20;
}
else if (strequal(strtolower(G("stem")),"thirty")) {
S("Numeral Value") = 30;
}
else if (strequal(strtolower(G("stem")),"forty")) {
S("Numeral Value") = 40;
}
else if (strequal(strtolower(G("stem")),"fifty")) {
S("Numeral Value") = 50;
}
else if (strequal(strtolower(G("stem")),"sixty")) {
S("Numeral Value") = 60;
}
else if (strequal(strtolower(G("stem")),"seventy")) {
S("Numeral Value") = 70;
}
else if (strequal(strtolower(G("stem")),"eighty")) {
S("Numeral Value") = 80;
}
else if (strequal(strtolower(G("stem")),"ninety")) {
S("Numeral Value") = 90;
}
else if (strequal(strtolower(G("stem")),"hundred")) {
S("Numeral Value") = 100;
}
else if (strequal(strtolower(G("stem")),"thousand")) {
S("Numeral Value") = 1000;
}
else if (strequal(strtolower(G("stem")),"million")) {
S("Numeral Value") = 1000000;
}
else if (strequal(strtolower(G("stem")),"billion")) {
S("Numeral Value") = 1000000000;
}
else if (strequal(strtolower(G("stem")),"trillion")) {
S("Numeral Value") = 1000000000000;
}
single();
@@POST
@RULES
_cardinalNumeral <-
_xWILD [s min=1 max=1 match=("one" "two" "three" "four" "five" "six" "seven" "eight" "nine" "ten" "eleven" "twelve" "thirteen" "fourteen" "fifteen" "sixteen" "seventeen" "eighteen" "nineteen" "twenty" "thirty" "forty" "fifty" "sixty" "seventy" "eighty" "ninety" "hundred" "thousand" "million" "billion" "trillion")] ### (1)
@@
@@RULES
|
# Increment the count of nouns,. without reducing _noun to anything
@CHECK
++G("noun count");
@POST
noop();
# If absent,
then _noun will reduce to _xNIL.
@RULES
_xNIL <- _noun @@ |
@PATH _ROOT _pronunciations _headerZone _LINE
@POST
addstrval(X("pronunciation",2),"phonemic",N("$text",1));
"debug.txt" << N("$text",1) << "\n";
@RULES
_xNIL <-
_phonemic ### (1)
@@
|
@NODES _sentence
@PRE
<1,1> cap();
<2,2> cap();
@RULES
# Ex: MD\_Files
_company <- MD [s] Files [s] @@
@PRE
<1,1> cap();
<2,2> cap();
<3,3> cap();
@RULES
# Ex: New\_Hampshire\_Medical
_company <- New [s] Hampshire [s] Medical [s] @@
@PRE
<1,1> cap();
<2,2> cap();
@RULES
# Ex: Carezani\_Med
_company <- Carezani [s] Med [s] @@
|
@NODES _ROOT
@RULES
# Group the abbreviations "no." and "inc.", and decimal numbers like "3.14",
# into single nodes so their periods are not mistaken for sentence ends.
_no <- no \. @@
_inc <- inc \. @@
_num <- _xNUM \. _xNUM @@ |
@NODES _ROOT
@POST
N("pronunciation") = makeconcept(G("word"),"pronunciation");
@RULES
_xNIL <-
_pronunciations ### (1)
@@
@POST
N("synonym") = makeconcept(G("word"),"synonym");
@RULES
_xNIL <-
_synonyms ### (1)
@@
@POST
N("derivedTerms") = makeconcept(G("word"),"derivedTerms");
@RULES
_xNIL <-
_derivedTerms ### (1)
@@
@POST
N("translation") = makeconcept(G("word"),"translation");
@RULES
_xNIL <-
_translations ### (1)
@@ |
@PATH _ROOT _LINE
@POST
S("city") = N("$text",1);
S("state") = N("$text",4);
single();
@RULES
_cityState <-
_city [s layer=(_cityName)]
\, [s]
_xWHITE [s star]
_state [s layer=(_stateName)]
@@
@CHECK
if (
N("len",1) <= 2
&& !N("capofcap",1)
&& !N("capandcap",1)
&&
(N("city conf",1) >= 40 ||
N("humanname conf",1) >= 40) # Convert.
)
succeed();
else
fail();
@POST
S("city") = N("$text",1);
S("state") = N("$text",4);
single();
@RULES
_cityState <-
_Caps [s rename=(_city) layer=(_cityName)]
\, [s]
_xWHITE [s star]
_state [s layer=(_stateName)]
@@
|
# Match _det _quan _adj _noun sequence and reduce to _np, matching _noun first
# [t] marks _noun as the trigger element: the matcher anchors on _noun and
# then checks the preceding elements.
@RULES
_np <- _det _quan _adj _noun [t]
@@ |
################################################
# FILE: Numeric Sequences.pat #
# SUBJ: Recognize numeric sequence expressions #
# like 5-15, or between 5 and 15 #
# AUTH: Paul Deane #
# CREATED: 01/Mar/01
# DATE OF THIS VERSION: 31/Aug/01 #
################################################
@NODES _ROOT
@POST
S("Numeral Value")[0] = num(N("$text",3));
S("Numeral Value")[1] = num(N("$text",7));
S("MaxArrayPos") = 1;
single();
@@POST
@RULES
_cardinalSequence <-
_xWILD [s one match=("between")] ### (1)
_xWILD [s one match=(_xWHITE "_whiteSpace")] ### (2)
_xNUM [s one] ### (3)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4)
_xWILD [s one match=("&" "and")] ### (5)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6)
_xNUM [s one] ### (7)
@@
@@RULES
@POST
S("Numeral Value")[0] = num(N("Numeral Value",3));
S("Numeral Value")[1] = num(N("Numeral Value",7));
S("MaxArrayPos") = 1;
single();
@@POST
@RULES
_cardinalSequence <-
_xWILD [s one match=("between")] ### (1)
_xWILD [s one match=(_xWHITE "_whiteSpace")] ### (2)
_cardinalNumeral [s one] ### (3)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4)
_xWILD [s one match=("&" "and")] ### (5)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6)
_cardinalNumeral [s one] ### (7)
@@
@@RULES
@POST
S("Numeral Value")[0] = num(N("Numeral Value",5));
S("Numeral Value")[1] = num(N("Numeral Value",11));
S("MaxArrayPos") = 1;
single();
@@POST
@RULES
_ordinalSequence <-
_xWILD [s one match=("between")] ### (1)
_xWILD [s one match=(_xWHITE "_whiteSpace")] ### (2)
the [s opt] ### (3)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4)
_ordinalNumeral [s one] ### (5)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6)
_xWILD [s one match=("&" "and")] ### (7)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (8)
the [s opt] ### (9)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (10)
_ordinalNumeral [s one] ### (11)
@@
@@RULES
@POST
S("Numeral Value")[0] = num(N("$text",3));
S("Numeral Value")[1] = num(N("$text",7));
S("MaxArrayPos") = 1;
single();
@@POST
@RULES
_cardinalSequence <-
_xWILD [s opt match=("from")] ### (1)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (2)
_xNUM [s one] ### (3)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4)
_xWILD [s one match=("-" "to" "through")] ### (5)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6)
_xNUM [s one] ### (7)
@@
@@RULES
@POST
S("Numeral Value")[0] = num(N("Numeral Value",3));
S("Numeral Value")[1] = num(N("Numeral Value",7));
S("MaxArrayPos") = 1;
single();
@@POST
@RULES
# Range over spelled-out cardinals, e.g. "from five to ten".
_cardinalSequence <-
_xWILD [s opt match=("from")] ### (1)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (2)
_cardinalNumeral [s one] ### (3)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4)
_xWILD [s one match=("-" "to" "through")] ### (5)  # FIX: added "-" for consistency with the _xNUM and ordinal variants of this rule
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6)
_cardinalNumeral [s one] ### (7)
@@
@@RULES
@POST
S("Numeral Value")[0] = num(N("Numeral Value",5));
S("Numeral Value")[1] = num(N("Numeral Value",11));
S("MaxArrayPos") = 1;
single();
@@POST
@RULES
_ordinalSequence <-
_xWILD [s opt match=("from")] ### (1)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (2)
the [s opt] ### (3)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4)
_ordinalNumeral [s one] ### (5)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6)
_xWILD [s one match=("-" "to" "through")] ### (7)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (8)
the [s opt] ### (9)
_xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (10)
_ordinalNumeral [s one] ### (11)
@@
@@RULES
|
@NODES _LINE
# Per-line whitespace accounting: leading indent, wide separators,
# word-level gaps, and capitalized-word counts.
@POST
xaddlen("nindent", 2)
singler(2,2) # 10/09/99 AM.
@RULES
# Count indentation separately. Doesn't add to nonwhite blob count.
_whtINDENT <- _xSTART _xWHITE [s plus]@@
# Recording long blobs of whitespace now. #
@POST
xinc("nblobs")
single() # 10/09/99 AM.
@RULES
# Note: Blobs = nonwhite regions of text in a line.
# Note: counting end of line to get the right blob count.
_whtSEP <- _xWHITE [s min=4 max=0] @@
# Because of variable spacing in text regions, allowing up to 3
# whitespace to be a normal word separation. #
@POST
++X("nblobs");
@RULES
_xNIL <- _xWHITE [s min=1 max=3] @@
# xinc("wcap") # Num of capitalized words.
@POST
++X("wcap");
@RULES
_xNIL <- _xCAP [s] @@
@PATH _ROOT _RULES
# Build a rule element from a token plus its attribute pair list.
@POST
rfaelement(1, 2)
single()
@RULES
_ELEMENT [base] <- _NONLIT _PAIRS @@
_ELEMENT [base] <- _LIT _PAIRS @@
_ELEMENT [base] <- _NUM _PAIRS @@
@PATH _ROOT _educationZone _educationInstance _LINE _school
# If city or state found within a school, prefer that.
# OVERWRITES city,state from elsewhere in education instance, if present.
@POST
X("city",3) = N("$text");
@RULES
_xNIL <- _city [s] @@
@POST
X("state",3) = N("$text");
@RULES
_xNIL <- _state [s] @@
# If some phrase is carrying a state, use it.
# Only fills the instance's state if none has been set yet.
@CHECK
if (N("state") && !X("state",3))
succeed();
fail();
@POST
X("state",3) = N("state");
@RULES
_xNIL <- _Caps @@
@CODE
# Fetch (or create) the top-level "caps" and "names" KB concepts.
G("caps") = getconcept(findroot(),"caps");
G("names") = getconcept(findroot(),"names");
@@CODE |
@NODES _LINE
@RULES
# Ex: 7/90 or 7-90 (month number + year).
_date [] <- _monthNum [s] \/ [s one] _year [s one] @@
_date [] <- _monthNum [s] \- [s one] _year [s one] @@
@RULES
# Ex: July '90
_date [] <- _monthWord [s]
\. [s opt]
\, [s opt]
_xWHITE [s]
_year [s]
@@
# Ex: Summer '90
_date [] <- _season [s]
\, [s opt]
_xWHITE [s]
_year [s]
@@
# Bare year fallback.
_date [] <- _year @@
@PATH _ROOT _paragraph _sentence
# Resolve an anaphor to the most recent prior object in the KB that is
# not itself an action; walk sentences backward, objects forward.
@CHECK
## FIND LAST MATCHING OBJECT
S("exit") = 0;
N("anaphora") = 0;
S("sentence object") = X("object");
if (N("action"))
fail();
# LOOP BACK THROUGH SENTENCES
while (!S("exit") && S("sentence object"))
{
N("object") = down(S("sentence object"));
# LOOP THROUGH OBJECTS IN SENTENCE
while (!S("exit") && N("object"))
{
if (!strval(N("object"),"action"))
S("exit") = 1;
else
N("object") = next(N("object"));
}
S("sentence object") = prev(S("sentence object"));
}
if (!N("object"))
{
"anaphora.txt" << "Failed!" << "\n";
fail();
}
@POST
# Record the resolved object and log the resolution for debugging.
N("anaphora") = N("object");
S("object") = N("object");
S("normal") = strval(N("object"),"normal");
"anaphora.txt" << "Anaphora: " << phrasetext() << "\n";
"anaphora.txt" << "  from: " << X("$text") << "\n";
"anaphora.txt" << "  Object: " << conceptname(S("object")) << "\n";
if (N("type"))
"anaphora.txt" << "  Type: " << N("type") << "\n";
"anaphora.txt" << "\n";
single();
@RULES
_company <-
_anaphora [s] ### (1)
@@ |
# Remove all phrases from a KB subhierarchy
@CODE
# Warning: Exit VisualText without saving, after running this example, or your Gram Tab
# hierarchy will lose all its samples!
G("root") = findroot(); # Get the root concept of the KB.
G("gram") = findconcept(G("root"), "gram"); # Get root of Gram hierarchy.
prunephrases(G("gram")); # Remove all the samples in the Gram hierarchy!
@@CODE |
# Find a concept in the subhierarchy of a given concept. e.g.: G("returnedConcept") = findhierconcept("child",G("ancestor")); finds the concept named "child" anywhere under the concept pointed to by G("ancestor").
L("return_con") = findhierconcept(L("name"), L("hier")); |
@PATH _ROOT _LINE _example
# Mark the enclosing example node as male when a male-gender word appears.
@POST
X("male",2) = 1;
@RULES
_xNIL <-
_xWILD [one matches=(man boy male father brother uncle)] ### (1)
@@ |
@NODES _ROOT
# Group a part-of-speech header with its definition zones; create a
# counting concept under the current word for this POS.
@POST
S("con") = MakeCountCon(G("word"),"pos");
single();
@RULES
_posZone <-
_partofspeech
_xWILD [plus match=(_defZone)] ### (1)
@@
@NODES _LINE
# Combine a degree and a field of study into _degreeInMajor, capturing
# "degree" and "major" variables for downstream output.
@POST
S("degree") = N("$text",1);
S("major") = N("$text",5);
single();
@RULES
# bachelor(,) (of) computer engineering
_degreeInMajor <-
_degree [s]
_xWHITE [s opt]
_xWILD [s one match=(\, of \- in \:)]
_xWHITE [s ]
_field [s layer=(_major)]
@@
# ex BS CS
@POST
S("degree") = N("$text",1);
S("major") = N("$text",3);
single();
@RULES
_degreeInMajor <-
_degree [s]
_xWHITE [s ]
_field [s layer=(_major)]
@@
#ex CS BS
@POST
S("degree") = N("$text",3);
S("major") = N("$text",1);
single();
@RULES
_degreeInMajor <-
_field [s layer=(_major)]
_xWHITE [s ]
_degree [s]
@@
@PATH _ROOT _group _subgroup
# Create a counted "subgroup" concept under the group's concept and
# attach the header's description string to it.
@POST
X("con",3) = MakeCountCon(X("con",2),"subgroup");
addstrval(X("con",3),"description",N("description",1));
@RULES
_xNIL <-
_subgroupHeader ### (1)
@@
# Remove child named str from parent concept con.
rmchild(L("con"), L("str")); |
# Execute a statement with the currently open statement handle
@CODE
dbopen("test","root","mypassword");
dballocstmt();
dbexecstmt("INSERT INTO abc (name, number) VALUES('Jane','0011');");
dbexecstmt("INSERT INTO abc (name, number) VALUES('Joe','0013');");
dbfreestmt();
dbclose();
@@CODE |
@PATH _ROOT _pronunciations _headerZone _LINE
# Capture slash-delimited phonemic transcriptions, e.g. /foo/.
@RULES
_phonemic <-
\/ ### (1)
_xWILD [plus fail=(\/)] ### (2)
\/ ### (3)
@@
###############################################
#            CONTENT INDEX                    #
# 1. Rules for special items like ampersands  #
#    greater than etc. plus tag elements      #
# 2. Signals for special tag types including  #
#    comments and entity references           #
###############################################
@CODE
#Now we need to make sure that the doc type is in the KB
#and properly identified for this document
G("root") = findroot() ;
G("tmp") = getconcept(G("root"),"tmp");
G("gramtab") = getconcept(G("tmp"),"gram") ;
G("ElementName") = "Elements" ;
G("EntityName") = "Entities" ;
#we get the doc type name from the name of the DTD file
G("DocTypeName") = G("$inputhead") ;
G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ;
# Create the doctype concept on first encounter.
if (G("CurrentDocType") == 0 ) {
makeconcept(G("gramtab"),G("DocTypeName")) ;
G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ;
}
# Ensure the doctype has "Entities" and "Elements" child concepts.
G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ;
if (G("Entities") == 0 ) {
makeconcept(G("CurrentDocType"),G("EntityName")) ;
G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ;
}
G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ;
if (G("Elements") == 0 ) {
makeconcept(G("CurrentDocType"),G("ElementName")) ;
G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ;
}
@@CODE
@PATH _ROOT
###################################
#           Rule set 1            #
#    Special syntactic elements   #
###################################
# Predefined XML character entities and comment/PI delimiters.
@RULES
_Ampersand <-
\& [one] ### (1)
_xWILD [one matches=("amp")] ### (2)
\; [one] ### (3)
@@
_LessThan <-
\& [one] ### (1)
_xWILD [one matches=("lt")] ### (2)
\; [one] ### (3)
@@
_GreaterThan <-
\& [one] ### (1)
_xWILD [one matches=("gt")] ### (2)
\; [one] ### (3)
@@
_APos <-
\& [one] ### (1)
_xWILD [one matches=("apos")] ### (2)
\; [one] ### (3)
@@
_Quote <-
\& [one] ### (1)
_xWILD [one matches=("quot")] ### (2)
\; [one] ### (3)
@@
_CommentStart <-
\< [one] ### (1)
\! [one] ### (2)
\- [one] ### (3)
\- [one] ### (4)
@@
_CommentEnd <-
\- [one] ### (1)
\- [one] ### (2)
\> [one] ### (3)
@@
_DoubleHyphen <-
\- [one] ### (1)
\- [one] ### (2)
@@
_StartXML <-
\< [one] ### (1)
\? [one] ### (2)
_xALPHA [s one matches=("xml")] ### (3)
@@
@@RULES
##############################################
# Rule set 2 -- Signals for specially tagged #
# items like processing instructions and     #
# comments                                   #
##############################################
@RULES
_StartProcessingInstruction <- ### (5)
\< [one] ### (1)
\? [one trig] ### (2)
@@
_EndProcessingInstruction <- ### (10)
\? [one] ### (1)
\> [one] ### (2)
@@
_CDStart <-
\< [one] ### (1)
\! [one] ### (2)
\[ [one] ### (3)
_xALPHA [s one matches=("CDATA")] ### (4)
\[ [one] ### (5)
@@
_CDEnd <-
\] [one] ### (1)
\] [one] ### (2)
\> [one] ### (3)
@@
_EndDocType <-
\] [one] ### (1)
_xWHITE [star] ### (2)
\> [one] ### (3)
@@
_EndEmptyTag <-
\/ [one] ### (1)
\> [one] ### (2)
@@
_EndTag <-
\> [one] ### (1)
@@
# Decimal character reference, e.g. &#65;
_CharRef <-
\& [one] ### (1)
\# [one] ### (2)
_xNUM [one] ### (3)
\; [one] ### (4)
@@
# Hexadecimal character reference, e.g. &#x41;
_CharRef <-
\& [one] ### (1)
\# [one] ### (2)
x [one] ### (3)
_xWILD [one matches=("xNUM" "A" "a" "B" "b" "C" "c" "D" "d" "E" "e" "F" "f")] ### (4)
\; [one] ### (5)
@@
# General entity reference &name; (XML Name chars).
_EntityRef <-
\& [one] ### (1)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (2)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (3)
\; [one] ### (4)
@@
# Parameter entity reference %name; (DTD context).
_PEReference <-
\% [one] ### (1)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (2)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (3)
\; [one] ### (4)
@@
@@RULES
@POST
#find the name of the ELEMENT being declared (name may span two wildcards)
S("buffer1") = N("$text",5) ;
S("buffer2") = N("$text",6) ;
if (S("buffer1") != 0 && S("buffer2") != 0 ) {
S("ElementName") = S("buffer1") + S("buffer2") ;
}
else if (S("buffer1") !=0)
S("ElementName") = S("buffer1") ;
else
S("ElementName") = S("buffer2") ;
# Register the element under the doctype's Elements concept.
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
if (S("CurrentElement") == 0) {
makeconcept(G("Elements"),S("ElementName")) ;
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
}
single() ;
@@POST
@RULES
_ElementDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("ELEMENT")] ### (3)
_xWHITE [plus] ### (4)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (5)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6)
@@
@@RULES
# <!NOTATION name ... -- no KB registration, just recognition.
@RULES
_NotationDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("NOTATION")] ### (3)
_xWHITE [plus] ### (4)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (5)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6)
@@
@@RULES
@POST
#find the name of the element whose attribute list is being declared
S("buffer1") = str(N("$text",5)) ;
S("buffer2") = str(N("$text",6)) ;
if (N("$text",5) && N("$text",6)) {
S("ElementName") = S("buffer1") + S("buffer2") ;
}
else if (N("$text",5)) {
S("ElementName") = N("$text",5) ;
}
else if (N("$text",6)) {
S("ElementName") = N("$text",6) ;
}
# Ensure the owning element exists under the Elements concept.
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
if (S("CurrentElement") == 0) {
makeconcept(G("Elements"),S("ElementName")) ;
S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ;
}
single() ;
@@POST
@RULES
_AttlistDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("ATTLIST")] ### (3)
_xWHITE [plus] ### (4)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (5)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6)
@@
@@RULES
# <!ENTITY start -- the entity name is handled by a later pass.
@RULES
_EntityDeclStart <-
\< [one] ### (1)
\! [one] ### (2)
_xWILD [s one matches=("ENTITY")] ### (3)
_xWHITE [plus] ### (4)
@@
@@RULES
@NODES _ROOT
# Delete any node that is neither a label entry nor an alphanumeric.
@POST
excise(1,1);
noop();
@RULES
_xNIL <-
_xWILD [fails=(_labelEntry _alphaNum)] ### (1)
@@
@CODE
# Log a run timestamp, then strip CR characters from CRLF line endings.
"output.txt" << "[" << today() << "]" << "\n";
@@CODE
@NODES _ROOT
# NOTE(review): multiple excise() calls in one @POST — confirm that
# element numbering is unaffected by earlier excisions here.
@POST
excise(1,1);
excise(3,3);
excise(5,5);
@RULES
_xNIL <- \r [optional] \n \r [min=0 max=1] \n
_xWILD [min=0 max=0 match=(\r \n)]
@@
@POST
excise(1,1);
@RULES
_xNIL <- \r \n @@
@CODE
fileout("exp_anchors.txt"); # 06/10/00 AM.
@@CODE
# Look for an anchor in each line of experience zone.
# Would like a way to refer to vars in the experience zone of:
@PATH _ROOT _experienceZone _LINE
# eg, X("var", 2) refers to var in 2nd component of path.
# opt! Can reverse the list and change the numbers accordingly,
# internally. Or make it an array, better yet.
# For each anchor, mark that its line contains an anchor.
@POST
++X("nanchors");
# Would like to layer _ANCHOR onto context node here!
# (Could use xrename as temporary rename.
if (!X("first anchor",2)) # Track first anchor lineno.
{
X("first anchor",2) = X("lineno"); # 01/01/00 AM.
if (X("lineno") > 1) # More than just a header before.
X("noprestuff",2) = 0; # There are lines before anchors.
else
X("noprestuff",2) = 1;
}
# Dump context and anchor node for debugging.
"exp_anchors.txt" << "------------------------" << "\n";
xdump("exp_anchors.txt",2);
"exp_anchors.txt" << "---------" << "\n";
ndump("exp_anchors.txt",1);
# noop()
@RULES
_xNIL <- _DateRange [s] @@
_xNIL <- _SingleDate [s] @@ # Trying out. # 01/01/00 AM.
@NODES _LINE
# Normalize state/country phrase variants to single nonterminals.
@RULES
_state <- _xWILD [s one match=( _statePhrase _PostalState )] @@
_country <- _xWILD [s one match=( _countryPhrase _countryWord )] @@
@NODES _NLPPP
# Catch the start of a function call here, so it won't be grabbed by
# expression grammar. #
# Added L local var reference. #
@POST
fncallstart()
single()
@RULES
# Variable reference opening an argument list, e.g. G( , N( , L( .
_VARLIST [base] <-
_xWILD [s one match=( s G N X P L ) layer=(_VARNAME)]
\( @@
# Eg, user::func() #
@POST
scopefncallstart(1,4)
single()
@RULES
_FNCALLLIST [base] <- _LIT \: \: _LIT [layer=(_FNNAME)] \( @@
@POST
fncallstart()
single()
@RULES
_FNCALLLIST [base] <- _LIT [layer=(_FNNAME)] \( @@
@POST
movesem(2) # Move expr semantic object up the tree.
single()
@RULES
# NOTE: Need a better action to grab the num, str.
# Parenthesized expression.
_EXPR <- \( _xWILD [s one match=( _EXPR _NUM _FLOAT _STR )] \) @@
# NOTE: Ambiguity with _LIST must be resolved.
# Multiplicative level (highest binary precedence).
@POST
rfaexpr(1,2,3)
# single() #
singler(1,3) # 08/01/00 AM.
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( \* \/ \%
_opCONF # 12/17/99 AM.
)]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one fail=(_opINC _opDEC)] # 08/01/00 AM.
@@
# Handling precedence. That's why these rules look funny.
# Additive level: only reduce when no higher-precedence op follows.
@POST
rfaexpr(1,2,3)
singler(1,3)
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( \+ \- )]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF ) except=( \/ \* \%
_opCONF # 12/17/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
# Relational level: reduce only when no arithmetic op follows.
@POST
rfaexpr(1,2,3)
singler(1,3)
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( \< \> _opLE _opGE _opEQ _opNEQ )]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF ) except=( \/ \* \% \+ \-
_opCONF # 12/17/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
# Logical AND/OR level.
@POST
rfaexpr(1,2,3)
singler(1,3)
@RULES
_EXPR <-
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s t one match=( _opAND _opOR )]
_xWILD [s one match=(_EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF )
except=( \/ \* \% \+ \- \< \> _opLE _opGE _opEQ _opNEQ
_opCONF # 12/17/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
# LOWEST PRECEDENCE of any operator except output op (<<).
_EXPR <-
_VAR [s]
\= [s]
_xWILD [s one match=( _EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF )
except=( \/ \* \% \+ \- \< \> _opLE _opGE _opEQ _opNEQ
_opAND _opOR
_opCONF # 12/17/99 AM.
\= # To associate right to left. # 12/31/99 AM.
_opINC _opDEC # 08/01/00 AM.
)]
@@
# Output operator! #
# LOWEST PRECEDENCE of any operator.
_EXPR <-
_xWILD [s one match=(_STR _EXPR)]
_opOUT [s]
_xWILD [s one match=( _EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF )
except=( \/ \* \% \+ \- \< \> _opLE _opGE _opEQ _opNEQ
_opAND _opOR
_opCONF
\=
_opINC _opDEC # 08/01/00 AM.
)]
@@
@POST
rfaunary(1,2)
singler(1,2)
@RULES
# Unary operators.
# Highest precedence, apart from post operators.
# Pre-increment/decrement and logical not.
_EXPR <- _xWILD [s one match=( _opINC _opDEC )]
_VAR [s]
_xWILD [s one match=( _xANY _xEND _xEOF) except=( _opINC _opDEC)]
@@
_EXPR <- \! [s]
_xWILD [s one match=( _EXPR _NUM _FLOAT _STR )]
_xWILD [s one match=( _xANY _xEND _xEOF) except=( _opINC _opDEC)]
@@
# Highest precedence operators.
# Post-increment/decrement.
@POST
rfapostunary(1,2)
single()
@RULES
_EXPR <-
_VAR [s]
_xWILD [s one match=( _opINC _opDEC )]
@@
# Post unary ops have precedence.
# Unary plus/minus sign.
@POST
rfaunary(2,3)
singler(2,3)
@RULES
# Only do this if you're at the start of something or there's an
# operator to the left.
_EXPR <-
_xWILD [s one match=( _xSTART \< \> \+ \- \* \/ \% \! \=
_opINC _opDEC _opLE _opGE _opEQ _opNE _opAND _opOR
_opCONF # 12/17/99 AM.
_opOUT # 12/31/99 AM.
)]
_xWILD [s t one match=( \- \+ )]
_xWILD [s one match=( _EXPR _NUM _FLOAT )]
_xWILD [s one match=( _xANY _xEND _xEOF ) except=( _opINC _opDEC)]
@@
###################################
# GENERALIZED FUNCTION CALL GRAMMAR. #
###################################
# LIST GRAMMAR.
# FUNCTION CALL GRAMMAR.
# Accumulate comma-separated arguments onto a variable-reference list.
@POST
addarg(1,0,3) # 05/26/02 AM.
listadd(1,3)
@RULES
_VARLIST <- _VARLIST
\, [opt]
_xWILD [one match=(_EXPR _NUM _STR)]
_xWILD [one match=( \, \) )] # lookahead.
@@
# Accumulate function-call arguments (optionally by reference).
@POST
addarg(1,3,4) # 05/26/02 AM.
listadd(1,4)
@RULES
_xNIL <-
_FNCALLLIST
\, [opt]
\& [opt] # Call by reference! # 05/26/02 AM.
_xWILD [one match=(_EXPR _NUM _FLOAT _STR)]
_xWILD [one match=( \, \) )] # lookahead.
@@
#PUT ARRAY GRAMMAR IN HERE. #
@POST
varfnarray(1,4) # 10/13/00 AM.
single() # 10/13/00 AM.
@RULES
_VAR [layer=(_EXPR)] <- # 10/13/00 AM.
_VARLIST
\)
\[
_xWILD [opt match=(_EXPR _NUM)] # 10/13/00 AM.
# Making this OPTIONAL to catch ARRAY ASSIGNMENT. #
\]
@@
# Close a plain variable reference (not an array access).
@POST
varfn()
singler(1,2) # 10/15/00 AM.
@RULES
_VAR [layer=(_EXPR)] <- _VARLIST \)
_xWILD [one fail=( \[ )] # lookahead. # 10/15/00 AM.
@@
# Close a function call; brace form starts a function definition body.
@POST
movesem(1)
singler(1,2)
@RULES
_FNCALL <- _FNCALLLIST \) \{ @@
@POST
movesem(1)
single()
@RULES
_FNCALL [layer=(_EXPR)] <- _FNCALLLIST \) @@
@PATH _ROOT _synonyms _headerZone _LINE
# Group runs of alphabetic tokens in the synonyms zone into _synonym.
@RULES
_synonym <-
_xWILD [plus match=(_xALPHA)] ### (1)
@@
# Fetch attribute named str belonging to concept con.
L("return_attr") = findattr(L("con"), L("str")); |
@PATH _ROOT _textZone _headerZone _LINE
# Copy morphological features found on the line up to the header zone,
# then recognize {conj/xx|stem|verb} conjugation templates.
@POST X("lang") = N("lang");
# NOTE(review): "_langauge" spelling presumably matches the rule name
# defined in an earlier pass -- verify before renaming.
@RULES _xNIL <- _langauge @@
@POST X("pessoa",3) = X("pessoa",4); X("meaning",3) = 1;
@RULES _xNIL <- _pessoa @@
@POST X("numero",3) = X("numero",4); X("meaning",3) = 1;
@RULES _xNIL <- _numero @@
@POST X("tempo",3) = X("tempo",4); X("meaning",3) = 1;
@RULES _xNIL <- _tempo @@
@POST X("root",3) = X("root",4); X("meaning",3) = 1;
@RULES _xNIL <- _root @@
@POST
X("stem",3) = N("$text",6);
X("verb",3) = N("$text",8);
IncrementCount(G("conjugations"),"count");
AddUniqueCon(G("conjugations"),N("$text",8));
single();
@RULES
_conjug <-
_xWILD [match=(\{)] ### (1)
conj ### (2)
\/ ### (3)
_xALPHA ### (4)
\| ### (5)
_xALPHA ### (6)
\| ### (7)
_xALPHA ### (8)
_xWILD [match=(\})] ### (9)
@@ |
@CODE
# Dump every concept under G("numbers") to the "numbers.dict" dictionary
# file, one line per word: <word> numeric=<value>.
L("file") = "numbers.dict";
L("number") = down(G("numbers"));
# Register "numeric" as a known word/attribute in the KB dictionary.
addword("numeric");
while (L("number")) {
L("name") = conceptname(L("number"));
# Fixed typo: key was written as "numermic"; the value is read from the
# concept's "numeric" attribute, so the emitted key must match it.
L("file") << L("name") << " numeric=" << str(numval(L("number"),"numeric")) << "\n";
L("number") = next(L("number"));
}
@@CODE |
@PATH _ROOT _experienceZone
# Emit one <Position> element per experience instance that has a date range.
@CHECK
# Require the instance to have a daterange.
if (!N("date range",1)) fail();
@POST
prlit("output.xml","<Position>\n");
#prlit("output.txt","Date: ");
#fprintnvar("output.txt","date range",1);
#prlit("output.txt","\n");
prlit("output.xml","\n<EmployerName>");
#fprintnvar("output.xml","_company",1);
prtree("output.xml",1,"_EmployerName");
prlit("output.xml","</EmployerName>\n");
prlit("output.xml","\n<JobTitle>");
#fprintnvar("output.xml","job title",1);
prtree("output.xml",1,"_jobTitle");
prlit("output.xml","</JobTitle>\n");
# Compose a display location (city, state/country) -- note N("loc") is
# built but the <Location> element below prints the _cityState subtree.
if (N("city"))
{
if (N("state"))
N("loc") = N("city") + ", " + N("state");
else if (N("country"))
N("loc") = N("city") + ", " + N("country");
else
N("loc") = N("city");
}
else if (N("country"))
N("loc") = N("country");
prlit("output.xml", "\n<Location>");
prtree("output.xml",1,"_cityState");
prlit("output.xml","</Location>\n");
prlit("output.xml","<Date>\n<StartDate>");
prtree("output.xml",1,"_fromDate");
prlit("output.xml","</StartDate>\n<EndDate>");
prtree("output.xml",1,"_toDate");
prlit("output.xml","</EndDate>\n</Date>");
prlit("output.xml","\n</Position>\n");
@RULES
_xNIL <- _experienceInstance @@ |
@NODES _LINE
@RULES
# Ex: current -- words meaning "ongoing as of now" in a date range.
_Present <- _xWILD [min=1 max=1 s match=("current" "date" "now" "today" "present")] @@
@PATH _ROOT _paragraph _sentence
# Log capitalized phrases of the form "<Caps> for <Caps>" (e.g. agency
# names); the other candidate patterns below are kept commented out.
# @POST
# G("out") << phrasetext() << "\n";
# @RULES
# _xNIL <-
#	_xWILD [plus match=(_xCAP _USA)] ### (1)
#	bureau ### (2)
#	@@
@POST
G("out") << phrasetext() << "\n";
@RULES
_xNIL <-
_xWILD [plus match=(_xCAP _USA)] ### (1)
for ### (2)
_xWILD [plus match=(_xCAP _USA)] ### (3)
@@
# @POST
# G("out") << phrasetext() << "\n";
# @RULES
# _xNIL <-
#	section [trig] ### (1)
#	of ### (2)
#	_xWILD [plus match=(_xCAP _USA)] ### (3)
#	@@
# @POST
# G("out") << phrasetext() << "\n";
# @RULES
# _xNIL <-
#	department ### (1)
#	of ### (2)
#	_xWILD [min=1 max=7 match=(_xCAP and)] ### (3)
#	@@
# @POST
# L("node") = N(1);
# L("node") = pnprev(L("node"));
# L("phrase") = strtolower(N("$text"));
# L("continue") = 1;
# while (L("node") && L("continue")) {
# 	L("text") = strtolower(pnvar(L("node"),"$text"));
# 	if (!pnvar(L("node"),"functword")) {
# 		L("phrase") = L("text") + " " + L("phrase");
# 	} else {
# 		L("continue") = 0;
# 	}
# 	L("node") = pnprev(L("node"));
# }
# G("out") << L("phrase") << "\n";
# @RULES
# _xNIL <-
#	department ### (1)
#	@@
@PATH _ROOT _DECL _NLPPP
# Collect a run of function definitions into a declarations block.
@POST
rfbdecls(1)
single()
@RULES
_DECL [base] <- _FUNCDEF [plus] @@
@NODES _ROOT
# Group a % comment marker and its text up to end-of-line into _COMMENT
# (element 3 is lookahead only, so it stays outside the group).
@POST
group(1,2,"_COMMENT");
@RULES
_xNIL <-
\% ### (1)
_xWILD ### (2)
_xWILD [lookahead matches=(_NEWLINE _BLANKLINE)] ### (3)
@@
@CODE
# # Essentially iterate over pred_codes, setting
# # rank(code) = max(code_ranks)
# # Print results to file.
# L("iter") = down(G("pred_codes"));
# L("ranks")
# while (L("iter")) {
# 	L("child_attrs") = findattrs(L("iter"));
# 	# Iterate over each attr (rank) of parent
# 	L("curr_max") = 0.0;
# 	while (L("child_attrs")) {
# 		# Since repeated (list) values are, by design, the same,
# 		# We only have to deal with the first one.
# 		"raw_ranks.txt" << attrname(L("child_attrs")) << "\n";
# 		L("rank") = getstrval(attrvals(L("child_attrs")));
# 		"raw_ranks.txt" << L("rank") << "\n";
# 		if (flt(L("rank")) > flt(L("curr_max"))) {
# 			L("curr_max") = L("rank");
# 		}
# 		L("child_attrs") = nextattr(L("child_attrs"));
# 	}
# 	"final_ranks.txt" << conceptname(L("iter")) << ": " << L("curr_max") << "\n";
# 	L("iter") = next(L("iter"));
# }
# For each predicted code, aggregate its per-attribute rank values into
# max, sum and mean, stored back on the concept as string values.
L("iter") = down(G("pred_codes"));
while (L("iter")) {
L("child_attrs") = findattrs(L("iter"));
"aggregate.txt" << "Getting ranks for " << conceptname(L("iter"));
# Iterate over each attr (rank) of parent
L("curr_max") = 0.0;
L("sum") = 0.0;
L("count") = 0;
while (L("child_attrs")) {
# Since repeated (list) values are, by design, the same,
# We only have to deal with the first one.
L("rank") = getstrval(attrvals(L("child_attrs")));
"aggregate.txt" << "\t" << attrname(L("child_attrs")) << "\n";
if (flt(L("rank")) > flt(L("curr_max"))) {
L("curr_max") = L("rank");
}
L("sum") = flt(L("sum")) + flt(L("rank"));
L("count")++;
L("child_attrs") = nextattr(L("child_attrs"));
}
addstrval(L("iter"), "max", L("curr_max"));
addstrval(L("iter"), "sum", str(L("sum")));
L("mean") = L("sum") / flt(L("count"));
addstrval(L("iter"), "mean", str(L("mean")));
L("iter") = next(L("iter"));
}
@@CODE |
@MULTI _ROOT _section _sentence _subsection
# Remove negated spans: POST negations delete the preceding tokens,
# PREN negations delete the following tokens (up to 5 either way).
@PRE
<2,2> vareq("negation", "POST");
@POST
excise(1,2);
# single();
@RULES
_xNIL <-
_xWILD [min=0 max=5 matches=(_xALPHA _xNUM _conj _phrase _patientID \, \-)]
_negation
@@
@PRE
<1,1> vareq("negation", "PREN");
@POST
excise(1,2);
# single();
@RULES
_xNIL <-
_negation
_xWILD [min=0 max=5 matches=(_xALPHA _xNUM _conj _phrase _patientID \, \-)]
@@
@NODES _LINE
# Track the current code's KB concept, then index each word of the line
# under both the words hierarchy and the code's concept.
@POST
X("con") = makeconcept(G("codes"),N("$text",1));
X("word count") = 0;
@RULES
_xNIL <-
_code ### (1)
@@
@POST
L("word") = strtolower(N("$text",1));
# Queue unknown alphabetic words (len > 2) for spelling lookup.
if (strlength(L("word")) > 2 && strisalpha(L("word"))) {
"words.txt" << L("word") << "\n";
if (!spellword(L("word"))) {
AddUniqueCon(G("lookups"),L("word"));
}
}
N("word con") = AddUniqueCon(G("words"),L("word"));
makeconcept(N("word con"),conceptname(X("con")));
WordPOS(L("word"));
X("code con") = AddUniqueCon(X("con"),L("word"));
@RULES
_xNIL <-
_word
@@ |
@CODE
# Re-serialize the parsed field/record KB back to delimited text,
# re-escaping enclosure characters in each value.
G("enclosed by out") = G("enclosed by");
G("separator out") = G("separator");
G("lineTerminator out") = G("lineTerminator");
G("escaped by out") = G("escaped by");
#======================== Dump Fields =========================
G("field con") = down(G("fields con"));
while (G("field con"))
{
"output.txt" << G("enclosed by out");
"output.txt" << conceptname(G("field con"));
"output.txt" << G("enclosed by out");
G("field con") = next(G("field con"));
if (G("field con"))
"output.txt" << G("separator out");
}
"output.txt" << G("lineTerminator out");
#======================== Dump Records ========================
G("record con") = down(G("records con"));
while (G("record con"))
{
G("record attrs") = findattrs(G("record con"));
while (G("record attrs"))
{
G("values") = attrvals(G("record attrs"));
"output.txt" << G("enclosed by out");
G("value") = getstrval(G("values"));
# A single space marks an empty field; skip escaping/printing it.
if (G("value") != " ")
{
G("value") = strescape(G("value"),G("enclosed by out"),G("escaped by out"));
"output.txt" << G("value");
}
"output.txt" << G("enclosed by out");
G("record attrs") = nextattr(G("record attrs"));
if (G("record attrs"))
"output.txt" << G("separator out");
}
G("record con") = next(G("record con"));
if (G("record con"))
"output.txt" << G("lineTerminator out");
}
@@CODE
@NODES _LINE
# Disambiguation layers: tokens that are simultaneously cities/countries/
# states/degrees/companies get a base name plus alternative layers.
@RULES
# for State College, PA
_city[layer = (_SchoolRoot)] <-
State [s] _xWHITE [s] College [s] @@
# City-states and city-like country names.
_city [layer = (_country)] <- Bermuda [s] @@
_city [layer = (_country)] <- Djibouti [s] @@
_city [layer = (_country)] <- Gibraltar [s] @@
_city [layer = (_country)] <- Grenada [s] @@
_city [layer = (_country)] <- Guam [s] @@
# NOTE(review): "Guatamala" spelling -- presumably intentional to match
# source texts; confirm before correcting to "Guatemala".
_city [layer = (_country)] <- Guatamala [s] @@
_city [layer = (_country)] <- Holland [s] @@
_city [layer = (_country)] <- Lebanon [s] @@
_city [layer = (_country)] <- Luxembourg [s] @@
_city [layer = (_country)] <- Macau [s] @@
_city [layer = (_country)] <- Mexico [s] @@
_city [layer = (_country)] <- Monaco [s] @@
_city [layer = (_country)] <- Palestine [s] @@
_city [layer = (_country)] <- Panama [s] @@
_city [layer = (_country)] <- Peru [s] @@
_city [layer = (_country)] <- Singapore [s] @@
_city [layer = (_country)] <- Vatican [s] _xWHITE [s] City [s] @@
# New York can be a city or a state
_city [layer = (_state)] <- New [s] _xWHITE York [s] @@
_city [layer = (_state)] <- N [s] \. [s] Y [s] \. [s] @@
_city [layer = (_state)] <- NY [s] @@
_city [] <- NYC @@
# MD can be a degree or a state
_degreeInMajor [layer = (_state)] <- MD [s] @@
# catch S.C. before it turns into south carolina
_degreeInMajor [] <- M [s] \. [s] S[s] \. [s] C [s] \. [s] S [s] \. [s] @@
# Masters or Massachusetts
_degree [layer = (_state) ] <- MA [s] @@
_degree [layer = (_state) ] <- AA [s] @@
_degree [layer = (_state) ] <- M [s] \. [s] A [s] \.[s] @@
# Ontario is both a state and a city
_city [layer = (_state)] <- Ontario [s] @@
# Some company names match hardware or software
_hardware [layer = (_company) ] <- IBM [s] @@
_hardware [layer = (_company) ] <- SUN [s] @@
_hardware [layer = (_company) ] <- DEC [s] @@
_hardware [layer = (_company) ] <- HP [s] @@
_hardware [layer = (_company) ] <- Intel [s] @@
_hardware [layer = (_company) ] <- Dell [s] @@
_hardware [layer = (_company) ] <- Compaq [s] @@
_hardware [layer = (_company) ] <- Sony [s] @@
_hardware [layer = (_company) ] <- NEC [s] @@
_hardware [layer = (_company) ] <- Fujitsu [s] @@
# this one is really ugly
_software [layer = (_company _degree _state) ] <- MS [s] @@
# P.O. Box address line.
_poBoxNumber [layer = _addressLine ]<-
P [s] \. [s opt] O [s] \. [s opt] _xWHITE [star s]
Box [s] _xWHITE [star s] _xNUM [s] @@
# Match rank starting with "No." followed by any amount of white space and a number. Reduce to _rank
@RULES
_rank <- No \. _xWHITE [star]
_xNUM @@ |
@CODE
L("hello") = 0;
# Pass only runs when highlighting mode is on.
if (!G("hilite")) # 10/25/10 AM.
exitpass(); # 10/25/10 AM.
@@CODE
#@PATH _ROOT _TEXTZONE _sent _clause
@NODES _clause
# Match-only pass: touches noun/verb/adj nodes for highlighting,
# changes nothing in the parse tree.
@POST
noop();
@RULES
_xNIL <- _xWILD [one match=(
_noun _verb _adj
)]
@@
@CODE
# Dump the "headers" KB subtree for inspection.
G("headers") = findconcept(findroot(),"headers");
DisplayKB(G("headers"),0);
@@CODE |
@CODE
# Reset the "words" hierarchy and ensure adjacency-matrix data exists.
G("words") = findconcept(findroot(),"words");
if (!G("words")) G("words") = makeconcept(findroot(),"words");
rmchildren(G("words"));
G("alphaNumeric") = makeconcept(G("words"), "AlphaNumeric");
addattr(G("alphaNumeric"), "terms");
G("adjMatrixData") = findconcept(findroot(),"adjMatrixData");
if (!G("adjMatrixData")) G("adjMatrixData") = makeconcept(findroot(),"adjMatrixData");
@@CODE |
@NODES _ROOT
# Reduce LaTeX \textit{...} to _text, keeping only the braced content
# (singlex keeps just element 4 as the node's text).
@POST
singlex(4,4);
@RULES
_text <-
\\ [s] ### (1)
_xWILD [s one matches=(textit)] ### (2)
\{ [s] ### (3)
_xWILD [s fail=(\})] ### (4)
\} [s] ### (5)
@@ |
@NODES _ROOT
# Strip markup tags from the tree.
@POST
excise(1,1);
@RULES
_xNIL <-
_tag ### (1)
@@
@NODES _ROOT
# Rename each alphabetic node to "X" (all-uppercase) or "x" (otherwise),
# producing a case-shape skeleton of the text.
@POST
if (strisupper(N("$text")))
pnrename(N(1),"X");
else
pnrename(N(1),"x");
@RULES
_xNIL <-
_xALPHA
@@
@DECL
###############################################
# AddWord: record a Portuguese dictionary entry for a wiki word node.
#   word - the literal word text.
#   lang - language code; anything except "pt" is ignored.
#   node - parse node carrying "header" (section title) plus verb attrs.
# Side effects: adds concepts under G("words"); for a "conjugação" header,
# expands the verb's full regular -ar/-er/-ir paradigm.
###############################################
AddWord(L("word"),L("lang"),L("node")) {
if (L("lang") != "pt") {
return;
}
L("header") = strtolower(pnvar(L("node"),"header"));
L("pos") = PosStr(L("header"));
"add.txt" << L("word") << " " << L("pos") << "\n";
if (L("pos")) {
# Normal entry: one word concept plus a "meaning" child per reading.
L("con") = AddUniqueCon(G("words"),L("word"));
AddMeaning(L("con"),L("pos"),L("lang"),L("node"),"pessoa");
if (pnvar(L("node"),"pessoa2")) {
# Some templates carry a second person value; record it too.
AddMeaning(L("con"),L("pos"),L("lang"),L("node"),"pessoa2");
}
} else if (L("header") == "conjugação") {
# Conjugation section: compute the stem and infinitive ending, then
# expand the whole regular paradigm.
L("verb") = pnvar(L("node"),"verb");
L("stem") = pnvar(L("node"),"stem");
L("verb") = GetVerbEnding(L("stem"),L("verb"));
# GetVerbEnding returns the 2-letter ending and passes the combined
# stem back through the G("GetVerbEnding") global side channel.
L("stem") = G("GetVerbEnding");
"stem.txt" << L("stem") << "\n";
if (strstartswith(L("verb"),"c")) {
# Ending came back starting with "c": move the "c" onto the stem.
# TODO(review): confirm which templates actually hit this branch.
L("stem") = L("stem") + "c";
L("verb") = strpiece(L("verb"),1,strlength(L("verb"))-1);
}
if (L("verb") == "ar") {
AddVerbAr(L("stem"));
} else if (L("verb") == "er") {
AddVerbEr(L("stem"));
} else if (L("verb") == "ir") {
AddVerbIr(L("stem"));
}
} else if (L("lang") == "pt") {
# Word with no part of speech
G("debug") << L("word") << " " << L("lang") << " <-- No POS\n";
}
}
###############################################
# AddMeaning: attach one "meaning" child to a word concept, copying
# pos/lang plus the verb attributes found on the parse node.
#   pessoa - NAME of the node attribute holding the person value
#            ("pessoa" or "pessoa2"); AddWordAttr canonicalizes the key.
###############################################
AddMeaning(L("con"),L("pos"),L("lang"),L("node"),L("pessoa")) {
L("meaning") = MakeCountCon(L("con"),"meaning");
AddUniqueStr(L("meaning"),"pos",L("pos"));
AddUniqueStr(L("meaning"),"lang",L("lang"));
AddWordAttr(L("meaning"),"root",L("node"));
AddWordAttr(L("meaning"),L("pessoa"),L("node"));
AddWordAttr(L("meaning"),"numero",L("node"));
AddWordAttr(L("meaning"),"tempo",L("node"));
}
###############################################
# AddWordAttr: copy node attribute "attr" (when present) onto the meaning
# concept, normalized through VerbAttribute. A "pessoa2" source attribute
# is stored under the canonical key "pessoa".
###############################################
AddWordAttr(L("meaning"),L("attr"),L("node")) {
L("at") = pnvar(L("node"),L("attr"));
if (L("at")) {
L("at") = VerbAttribute(L("at"));
if (L("attr") == "pessoa2") {
L("attr") = "pessoa";
}
AddUniqueStr(L("meaning"),L("attr"),L("at"));
}
}
###############################################
# VerbAttribute: normalize a Portuguese grammatical label to a short code.
# Person: primeira/segunda/terceira -> p/s/t; number: singular/plural ->
# s/p (person and number codes overlap, but they live in different
# attributes so they do not collide). Tense names map to abbreviations.
# Unknown labels are returned unchanged.
###############################################
VerbAttribute(L("attr")) {
"attr.txt" << L("attr") << "\n";
if (L("attr") == "plural") {
return "p";
} else if (L("attr") == "singular") {
return "s";
} else if (L("attr") == "primeira" || L("attr") == "primeiro") {
return "p";
} else if (L("attr") == "segunda" || L("attr") == "segundo") {
return "s";
} else if (L("attr") == "terceira") {
return "t";
} else if (L("attr") == "presente") {
return "p";
} else if (L("attr") == "pretérito imperfeito") {
return "pi";
} else if (L("attr") == "pretérito perfeito") {
return "pp";
} else if (L("attr") == "pretérito mais-que-perfeito") {
return "pm";
} else if (L("attr") == "futuro do presente" || L("attr") == "futuro") {
return "f";
} else if (L("attr") == "futuro do pretérito") {
return "fp";
} else if (L("attr") == "presente de subjuntivo") {
return "ps";
} else if (L("attr") == "pretérito imperfeito de subjuntivo") {
return "pis";
} else if (L("attr") == "futuro de subjuntivo") {
return "fs";
}
return L("attr");
}
###############################################
# {{conj/pt|esp|edir}} where the last token is longer than ar, er, or ir
###############################################
# GetVerbEnding: return the final 2-letter infinitive ending of "verb";
# any extra leading letters are appended to "stem", and the combined stem
# is handed back via the global G("GetVerbEnding") (side-channel return,
# since NLP++ functions return a single value).
GetVerbEnding(L("stem"),L("verb")) {
L("len") = strlength(L("verb"));
G("GetVerbEnding") = L("stem");
L("ending") = L("verb");
if (L("len") > 2) {
L("ending") = strpiece(L("verb"),L("len")-2,L("len")-1);
G("GetVerbEnding") = L("stem") + strpiece(L("verb"),0,L("len")-3);
}
return L("ending");
}
###############################################
# https://www.conjugacao.com.br/verbo-brincar/
###############################################
# AddVerbAr: generate every simple-tense form of a regular first-
# conjugation (-ar) verb from its stem; each form is stored via
# AddConjugation (pessoa/numero/tempo normalized by VerbAttribute).
AddVerbAr(L("stem")) {
AddConjugation(L("stem"),"o","1","s","presente");
AddConjugation(L("stem"),"as","2","s","presente");
AddConjugation(L("stem"),"a","3","s","presente");
AddConjugation(L("stem"),"amos","1","p","presente");
AddConjugation(L("stem"),"ais","2","p","presente");
AddConjugation(L("stem"),"am","3","p","presente");
AddConjugation(L("stem"),"ava","1","s","pretérito imperfeito");
AddConjugation(L("stem"),"avas","2","s","pretérito imperfeito");
AddConjugation(L("stem"),"ava","3","s","pretérito imperfeito");
AddConjugation(L("stem"),"ávamos","1","p","pretérito imperfeito");
# FIX(review): 2pl imperfect of -ar verbs ends in "-áveis" (faláveis),
# not "-ávais".
AddConjugation(L("stem"),"áveis","2","p","pretérito imperfeito");
AddConjugation(L("stem"),"avam","3","p","pretérito imperfeito");
AddConjugation(L("stem"),"ei","1","s","pretérito perfeito");
AddConjugation(L("stem"),"aste","2","s","pretérito perfeito");
AddConjugation(L("stem"),"ou","3","s","pretérito perfeito");
AddConjugation(L("stem"),"amos","1","p","pretérito perfeito");
AddConjugation(L("stem"),"astes","2","p","pretérito perfeito");
AddConjugation(L("stem"),"aram","3","p","pretérito perfeito");
AddConjugation(L("stem"),"ara","1","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"aras","2","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"ara","3","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"áramos","1","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"áreis","2","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"aram","3","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"arei","1","s","futuro do presente");
AddConjugation(L("stem"),"arás","2","s","futuro do presente");
AddConjugation(L("stem"),"ará","3","s","futuro do presente");
AddConjugation(L("stem"),"aremos","1","p","futuro do presente");
AddConjugation(L("stem"),"areis","2","p","futuro do presente");
AddConjugation(L("stem"),"arão","3","p","futuro do presente");
AddConjugation(L("stem"),"aria","1","s","futuro do pretérito");
AddConjugation(L("stem"),"arias","2","s","futuro do pretérito");
AddConjugation(L("stem"),"aria","3","s","futuro do pretérito");
AddConjugation(L("stem"),"aríamos","1","p","futuro do pretérito");
AddConjugation(L("stem"),"aríeis","2","p","futuro do pretérito");
AddConjugation(L("stem"),"ariam","3","p","futuro do pretérito");
AddConjugation(L("stem"),"e","1","s","presente de subjuntivo");
AddConjugation(L("stem"),"es","2","s","presente de subjuntivo");
AddConjugation(L("stem"),"e","3","s","presente de subjuntivo");
AddConjugation(L("stem"),"emos","1","p","presente de subjuntivo");
AddConjugation(L("stem"),"eis","2","p","presente de subjuntivo");
AddConjugation(L("stem"),"em","3","p","presente de subjuntivo");
AddConjugation(L("stem"),"asse","1","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"asses","2","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"asse","3","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"ássemos","1","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"ásseis","2","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"assem","3","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"ar","1","s","futuro de subjuntivo");
AddConjugation(L("stem"),"ares","2","s","futuro de subjuntivo");
AddConjugation(L("stem"),"ar","3","s","futuro de subjuntivo");
# FIX(review): 1pl future subjunctive is "-armos" (falarmos), not "-aremo".
AddConjugation(L("stem"),"armos","1","p","futuro de subjuntivo");
AddConjugation(L("stem"),"ardes","2","p","futuro de subjuntivo");
AddConjugation(L("stem"),"arem","3","p","futuro de subjuntivo");
}
# AddVerbEr: regular second-conjugation (-er) paradigm for the stem
# (model verb: comer).
AddVerbEr(L("stem")) {
AddConjugation(L("stem"),"o","1","s","presente");
AddConjugation(L("stem"),"es","2","s","presente");
AddConjugation(L("stem"),"e","3","s","presente");
AddConjugation(L("stem"),"emos","1","p","presente");
AddConjugation(L("stem"),"eis","2","p","presente");
AddConjugation(L("stem"),"em","3","p","presente");
AddConjugation(L("stem"),"ia","1","s","pretérito imperfeito");
AddConjugation(L("stem"),"ias","2","s","pretérito imperfeito");
AddConjugation(L("stem"),"ia","3","s","pretérito imperfeito");
AddConjugation(L("stem"),"íamos","1","p","pretérito imperfeito");
# FIX(review): 2pl/3pl imperfect were shifted by one slot; the regular
# -er forms are "-íeis" (comíeis) and "-iam" (comiam).
AddConjugation(L("stem"),"íeis","2","p","pretérito imperfeito");
AddConjugation(L("stem"),"iam","3","p","pretérito imperfeito");
AddConjugation(L("stem"),"i","1","s","pretérito perfeito");
AddConjugation(L("stem"),"este","2","s","pretérito perfeito");
AddConjugation(L("stem"),"eu","3","s","pretérito perfeito");
AddConjugation(L("stem"),"emos","1","p","pretérito perfeito");
AddConjugation(L("stem"),"estes","2","p","pretérito perfeito");
AddConjugation(L("stem"),"eram","3","p","pretérito perfeito");
# FIX(review): pluperfect of -er verbs is "-era/-eras/-era/-êramos"
# (comera, comeras, comera, comêramos), not "-a/-as/-a/-áramos".
AddConjugation(L("stem"),"era","1","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"eras","2","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"era","3","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"êramos","1","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"êreis","2","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"eram","3","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"erei","1","s","futuro do presente");
AddConjugation(L("stem"),"erás","2","s","futuro do presente");
AddConjugation(L("stem"),"erá","3","s","futuro do presente");
AddConjugation(L("stem"),"eremos","1","p","futuro do presente");
AddConjugation(L("stem"),"ereis","2","p","futuro do presente");
AddConjugation(L("stem"),"erão","3","p","futuro do presente");
# FIX(review): the conditional of -er verbs keeps the -er theme vowel
# ("comeria"); the previous "-aria..." forms were copied from AddVerbAr.
AddConjugation(L("stem"),"eria","1","s","futuro do pretérito");
AddConjugation(L("stem"),"erias","2","s","futuro do pretérito");
AddConjugation(L("stem"),"eria","3","s","futuro do pretérito");
AddConjugation(L("stem"),"eríamos","1","p","futuro do pretérito");
AddConjugation(L("stem"),"eríeis","2","p","futuro do pretérito");
AddConjugation(L("stem"),"eriam","3","p","futuro do pretérito");
AddConjugation(L("stem"),"a","1","s","presente de subjuntivo");
AddConjugation(L("stem"),"as","2","s","presente de subjuntivo");
AddConjugation(L("stem"),"a","3","s","presente de subjuntivo");
AddConjugation(L("stem"),"amos","1","p","presente de subjuntivo");
AddConjugation(L("stem"),"ais","2","p","presente de subjuntivo");
AddConjugation(L("stem"),"am","3","p","presente de subjuntivo");
AddConjugation(L("stem"),"esse","1","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"esses","2","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"esse","3","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"êssemos","1","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"êsseis","2","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"essem","3","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"er","1","s","futuro de subjuntivo");
AddConjugation(L("stem"),"eres","2","s","futuro de subjuntivo");
AddConjugation(L("stem"),"er","3","s","futuro de subjuntivo");
# FIX(review): 1pl future subjunctive is "-ermos" (comermos), not "-eremo".
AddConjugation(L("stem"),"ermos","1","p","futuro de subjuntivo");
AddConjugation(L("stem"),"erdes","2","p","futuro de subjuntivo");
AddConjugation(L("stem"),"erem","3","p","futuro de subjuntivo");
}
# AddVerbIr: regular third-conjugation (-ir) paradigm for the stem
# (model verb: partir).
AddVerbIr(L("stem")) {
AddConjugation(L("stem"),"o","1","s","presente");
AddConjugation(L("stem"),"es","2","s","presente");
AddConjugation(L("stem"),"e","3","s","presente");
AddConjugation(L("stem"),"imos","1","p","presente");
# FIX(review): 2pl present of -ir verbs is "-is" (partis), not "-eis".
AddConjugation(L("stem"),"is","2","p","presente");
AddConjugation(L("stem"),"em","3","p","presente");
AddConjugation(L("stem"),"ia","1","s","pretérito imperfeito");
AddConjugation(L("stem"),"ias","2","s","pretérito imperfeito");
AddConjugation(L("stem"),"ia","3","s","pretérito imperfeito");
AddConjugation(L("stem"),"íamos","1","p","pretérito imperfeito");
AddConjugation(L("stem"),"íeis","2","p","pretérito imperfeito");
AddConjugation(L("stem"),"iam","3","p","pretérito imperfeito");
AddConjugation(L("stem"),"i","1","s","pretérito perfeito");
AddConjugation(L("stem"),"iste","2","s","pretérito perfeito");
AddConjugation(L("stem"),"iu","3","s","pretérito perfeito");
AddConjugation(L("stem"),"imos","1","p","pretérito perfeito");
AddConjugation(L("stem"),"istes","2","p","pretérito perfeito");
AddConjugation(L("stem"),"iram","3","p","pretérito perfeito");
# FIX(review): pluperfect of -ir verbs is "-ira/-iras/-ira/-íramos"
# (partira, partiras, partira, partíramos), not "-a/-as/-a/-áramos".
AddConjugation(L("stem"),"ira","1","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"iras","2","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"ira","3","s","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"íramos","1","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"íreis","2","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"iram","3","p","pretérito mais-que-perfeito");
AddConjugation(L("stem"),"irei","1","s","futuro do presente");
AddConjugation(L("stem"),"irás","2","s","futuro do presente");
AddConjugation(L("stem"),"irá","3","s","futuro do presente");
AddConjugation(L("stem"),"iremos","1","p","futuro do presente");
AddConjugation(L("stem"),"ireis","2","p","futuro do presente");
AddConjugation(L("stem"),"irão","3","p","futuro do presente");
AddConjugation(L("stem"),"iria","1","s","futuro do pretérito");
AddConjugation(L("stem"),"irias","2","s","futuro do pretérito");
AddConjugation(L("stem"),"iria","3","s","futuro do pretérito");
AddConjugation(L("stem"),"iríamos","1","p","futuro do pretérito");
AddConjugation(L("stem"),"iríeis","2","p","futuro do pretérito");
AddConjugation(L("stem"),"iriam","3","p","futuro do pretérito");
AddConjugation(L("stem"),"a","1","s","presente de subjuntivo");
AddConjugation(L("stem"),"as","2","s","presente de subjuntivo");
AddConjugation(L("stem"),"a","3","s","presente de subjuntivo");
AddConjugation(L("stem"),"amos","1","p","presente de subjuntivo");
AddConjugation(L("stem"),"ais","2","p","presente de subjuntivo");
AddConjugation(L("stem"),"am","3","p","presente de subjuntivo");
AddConjugation(L("stem"),"isse","1","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"isses","2","s","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"isse","3","s","pretérito imperfeito de subjuntivo");
# FIX(review): 1pl/2pl imperfect subjunctive carry an accent:
# "-íssemos" (partíssemos) and "-ísseis" (partísseis).
AddConjugation(L("stem"),"íssemos","1","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"ísseis","2","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"issem","3","p","pretérito imperfeito de subjuntivo");
AddConjugation(L("stem"),"ir","1","s","futuro de subjuntivo");
AddConjugation(L("stem"),"ires","2","s","futuro de subjuntivo");
AddConjugation(L("stem"),"ir","3","s","futuro de subjuntivo");
# FIX(review): 1pl future subjunctive is "-irmos" (partirmos), not "-iremo".
AddConjugation(L("stem"),"irmos","1","p","futuro de subjuntivo");
AddConjugation(L("stem"),"irdes","2","p","futuro de subjuntivo");
AddConjugation(L("stem"),"irem","3","p","futuro de subjuntivo");
}
###############################################
# StemSuffix: join stem + suffix, applying the c -> qu spelling change
# before a suffix beginning with "e" (brinc + e -> brinque).
# NOTE(review): other Portuguese orthographic alternations (g->gu, ç->c,
# c->ç, g->j) are not handled -- confirm those verb classes never reach
# this code, or extend it.
###############################################
StemSuffix(L("stem"),L("suffix")) {
L("len") = strlength(L("stem")) - 1;
L("stem let") = strpiece(L("stem"),L("len"),L("len"));
L("suffix let") = strpiece(L("suffix"),0,0);
L("word") = L("stem") + L("suffix");
if (L("stem let") == "c" && L("suffix let") == "e") {
L("word") = strpiece(L("stem"),0,L("len")-1) + "qu" + L("suffix");
}
return L("word");
}
###############################################
# AddConjugation: store one conjugated form (stem+suffix) under G("words")
# as a verb meaning with normalized pessoa/numero/tempo attributes.
# NOTE(review): the root is built with the hard-coded ending "ar", so
# forms generated by AddVerbEr/AddVerbIr get a wrong infinitive as their
# "root" -- confirm and consider passing the real ending in.
###############################################
AddConjugation(L("stem"),L("suffix"),L("pessoa"),L("numero"),L("tempo")) {
L("root") = StemSuffix(L("stem"),"ar");
L("word") = StemSuffix(L("stem"),L("suffix"));
L("con") = AddUniqueCon(G("words"),L("word"));
L("meaning") = MakeCountCon(L("con"),"meaning");
AddUniqueStr(L("meaning"),"pos","v");
AddUniqueStr(L("meaning"),"lang","pt");
AddUniqueStr(L("meaning"),"root",L("root"));
AddUniqueStr(L("meaning"),"pessoa",VerbAttribute(L("pessoa")));
AddUniqueStr(L("meaning"),"numero",VerbAttribute(L("numero")));
AddUniqueStr(L("meaning"),"tempo",VerbAttribute(L("tempo")));
}
###############################################
# PosStr: map a Portuguese POS section header to a short POS code.
# Matching is substring, case-insensitive; returns 0 when the header
# names no recognized part of speech.
###############################################
PosStr(L("word")) {
if (!L("word")) {
return 0;
}
if (strcontainsnocase("verbal",L("word")) || strcontainsnocase("verbo",L("word"))) {
return "v";
}
else if (strcontainsnocase("substantivo",L("word"))) {
return "n";
}
else if (strcontainsnocase("adjetivo",L("word"))) {
return "adj";
}
else if (strcontainsnocase("advérbio",L("word"))) {
return "adv";
}
else if (strcontainsnocase("pronome",L("word"))) {
return "pro";
}
else if (strcontainsnocase("preposição",L("word"))) {
return "prep";
}
else if (strcontainsnocase("artigo",L("word"))) {
return "det";
}
else if (strcontainsnocase("interjeição",L("word"))) {
# FIX(review): interjections were mislabeled "det" (copy-paste from the
# "artigo" branch above); give them their own code.
return "interj";
}
else if (strcontainsnocase("conjunção",L("word"))) {
return "conj";
}
return 0;
}
###############################################
# KBDump: write the accumulated word KB to disk -- a .kbb tree dump plus a
# one-line-per-meaning .dict file, named after the input file/directory.
###############################################
KBDump() {
L("word") = down(G("words"));
if (G("$isdirrun")) {
# Directory-mode run: name the outputs after the parent directory.
L("dictfile") = G("$kbpath") + G("$inputparent") + ".dict";
L("kbbfile") = G("$kbpath") + G("$inputparent") + ".kbb";
} else {
# Single-file run: name the outputs after the input file head.
L("dictfile") = G("$kbpath") + G("$inputhead") + ".dict";
L("kbbfile") = G("$kbpath") + G("$inputhead") + ".kbb";
}
L("dict") = openfile(L("dictfile"),1);
L("kbb") = openfile(L("kbbfile"),1);
DisplayKBRecurse(L("kbb"),G("words"),0,1);
closefile(L("kbb"));
# Dict format: each meaning child of each word becomes one line of
# "word attr=val attr=val ..." (absent attributes are skipped).
while (L("word")) {
L("meaning") = down(L("word"));
while (L("meaning")) {
L("dict") << conceptname(L("word"));
AddWordAttribute(L("dict"),L("meaning"),"pos");
AddWordAttribute(L("dict"),L("meaning"),"lang");
AddWordAttribute(L("dict"),L("meaning"),"root");
AddWordAttribute(L("dict"),L("meaning"),"pessoa");
AddWordAttribute(L("dict"),L("meaning"),"numero");
AddWordAttribute(L("dict"),L("meaning"),"tempo");
L("dict") << "\n";
L("meaning") = next(L("meaning"));
}
L("word") = next(L("word"));
}
closefile(L("dict"));
}
###############################################
# AddWordAttribute: append " attr=value" to the output stream when the
# meaning concept carries a string value for that attribute; otherwise
# write nothing.
###############################################
AddWordAttribute(L("file"),L("meaning"),L("attr")) {
L("v") = strval(L("meaning"),L("attr"));
if (!L("v")) {
return;
}
L("file") << " " << L("attr") << "=" << L("v");
}
@@DECL
|
@PATH _ROOT _headerZone _Experience
@POST
# Count header zones under the _Experience section (X(...,3) addresses
# the _Experience context node) and record each company name in the KB.
X("companies",3)++;
makeconcept(G("company"),N("header"));
@RULES
_xNIL <-
_headerZone ### (1)
@@
|
@NODES _labelEntry
@POST
# Register the first token of each label entry as a phrase; echo to the
# debug log.
AddPhrase(N(2));
"debug.txt" << N("$text", 2) << "\n";
@RULES
_xNIL <-
_xSTART ### (1)
_xWILD [one match=(_xALPHA _xNUM)] ### (2)
@@
|
@CODE
# Cleanup pass: close the dictionary output stream opened earlier.
closefile(G("dict"));
@@CODE |
# Count nouns that have a zero feature (i.e., node variable)
@PRE
<1,1> varz("mass"); # Noun's "mass" variable is absent or zero.
@POST
++G("count nonmass nouns");
@RULES
_xNIL <-
_noun
@@ |
@NODES _ROOT
@PRE
<1,1> uppercase(); # Only all-uppercase text qualifies as a header.
@POST
# Save the header text, drop the trailing colon, reduce to _header.
S("header") = N("$text", 1);
excise(2,2);
single();
@RULES
_header <-
_xWILD [fails=(\: \n \r)] ### (1)
\: ### (2)
@@
|
@NODES _LINE
@POST
# "etymology N": store N numerically on the match and mark the enclosing
# line's header type.
S("etymology") = num(N("$text", 2));
X("header") = "etymology";
single();
@RULES
_etymology <-
etymology ### (1)
_xNUM ### (2)
@@
|
@CODE
# Persist the ICD code tree to a KBB file.
# (Third argument = SaveKB output flag as used elsewhere in this
# analyzer; see the NLP++ SaveKB documentation.)
SaveKB("icd_codes.kbb",G("icd_codes"),2);
@@CODE |
@NODES _line
@POST
# Tab-separated line "term<TAB><TAB>[word...": save the term text and
# the first alpha of the bracketed negation on the line context.
X("term") = N("$text", 1);
X("negation") = N("$text", 5);
@RULES
_xNIL <-
_xWILD [fails=(\t)] ### (1)
\t
\t
\[
_xALPHA
@@
|
@CODE
L("hello") = 0; # No-op; the pass exists for the rule zone below.
@@CODE
@NODES _ROOT
@PRE
<3,3> varz("NL");
<4,4> var("NL"); # The newline-before-me attribute.
# NOTE(review): @PRE tests elements 3/4 but @POST reads N("NL",2) --
# confirm the intended element numbering.
@POST
# More than one newline before the line marks a paragraph break.
if (N("NL",2) > 1)
{
++X("PARS"); # Count potential paragraphs.
++S("PAR");
}
singler(2,3);
@RULES
_LINE [unsealed] <-
_xANY
_xWILD [star fail=(_dummy)]
_xANY [opt lookahead]
@@
|
@CODE
# Debug pass: dump the entire knowledge base from the root.
DisplayKB(findroot(),0);
@@CODE
|
@NODES _ROOT
@POST
# Append each prose zone's text to prose.txt, blank-line separated.
"prose.txt" << N("$text") << "\n\n";
@RULES
_xNIL <-
_prose ### (1)
@@
|
# Fetch the left or previous sibling of concept.
# (prev() walks backward among siblings of L("con").)
L("return_con") = prev(L("con")); |
@CODE
fileout("zdump.txt"); # Route pass tree dump to zdump.txt.
@@CODE
@NODES _LINE
# PASS OVERVIEW: build capitalized-word phrases (_Caps) inside each line.
# Stage 1 blocks "complete" items from glomming; stage 2 seeds _Caps
# nodes; stage 3 extends/merges them left and right. Rule order matters:
# this pass is applied recursively, so several rules exist only to
# noop()-block matches that would otherwise loop forever.
################
# BLOCK GLOMMING (gulab jamun anyone?)
################
# for things like "roots" or other items that are COMPLETE or
# otherwise would rather STAND ALONE!
# May want some kind of feature like "noglom" or "complete".
# NOTE: PUT NEW ITEMS INTO BOTH RULES BELOW.
# I'd rather not have many such blocks. We need info as to whether
# things are nicely separated with commas, etc. to decide whether
# to block or not.
# that we don't want to participate in the cap phrase glomming. This
# way, the underlying _xCAP won't be seen and we can remove all the
# special treatment below.
@POST
noop();
@RULES
_xNIL <- _xWILD [s one match=(
_posPREP _posDET _posCONJ
_resumeOf _degreeInMajor
)] @@
# blocked for glomming to the left and right.
@POST
S("noglom") = 1; # Don't let it glom yet.
S("len") = 1; # One cap in list.
single();
@RULES
_Caps [base unsealed] <- _xWILD [s one match=(
_degreePhrase
_header _headerWord
_minorKey _state
)] @@
@POST
noop(); # This "fails" so that recursive pass won't infinite loop.
@RULES
_xNIL <- _Caps _xWHITE [s star] _xWILD [s one match=(
_degreePhrase _degreeInMajor
_header _headerWord
_minorKey _state
)] @@
# Need a copy-up semantic action.
@POST
if (N("degree"))
S("degree") = N("degree");
else
S("degree") = N("$text"); # for now.
if (N("major"))
S("major") = N("major");
else
S("major") = N("$text");
S("noglom") = 1; # Don't let it glom yet.
S("len") = 1; # One cap in list.
single();
@RULES
# NOTE(review): reducing to _xNIL while setting S(...) variables looks
# like this was meant to produce _Caps -- confirm the suggested concept.
_xNIL [base unsealed] <- _degreeInMajor [s] @@
####################
# BUILD CAPS PHRASES
####################
############
# SOME SPECIAL CASES.
############
# Single-letter alpha followed by period.
# May want "A." and so on handled separately and much earlier
# in analyzer.
@PRE
<1,1> length(1)
@POST
S("len") = 1; # One cap in list.
single();
@RULES
_Caps [base unsealed] <- _xCAP [s] \. [s] @@
@PRE
<3,3> length(1)
@CHECK
if (N("noglom",1)) fail();
@POST
++N("len",1);
listadd(1,4, "true"); # 4 is not really the "item"!
@RULES
_xNIL <- _Caps _xWHITE [s star] _xCAP [s] \. @@
##############
# NORMAL CAP PHRASE STARTUP.
##############
@POST
S("len") = 1; # One cap in list.
single();
@RULES
# With [s], BASE is needed to prevent infinite loop in REC pass!
# Making this unsealed so we can gather data within _Caps later.
# (May not be needed.)
_Caps [base unsealed] <- _xCAP [s] @@
### NORMAL EXTENSION OF CAP PHRASE.
@CHECK
if (N("noglom",1)) fail();
@POST
++N("len",1); # Increment number of caps.
listadd(1,3); # Discard the matched whitespace.
# Glom last cap into list, without nesting.
@RULES
_xNIL <- _Caps _xWHITE [s star] _xCAP [s] @@
# Some reverse building may be needed.
@CHECK
if (N("noglom",3)) fail();
@POST
++N("len",3);
listadd(3,1);
@RULES
_xNIL <- _xCAP [s] _xWHITE [s star] _Caps @@
# Some merging may be needed.
@CHECK
if (
!N("noglom",1) && !N("noglom",3)
)
succeed();
fail();
@POST
S("len") = N("len",1) + N("len",3);
merge();
@RULES
_Caps [unsealed] <- _Caps _xWHITE [s star] _Caps @@
##############
# SOME NONCAPS
##############
# Some more specialized stuff. #
# May want to do these later on instead. We'll see.
# Allowing noglom caps to succeed here.
@POST
++N("len",1);
listadd(1,5,"true"); # Keep the intervening stuff.
# noop()
@RULES
# "Acme & Smith": absorb "& Cap" into an existing _Caps phrase.
_xNIL <-
_Caps
_xWHITE [s star]
\& [s]
_xWHITE [s star]
_xCAP [s] @@
|
@NODES _ROOT
@RULES
# Collect any node that is neither a _section nor a _break as loose text.
_looseText <-
_xWILD [fails=(_section _break)]
@@
|
# Example: the built-in edit-distance function.
# Get levenshtein distance between "hello" and "hell"
L("distance") = levenshtein("hello","hell");
# This should evaluate to 1. |
@NODES _LINE
@POST
# "ind attr" opens an attribute block; mark the line as a tag line.
X("tag") = 1;
single();
@RULES
_startAttr <- ind attr @@
@POST
# "end ind" closes the attribute block.
X("tag") = 1;
single();
@RULES
_endAttr <- end ind @@ |
Subsets and Splits