# Dataset-export header residue (not NLP++ code) — commented out so the file parses:
# text
# stringlengths
# 22
# 301k
@NODES _ROOT

# Build the ICD hierarchy: for top-level section entries
# ("PROCEDURES", "DISEASES AND INJURIES"), parse the entry's code
# range (e.g. "001-139.9") into start/end class codes and record
# them on a uniquely-named concept under G("icd_hier").
@POST
L("text") = N("$text", 1);
"test.txt" << L("text") << "\n"; # Debug trace of each entry's text.
if (L("text") == "PROCEDURES" || L("text") == "DISEASES AND INJURIES")
{
	L("range") = split(N("code", 1), "-");
	L("class_start") = L("range")[0];
	L("class_end") = L("range")[1];
	L("class_end") = split(L("class_end"), "."); # Drop any decimal part of the end code.
	L("class_end") = L("class_end")[0];
	S("con") = AddUniqueCon(G("icd_hier"), N("term", 1));
	addstrval(S("con"), "code", N("code", 1));
	addstrval(S("con"), "start", L("class_start"));
	addstrval(S("con"), "end", L("class_end"));
	merge();
}
noop();
@RULES
_split <- _entry ### (1)
	@@
@NODES _ROOT

# LaTeX styling: reduce \textbf{...} / \emph{...} to _bold and
# \textit{...} to _italics, but only when the opening and closing
# braces sit at the same nesting depth ("curly level"), so we never
# pair a brace with one from a different group.
@CHECK
"brackets.txt" << N("$text",2) << ": " << N("curly level",3) << " <=> " << N("curly level",5) << "\n";
if (N("curly level",3) != N("curly level",5)) fail();
@POST
group(5,5,"_bold"); # Layer the closing brace, then reduce the command head.
singler(1,3);
@RULES
_bold <- \\ [s] ### (1)
	_xWILD [matches=(textbf emph)] ### (2)
	\{ [s] ### (3)
	_xWILD [s] ### (4)
	\} ### (5)
	@@

@CHECK
"brackets.txt" << N("$text",2) << ": " << N("curly level",3) << " <=> " << N("curly level",5) << "\n";
if (N("curly level",3) != N("curly level",5)) fail();
@POST
group(5,5,"_italics");
singler(1,3);
@RULES
_italics <- \\ [s] ### (1)
	textit [s] ### (2)
	\{ [s] ### (3)
	_xWILD [s] ### (4)
	\} ### (5)
	@@
@PATH _ROOT _LINE

# Pair a degree with a following capitalized major ("BS Computer
# Science"); the caps phrase must carry field-of-endeavor evidence.
# Need some confidence that the caps is a good one.
# (Could put a weak variant in edu zone, stronger one here.)
@CHECK
if (!N("fields",3)) fail(); # Has field-of-endeavor words.
@POST
S("degree") = N("$text",1);
S("major") = N("$text",3);
single(); # Default reduction.
@RULES
_degreeInMajor <- _degree [s] _xWHITE [s star] _Caps [ren=(_major)] @@

# A loose end....
@RULES
_subEntity <- _subOrg [s] _xWHITE [s star] of [s] _xWHITE [s star] _Caps [ren=(_subEntityname)] @@
@PATH _ROOT _LINE

# Glomming good school stuff: merge adjacent cap phrases into a single
# _Caps when one side is a confident school, and catch "Doctor of X" /
# "degree in X" patterns.

@CHECK
if ( (N("hi class", 1) == "school") # Highest class is a school
  && (N("hi conf", 1) >= 80) # with >= 80% confidence.
  ) succeed();
fail();
@POST
S("hi class") = "school";
S("hi conf") = N("hi conf",1);
S("school conf") = N("school conf",1);
single();
@RULES
_Caps [unsealed] <- _Caps _xWHITE [s star] of [s] _xWHITE [s star] _Caps [s] @@

@CHECK
if ( (N("hi class", 3) == "school") # Highest class is a school
  && (N("hi conf", 3) >= 80) # with >= 80% confidence.
  ) succeed();
fail();
@POST
S("hi class") = "school";
# FIX: take the confidences from node 3 — the trailing _Caps the
# @CHECK above validated — not node 1. Reading node 1 was an apparent
# copy-paste slip from the preceding rule, which checks node 1.
S("hi conf") = N("hi conf",3);
S("school conf") = N("school conf",3);
single();
@RULES
_Caps <- _Caps _xWHITE [s star] _Caps [s] @@

# A "normal" rule.
@POST
S("hi class") = "school";
S("hi conf") = 95;
S("school conf") = 95;
single();
@RULES
_Caps <- _xWILD [s one match=( _SchoolRoot _SchoolType)] _xWHITE [s star] _xWILD [s one match=(of for)] _xWHITE [s star] _Caps [s] @@

@POST
S("degree") = "MD";
S("major") = N("$text",5);
single(); # Default reduction.
@RULES
_degreeInMajor <- Doctor [s layer=(_degree)] _xWHITE [s star] of [s] _xWHITE [s star] _Caps [ren=(_major)] @@

@POST
S("degree") = N("$text",1);
S("major") = N("$text",5);
single(); # Default reduction.
@RULES
_degreeInMajor <- _degree [s] _xWHITE [s star] in [s] _xWHITE [s star] _Caps [ren=(_major)] @@
@NODES _ROOT

# Harvest names from two markup shapes and record each via SaveName()
# (defined elsewhere in this analyzer).

# Shape 1: a name="<word>"
@POST
SaveName(N("$text",5));
single();
@RULES
_word <- a ### (1)
	name ### (2)
	\= ### (3)
	\" ### (4)
	_xALPHA ### (5)
	\" ### (6)
	@@

# Shape 2: 9393 ff "><word>< (tag residue around a bare word)
@POST
SaveName(N("$text",5));
single();
@RULES
_word <- 9393 ### (1)
	ff ### (2)
	\" ### (3)
	\> ### (4)
	_xALPHA ### (5)
	\< ### (6)
	@@
@DECL

###############################################
# General functions
###############################################

# Find child concept by name under a parent, creating it if absent.
AddUniqueCon(L("concept"),L("name")) {
	L("con") = findconcept(L("concept"),L("name"));
	if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
	return L("con");
}

# Add a string value to an attribute only if not already present.
# Returns 1 if added, 0 otherwise (duplicate or empty value).
AddUniqueStr(L("concept"),L("attr"),L("value")) {
	if (L("value")) {
		L("val") = AttrValues(L("concept"),L("attr"));
		while (L("val")) {
			L("str") = getstrval(L("val"));
			if (L("str") == L("value"))
				return 0;
			L("val") = nextval(L("val"));
		}
		addstrval(L("concept"),L("attr"),L("value"));
		return 1;
	}
	return 0;
}

# Numeric counterpart of AddUniqueStr.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
	if (L("value")) {
		L("val") = AttrValues(L("concept"),L("attr"));
		while (L("val")) {
			L("num") = getnumval(L("val"));
			if (L("num") == L("value"))
				return 0;
			L("val") = nextval(L("val"));
		}
		addnumval(L("concept"),L("attr"),L("value"));
		return 1;
	}
	return 0;
}

# Concept-valued counterpart of AddUniqueStr; compares by conceptpath.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
	"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
	L("val") = AttrValues(L("concept"),L("attr"));
	while (L("val")) {
		L("con") = getconval(L("val"));
		"unique.txt" << conceptname(L("con")) << "\n";
		if (conceptpath(L("con")) == conceptpath(L("value")))
			return 0;
		L("val") = nextval(L("val"));
	}
	addconval(L("concept"),L("attr"),L("value"));
	return 1;
}

# Walk/create a space-separated concept path under a parent, skipping
# the literal token "concept"; returns the deepest concept reached.
PathToConcept(L("parent"),L("hier")) {
	L("cons") = split(L("hier")," ");
	L("i") = 0;
	L("con") = L("parent");
	while (L("cons")[L("i")]) {
		L("c") = L("cons")[L("i")];
		L("name") = strsubst(L("c"),"\"",0); # Strip embedded quotes.
		if (L("name") != "concept")
			L("con") = AddUniqueCon(L("con"),L("name"));
		L("i")++;
	}
	return L("con");
}

# Copy a string attribute from one concept to another if absent there.
CopyAttr(L("from"),L("to"),L("attr")) {
	L("from value") = strval(L("from"),L("attr"));
	if (L("from value")) {
		L("to value") = strval(L("to"),L("attr"));
		if (L("from value") && !L("to value"))
			addstrval(L("to"),L("attr"),L("from value"));
	}
}

# Like CopyAttr, but the source and destination attribute names differ.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
	L("from value") = strval(L("from"),L("attr from"));
	if (L("from value")) {
		L("to value") = strval(L("to"),L("attr to"));
		if (L("from value") && !L("to value"))
			addstrval(L("to"),L("attr to"),L("from value"));
	}
}

# Concept-valued counterpart of CopyAttr.
CopyConAttr(L("from"),L("to"),L("attr")) {
	L("from value") = conval(L("from"),L("attr"));
	if (L("from value")) {
		L("to value") = conval(L("to"),L("attr"));
		if (L("from value") && !L("to value"))
			addconval(L("to"),L("attr"),L("from value"));
	}
}

# Return the value list of a named attribute, or 0 if absent.
AttrValues(L("con"),L("attr")) {
	L("at") = findattr(L("con"),L("attr"));
	if (L("at")) return attrvals(L("at"));
	return 0;
}

# Count the values attached to an attribute.
# FIX: the loop previously tested L("con"), which was never initialized
# (the attrvals() result went into L("vals")), so this always returned 0.
ValCount(L("attr")) {
	L("count") = 0;
	L("con") = attrvals(L("attr"));
	while (L("con")) {
		L("count")++;
		L("con") = nextval(L("con"));
	}
	return L("count");
}

# Return the last (rightmost) child of a concept, or 0 if childless.
LastChild(L("parent")) {
	L("child") = down(L("parent"));
	while (L("child")) {
		L("last") = L("child");
		L("child") = next(L("child"));
	}
	return L("last");
}

# Create a child concept with an auto-numbered name (root + counter).
MakeCountCon(L("con"),L("count name")) {
	L("count name") = CountName(L("con"),L("count name"));
	return makeconcept(L("con"),L("count name"));
}

# Increment (or create at 1) a numeric counter attribute; returns it.
IncrementCount(L("con"),L("countname")) {
	L("count") = numval(L("con"),L("countname"));
	if (L("count")) {
		L("count") = L("count") + 1;
		replaceval(L("con"),L("countname"),L("count"));
	} else {
		addnumval(L("con"),L("countname"),1);
		L("count") = 1;
	}
	return L("count");
}

# Produce "<root><n>" using the concept's counter attribute <root>.
CountName(L("con"),L("root")) {
	L("count") = IncrementCount(L("con"),L("root"));
	return L("root") + str(L("count"));
}

# Strip trailing digits from a name; returns 0 if the name is all digits.
StripEndDigits(L("name")) {
	if (strisdigit(L("name"))) return 0;
	L("len") = strlength(L("name")) - 1;
	L("i") = L("len") - 1;
	L("str") = strpiece(L("name"),L("i"),L("len"));
	while (strisdigit(L("str")) && L("i")) {
		L("i")--;
		L("str") = strpiece(L("name"),L("i"),L("len"));
	}
	return strpiece(L("name"),0,L("i"));
}

###############################################
# KB Dump Functions
###############################################

# Dump a KB subtree to <app>/kb/<file>.kb, logging success/failure.
DumpKB(L("con"),L("file")) {
	L("dir") = G("$apppath") + "/kb/";
	L("filename") = L("dir") + L("file") + ".kb";
	if (!kbdumptree(L("con"),L("filename"))) {
		"kb.txt" << "FAILED dump: " << L("filename") << "\n";
	} else {
		"kb.txt" << "DUMPED: " << L("filename") << "\n";
	}
}

# Load (take) a KB file from <app>/kb/<filename>.kb, logging the result.
TakeKB(L("filename")) {
	L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
	"kb.txt" << "Taking: " << L("path") << "\n";
	if (take(L("path"))) {
		"kb.txt" << " Taken successfully: " << L("path") << "\n";
	} else {
		"kb.txt" << " Taken FAILED: " << L("path") << "\n";
	}
}

# Count the immediate children of a concept.
ChildCount(L("con")) {
	L("count") = 0;
	L("child") = down(L("con"));
	while (L("child")) {
		L("count")++;
		L("child") = next(L("child"));
	}
	return L("count");
}

###############################################
# KBB DISPLAY FUNCTIONS
###############################################
###############################################
# display type:
#   0 compact with ellipses on long attr values
#   1 full, more spread out
#   2 compact without ellipses on long attr values
###############################################

# Write a KB subtree to the per-pass .kbb display file.
DisplayKB(L("top con"),L("display type")) {
	L("file") = DisplayFileName();
	DisplayKBRecurse(L("file"),L("top con"),0,L("display type"));
	L("file") << "\n";
	return L("top con");
}

# Write a banner comment to the per-pass .kbb display file.
KBHeader(L("text")) {
	L("file") = DisplayFileName();
	L("file") << "#######################\n";
	L("file") << "# " << L("text") << "\n";
	L("file") << "#######################\n\n";
}

# Build the zero-padded per-pass display filename, e.g. "ana007.kbb".
DisplayFileName() {
	if (num(G("$passnum")) < 10) {
		L("file") = "ana00" + str(G("$passnum"));
	}else if (num(G("$passnum")) < 100) {
		L("file") = "ana0" + str(G("$passnum"));
	} else {
		L("file") = "ana" + str(G("$passnum"));
	}
	L("file") = L("file") + ".kbb";
	return L("file");
}

# Recursively print a concept, its attributes, and its descendants,
# indenting one space per level.
DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) {
	if (L("level") == 0) {
		L("file") << conceptname(L("parent")) << "\n";
	}
	L("con") = down(L("parent"));
	while (L("con")) {
		L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
		DisplayAttributes(L("file"),L("con"),L("display type"),L("level"));
		L("file") << "\n";
		if (down(L("con"))) {
			L("lev") = 1;
			DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type"));
		}
		L("con") = next(L("con"));
	}
}

# Print one concept's attributes: string, numeric, and concept values,
# with bracketing for multi-valued attrs and ellipses per display type.
DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) {
	L("attrs") = findattrs(L("con"));
	if (L("attrs")) L("file") << ": ";
	if (L("display type") == 1 && L("attrs")) L("file") << "\n";
	L("first attr") = 1;
	while (L("attrs")) {
		L("vals") = attrvals(L("attrs"));
		L("count") = ValCount(L("attrs"));
		if (L("display type") != 1 && !L("first attr")) {
			L("file") << ", ";
		}
		if (L("display type") == 1) {
			if (!L("first attr")) L("file") << "\n";
			L("file") << SpacesStr(L("level")+2);
		}
		L("file") << attrname(L("attrs")) << "=";
		L("first") = 1;
		while (L("vals")) {
			L("val") = getstrval(L("vals"));
			L("num") = getnumval(L("vals"));
			L("con") = getconval(L("vals"));
			if (!L("first")) L("file") << ",";
			else if (L("count") > 1 || L("con")) L("file") << "[";
			if (L("con")) {
				if (L("first")) L("file") << "[";
				L("file") << conceptpath(L("con"));
			} else if (L("display type") == 0 && strlength(L("val")) > 20) {
				# Compact display: truncate long string values.
				L("shorty") = strpiece(L("val"),0,20);
				L("file") << L("shorty");
				L("file") << "...";
				if (strendswith(L("val"),"\""))
					L("file") << "\"";
			} else if (L("num") > -1) {
				L("file") << str(L("num"));
			} else {
				if (DisplayValNeedsQuote(L("val"))) L("file") << "\"";
				L("file") << L("val");
				if (DisplayValNeedsQuote(L("val"))) L("file") << "\"";
			}
			L("first") = 0;
			L("vals") = nextval(L("vals"));
		}
		if (L("con")) L("file") << "]";
		L("first attr") = 0;
		L("attrs") = nextattr(L("attrs"));
	}
}

# A value needs quoting if it contains a space or square bracket.
DisplayValNeedsQuote(L("str")) {
	if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str")))
		return 1;
	return 0;
}

# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
	L("n") = 1;
	L("spaces") = " ";
	while (L("n") < L("num")) {
		L("spaces") = L("spaces") + " ";
		L("n")++;
	}
	return L("spaces");
}

# Left-pad a string with copies of L("pad str") up to L("pad len").
PadStr(L("num str"),L("pad str"),L("pad len")) {
	L("len") = strlength(L("num str"));
	L("pad") = 0;
	L("to pad") = L("pad len") - L("len");
	while (L("i")++ < L("to pad")) {
		L("pad") = L("pad") + L("pad str");
	}
	L("padded") = L("pad") + L("num str");
	return L("padded");
}

###############################################
# DICTIONARY FUNCTIONS
###############################################

# Open <app>\kb\user\attrs.kb for writing word attributes.
DictionaryStart() {
	G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
	G("attrs") = openfile(G("attrs path"));
}

# Emit one dictionary word-attribute record in KB file syntax.
# L("attrType") selects the value encoding: "str", "num", or "con".
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
	addword(L("word"));
	addword(L("attrName"));
	G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
	G("attrs") << findwordpath(L("attrName")) << "\n";
	if (L("attrType") == "str")
		G("attrs") << "pst\n" << "\"" << L("value") << "\"";
	else if (L("attrType") == "num")
		G("attrs") << "pnum\n" << str(L("value"));
	else if (L("attrType") == "con")
		G("attrs") << "pcon\n" << conceptpath(L("value"));
	G("attrs") << "\nend ind\n\n";
}

# Finish and close the dictionary attributes file.
DictionaryEnd() {
	G("attrs") << "\nquit\n\n";
	closefile(G("attrs"));
}

@@DECL
@NODES _ROOT

# Delete a number followed by a comma and an optional double quote.
@POST
excise(1,3); # FIX: added the missing statement terminator.
@RULES
# FIX: escaped the comma literal (\,) — punctuation literals are
# escaped everywhere else in this analyzer (cf. \= \" \{).
_xNIL <- _xNUM \, _xWILD [opt matches=(\")] @@
# Assess each _Caps phrase independently as header / school / field /
# company / job / city / geo / human-name, assigning a confidence per
# class, then record the winning class in "hi class"/"hi conf" and the
# count of classes meeting the threshold in "ambigs".
# NOTE(review): %% appears to combine/perturb confidences — confirm its
# exact semantics against the NLP++ operator reference.
#@CODE
# A global threshold for confidence in a category.
#var("threshold", 70) # Minimum is 70%
#@@CODE

@PATH _ROOT _LINE

@POST
# if (!N("len"))
# print out no length.
# Treating confidences independently, but keep track of
# ambiguity in the phrase.
N("hi conf") = 0; # Track highest conf so far.
N("hi class") = "NULL"; # Track most confident classification.
# Track number of classes that have met the threshold.
N("ambigs") = 0; # None yet.
if (N("len") == 1)
{
	### ASSESS AS HEADER.
	if (N("headerroots"))
	{
		N("header conf") = 90;
		if (N("$start") && N("$end")) # Lone caps on line.
			N("header conf") = N("header conf") %% 95;
		else if (N("$start"))
			N("header conf") = N("header conf") %% 80;
		else if (N("$end"))
			N("header conf") = N("header conf") %% 70;
	}
	### ASSESS AS SCHOOL NAME.
	if (N("schoolnames"))
	{
		# Would like to get specific conf from each name.
		N("school conf") = 90;
	}
	else if (N("schoolroots"))
		N("school conf") = 0; # eg, "College" is not standalone school.
	### ASSESS AS A FIELD OF ENDEAVOR.
	if (N("fields"))
	{
		# Should get a list of standalone field names.
		N("field conf") = 75;
	}
	### ASSESS AS COMPANY.
	if (N("companyphrase"))
		N("company conf") = 95; # Standalone company name.
	else if (N("unknowns") && N("allcaps"))
		N("company conf") = 60;
	else if (N("allcaps") || N("unknowns"))
		N("company conf") = 55;
	### ASSESS AS JOB.
	if (N("jobtitleroots"))
		N("job conf") = 90;
	### ASSESS AS CITY, GEO ITEM.
	if (N("geos"))
		N("geo conf") = 95;
	### ASSESS AS HUMAN NAME.
	# N("humanname conf") = 0;
}
else # Multi-word cap phrase.
{
	### ASSESS AS HEADER.
	if (N("end headerroot"))
		N("header conf") = 90;
	else if (N("headerroots"))
		N("header conf") = 85;
	else if (N("end headermod"))
		N("header conf") = 80;
	else if (N("headermods"))
		N("header conf") = 75;
	if (N("header conf") > G("threshold"))
	{
		if (N("$start") && N("$end")) # Lone caps on line.
			N("header conf") = N("header conf") %% 95;
		else if (N("$start"))
			N("header conf") = N("header conf") %% 80;
		else if (N("$end"))
			N("header conf") = N("header conf") %% 70;
	}
	### ASSESS AS SCHOOL NAME.
	if (N("end schoolroot")) # eg, head is "university".
		N("school conf") = 95;
	else if (N("schoolroots") || N("schoolnames"))
	{
		N("school conf") = 95;
		if (N("unknowns"))
			N("school conf") = 97;
	}
	### ASSESS AS A FIELD OF ENDEAVOR.
	if (N("fields"))
	{
		# Should get a list of standalone field names.
		if (N("end field"))
			N("field conf") = 90;
		else if (N("fields") > 1)
			N("field conf") = 75;
		else
			N("field conf") = 70;
	}
	### ASSESS AS COMPANY NAME.
	if (N("end companyroot"))
		N("company conf") = 95;
	else if (N("companyphrase"))
		N("company conf") = 90;
	else if (N("end companymodroot"))
		N("company conf") = 80;
	### ASSESS AS CITY NAME.
	if (N("end citymod"))
		N("city conf") = 95;
	else if (N("start citymod"))
		N("city conf") = 75;
	else if (N("citymods"))
		N("city conf") = 70;
	if (N("puncts"))
		N("city conf") = 10; # 01/11/00 AM.
	### ASSESS AS GEO ENTITY. (city,state,country).
	if (N("geos"))
		N("geo conf") = 95;
	# Need a way to increase confidence based on counts of these.
	if (N("companyroots") || N("companymodroots") || N("companymods"))
		N("company conf") = N("company conf") %% 80;
	if (N("unknowns"))
		N("company conf") = N("company conf") %% 50;
	### ASSESS AS JOB TITLE
	if (N("end jobtitleroot"))
		N("job conf") = 95;
	else if (N("end jobmod"))
		N("job conf") = 65;
	N("diff") = N("jobtitleroots") - N("end jobtitleroot");
	if (N("diff") > 0)
		N("job conf") = N("job conf") %% 80;
	N("diff") = N("jobmods") - N("end jobmod");
	if (N("caplen"))
		N("diff") = N("diff") * 100 / N("caplen");
	N("job conf") = N("job conf") %% N("diff");
	if (N("unknowns"))
		N("job conf") = N("job conf") %% - 70;
	### ASSESS AS HUMAN NAME.
	if (N("start humanname") && N("end humanname")) # John Xxx Smith.
		N("humanname conf") = 99; # Got it, locally speaking.
	else if (N("humannames") == 2)
		N("humanname conf") = 95;
	else if (N("end humanname")) # eg, "Xxxx Smith".
		N("humanname conf") = 90;
	else if (N("humannames") == 3)
		N("humanname conf") = 90; # Problem? Name Name Name Xxxx.
	else if (N("humannames") > 3) # Could be a foreign or long name.
		N("humanname conf") = 80;
	else if (N("start humanname")) # eg, "John Xxxx".
		N("humanname conf") = 80;
	else if (N("letters"))
		N("humanname conf") = 80; # 01/04/00 AM.
	# Need a logarithm function to "perturb confidence"
	# eg, standalone for unknown word might be 70.
	# But when conf is already 80, want to increment it a bit.
	# (This is why Dave liked addemup better.)
	# If unreduced unknown words in addition, add some conf.
	if (N("unknowns") > 1)
		N("humanname conf") = N("humanname conf") %% 70;
	else if (N("unknowns") == 1)
		N("humanname conf") = N("humanname conf") %% 60;
	# Some negative indicators.
	if (N("len") > 3)
		N("humanname conf") = N("humanname conf") %% -80;
	else
		N("humanname conf") = N("humanname conf") %% 20;
	if (N("len") > N("caplen")) # Some uncapitalized words.
		N("humanname conf") = N("humanname conf") %% -80;
	else
		N("humanname conf") = N("humanname conf") %% 20;
	if (N("len") > (N("unknowns") + N("unreduced")))
		N("humanname conf") = N("humanname conf") %% -50;
	else
		N("humanname conf") = N("humanname conf") %% 20;
}
# Update overall confidences for this phrase.
if (N("header conf") >= G("threshold"))
	++N("ambigs"); # Another possible sense of cap phrase.
if (N("header conf") > N("hi conf")) # New high.
{
	N("hi conf") = N("header conf");
	N("hi class") = "header"; # Header is best so far.
}
if (N("company conf") >= G("threshold"))
	++N("ambigs"); # Another possible sense of cap phrase.
if (N("company conf") > N("hi conf")) # New high.
{
	N("hi conf") = N("company conf");
	N("hi class") = "company"; # Company is best so far.
}
if (N("job conf") >= G("threshold"))
	++N("ambigs"); # Another possible sense of cap phrase.
if (N("job conf") > N("hi conf")) # New high.
{
	N("hi conf") = N("job conf");
	N("hi class") = "job"; # Job title is best so far.
}
if (N("humanname conf") >= G("threshold"))
	++N("ambigs");
if (N("humanname conf") > N("hi conf"))
{
	N("hi conf") = N("humanname conf");
	N("hi class") = "humanname";
}
if (N("field conf") >= G("threshold"))
	++N("ambigs");
if (N("field conf") > N("hi conf"))
{
	N("hi conf") = N("field conf");
	N("hi class") = "field";
}
if (N("school conf") >= G("threshold"))
	++N("ambigs");
if (N("school conf") > N("hi conf"))
{
	N("hi conf") = N("school conf");
	N("hi class") = "school";
}
if (N("city conf") >= G("threshold"))
	++N("ambigs");
if (N("city conf") > N("hi conf"))
{
	N("hi conf") = N("city conf");
	N("hi class") = "city";
}
if (N("geo conf") >= G("threshold"))
	++N("ambigs");
if (N("geo conf") > N("hi conf"))
{
	N("hi conf") = N("geo conf");
	N("hi class") = "geo";
}
@RULES
_xNIL <- _Caps @@
@DECL

# Concatenate
# Concatenates two non-empty arrays
#
# Args: Two non-empty arrays
# Returns: concatenated array
Concatenate(L("arr_1"), L("arr_2")) {
	L("new_arr") = L("arr_1");
	L("idx") = 0;
	while (L("idx") < arraylength(L("arr_2"))) {
		L("new_arr")[arraylength(L("new_arr"))] = L("arr_2")[L("idx")];
		L("idx")++;
	}
	return L("new_arr");
}

# Swap
# Swap elements at first and second indices in array
#
# Args:
# L("arr") = Non-empty array
# L("first_idx") = Index of first element to swap
# L("second_idx") = Index of second element to swap
#
# Returns: Array with swapped elements
#
# Note that indices must be < array len
Swap(L("arr"), L("first_idx"), L("second_idx")) {
	L("temp") = L("arr")[L("first_idx")];
	L("arr")[L("first_idx")] = L("arr")[L("second_idx")];
	L("arr")[L("second_idx")] = L("temp");
	return L("arr");
}

# Quicksort wrapper to handle indices, since this can be a challenge.
# Use QuickSortPartition for subarray
QuickSort(L("arr")) {
	if (arraylength(L("arr")) <= 1) {
		return L("arr");
	}
	L("start") = 0;
	L("end") = arraylength(L("arr")) - 1;
	return QuickSortPartition(L("arr"), L("start"), L("end"));
}

# QuickSortPartition
# Performs quicksort on array from <low> to <high>
# (Lomuto partition scheme: last element is the pivot.)
#
# Args:
# L("arr"): Array to sort
# L("low"): Starting index of array
# L("high"): Upper index of array
#
# Returns:
# Sorted array
#
# N.B. low and high must be >=0 and < array len
QuickSortPartition(L("arr"), L("low"), L("high")) {
	if (L("low") < L("high")) {
		# Get pivot index
		L("pivot") = L("arr")[L("high")];
		L("i") = L("low") - 1;
		L("j") = L("low");
		while (L("j") < L("high")) {
			if (L("arr")[L("j")] <= L("pivot")) {
				L("i")++;
				L("arr") = Swap(L("arr"), L("i"), L("j"));
			}
			L("j")++;
		}
		L("arr") = Swap(L("arr"), L("i")+1, L("high"));
		L("pivot") = L("i") + 1; # Pivot's final resting index.
		# Sort each partition of array recursively
		L("arr") = QuickSortPartition(L("arr"), L("low"), L("pivot")-1);
		L("arr") = QuickSortPartition(L("arr"), L("pivot")+1, L("high"));
		# Return concatenated array
		# L("sorted") = L("left_arr");
		# L("sorted")[arraylength(L("sorted"))] = L("pivot");
		# return ConcatArrays(L("sorted"), L("right_arr"));
	}
	return L("arr");
}

# Naive tokenization function:
# expands "n't" to " not", maps non-alphabetic chars to spaces, splits
# on spaces, then keeps lowercased, dictionary-checked stems of tokens
# longer than two characters. Writes debug traces to debug.txt.
GetTokens(L("str")) {
	L("new_str");
	L("str") = strsubst(L("str"), "n't", " not");
	L("str") = strclean(L("str"));
	L("i") = 0;
	while (L("i") < strlength(L("str"))) {
		L("char") = strchar(L("str"), L("i"));
		L("char_to_add");
		if (strisalpha(L("char")) || L("char") == " ") {
			L("char_to_add") = L("char");
		}
		else {
			L("char_to_add") = " ";
		}
		if (!L("new_str")) {
			L("new_str") = L("char_to_add");
		}
		else{
			L("new_str") = L("new_str") + L("char_to_add");
		}
		L("i")++;
	}
	L("tokens") = split(L("new_str"), " ");
	L("return");
	L("j") = 0;
	"debug.txt" << arraylength(L("tokens")) << "\n";
	"debug.txt" << L("new_str") << "\n";
	"debug.txt" << L("str") << "\n";
	while (L("j") < arraylength(L("tokens"))) {
		L("this_str") = L("tokens")[L("j")];
		if (L("this_str")) {
			L("this_str") = strclean(L("this_str"));
			L("this_str") = strtolower(L("this_str"));
			if ((strlength(L("this_str")) > 2) && (spellword(L("this_str")))) {
				L("return")[arraylength(L("return"))] = stem(L("this_str"));
			}
		}
		L("j")++;
	}
	return L("return");
}

# GetUniqueWords
# Args:
# L("str"): Input string to get words from
#
#
# Should take parsetree node as well
# GetUniqueStrWords(L("str")) {
# # Assumes non-tokenized string
# L("str")
# if (L("vocab")) {
# for L("")
# }
#
# }

# Binary Search
# Args
# arr: sorted array in which to search
# (recursive helper; writes probe trace to binary.txt)
BinarySearchPartition(L("arr"), L("low"), L("high"), L("val")) {
	"binary.txt" << "low: " << L("low") << " high: " << L("high") << "\n";
	if (L("low") > L("high")) {
		return -1;
	}
	# if (L("low") == L("high")) {
	# if (L("arr")[L("low")] == L("val")) {
	# return L("low");
	# }
	# return -1;
	# }
	L("mid") = (L("high") + L("low")) / 2;
	"binary.txt" << "mid: " << L("mid") << "\n";
	if (L("arr")[L("mid")] == L("val")) {
		return L("mid");
	}
	else if (L("val") < L("arr")[L("mid")]) {
		L("high") = L("mid") - 1;
		return BinarySearchPartition(L("arr"), L("low"), L("high"), L("val"));
	}
	else {
		L("low") = L("mid") + 1;
		return BinarySearchPartition(L("arr"), L("low"), L("high"), L("val"));
	}
}

# Binary Search
# Args
# arr: sorted array to search
# val: value to search for
#
# Returns
# integer: -1 if not in array, otherwise idx in array
BinarySearch(L("arr"), L("val")) {
	L("len") = arraylength(L("arr"));
	L("low") = 0;
	if (L("len") == 1) {
		if (L("arr")[L("low")] == L("val")) {
			return 0;
		}
		else {
			return -1;
		}
	}
	return BinarySearchPartition(L("arr"), L("low"), L("len")-1, L("val"));
}

@@DECL
# Fetch the next attribute in a list of attributes
# NOTE(review): stray top-level snippet — assumes L("attr") already
# holds an attribute handle (e.g. from findattrs()). Confirm whether
# this line belongs inside a @DECL/@POST region.
L("return_attr") = nextattr(L("attr"));
@NODES _LINE

# Mark a single capitalized header-modifier word on the line.
@PRE
<1,1> cap(); # Element 1 must be capitalized.
@RULES
# Ex: Professional
_headerMod <- _xWILD [min=1 max=1 s match=("Professional" "Additional" "Detail")] @@
@NODES _ROOT

# Record the matched capital city on the state's "capital" concept.
@POST
L("capital") = getconcept(G("state"),"capital");
# FIX: N("text",1) read a (nonexistent) user variable named "text" on
# the matched node; N("$text",1) is the special variable holding the
# node's actual text, as used throughout this analyzer.
addstrval(L("capital"),"city",N("$text",1));
single();
@RULES
_xNIL <- _capital @@
@PATH _ROOT _experienceZone _experienceInstance _LINE

# Capture company / job title / country lines appearing within two
# lines (above or below) of the experience-instance anchor line.
# name regardless of position, etc. Should raise confidence here.
# May want to collect confidence in the instance.
@CHECK
if ( !X("company name",3)
  && (N("tmp") = (X("lineno") - X("anchor lineno",3)))
  && (N("tmp") == -2 || N("tmp") == 2) # Within 2 lines of anchor.
  ) succeed();
fail();
@POST
X("company name",3) = N("$text");
@RULES
_xNIL <- _company [s] @@

# Don't get cocky, Luke!
@CHECK
if ( !X("job title",3)
  && (N("tmp") = (X("lineno") - X("anchor lineno",3)))
  && (N("tmp") == -2 || N("tmp") == 2) # Within 2 lines of anchor.
  ) succeed();
fail();
@POST
# FIX: store on the _experienceInstance context node (index 3), as the
# @CHECK above and the sibling rules do; the unindexed X() wrote the
# value onto the wrong context node.
X("job title",3) = N("$text");
@RULES
_xNIL <- _jobTitle [s] @@
_xNIL <- _jobPhrase [s] @@

@CHECK
if ( !X("country",3)
  && (N("tmp") = (X("lineno") - X("anchor lineno",3)))
  # FIX: was (N("tmp") == 2 || N("tmp") == 2) — a duplicated copy-paste
  # condition that never matched a line two ABOVE the anchor, unlike
  # the two sibling checks in this pass.
  && (N("tmp") == -2 || N("tmp") == 2) # Within 2 lines of anchor.
  ) succeed();
fail();
@POST
X("country",3) = N("$text");
@RULES
_xNIL <- _country [s] @@
# End-of-analysis code region: dump the "format" KB subtree to
# format.kbb. SaveKB is defined elsewhere; by the conventions of the
# display functions in this project, the final argument 2 presumably
# selects compact display without ellipses — confirm against SaveKB.
@CODE
SaveKB("format.kbb",G("format"),2);
@@CODE
@PATH _ROOT _contactZone # 10/19/99 AM.
#@NODES _contactZone

# Flag lines in the contact zone as human-name candidates:
# (1) the first line of the zone, (2) a line followed by address
# evidence. Lines are counted, never reduced (noop).
@CHECK
if (N("wcap",3) < 2) fail();
if (N("wcap",3) > 4) fail();
if (N("nblobs",3) > 4) fail();
if (N("humanNameCandidate",3)) fail();
if (N("addressParts",3)) fail();
#Nge(3, "wcap", 2) # >= 2 cap words on line
#Nle(3, "wcap", 4) # <= 4 cap words on line
#Nle(3, "nblobs", 4) # <= 4 blobs on line
#Neq(3, "humanNameCandidate", 0)
#Neq(3, "addressParts", 0)
@POST
++N("humanNameCandidate",3);
noop();
@RULES
_xNIL <- _xSTART
	_xWILD [s star match=(_BLANKLINE _horizRule)]
	_LINE # No rename of line. # 12/25/99 AM.
	@@

# searches to find HEADER lines! Will require TWO or more address
# lines below a name, or a short address line.
# Should consist of alphabetics primarily.
@CHECK
if (N("nblobs",1) < 2) fail();
if (N("nblobs",1) > 5) fail();
if (N("wcap",1) < 2) fail();
if (N("humanNameCandidate",1)) fail();
if (N("addressParts",1)) fail();
if (N("walpha",3) > 7) fail();
#Nge(1, "nblobs", 2) # >=2 "blobs" (ie nonwhite sequences).
#Nle(1, "nblobs", 5) # <=5 "blobs"
#Nge(1, "wcap", 2) # >=2 cap words on line.
#Neq(1, "humanNameCandidate", 0)
#Neq(1, "addressParts", 0)
#Nle(3, "walpha", 7) # address part must be relatively short.
# notcontains(1, "_humanNameCandidate") # one idea for not nesting.
@POST
++N("humanNameCandidate",1);
noop(0); # Don't reduce the phrase.
@RULES
# Want NO-CONTAIN(_addressPart) in the _LINE.
_xNIL <-
	_LINE # No rename of line. # 12/25/99 AM.
	_xWILD [s star match=(_BLANKLINE _horizRule)]
	_addressPart [tree plus] # 08/03/99 AM.
	@@

@CHECK
if (N("nblobs",1) < 2) fail();
if (N("nblobs",1) > 5) fail();
if (N("wcap",1) < 2) fail();
if (N("humanNameCandidate",1)) fail();
if (N("addressParts",1)) fail();
#Nge(1, "nblobs", 2) # >=2 "blobs" (ie nonwhite sequences).
#Nle(1, "nblobs", 5) # <=5 "blobs"
#Nge(1, "wcap", 2) # >=2 cap words on line.
#Neq(1, "humanNameCandidate", 0)
#Neq(1, "addressParts", 0)
@POST
++N("humanNameCandidate",1);
noop(0); # Don't reduce the phrase.
@RULES
# Want NO-CONTAIN(_addressPart) in the _LINE.
_xNIL <-
	_LINE # No rename of line. # 12/25/99 AM.
	_xWILD [s star match=(_BLANKLINE _horizRule)]
	_addressPart [tree min=2 max=0] # 2 or more.
	@@

# current address block.
@POST
noop();
@RULES
_xNIL <- _addressPart [tree plus] @@
@PATH _ROOT _experienceZone _experienceInstance _LINE

# Fill experience-instance fields (job title, company, date range,
# city/state/country) from lines inside the instance, taking a caps
# phrase only when its confidence equals the instance's recorded high
# and meets the global threshold.
@CHECK
if ( !X("job title",3)
  && (N("job conf tot",1) == X("job conf hi",3)
  && N("job conf tot",1) >= G("threshold") ) )
  succeed();
fail();
@POST
X("job title",3) = N("$text",1);
@RULES
_xNIL <- _Caps [rename=(_jobTitle)] @@

@CHECK
if ( !X("company name",3)
  && (N("company conf tot",1) == X("company conf hi",3)
  && N("company conf tot",1) >= G("threshold") ) )
  succeed();
fail();
@POST
X("company name",3) = N("$text",1);
@RULES
_xNIL <- _Caps [rename=(_company)] @@

@CHECK
if (X("date range",3)) fail(); # First date range wins.
@POST
X("date range",3) = N("$text",1);
@RULES
_xNIL <- _DateRange [s] @@

# Overwrite city and state.
@POST
if (N("city")) X("city",3) = N("city");
if (N("state")) X("state",3) = N("state");
@RULES
_xNIL <- _cityState [s] @@

@CHECK
if (X("city",3)) fail();
@POST
X("city",3) = N("$text");
@RULES
_xNIL <- _city [s] @@

@CHECK
if (X("state",3)) fail();
@POST
X("state",3) = N("$text");
@RULES
_xNIL <- _state [s] @@

@CHECK
if (X("country",3)) fail();
@POST
X("country",3) = N("$text");
@RULES
_xNIL <- _country [s] @@
@PATH _ROOT _labels

# Parse one RadLex label line of the form
#   http...#RID<number> <rest of line>
# creating a concept named by the numeric RID under G("RadLex").
@POST
S("RID") = makeconcept(G("RadLex"), N("$text", 5));
single();
@RULES
_labelEntry <- http ### (1)
	_xWILD [plus fails=(\#)] ### (2)
	\# ### (3)
	RID ### (4)
	_xNUM ### (5)
	_xWILD [plus fails=(\n \r)] ### (6)
	_xWILD [one matches=(\n \r)] ### (7)
	@@
@NODES _LINE

# Count alphabetic tokens on the line; for tokens longer than one
# character, also tally striscaps() matches ("caps") and strisupper()
# matches ("upper") on the enclosing line node.
@POST
X("words") = X("words") + 1;
if (strlength(N("$text")) > 1)
{
	# FIX: dropped the duplicated strlength(...) > 1 tests inside each
	# branch — the enclosing if already guarantees that condition.
	if (striscaps(N("$text")))
	{
		X("caps") = X("caps") + 1;
	}
	else if (strisupper(N("$text")))
	{
		X("upper") = X("upper") + 1;
	}
}
@RULES
_xNIL <- _xALPHA ### (1)
	@@
@PATH _ROOT _paragraph _sentence

# Lexical tagging inside sentences: reduce closed-class words to their
# part-of-speech / semantic categories, and tag acquire/buy verbs with
# an "action" attribute for later event extraction.
@POST
S("compare") = ">";
"tester.txt" << "testing!\n"; # Debug trace.
single();
@RULES
_compare <- more [s] ### (1)
	than [s] ### (2)
	@@

@RULES
_pro <- _xWILD [s one matches=(it he she its all they them those)] ### (1) @@
_prep <- _xWILD [s one matches=(as at in of with into)] ### (1) @@
_be <- _xWILD [s one matches=(is be are was were)] ### (1) @@
_det <- _xWILD [s one matches=(a the an some this these)] ### (1) @@
_conj <- _xWILD [s one matches=(and \, or \&)] ### (1) @@
_have <- _xWILD [s one matches=(have has had offer offers offered include includes)] ### (1) @@
_conditional <- _xWILD [s one matches=(could can should)] ### (1) @@
_that <- _xWILD [s one matches=(that which)] ### (1) @@
_to <- _xWILD [s one matches=(to)] ### (1) @@
_special <- _xWILD [s one matches=(only when so whenever whatever)] ### (1) @@
_direction <- _xWILD [s one matches=(up down steady)] ### (1) @@
_found <- _xWILD [s one matches=(found founds founded founding)] ### (1) @@
_defend <- _xWILD [s one matches=(defend defends defended defending)] ### (1) @@
_adj <- _xWILD [s one matches=(last latest)] ### (1) @@
_adv <- _xWILD [s one matches=(substantially)] ### (1) @@
_position <- _xWILD [s one matches=(above below)] ### (1) @@
_field <- _xWILD [s one matches=(area field)] ### (1) @@
_moneyType <- _xWILD [s one matches=(cash currency)] ### (1) @@

@POST
S("action") = "acquire";
single();
@RULES
_acquire <- _xWILD [s one matches=(acquire acquires acquired acquiring)] ### (1) @@

@POST
S("action") = "buy";
single();
@RULES
_buyEvent <- _xWILD [s one matches=(purchase sale)] ### (1) @@
_buy <- _xWILD [s one matches=(buy buys bought)] ### (1) @@
## CODE REGION
#@CODE
#prlit("output.txt", "Assigned Phones=")
#fprintvar("output.txt", "Assigned Phones")
#prlit("output.txt", "\n")
#prlit("output.txt", "Total Phones=")
#fprintvar("output.txt", "Total Phones")
#prlit("output.txt", "\n")
#prlit("output.txt", "\n")
#prlit("output.txt", "Total Names=")
#fprintvar("output.txt", "Total Names")
#prlit("output.txt", "\n\n")
#@@CODE

# Output pass: print labeled contact-information fields (address,
# phones, city/state/zip, email, URL) to output.txt.
# fills for contact information.
# @NODES _addressBlock
# @MULTI _contactZone # 09/04/99 AM.

# Heur: Output name if there is only one in the contact zone.
# one for names.
# Heur: If multiple names, output name that is attached to
# "Resume of X" or similar phrase.
#
# @CHECK
# Ggt("Total Names", 1)
# Ngt(1, "ResumeOf", 0)
#@POST
#prxtree("output.txt", "Title: ", 1, "_prefixName", "\n")
#prxtree("output.txt", "First Name: ", 1, "_firstName", "\n")
#prxtree("output.txt", "Middle Name: ", 1, "_middleName", "\n")
#prxtree("output.txt", "Last Name: ", 1, "_lastName", "\n")
#prxtree("output.txt", "Suffix Name: ", 1, "_suffixName", "\n")
#@RULES
#_xNIL <- _humanName @@
#
@POST
prxtree("output.txt", "PO Box: ", 1, "_poBoxNumber", "\n");
prxtree("output.txt", "Number: ", 1, "_streetNumber", "\n");
prxtree("output.txt", "Direction: ", 1, "_direction", "\n");
prxtree("output.txt", "Street: ", 1, "_streetName", "\n");
prxtree("output.txt", "Road: ", 1, "_road", "\n");
prxtree("output.txt", "Unit: ", 1, "_unit", "\n");
prxtree("output.txt", "Room: ", 1, "_room", "\n");
@RULES
_xNIL <- _addressLine @@ # 09/04/99 AM.

@POST
prxtree("output.txt", "Unit: ", 1, "_unit", "\n");
prxtree("output.txt", "Room: ", 1, "_room", "\n");
@RULES
_xNIL <- _unitRoom @@ # 09/23/99 AM.

@POST
prlit("output.txt", "Home phone: ");
prxtree("output.txt", "(+", 1, "_countryCode", ") "); # 09/25/99 AM.
# Note: Empty string in rules file not handled.
#
#prxtree("output.txt", "", 1, "_areaCode", " ");
prtree("output.txt", 1, "_areaCode");
prlit("output.txt", " ");
prtree("output.txt", 1, "_prefix");
prlit("output.txt", " ");
prtree("output.txt", 1, "_suffix");
prlit("output.txt", "\n");
@RULES
_xNIL <- _phoneHomePhrase @@ # 09/04/99 AM.

# phone number in contact zones, assume it is the home phone number.
@CHECK
if ( G("Assigned Phones") == 0 && G("Total Phones") == 1 ) succeed();
fail();
@POST
prlit("output.txt", "Home phone: ");
#prlit("output.txt", "(+");
#prtree("output.txt", 1, "_countryCode");
#prlit("output.txt", ") ");
prxtree("output.txt", "(+", 1, "_countryCode", ") "); # 09/25/99 AM.
prtree("output.txt", 1, "_areaCode");
prlit("output.txt", " ");
prtree("output.txt", 1, "_prefix");
prlit("output.txt", " ");
prtree("output.txt", 1, "_suffix");
prlit("output.txt", "\n");
@RULES
_xNIL <- _phoneNumber @@

@POST
prlit("output.txt", "Work phone: ");
prxtree("output.txt", "(+", 1, "_countryCode", ") "); # 09/25/99 AM.
prtree("output.txt", 1, "_areaCode");
prlit("output.txt", " ");
prtree("output.txt", 1, "_prefix");
prlit("output.txt", " ");
prtree("output.txt", 1, "_suffix");
prlit("output.txt", " ext ");
prtree("output.txt", 1, "_extension");
prlit("output.txt", "\n");
@RULES
_xNIL <- _phoneWorkPhrase @@ # 09/04/99 AM.

@POST
prlit("output.txt", "Fax phone: ");
prtree("output.txt", 1, "_areaCode");
prlit("output.txt", " ");
prtree("output.txt", 1, "_prefix");
prlit("output.txt", " ");
prtree("output.txt", 1, "_suffix");
prlit("output.txt", "\n");
@RULES
_xNIL <- _phoneFaxPhrase @@

@POST
prlit("output.txt", "Home/Fax phone: ");
prtree("output.txt", 1, "_areaCode");
prlit("output.txt", " ");
prtree("output.txt", 1, "_prefix");
prlit("output.txt", " ");
prtree("output.txt", 1, "_suffix");
prlit("output.txt", "\n");
@RULES
_xNIL <- _phoneHomeFaxPhrase @@

@POST
prlit("output.txt", "Pager phone: ");
prtree("output.txt", 1, "_areaCode");
prlit("output.txt", " ");
prtree("output.txt", 1, "_prefix");
prlit("output.txt", " ");
prtree("output.txt", 1, "_suffix");
prlit("output.txt", "\n");
@RULES
_xNIL <- _phonePagerPhrase @@

@POST
prlit("output.txt", "Cell phone: ");
prtree("output.txt", 1, "_areaCode");
prlit("output.txt", " ");
prtree("output.txt", 1, "_prefix");
prlit("output.txt", " ");
prtree("output.txt", 1, "_suffix");
prlit("output.txt", "\n");
@RULES
_xNIL <- _phoneCellPhrase @@

@POST
prxtree("output.txt", "City: ", 1, "_cityName", "\n");
prxtree("output.txt", "State: ", 1, "_stateName", "\n");
prxtree("output.txt", "Province: ", 1, "_province", "\n");
prxtree("output.txt", "Zip: ", 1, "_zipCode", "\n");
prxtree("output.txt", "Zip suffix: ", 1, "_zipSuffix", "\n");
prxtree("output.txt", "Country: ", 1, "_country", "\n");
@RULES
_xNIL <- _cityStateZip @@ # 09/04/99 AM.

# Only use cityState if no cityStateZip around.
# (Should find the first, also.)
@CHECK
if (G("cityStateZip")) fail();
@POST
prxtree("output.txt", "City: ", 1, "_city", "\n");
prxtree("output.txt", "State: ", 1, "_state", "\n");
# NOTE(review): the next line prints the "State: " label for _country;
# the _cityStateZip rule above uses "Country: " — looks like a
# copy-paste slip, but confirm with the output.txt consumer before
# changing this runtime string.
prxtree("output.txt", "State: ", 1, "_country", "\n");
prxtree("output.txt", "Province: ", 1, "_province", "\n");
@RULES
_xNIL <- _cityState @@

@POST
prxtree("output.txt", "Email: ", 1, "_email", "\n");
@RULES
_xNIL <- _email @@ # 09/04/99 AM.

@POST
prxtree("output.txt", "URL: ", 1, "_url", "\n");
@RULES
_xNIL <- _url @@ # 09/04/99 AM.
# PASS: Seed basic word classes under _ROOT.
# Tags literal words as _state/_prep/_det/_adj/_noun/_month, and builds
# attributed reductions: "three" -> _num with num=3, "she" -> _pro with
# gender="female", "greater than" -> _compare with compare=">".
@NODES _ROOT @RULES _state <- California @@ _prep <- _xWILD [one matches=(as at in of with into)] @@ _det <- _xWILD [one matches=(a an the this that)] @@ _adj <- _xWILD [one matches=(blue red green happy funny)] @@ _noun <- _xWILD [one matches=(moose frog lizard)] @@ _month <- _xWILD [one matches=(january february march)] @@ @POST S("num") = 3; single(); @RULES _num <- three @@ @POST S("gender") = "female"; single(); @RULES _pro <- she @@ @POST S("compare") = ">"; single(); @RULES _compare <- greater than @@
# PASS: Remove whitespace tokens inside _LINE nodes.
# excise(1,1) deletes the matched _xWHITE node; noop() prevents any reduction.
@PATH _ROOT _LINE @POST excise(1,1); noop(); @RULES _xNIL <- _xWHITE [s] ### (1) @@
# SAMPLE: inputrangetofile().
# Writes the input-text substring at offsets 0..9 (inclusive, i.e. the first
# 10 characters) to c:\tmp.txt, then closes the stream.
# Fetch a substring of the input text, as specified by start and end offsets, and print it to an output stream G("out") = openfile("c:\\tmp.txt"); inputrangetofile(0,9,G("out"));   # Print the first 10 characters of the input text. closefile(G("out"));
# PASS: Delete parenthesized asides that stay on one line.
# Matches "(" + any run of tokens that contains no ")" or newline + ")"
# and excises all three elements; noop() suppresses reduction.
@NODES _ROOT @POST excise(1,3); noop(); @RULES _xNIL <- \( ### (1) _xWILD [fails=( \) \n \r)] ### (2) \) ### (3) @@
# PASS: Collect LaTeX \bibitem{key} references.
# Captures the text between the braces as the "ref" attribute, logs
# "Bib ref: <key>" to bib.txt, and reduces the whole match to _bibRef.
@NODES _ROOT @POST S("ref") = N("$text",4); "bib.txt" << "Bib ref: " << S("ref") << "\n"; single(); @RULES _bibRef <- \\ [s] ### (1) bibitem [s] ### (2) \{ [s] ### (3) _xWILD [s fails=(\})] ### (4) \} [s] ### (5) @@
# SAMPLE: coutreset().
# Inside VisualText (interactive()), rebinds cout to "out.txt"; outside,
# cout stays on the user-supplied stream. Then writes a greeting.
# Rebind the output stream cout to a given file @CODE # In VisualText, output to a file.  Outside VisualText, output to user-supplied stream. if (interactive())    coutreset("out.txt"); cout() << "Hello output stream!" << "\n"; @@CODE
# SAMPLE: @PRE lowercase() constraint.
# The rule reduces an _xALPHA token to _noun only when the token is
# entirely lowercase.
# The action lowercase specifies that the first rule element (_xALPHA) must be a word in all lowercase letters in order to be reduced to the suggested node _noun @PRE <1,1>lowercase(); @RULES _noun <- _xALPHA@@
# PASS: Count closing </item> tags.
# Increments G("count") for each "</item>" sequence and reduces it to _item.
# NOTE(review): assumes G("count") is initialized (e.g. to 0) in an earlier
# pass — confirm; an uninitialized variable would start from empty.
@NODES _ROOT @POST G("count") = G("count") + 1; single(); @RULES _item <- \< ### (1) \/ ### (2) item ### (3) \> ### (4) @@
# PASS: Seed dictionary-markup tags.
# Reduces literal markers to tag nodes: "hword" -> _wordClass,
# "parts-of-speech" -> _pos, "sense-content" -> _sense, and closing
# header tags "</h1>" / "</h2>" -> _h.
@NODES _ROOT @RULES _wordClass <- hword @@ _pos <- parts \- of \- speech @@ _sense <- sense \- content @@ _h <- \< ### (1) \/ ### (2) h ### (3) _xWILD [one match=(1 2)] ### (4) \> ### (5) @@
# SAMPLE: attribute value removal (rmvals / rmval).
# Rebuilds "myConcept" fresh under the KB root, shows rmvals() returning 0
# when the attribute has no values yet, then adds two string values to
# "myAttr" and removes the first value via rmval() on the value handle.
# Create an attribute named 'myAttr' under 'aConcept' and give it the value 'myVal' Then remove the value from 'myAttr' if(findconcept(findroot(),"myConcept")) rmconcept(findconcept(findroot(),"myConcept")); G("aConcept") = makeconcept(findroot(), "myConcept"); # remove values of existing attribute. G("result") = rmvals(G("aConcept"),"myAttr"); "output.txt" << "G(result) should be 0, is " << G("result") << "\n"; addstrval(G("aConcept"),"myAttr","myVal1"); addstrval(G("aConcept"),"myAttr","myVal2"); G("anAttr") = findattr(G("aConcept"),"myAttr"); G("aVal") = attrvals(G("anAttr")); G("result") = rmval(G("anAttr"), G("aVal"));
# SAMPLE: suffix() check in @CHECK.
# Reduces an alphabetic token to _snoun (layered as _sverb) only when the
# token ends in "s".
# See if given string is a suffix of given word @CHECK if (!suffix(N("$text",1),"s")) fail(); @RULES _snoun [layer=(_sverb)] <- _xALPHA @@
# PASS: Extract LaTeX figure content inside _figure zones.
# Handles \includegraphics with and without an [options] block (image path
# stored on both the new node and the _figure parent, logged to images.txt),
# records _label info on the parent, and captures \caption{...} text.
# NOTE(review): addFigure(...) is a user-defined function declared elsewhere
# — confirm its contract before relying on the label bookkeeping.
@NODES _figure @POST S("image") = N("$text",7); X("image") = N("$text",7); "images.txt" << S("image") << "\n"; single(); @RULES _image <- \\ ### (1) includegraphics ### (2) \[ ### (3) _xWILD ### (4) \] ### (5) \{ ### (6) _xWILD ### (7) \} ### (8) @@ @POST S("image") = N("$text",4); X("image") = N("$text",4); "images.txt" << S("image") << "\n"; single(); @RULES _image <- \\ ### (1) includegraphics ### (2) \{ ### (3) _xWILD ### (4) \} ### (5) @@ @POST X("label") = N("label full"); addFigure(N("label name")); @RULES _xNIL <- _label ### (2) @@ @POST S("caption") = N("$text",4); X("caption") = N("$text",4); single(); @RULES _caption <- \\ ### (1) caption ### (2) \{ ### (3) _xWILD ### (4) \} ### (5) @@
# PASS: Remove temporary _clausestart nodes inside clauses.
# Copies the node's attributes up to the enclosing _clause (pncopyvars),
# then splices the node out so its children take its place.
@CODE L("hello") = 0; @@CODE #@PATH _ROOT _TEXTZONE _sent _clause @NODES _clause # Zap clause start temporary nodes. @POST # Copy any attrs from the node to the clause. pncopyvars(N(1),X()); splice(1,1); @RULES _xNIL <- _clausestart @@
# PASS: Group lines into _paragraph zones using the "NL" node variable.
# First rule: paragraph at document start runs until a node carrying "NL".
# Second rule: a node with "NL" set starts a new paragraph that extends
# over following nodes without "NL".
@NODES _ROOT @PRE <2,2> varz("NL"); @RULES _paragraph <- _xSTART ### (1) _xWILD [plus] ### (2) @@ @PRE <1,1> var("NL"); <2,2> varz("NL"); @RULES _paragraph <- _xWILD [one] ### (1) _xWILD [star] ### (2) @@
# PASS: Parse XML DTD element content models.
# Tracks the element being declared via G("CurrentElementName") (set on
# _ElementDeclStart, cleared on _EndTag), reduces (#PCDATA ...) forms to
# _Mixed, and reduces child-element names (with optional ?/*/+ modifier)
# to _cp while registering each child under the current element's concept
# in the G("Elements") KB hierarchy (also ensuring a top-level concept for
# the child exists). The EMPTY/ANY keywords are rejected in @CHECK.
@NODES _ROOT @POST G("CurrentElementName") = str(N("ElementName",1)) ; noop() ; @@POST @RULES _xNIL <- _ElementDeclStart ### (1) @@ @@RULES @POST G("CurrentElementName") = 0 ; noop() ; @@POST @RULES _xNIL <- _EndTag ### (1) @@ @@RULES @RULES _Mixed <- _PCDataStart _xWILD [min=1 max=0 fail=("\)")] _whiteSpace [opt] \) \* @@ _Mixed <- _PCDataStart _whiteSpace [opt] \) @@ @@RULES @CHECK if (strequal(str(N("$text",1)),"EMPTY") || strequal(str(N("$text",1)),"ANY")) { fail() ; } @@CHECK @POST S("buffer1") = str(N("$text",1)) ; S("buffer2") = str(N("$text",2)) ; if (N("$text",1) && N("$text",2)) { S("ChildElementName") = S("buffer1") + S("buffer2") ; } else if ( N("$text",1)) { S("ChildElementName") = S("buffer1") ; } else if ( N("$text",2)) { S("ChildElementName") = S("buffer2") ; } G("CurrentConcept") = findconcept(G("Elements"),G("CurrentElementName")) ; G("CurrentChildConcept") = findconcept(G("CurrentConcept"),S("ChildElementName")) ; if (G("CurrentChildConcept") == 0 ) { G("CurrentChildConcept") = makeconcept(G("CurrentConcept"),S("ChildElementName")) ; G("ReferenceIDforConcept") = findconcept(G("Elements"),S("ChildElementName")) ; if (G("ReferenceIDforConcept")==0) { makeconcept(G("Elements"),S("ChildElementName")) ; } } single() ; @@POST @RULES _cp <- _xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")] _xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] _xWILD [s min=0 max=1 matches=("?" "*" "+")] @@ @@RULES @RULES _cp <- \( [one] ### (1) _PEReference [one] ### (2) \) [one] ### (3) _xWILD [opt match=("?" "+" "*")] ### (4) @@ @@RULES
# PASS: Register sentences in the knowledge base.
# Skipped entirely unless G("semantic processing") is on; optionally logs a
# pass banner to dump.txt when verbose. sentregister() is called for each
# _sent under _TEXTZONE.
@CODE if (!G("semantic processing")) exitpass(); if (G("verbose")) "dump.txt" << "\n" << "[PASS sentsem: Register sentences in KB.]" << "\n\n"; @@CODE @NODES _TEXTZONE @POST sentregister(N(1)); @RULES _xNIL <- _sent @@
# PASS: Highlight temporal expressions in clauses/adverbials.
# Skipped when input is pretagged or hiliting is off. A candidate node
# (_advl/_np/_fnword/_prep) passes only if it carries date semantics
# ("sem" == "date", or "sem date"/"date node" set). Matches are logged to
# temporal.txt (verbose) and registered as "temporal" on both the enclosing
# context node and its parent; noop() leaves the tree unchanged (matching
# alone provides the green hilite).
@CODE if (G("pretagged")) exitpass(); if (!G("hilite")) # 10/25/10 AM. exitpass(); # 10/25/10 AM. G("hello") = 0; @@CODE # This isn't catching all the temporals. #@PATH _ROOT _TEXTZONE _sent _clause # Comment. # #@MULTI _ROOT @NODES _clause _advl # 07/13/12 AM. @CHECK if (N("sem") != "date" && !N("sem date") && !N("date node")) # 04/20/05 AM. fail(); @POST L("x3") = pnparent(X()); # 07/13/12 AM. if (G("verbose")) "temporal.txt" << N("$text") << "\n"; # Registering in clause and sent. registerx(X(),"temporal",N("$text")); registerx(L("x3"),"temporal",N("$text")); noop(); # Merely matching the rule will set text to green. @RULES _xNIL <- _xWILD [one match=( _advl _np _fnword _prep )] @@
# PASS: Assemble Portuguese phrase patterns inside lines.
# Combines "_tempo mais-do-que _tempo" and "_tempo do _tempo" comparisons,
# "_numero de <word>" / "do _numero" numeric phrases, "X e Y pessoa(s)"
# person groupings (second person stored as "pessoa2" on the line when
# "pessoa" is already set), and "do verbo <word>" verb roots. Extracted
# values are written onto the enclosing _LINE via X(...,3).
@PATH _ROOT _textZone _LINE @POST X("tempo",3) = N("$text",2); single(); @RULES _tempo <- _tempo mais \- do \- que _tempo @@ @POST X("root",3) = N("$text",3); X("numero",3) = N("$text",1); single(); @RULES _numero <- _numero de _xALPHA @@ @POST group(3,3,"_pessoa"); group(1,1,"_pessoa"); @RULES _xNIL <- _pessoa e _pessoa _xWILD [one match=(pessoas pessoa)] @@ @POST if (X("pessoa")) { X("pessoa2",3) = N("$text",1); } else { X("pessoa",3) = N("$text",1); } single(); @RULES _pessoa <- _pessoa pessoa @@ @POST X("numero",3) = N("$text",2); single(); @RULES _numero <- do _numero @@ @POST X("tempo",3) = N("$text",1) + " do " + N("$text",3); single(); @RULES _tempo <- _tempo do _tempo @@ @POST X("root",3) = N("$text",3); single(); @RULES _root <- do verbo _xALPHA @@
# PASS: Dump the entire knowledge base (from the root concept) to "all.kbb".
@CODE SaveKB("all.kbb",findroot(),2); @@CODE
# PASS: Measure indentation.
# Increments the "indent" counter on the enclosing _item (path position 3)
# once per _space node found under _indent.
@PATH _ROOT _LINE _item _indent @POST X("indent",3)++; @RULES _xNIL <- _space ### (1) @@
# PASS: Zone lines under their header.
# A _LINE carrying the "header" variable starts a _headerZone that absorbs
# following nodes lacking "header"; the header value is copied onto the zone.
@NODES _ROOT @PRE <1,1> var("header"); <2,2> varz("header"); @POST S("header") = N("header",1); single(); @RULES _headerZone <- _LINE ### (1) _xWILD [plus] ### (2) @@
# PASS: Register headwords in the KB.
# For markup of the form <hword ...>word<, creates (or fetches) a concept
# for the word under G("words"). No reduction is performed.
@NODES _ROOT @POST getconcept(G("words"),N("$text",4)); @RULES _xNIL <- _wordClass ### (1) \" ### (2) \> ### (3) _xALPHA ### (4) \< ### (5) @@
# PASS: Recognize combined home/fax phone phrases inside lines.
# Variants: "home/fax: <number>", "home fax: <number>", "<number> home/fax",
# "<number> home fax", and "<number> (home/fax)". The "home"/"fax" literals
# are length-constrained via @PRE; the label tokens are regrouped into a
# _HomeFax node before the whole match reduces to _phoneHomeFaxPhrase.
@NODES _LINE @PRE <1,1> length(4); <3,3> length(3); @POST group(1, 3, "_HomeFax"); single(); @RULES # Ex: home/fax:\_( _phoneHomeFaxPhrase <- _xWILD [min=1 max=1 s match=("home" "Home")] _xWILD [min=1 max=1 s match=("\/")] _xWILD [min=1 max=1 s match=("fax" "Fax")] \: [s] _xWHITE [star s] _phoneNumber [s] @@ # Ex: home\_fax:\_( _phoneHomeFaxPhrase <- _xWILD [min=1 max=1 s match=("home")] _xWHITE [star s] fax [s] \: [s] _xWHITE [star s] _phoneNumber [s] @@ @PRE <3,3> length(4); <5,5> length(3); @POST group(3, 5, "_HomeFax"); single(); @RULES _phoneHomeFaxPhrase <- _phoneNumber [s] _xWHITE [star s] _xWILD [min=1 max=1 trig s match=("home" "Home")] _xWILD [min=1 max=1 s match=("\/")] _xWILD [min=1 max=1 s match=("fax" "Fax")] @@ _phoneHomeFaxPhrase <- _phoneNumber [s] _xWHITE [star s] _xWILD [min=1 max=1 trig s match=("home")] _xWHITE [star s] fax [s] @@ @PRE <4,4> length(4); <6,6> length(3); @POST group(4, 6, "_HomeFax"); single(); @RULES _phoneHomeFaxPhrase <- _phoneNumber [s] _xWHITE [star s] _xWILD [min=1 max=1 s match=("_openPunct" "\(")] _xWILD [min=1 max=1 trig s match=("home" "Home")] _xWILD [min=1 max=1 s match=("\/")] _xWILD [min=1 max=1 s match=("fax" "Fax")] _xWILD [min=1 max=1 s match=("_closePunct" "\)")] @@
# PASS: TEXT_TAGGING.pat — assemble HTML tags embedded in the text.
# Rules 1a-1c build _HTMLOpenTag from start-tag + parameters/modifiers +
# end-tag, copying the tag name and break/heading attributes up, and
# collecting parameter name/value pairs into the parallel arrays
# "parameterNameList"/"parameterValueList" (count in "paramNum").
# Rule 2 builds _HTMLEndTag; rule 3 _HTMLComment; rule 4 _HTMLSpecialTag
# (<!...>); rule 5 _ASPScript.
# NOTE(review): rules 1b, 1c and 2 assign S("tagName") twice in a row —
# the second assignment is redundant; confirm before removing.
# NOTE(review): rule 1c's parameter loop terminates on a null parameter
# (while (G("parameter"))) whereas rule 1a loops until the computed end
# node — confirm whether the divergence is intentional.
######################################################################
# FILE: TEXT_TAGGING.pat # # SUBJ: Handle any HTML tagging inserted in the text file. # # This pass assembles the pieces of an HTML tag into a unit # # AUTH: Paul Deane # # CREATED: 02/Jan/01 # DATE OF CURRENT VERSION: 31/Aug/01 # # Copyright ###################################################################### @NODES _ROOT ############################################################### # CONTENT INDEX # # 1. Put together the pieces of an opening tag # # a. Simple list of parameters a=b # # b. Simple list of modifiers like noshade or nowrap # # c. Combinations of parameters and modifiers # # 2. Put together the pieces of an ending tag # # 3. Put together the pieces of a comment # # 4. Put together the pieces of a special <! tag # ############################################################### ############################################## # # # (rule 1a) # # # # Put together the pieces of an opening tag # ############################################## @POST #extract the tag name and pass it up the tree S("tagName") = N("tagName",1) ; S("horizBreak") = N("horizBreak",1) ; S("paraBreak") = N("paraBreak",1) ; S("sectionBreak") = N("sectionBreak",1) ; S("heading level") = N("heading level",1) ; S("lineBreak") = N("lineBreak",1) ; #if the rule match includes a parameter, we're going #to want to extract all the parameter info and make #it immediately available on the node representing #the tag #so we get the first element that matches the parameter #name node in the rule G("currentParameterNode") = N(2) ; #then we get the last element that matches the #parameter name node in the rule G("endParameterList") = lasteltnode(2) ; #and advance to one past it, which will tell us #we're out of the list when we go into the loop G("endParameterList") = pnnext( G("endParameterList")); #now we'll loop through the list of parameters and #extract all the information to a pair of arrays. 
#The first array will give the name of the parameter, #the corresponding position in the array will give its #value #first step: make sure the global paramNum variable #is set to zero before we go into the loop G("paramNum") = 0 ; #now loop if (G("currentParameterNode")!=0) { G("parameter") = pnvar(G("currentParameterNode"),"parameterName"); G("parameterVal") = pnvar( G("currentParameterNode"),"parameterValue") ; while (G("currentParameterNode")!=G("endParameterList")) { #set the current position on the parameter name #list to the current parameter name if (G("parameter")) { S("parameterNameList")[G("paramNum")] = G("parameter"); #set the current position on the parameter value #list to the current parameter value S("parameterValueList")[G("paramNum")] = G("parameterVal"); G("paramNum")++; } G("currentParameterNode") = pnnext(G("currentParameterNode")) ; if (G("currentParameterNode")) { G("parameter")= pnvar(G("currentParameterNode"),"parameterName"); G("parameterVal") = pnvar( G("currentParameterNode"),"parameterValue") ; } else G("parameter")=0; } } S("paramNum") = G("paramNum"); single() ; @@POST @RULES _HTMLOpenTag [base] <- _startOpenTag [s trig one] ###(1) _paramValue [plus] ###(2) _endTag [s one] ###(3) @@ @@RULES ############################################## # # # (rule 1b) # # # # Put together the pieces of an opening tag # ############################################## @POST #extract the tag name and pass it up the tree S("tagName") = N("tagName",1) ; S("tagName") = N("tagName",1) ; S("horizBreak") = N("horizBreak",1) ; S("paraBreak") = N("paraBreak",1) ; S("sectionBreak") = N("sectionBreak",1) ; S("heading level") = N("heading level",1) ; S("lineBreak") = N("lineBreak",1) ; single() ; @@POST @RULES _HTMLOpenTag [base] <- _startOpenTag [s trig one] ###(1) _xALPHA [star] ###(2) _endTag [s one] ###(3) @@ @@RULES ############################################## # # # (rule 1c) # # # # Put together the pieces of an opening tag # # with mixed parameters and 
value settings # # like nowrap # ############################################## @POST #extract the tag name and pass it up the tree S("tagName") = N("tagName",1) ; S("tagName") = N("tagName",1) ; S("horizBreak") = N("horizBreak",1) ; S("paraBreak") = N("paraBreak",1) ; S("sectionBreak") = N("sectionBreak",1) ; S("heading level") = N("heading level",1) ; S("lineBreak") = N("lineBreak",1) ; #if the rule match includes a parameter, we're going #to want to extract all the parameter info and make #it immediately available on the node representing #the tag #so we get the first element that matches the parameter #name node in the rule G("currentParameterNode") = N(4) ; #then we get the last element that matches the #parameter name node in the rule G("endParameterList") = lasteltnode(4) ; #and advance to one past it, which will tell us #we're out of the list when we go into the loop G("endParameterList") = pnnext( G("endParameterList")); #now we'll loop through the list of parameters and #extract all the information to a pair of arrays. 
#The first array will give the name of the parameter, #the corresponding position in the array will give its #value #first step: make sure the global paramNum variable #is set to zero before we go into the loop G("paramNum") = 0 ; #now loop if (G("currentParameterNode")!=0) { G("parameter") = pnvar(G("currentParameterNode"),"parameterName"); G("parameterVal") = pnvar( G("currentParameterNode"),"parameterValue") ; while (G("parameter")) { #set the current position on the parameter name #list to the current parameter name if (G("parameter")) { S("parameterNameList")[G("paramNum")] = G("parameter"); #set the current position on the parameter value #list to the current parameter value S("parameterValueList")[G("paramNum")] = G("parameterVal"); G("paramNum")++; } G("currentParameterNode") = pnnext(G("currentParameterNode")) ; if (G("currentParameterNode")) { G("parameter")= pnvar(G("currentParameterNode"),"parameterName"); G("parameterVal") = pnvar( G("currentParameterNode"),"parameterValue") ; } else G("parameter")=0; } } single() ; @@POST @RULES _HTMLOpenTag [base] <- _startOpenTag [s trig one] ###(1) _xWHITE [star] ###(2) _xALPHA [opt] ###(3) _paramValue [star] ###(4) _xWHITE [star] ###(5) _xALPHA[opt] ###(6) _paramValue [star] ###(7) _xWHITE [star] ###(8) _xALPHA [opt] ###(9) _xWHITE [star] ###(10) _endTag [s one] ###(11) @@ @@RULES @POST #extract the tag name and pass it up the tree S("tagName") = N("tagName",1) ; S("horizBreak") = N("horizBreak",1) ; S("paraBreak") = N("paraBreak",1) ; S("sectionBreak") = N("sectionBreak",1) ; S("heading level") = N("heading level",1) ; S("lineBreak") = N("lineBreak",1) ; single() ; @@POST @RULES _HTMLOpenTag [base] <- _startOpenTag [s trig one] ###(1) _endTag [s one] ###(2) @@ @@RULES ############################################### # # # (rule 2) # # # # Put together the pieces of an ending tag # ############################################### @POST #extract the tag name and pass it up the tree S("tagName") = N("tagName",1) ; 
S("tagName") = N("tagName",1) ; S("horizBreak") = N("horizBreak",1) ; S("paraBreak") = N("paraBreak",1) ; S("sectionBreak") = N("sectionBreak",1) ; S("heading level") = N("heading level",1) ; S("lineBreak") = N("lineBreak",1) ; single() ; @@POST @RULES _HTMLEndTag [base] <- _startEndTag [s trig one] ###(1) _endTag [s one] ###(2) @@ @@RULES ############################################ # # # (rule 3) # # # # Put together the pieces of a comment # ############################################ @POST single() ; @@POST @RULES _HTMLComment [base] <- _startComment [s trig one] ###(1) _xWILD [star] ###(2) _endComment [s one] ###(3) @@ @@RULES ################################################# # # # (rule 4) # # # # Put together the pieces of a special <! tag # ################################################# @POST single() ; @@POST @RULES _HTMLSpecialTag [base] <- _startSpecialTag [s trig one] ###(1) _paramValue [star] ###(2) _endTag [s one] ###(3) @@ _HTMLSpecialTag [base] <- _startSpecialTag [s trig one] _xWILD [star] _endTag [s one] @@ @@RULES ############################################ # # # (rule 5) # # # # Put together the pieces of an ASP Script # ############################################ @POST single() ; @@POST @RULES _ASPScript [base] <- _startASPScript [s trig one] _xWILD [star fail=("\>")] _endTag [s one] @@ @@RULES
# PASS: Paragraph catch-all.
# Reduces any run of nodes that contains neither _endofpar nor an existing
# _par to a _par node.
# Match everything up to _endofpar or _par and reduce matches to _par @RULES _par <- _xWILD [plus fails=(_endofpar _par)] @@
# SAMPLE: kbdumptree().
# Builds a small "companies" hierarchy (ford, gm) under the KB root and
# dumps that subtree to c:\companies.kb.
# Create a dump file file_str of knowledge base concept root_con @CODE G("root") = findroot(); G("conc") = makeconcept(G("root"), "companies"); makeconcept(G("conc"), "ford"); makeconcept(G("conc"), "gm"); kbdumptree(G("conc"), "c:\\companies.kb"); @@CODE
# PASS: Zone subsection content.
# A _subsection node absorbs everything up to the next _subsection or end
# of the section; the subsection attribute is copied onto the new zone.
@NODES _section @POST S("subsection") = N("subsection", 1); single(); @RULES _subsection <- _subsection [one] _xWILD [plus fails=(_xEND _subsection)] @@
# SAMPLE: findattr() / attrname().
# Creates a concept with one attribute, prints the attribute's name, then
# shows that findattr() on a nonexistent attribute name yields a null
# ("junk") result.
# Fetch the attribute named nameString belonging to the concept concept G("myConcept") = makeconcept(findroot(),"a concept"); G("myAttr") = addattr(G("myConcept"),"an attribute"); "output.txt" << attrname(findattr(G("myConcept"),"an attribute")) << "\n"; G("junk") = findattr(G("myConcept"),"duh"); "output.txt" << "junk = " << G("junk");
# PASS: Fill attribute-block values.
# For a terminal word at the end of a _value node, stores the result of
# PartOfSpeech(word) on the enclosing _attrBlock.
# NOTE(review): PartOfSpeech(...) is a user-defined function declared
# elsewhere — confirm its return convention.
@PATH _ROOT _attrBlock _value @POST X("value",2) = PartOfSpeech(N("$text",1)); @RULES _xNIL <- _xALPHA \" _xEND @@
# PASS: Flatten URL fragments in label zones.
# Matches "http" + everything up to a "#" + the "#" itself and splices the
# matched span, replacing the nodes with their children; noop() prevents
# any reduction.
@PATH _ROOT _labels @POST splice(1,3); noop(); @RULES _xNIL <- http ### (1) _xWILD [plus fails=(\#)] ### (2) _xWILD [one matches=(\#)] ### (3) @@
# SAMPLE: fail() in a @CHECK region — unconditionally rejects the current
# rule match.
# Exit from a region or rule in a pass file @CHECK fail(); # Reject the current rule match. @@CHECK
# PASS: Default part-of-speech assignment for leftover function words.
# For tokens that have no "mypos" yet (and, where noted, are still raw
# _fnword nodes): before/except/that -> IN, what/whatever -> WP,
# both -> DT, more -> JJR, yet -> CC. A commented-out variant for
# so/while is retained for history.
@CODE L("hello") = 0; @@CODE @NODES _sent # so # while # "so" hasn't been grabbed up within a clause. # Assume it's a conjunction. #@CHECK # if (N("mypos")) # fail(); #@POST # chpos(N(1),"IN"); #@RULES #_xNIL <- _xWILD [s one match=( # so # while # )] @@ # before # coordinating conjunction. @CHECK if (N("mypos")) fail(); if (pnname(N(1)) != "_fnword") fail(); @POST chpos(N(1),"IN"); # before/IN. DEFAULT. @RULES _xNIL <- before [s] @@ @CHECK if (N("mypos")) fail(); if (pnname(N(1)) != "_fnword") fail(); @POST chpos(N(1),"IN"); # except/IN. DEFAULT. @RULES _xNIL <- except [s] @@ # what # whatever @CHECK if (N("mypos")) fail(); @POST chpos(N(1),"WP"); @RULES _xNIL <- _xWILD [s one match=( what whatever )] @@ # both. @CHECK if (N("mypos")) fail(); @POST chpos(N(1),"DT"); @RULES _xNIL <- _xWILD [s one match=( both )] @@ @CHECK if (N("mypos")) fail(); if (pnname(N(1)) != "_fnword") fail(); @POST chpos(N(1),"IN"); # that/IN. DEFAULT. @RULES _xNIL <- that [s] @@ @CHECK if (N("mypos")) fail(); @POST chpos(N(1),"JJR"); # more/JJR. DEFAULT. @RULES _xNIL <- more [s] @@ @CHECK if (N("mypos")) fail(); @POST chpos(N(1),"CC"); # yet/CC. DEFAULT. @RULES _xNIL <- yet [s] @@
# PASS: Group the first record into _FIELDS.
# From the start of input, everything up to and including the first
# _lineTerminator becomes a _FIELDS node (e.g. a CSV header row).
@NODES _ROOT @RULES _FIELDS <- _xSTART ### (1) _xWILD [plus fails=(_lineTerminator)] ### (2) _lineTerminator ### (3) @@
# PASS: Fill education-instance slots from lines inside the instance.
# School names, dates, degree/major (from _degreeInMajor, falling back to
# raw text when structured attributes are absent), and first-seen city,
# state, and minor are written onto the _educationInstance (path pos 3).
@PATH _ROOT _educationZone _educationInstance _LINE @POST X("school",3) = N("$text"); # Fill instance with school name. @RULES _xNIL <- _xWILD [s one matches=(_school _schoolPhrase _SchoolName)] @@ @POST X("date",3) = N("$text"); # Fill instance with date. @RULES _xNIL <- _DateRange [s] @@ _xNIL <- _SingleDate [s] @@ @POST # (RUG hierarchy guys don't layer semantics yet.) # This is a bit hacky, eg, for "MBA". if (N("degree")) X("degree",3) = N("degree"); else X("degree",3) = N("$text"); if (N("major")) X("major",3) = N("major"); else X("major",3) = N("$text"); @RULES _xNIL <- _degreeInMajor [s] @@ #_xNIL <- _degree [s] @@ @POST if (!X("city",3)) X("city",3) = N("$text"); # noop() @RULES _xNIL <- _city [s] @@ @POST if (!X("state",3)) X("state",3) = N("$text"); # noop() @RULES _xNIL <- _state [s] @@ @POST if (!X("minor",3)) X("minor",3) = N("$text"); # noop() @RULES _xNIL <- _minor [s] @@
# PASS: Report ambiguous categories.
# Sorts the children of G("categories"), then for each category whose
# "count" value exceeds 1, prints its "catid" values (dash-separated)
# followed by the category name to output.txt.
@CODE sortchilds(G("categories")); L("cat") = down(G("categories")); while (L("cat")) { L("count") = getnumval(findvals(L("cat"), "count")); if (L("count") > 1) { L("vals") = findvals(L("cat"), "catid"); while (L("vals")) { L("num") = getstrval(L("vals")); "output.txt" << L("num") << "-"; L("vals") = nextval(L("vals")); } "output.txt" << conceptname(L("cat")); "output.txt" << "\n"; } L("cat") = next(L("cat")); } @@CODE
# Example shows a way to "layer" semantics in the parse tree.
# After matching det + quan + adj(s) + noun(s), look under the last noun
# for a semantic node (e.g. "_house"). If one exists and its NAME is a
# nonliteral (contains "_"), promote that name over the whole phrase and
# then layer "_np" on top; otherwise just build a plain "_np".
# FIX(review): the nonliteral test previously called
# strchr(G("noun sem"),"_") — passing the node handle where strchr()
# expects a string. The freshly fetched node NAME, G("name"), is what
# must be tested (it is assigned in the same conjunction and used
# immediately after in group()).
@POST
 G("last") = lasteltnode(4);               # Get last noun node.
 G("noun sem") = pnsingletdown(G("last")); # Get semantic node under _noun.
 if (G("noun sem")                         # If got a semantic guy, eg "_house",
     && (G("name") = pnname(G("noun sem")))# (and get the name of the node)
     && strchr(G("name"),"_") )            # and its name is nonliteral.
 {
    group(1,4,G("name"));                  # "Promote" the sem, eg, "_house".
    group(1,1,"_np");  # Now layer on the np.  Note numbering has changed.
 }
 else
    group(1,4,"_np");        # No semantic guy, just build noun phrase.
@RULES _xNIL <- _det [s] _quan [s] _adj [s plus] _noun [s plus] @@
# PASS: De-duplicate EUI lists stored in the "bases" node variable.
# For any node carrying "bases" (skipped inside _patientID contexts), if
# the value contains pipe separators it is split on "|", filtered for
# duplicates via the user-defined FilterDuplicates(), written back with
# pnreplaceval(), and each step is traced to filtering.txt.
# NOTE(review): FilterDuplicates(...) is declared elsewhere — confirm it
# returns an array; the large commented-out alternative (recursive section
# title propagation) is retained for history.
@MULTI _ROOT _section _looseText _sentence _subsection _item @PRE <1,1> var("bases"); @CHECK if (pnname(X()) == "_patientID") { fail(); } @@CHECK # Replace pnvar bases with list of euis by splitting on pipe char. @POST if (strcontains("|", N("bases", 1))) { L("base_list") = split(N("bases", 1), "|"); "filtering.txt" << "Split bases into " << arraylength(L("base_list")) << " euis: " << L("base_list") << "\n"; L("base_list") = FilterDuplicates(L("base_list"), 0); "filtering.txt" << "Filtered list contains " << arraylength(L("base_list")) << " euis: " << L("base_list") << "\n"; pnreplaceval(N(1), "bases", L("base_list")); "filtering.txt" << "Post add variable length: " << arraylength(N("bases", 1)) << " containing " << N("bases", 1) << "\n"; "filtering.txt" << "\n"; } @@POST @RULES _xNIL <- _xANY # FOR RECURSIVE IMPLEMENTATION, REMOVE THIS # Stop list includes non-leaf nodes and common medical terms # We exclude _subsection since this may give us relevant title # _xWILD [one fails=( # _xEND # _section # _subsection # _sentence # _item # patient # )] @@ # @PRE # <1,1> var("bases"); # @POST # if (N("section_title", 1)) { # pnrpushval(N(1), "bases", N("section_title", 1)); # } # if (N("subsection", 1)) { # pnrpushval(N(1), "bases", N("subsection", 1)); # } # if (X("section_title")) { # pnrpushval(N(1), "bases", X("section_title")); # } # if (X("subsection")) { # pnrpushval(N(1), "bases", X("subsection")); # } # @RULES # _xNIL <- # _xWILD [one matches=( # _section # _sentence # _subsection # _item # _looseText # )] # @@
# PASS: Emit verb/header/text lines for list-item groups.
# Writes G("verb"), the quoted header from the enclosing _catGroup, and the
# matched group text(s) to the G("file") stream — one form for a _hiGroup
# at list-item start, another for an _iGroup followed by a _hiGroup.
@PATH _ROOT _headerZone _catGroup _liGroup @POST G("file") << G("verb") << " \"" << X("header",2) << "\" " << N("text",3) << "\n"; @RULES _xNIL <- _xSTART ### (1) _liOpen ### (2) _hiGroup ### (3) @@ @POST G("file") << G("verb") << " \"" << X("header",2) << "\" " << N("text",1) << " " << N("text",2) << "\n"; @RULES _xNIL <- _iGroup ### (1) _hiGroup [look] ### (2) @@
# PASS: Recognize internet/time expressions under _ROOT.
# Rule groups (see the in-file content index): 1) domain names and URLs,
# 2) email addresses (user/domain stored as attributes), 3a-3d) time
# ranges ("from X to Y", "between X and Y", with _colonNumeral variants),
# 4a/4b) a.m./p.m. abbreviations, 5-6) colon numerals validated as
# plausible clock times (hour 1-24, minute 0-60, second 0-60),
# 7-8) full time-of-day with meridian and optional time zone,
# 9) capital+period abbreviations, 10) alphanumeric tokens (flagged as
# both noun and adjective).
# NOTE(review): rule 4a matches only lowercase "a"/"m" while rule 4b
# matches both cases of "p"/"P" and "m"/"M", and 4a's final period is
# optional where 4b's is required — confirm whether this asymmetry is
# intentional (e.g. to avoid swallowing the initial "A.").
@NODES _ROOT ############################################### # # CONTENT INDEX # # 1. Internet domain names formed with alphanumerics # separated by dots, ending in the common domains # .com, .edu etc. # 2. email address: alphanumeric @ domain name # 3. Time ranges: from # 4. Time abbrevs: a.m., p.m. # 5. Simple colon numerals, possible time expressions # 6. Longer colon numerals # 7. Time of day expressions, # 8. More time of day expressions # 9. Abbreviations of the form U.S., U.S.A. etc. # 10. Alphanumeric expressions ############################ # (rules 1) # # Internet Domain Names # ############################ @POST single() ; @@POST @RULES _domainName <- _alphaNumeric [one] ### (1) \. [one] ### (2) _domainName [one] ### (3) @@ _domainName <- _alphaNumeric [one] ### (1) \. [one] ### (2) _xWILD [one matches=("com" "edu" "net" "gov" "org" "ca" "uk")] ### (3) @@ _domainName <- _xWILD [plus matches=(_xALPHA _xNUM)] ### (1) \. [one] ### (2) _domainName [one] ### (3) @@ _domainName <- _xWILD [plus matches=(_xALPHA _xNUM)] ### (1) \. 
[one] ### (2) _xWILD [one matches=("com" "edu" "net" "gov" "org" "ca" "uk")] ### (3) @@ @@RULES @RULES _url <- _xWILD [one matches=("http" "ftp" "gopher" "mailto" "telnet")] ### (1) \: [one] ### (2) \/ [one] ### (3) \/ [one] ### (4) _domainName [one] ### (5) @@ _url <- _url [one] ### (1) _xWILD [min=1 max=1 match=("\\" "/")] ### (2) _alphaNumeric [opt] ### (3) @@ @@RULES ############################ # (rule 2) # # Email addresses # ############################ @POST S("user name") = N("$text",1) ; S("domain name") = N("$text",3) ; single() ; @@POST @RULES _emailAddress <- _xWILD [plus matches=(_xALPHA _xNUM)] ### (1) \@ [one] ### (2) _domainName [one] ### (3) @@ @@RULES ################################################## # Times of Day & the like ################################################## ################################## # rule 3a # # from time of day to time of day# ################################## @POST S("start hour") = N("hour",3) ; S("start minute") = N("minute",3) ; S("start second") = N("second",3) ; S("start meridian") = N("meridian",3) ; S("end hour") = N("hour",7) ; S("end minute") = N("minute",7) ; S("end second") = N("second",7) ; S("end meridian") = N("meridian",7) ; S("timeZone") = N("timeZone",7) ; single() ; @@POST @RULES _timeRange <- _xWILD [star matches=("from" "From" "FROM")] ### (1) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (2) _timeOfDay [one] ### (3) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [one matches=("-" "_to" "to" "TO" "To" "through" "THROUGH")] ### (5) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (6) _timeOfDay [one] ### (7) @@ @@RULES ################################## # rule 3b # # from possible time of day # # to time of day # ################################## @POST S("start hour") = N("hour",3) ; S("start minute") = N("minute",3) ; S("start second") = N("second",3) ; S("start meridian") = N("meridian",3) ; S("end hour") = N("hour",7) ; S("end minute") = N("minute",7) ; S("end second") = 
N("second",7) ; S("end meridian") = N("meridian",7) ; S("timeZone") = N("timeZone",7) ; single() ; @@POST @RULES _timeRange <- _xWILD [opt matches=("from" "From" "FROM")] ### (1) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (2) _colonNumeral [one] ### (3) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [one matches=("-" "_to" "to" "TO" "To" "through" "THROUGH")] ### (5) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (6) _timeOfDay [one] ### (7) @@ @@RULES ###################################### # rule 3c # # between time of day and time of day# ###################################### @POST S("start hour") = N("hour",3) ; S("start minute") = N("minute",3) ; S("start second") = N("second",3) ; S("start meridian") = N("meridian",3) ; S("end hour") = N("hour",7) ; S("end minute") = N("minute",7) ; S("end second") = N("second",7) ; S("end meridian") = N("meridian",7) ; S("timeZone") = N("timeZone",7) ; single() ; @@POST @RULES _timeRange <- _xWILD [opt matches=("between" "BETWEEN" "Between")] ### (1) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (2) _timeOfDay [one] ### (3) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [one matches=("&" "and" "AND" "And")] ### (5) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (6) _timeOfDay [one] ### (7) @@ @@RULES ###################################### # rule 3d # # between time of day and time of day# ###################################### @POST S("start hour") = N("hour",3) ; S("start minute") = N("minute",3) ; S("start second") = N("second",3) ; S("start meridian") = N("meridian",3) ; S("end hour") = N("hour",7) ; S("end minute") = N("minute",7) ; S("end second") = N("second",7) ; S("end meridian") = N("meridian",7) ; S("timeZone") = N("timeZone",7) ; single() ; @@POST @RULES _timeRange <- _xWILD [opt matches=("between")] ### (1) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (2) _colonNumeral [one] ### (3) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [one matches=("&" "and")] ### (5) _xWILD [opt 
match=(_xWHITE "_whiteSpace")] ### (6) _timeOfDay [one] ### (7) @@ @@RULES ####################### # rule 4a # # time abbreviations # ####################### @POST S("meridian") = "a.m." ; single() ; @@POST @RULES _timeAbbrev <- _xWILD [one matches=("a")] ### (1) \. [one] ### (2) _xWILD [one matches=("m")] ### (3) \. [opt] ### (4) @@ @@RULES ####################### # rule 4b # # time abbreviations # ####################### @POST S("meridian") = "p.m." ; single(); @@POST @RULES _timeAbbrev <- _xWILD [one matches=("p" "P")] ### (1) \. [one] ### (2) _xWILD [one matches=("m" "M")] ### (3) \. [one] ### (4) @@ @@RULES ########################################## # rule 5 # # number colon number -- possible time of# # day expressions # ########################################## @POST S("firstlevel") = 1 ; G("num1T") = N("$text",1) ; G("num1") = num(G("num1T")); G("num2T") = N("$text",3) ; G("num2") = num(G("num2T")); if ( G("num1") > 0 && G("num1") < 25 && G("num2") >= 0 && G("num2") < 61 ) { S("hour") = N("$text",1) ; S("minute") = N("$text",3) ; S("possible time") = 1 ; } single() ; @@POST @RULES _colonNumeral <- _xNUM [one] ### (1) _xWILD [one match=(":")] ### (2) _xNUM [one] ### (3) @@ @@RULES ####################### # rule 6 # # longer colon number # # expressions - case # # with three numbers # # is possible time # # expression. 
# ####################### @POST G("secT") = N("$text",3); G("sec") = num(G("secT")); if ( N("possible time",1) == 1 && N("firstlevel",1) == 1 && G("sec") >= 0 && G("sec") <= 60 ) { S("hour") = N("hour",1) ; S("minute") = N("minute",1) ; S("second") = N("$text",3) ; S("possible time") = 1 ; } single() ; @@POST @RULES _colonNumeral <- _colonNumeral [one] ### (1) _xWILD [one match=(":")] ### (2) _xNUM [one] ### (3) @@ @@RULES #################################### # Rule 7a # # explicit time of day with abbrev# #################################### @CHECK if (N("possible time",1)) { succeed() ; } else fail() ; @@CHECK @POST S("hour") = N("hour",1) ; S("minute") = N("minute",1) ; S("second") = N("second",1) ; S("meridian") = strtolower(N("$text",3)) ; S("timeZone") = N("$text",5) ; single() ; @@POST @RULES _timeOfDay <- _colonNumeral [one] ### (1) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (2) _timeAbbrev [one] ### (3) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [opt matches=("EST" "EDT" "PST" "PDT" "CST" "CDT" "PST" "PDT" "GMT")] ### (5) @@ @@RULES #################################### # Rule 8b # # explicit time of day no periods # #################################### @CHECK if (N("possible time",1)) { succeed() ; } else fail() ; @@CHECK @POST S("hour") = N("hour",1) ; S("minute") = N("minute",1) ; S("second") = N("second",1) ; if (strequal(N("$text",3),"am")) S("meridian") = "a.m." ; if (strequal(N("$text",3),"pm")) S("meridian") = "p.m." ; if (strequal(N("$text",3),"AM")) S("meridian") = "a.m." ; if (strequal(N("$text",3),"PM")) S("meridian") = "p.m." 
; S("timeZone") = N("$text",5) ; single() ; @@POST @RULES _timeOfDay <- _colonNumeral [one] ### (1) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (2) _xWILD [one matches=("am" "pm")] ### (3) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [opt matches=("EST" "EDT" "PST" "PDT" "CST" "CDT" "PST" "PDT" "GMT")] ### (5) @@ @@RULES ############################################### # Rule 9 # # Abbreviations (capital + period alternating # ############################################### @CHECK G("word") = N("$text",2); G("L1") = strlength(G("word")); if (G("L1") > 2) fail(); @@CHECK @RULES _abbrev <- _abbrev [one] ### (1) _xALPHA [one] ### (2) \. [opt] ### (3) @@ @@RULES @CHECK G("word") = N("$text",1); G("LL") = num(strlength(G("word"))); if (G("LL") > 2) fail(); G("firstword") = N("$text",1) + N("$text",2); G("fwordConcept") = dictfindword(G("firstword")); if (G("fwordConcept") && G("LL")==2 && N("$text",3)) fail(); G("word") = N("$text",4); G("LL") = num(strlength(G("word"))); if (G("LL") > 2) fail(); @@CHECK @RULES _abbrev <- _xALPHA [one] ### (1) \. [one] ### (2) _xWILD [opt match=(_xWHITE "_whiteSpace")] ### (3) _xALPHA [one] ### (4) \. [one] ### (5) @@ @@RULES ################################ # Rule 10 # # alphanumeric # ################################ @POST S("noun") = 1; S("adjective") = 1; single(); @@POST @RULES _alphaNumeric <- _xALPHA [s one] ### (1) _xNUM [s one] ### (2) @@ _alphaNumeric <- _xNUM [s one] ### (1) _xALPHA [s one] ### (2) @@ @@RULES @POST S("noun")=1; S("adjective")=1; single(); @@POST @RULES _alphaNumeric <- _alphaNumeric [one] ### (1) _xNUM [one] ### (2) @@ _alphaNumeric <- _alphaNumeric [one] ### (1) _xALPHA [one] ### (2) @@ @@RULES
# Within each _section, recognize "Word:"-style subsection labels: a
# capitalized word plus up to 3 more alpha/numeric tokens, terminated by a
# colon. The colon (element 5) is excised, elements 2-4 are reduced to
# _subsection, and the label's first word is stored in S("subsection").
@NODES _section @POST S("subsection") = N("$text", 2); excise(5,5); singler(2,4); @RULES _subsection <- _xWILD [one matches=(_xPUNCT _xSTART _xWHITE)] ### (1) _xWILD [one matches=(_xCAP)] ### (2) _xWHITE [opt] ### (3) _xWILD [min=0 max=3 match=(_xALPHA _xNUM)] ### (4) _xWILD [one matches=(\:)] ### (5) @@
# Build _headerZone spans: a _header whose nesting level equals
# G("max header") absorbs everything up to the next _header (or end of
# input), copying level/header/pos onto the new zone. The second rule fires
# at end of input and decrements G("max header") so the pass can be
# re-applied per level; exitpass() stops once all levels are processed.
# Debug traces are written to nesting.txt.
@NODES _ROOT @CHECK if (N("level",1) == G("max header")) succeed(); fail(); @POST "nesting.txt" << "header: " << N("level",1) << " " << N("header",1) << "\n"; S("level") = N("level",1); S("header") = N("header",1); S("pos") = N("pos",1); single(); @RULES _headerZone <- _header ### (1) _xWILD [fail=(_header _xEND)] ### (2) @@ @POST G("max header")--; if (G("max header") < 1) exitpass(); "nesting.txt" << "max: " << G("max header") << "\n"; @RULES _xNIL <- _xWILD [match=(_LINE _headerZone)] _xEND @@
# Placeholder/template pass: matches a _xNIL node and performs no
# transformation. Kept as a stub for future rules.
@NODES _ROOT @RULES _xNIL <- _xNIL ### (1) @@
# Inside header zones, flag _iOpen nodes whose "value" attribute begins with
# "verbtxt" by setting N("verbtxt") = 1 on them. Each value is traced to
# debug.txt for inspection.
@PATH _ROOT _headerZone @POST "debug.txt" << "Verbtxt: " << N("value",1) << "\n"; if (strstartswith(N("value",1),"verbtxt")) { N("verbtxt",1) = 1; } @RULES _xNIL <- _iOpen ### (1) @@
# KB tutorial script: builds an "apple" concept with a "have"->"color"
# attribute and color values ("red", then "green and yellow"), then walks
# the concept's attribute/value lists with findattrs/attrname/attrvals/
# getstrval/nextattr/nextval, printing each name and value to output.txt.
# Re-run safe: any existing "apple" concept is removed first.
# Create the concept of apples, and give them the attribute have with value color. This is sort of like saying 'Apples have color.' Then we give apples the attribute color who's value is red, then the values of color green and yellow G("root") = findroot(); # look for apples in the concept hierarchy G("apple") = findconcept(G("root"),"apple"); # if you find them, kill them (to start fresh) if (G("apple")) rmconcept(G("apple")); # Apples exist G("apple") = makeconcept(G("root"),"apple"); # Apples have color addstrval(G("apple"),"have","color"); # Apple's color is red addstrval(G("apple"),"color","red"); # Apple's color is also green and yellow addstrval(G("apple"),"color","green and yellow"); # Now we access the attributes of the concept apple: # Find apple's attribute list G("attrList") = findattrs(G("apple")); # Find the first attribute's name G("attrName") = attrname(G("attrList")); # Find the list of values of the attribute G("valList") = attrvals(G("attrList")); # get the first value G("valName") = getstrval(G("valList")); # print out the first attribute's name and value if(G("attrName")){ "output.txt" << "first attribute of apple is: " << G("attrName") << "\n"; "output.txt" << "first value of that attribute is: " << G("valName") << "\n"; } # get the next attribute G("nextAttr") = nextattr(G("attrList")); # get its name G("attrName") = attrname(G("nextAttr")); if(G("attrName")){ "output.txt" << "next attribute of apple is: " << G("attrName") << "\n"; } # get the list of values of the second attribute G("valList") = attrvals(G("nextAttr")); # get the first value's name G("valName") = getstrval(G("valList")); # print it out "output.txt" << "first value of that attribute is: " << G("valName") << "\n"; # get the second value of the second attribute G("nextValName") = getstrval(nextval(G("valList"))); # print it out "output.txt" << "second value of that attribute is: " << G("nextValName") << "\n";
# Recognizes numeric range expressions in two families:
#  - "between X and Y" / "between X & Y"
#  - "X - Y", "from X to Y", "X through Y"
# over three numeral kinds: literal digits (_xNUM), cardinal words
# (_cardinalNumeral), and ordinal words (_ordinalNumeral, with optional
# "the" before each). Endpoints are stored in the array
# S("Numeral Value")[0..1]; S("MaxArrayPos") records the last index used.
################################################ # FILE: Numeric Sequences.pat # # SUBJ: Recognize numeric sequence expressions # # like 5-15, or between 5 and 15 # # AUTH: Paul Deane # # CREATED: 01/Mar/01 # DATE OF THIS VERSION: 31/Aug/01 # # Copyright ################################################ @NODES _ROOT @POST S("Numeral Value")[0] = num(N("$text",3)); S("Numeral Value")[1] = num(N("$text",7)); S("MaxArrayPos") = 1; single(); @@POST @RULES _cardinalSequence <- _xWILD [s one match=("between")] ### (1) _xWILD [s one match=(_xWHITE "_whiteSpace")] ### (2) _xNUM [s one] ### (3) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [s one match=("&" "and")] ### (5) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6) _xNUM [s one] ### (7) @@ @@RULES @POST S("Numeral Value")[0] = num(N("Numeral Value",3)); S("Numeral Value")[1] = num(N("Numeral Value",7)); S("MaxArrayPos") = 1; single(); @@POST @RULES _cardinalSequence <- _xWILD [s one match=("between")] ### (1) _xWILD [s one match=(_xWHITE "_whiteSpace")] ### (2) _cardinalNumeral [s one] ### (3) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [s one match=("&" "and")] ### (5) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6) _cardinalNumeral [s one] ### (7) @@ @@RULES @POST S("Numeral Value")[0] = num(N("Numeral Value",5)); S("Numeral Value")[1] = num(N("Numeral Value",11)); S("MaxArrayPos") = 1; single(); @@POST @RULES _ordinalSequence <- _xWILD [s one match=("between")] ### (1) _xWILD [s one match=(_xWHITE "_whiteSpace")] ### (2) the [s opt] ### (3) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4) _ordinalNumeral [s one] ### (5) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6) _xWILD [s one match=("&" "and")] ### (7) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (8) the [s opt] ### (9) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (10) _ordinalNumeral [s one] ### (11) @@ @@RULES @POST S("Numeral Value")[0] = num(N("$text",3)); S("Numeral Value")[1] = num(N("$text",7)); 
S("MaxArrayPos") = 1; single(); @@POST @RULES _cardinalSequence <- _xWILD [s opt match=("from")] ### (1) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (2) _xNUM [s one] ### (3) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [s one match=("-" "to" "through")] ### (5) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6) _xNUM [s one] ### (7) @@ @@RULES @POST S("Numeral Value")[0] = num(N("Numeral Value",3)); S("Numeral Value")[1] = num(N("Numeral Value",7)); S("MaxArrayPos") = 1; single(); @@POST @RULES _cardinalSequence <- _xWILD [s opt match=("from")] ### (1) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (2) _cardinalNumeral [s one] ### (3) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4) _xWILD [s one match=("to" "through")] ### (5) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6) _cardinalNumeral [s one] ### (7) @@ @@RULES @POST S("Numeral Value")[0] = num(N("Numeral Value",5)); S("Numeral Value")[1] = num(N("Numeral Value",11)); S("MaxArrayPos") = 1; single(); @@POST @RULES _ordinalSequence <- _xWILD [s opt match=("from")] ### (1) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (2) the [s opt] ### (3) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (4) _ordinalNumeral [s one] ### (5) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (6) _xWILD [s one match=("-" "to" "through")] ### (7) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (8) the [s opt] ### (9) _xWILD [s opt match=(_xWHITE "_whiteSpace")] ### (10) _ordinalNumeral [s one] ### (11) @@ @@RULES
# "Phrases" pass stub: matches a node named _x and performs no
# transformation. NOTE(review): "_x" looks like a truncated special token
# (e.g. _xWILD or _xALPHA) -- confirm intended match.
@NODES _ROOT # Phrases @RULES _xNIL <- _x @@
# Gazetteer: recognize US state names and postal abbreviations (plus
# military codes AA/AE/AP, territories like GU/AS/PR, and extras such as
# "Mass" and "NSW") as _PostalState. @PRE cap() requires the matched token
# to be capitalized, filtering out lowercase words like "in" or "or".
@NODES _LINE @PRE <1,1> cap(); @RULES # Ex: Micronesia _PostalState <- _xWILD [min=1 max=1 s match=("Micronesia" "AE" "AK" "AL" "Alabama" "Alaska" "AP" "AR" "Arizona" "Arkansas" "AS" "AZ" "CA" "California" "CO" "Colorado" "Columbia" "Connecticut" "CT" "Dakota" "DC" "DE" "Delaware" "FL" "Florida" "FM" "GA" "Georgia" "GU" "Guam" "Hawaii" "HI" "IA" "ID" "Idaho" "IL" "Illinois" "IN" "Indiana" "Iowa" "Kansas" "Kentucky" "KS" "KY" "LA" "Louisiana" "MA" "Maine" "Maryland" "Massachusetts" "MD" "ME" "MH" "MI" "Michigan" "AA" "Minnesota" "Mississippi" "Missouri" "MN" "MO" "Montana" "MP" "MS" "MT" "NC" "ND" "NE" "Nebraska" "Nevada" "NH" "NJ" "NM" "NV" "NY" "OH" "Ohio" "OK" "Oklahoma" "OR" "Oregon" "Orpalau" "PA" "Pennsylvania" "PR" "PW" "RI" "Samoa" "SC" "SD" "Tennessee" "Texas" "TN" "TX" "UT" "Utah" "VA" "Vermont" "VI" "Virginia" "VT" "WA" "Washington" "WI" "Wisconsin" "WV" "WY" "Wyoming" "Mass" "NSW")] @@
# Inside a _tdOpen (<td ...>) zone, capture rowspan="N" attributes: the
# numeric value is stored on the enclosing _tdOpen context node via
# X("rowspan",2), and the matched tokens reduce to _rowspan.
@PATH _ROOT _tdOpen @POST X("rowspan",2) = N("$text",4); single(); @RULES _rowspan <- rowspan ### (1) \= ### (2) \" ### (3) _xNUM ### (4) \" ### (5) @@
# Snippet: fetch the float value of a concept's named attribute (the
# attribute's first value, per the original note).
# Fetch float value of attribute (must be first). L("return_flt") = fltval(L("con"), L("name"));
# Within each _experienceInstance, remove the _expStart marker node:
# splice(1,1) replaces the node with its children, effectively deleting it.
@PATH _ROOT _experienceZone _experienceInstance @POST splice(1,1); # Zap the _expStart node @RULES _xNIL <- _expStart @@
# Glom "Caps PREP [the] Caps" phrases (e.g. "Chicken of the Sea",
# "District of Columbia") into a single _Caps. The first (dominant) caps
# node's classification fields (hi class/conf and the per-class
# confidences) are copied onto the new node; S("capofcap") = 1 marks the
# construction. The second rule accepts any trailing _xCAP token, but only
# with a literal "the" article.
@NODES _LINE # Merge caps. @POST # Not a "glommed" caps, since first is dominant. S("capofcap") = 1; # Flag what this is. # 12/30/99 AM. # Should transfer from 1st caps (dominant) to new list. S("hi class") = N("hi class",1); S("hi conf") = N("hi conf",1); S("ambigs") = N("ambigs"); # etc. S("humanname conf") = N("humanname conf",1); S("company conf") = N("company conf",1); S("field conf") = N("field conf",1); S("job conf") = N("job conf",1); S("school conf") = N("school conf",1); # This will enable reasoning about how to glom lists. # merge() single(); @RULES _Caps [unsealed] <- _Caps _xWHITE [s star] _posPREP [s] _xWHITE [s star] the [s opt] _xWHITE [s star] _Caps [s] @@ @POST S("capofcap") = 1; # Flag what this is. # 12/30/99 AM. # Should transfer from 1st caps (dominant) to new list. S("hi class") = N("hi class",1); S("hi conf") = N("hi conf",1); S("humanname conf") = N("humanname conf",1); S("company conf") = N("company conf",1); S("field conf") = N("field conf",1); S("job conf") = N("job conf",1); S("school conf") = N("school conf",1); # This will enable reasoning about how to glom lists. # merge() single(); @RULES # Chicken of the Sea. # District of Columbia. _Caps [unsealed] <- _Caps _xWHITE [s star] _posPREP [s] _xWHITE [s star] the [s opt] # ONLY CARE ABOUT 'the', for now. # 12/22/99 AM. _xWHITE [s star] _xCAP [s] @@
# Flag header/title zones in tag-delimited text: for _TEXTZONE runs between
# open/close tag pairs (_title.._Etitle, _ha.._Eha ... _hx.._Ehx), set
# "header" = 1 on the zone group; the first title's tree text is captured
# into G("title"). The @CODE section is a no-op initializer.
@CODE L("hello") = 0; @@CODE @NODES _ROOT @POST if (!G("title")) G("title") = N("$treetext",2); N("header",2) = 1; # Flag header zone. @RULES _xNIL <- _title _xWILD [plus gp=_TEXTZONE] _Etitle @@ _xNIL <- _ha _xWILD [plus gp=_TEXTZONE] _Eha @@ _xNIL <- _hb _xWILD [plus gp=_TEXTZONE] _Ehb @@ _xNIL <- _hc _xWILD [plus gp=_TEXTZONE] _Ehc @@ _xNIL <- _hd _xWILD [plus gp=_TEXTZONE] _Ehd @@ _xNIL <- _he _xWILD [plus gp=_TEXTZONE] _Ehe @@ _xNIL <- _hf _xWILD [plus gp=_TEXTZONE] _Ehf @@ _xNIL <- _hx _xWILD [plus gp=_TEXTZONE] _Ehx @@
# Anaphora-resolution helper functions:
#  DispKB          -- dump the person and sentence KB subtrees.
#  AddPerson       -- record a name under a sentence concept with pos and
#                     gender; nouns additionally get a unique entry under
#                     G("person") linked via a "person" attribute.
#  ResolvePronoun  -- walk backward through prior sentence concepts looking
#                     for a noun whose gender matches the pronoun's; link it
#                     via "person". Returns 1 on success, 0 otherwise.
#  LookupPerson    -- dereference a pronoun to its resolved person concept,
#                     then return that concept's "person" value.
#  AddRelationship -- link the two resolved persons as mutual "spouse".
#  AddAttribute    -- copy "age"/"is" values from a parse node onto the
#                     resolved person concept.
# Debug traces go to anaphora.txt. Uses AddUniqueCon/AddUniqueStr, which are
# declared elsewhere in the project.
@DECL DispKB() { DisplayKB(G("person"),1); DisplayKB(G("sentences"),1); } AddPerson(L("sent con"),L("name"),L("gender"),L("pos")) { L("sent") = makeconcept(L("sent con"),L("name")); addstrval(L("sent"),"pos",L("pos")); addstrval(L("sent"),"gender",L("gender")); if (L("pos") == "noun") { L("person") = AddUniqueCon(G("person"),L("name")); addconval(L("sent"),"person",L("person")); AddUniqueStr(L("person"),"gender",L("gender")); } return L("sent"); } ResolvePronoun(L("sent con"),L("pro con")) { if (L("pro con")) { "anaphora.txt" << conceptpath(L("sent con")) << "\n"; L("sent") = prev(L("sent con")); L("pro gender") = strval(L("pro con"),"gender"); while (L("sent")) { if (down(L("sent"))) { "anaphora.txt" << conceptpath(L("sent")) << "\n"; L("child") = down(L("sent")); while (L("child")) { if (strval(L("child"),"pos") == "noun" && strval(L("child"),"gender") == L("pro gender")) { addconval(L("pro con"),"person",L("child")); "anaphora.txt" << conceptname(L("pro con")) << " == " << conceptname(L("child")) << "\n"; return 1; } L("child") = next(L("child")); } } L("sent") = prev(L("sent")); } } return 0; } LookupPerson(L("con")) { if (L("con")) { if (strval(L("con"),"pos") == "pro") { L("con") = conval(L("con"),"person"); } } return conval(L("con"),"person"); } AddRelationship(L("con 1"),L("con 2"),L("type")) { L("person 1") = LookupPerson(L("con 1")); L("person 2") = LookupPerson(L("con 2")); addconval(L("person 1"),"spouse",L("person 2")); addconval(L("person 2"),"spouse",L("person 1")); } AddAttribute(L("person con"),L("attr node")) { L("person") = LookupPerson(L("person con")); AddUniqueStr(L("person"),"age",pnvar(L("attr node"),"age")); AddUniqueStr(L("person"),"is",pnvar(L("attr node"),"is")); } @@DECL
# Strip whitespace tokens at every listed tree level (_ROOT, _section,
# _sentence, _subsection): excise(1,1) deletes the node and noop()
# suppresses any reduction.
@MULTI _ROOT _section _sentence _subsection @POST excise(1,1); noop(); @RULES _xNIL <- _xWHITE [s] ### (1) @@
# Dump the street-suffix KB: for each standard-suffix concept under
# G("suffix"), write every variant spelling (its children) with the USPS
# standard form to street-suffix.dict, then save the hierarchy (2 levels)
# to street-suffix.kbb.
@CODE L("suffix") = down(G("suffix")); while (L("suffix")) { L("suf") = down(L("suffix")); while (L("suf")) { "street-suffix.dict" << conceptname(L("suf")) << " usps=" << strval(L("suffix"),"standard") << "\n"; L("suf") = next(L("suf")); } L("suffix") = next(L("suffix")); } SaveKB("street-suffix.kbb",G("suffix"),2); @@CODE
# Helper functions:
#  ItemValue -- log a value to value.txt; a bare "," is treated as empty
#               (returns 0), otherwise the value is returned unchanged.
#  NthHeader -- return the name of the Nth child concept under G("headers"),
#               or 0 if out of range. NOTE(review): the counter L("c")++
#               relies on an unset local starting at 0 -- confirm this is
#               the engine's default for uninitialized locals.
@DECL ItemValue(L("value")) { "value.txt" << L("value") << "\n"; if (L("value") == ",") { return 0; } return L("value"); } NthHeader(L("num")) { L("header") = down(G("headers")); while (L("header")) { if (L("c")++ == L("num")) { return conceptname(L("header")); } L("header") = next(L("header")); } return 0; } @@DECL
# Within sentences: combine a _money node with a following _number
# multiplier (e.g. "$5 million") into one _money whose "value" is the
# product of the two; an optional trailing _currency token is absorbed.
# Also tag "in cash" and "in assumed debt" phrases as _asset.
@PATH _ROOT _paragraph _sentence @POST S("value") = num(N("value",1)) * num(N("numeric",2)); single(); @RULES _money <- _money ### (1) _number ### (2) _currency [opt] ### (3) @@ @RULES _asset <- in [s] ### (1) cash ### (2) @@ @RULES _asset <- in [s] ### (1) assumed ### (2) debt ### (3) @@
# Snippet: extract the concept stored in a KB value object.
# Fetch concept from value. L("return_con") = getconval(L("val"));
# Snippet: attach a new attribute (with no value yet) to a concept.
# Add an attribute with no value to given concept. L("return_attr") = addattr(L("con"), L("attr_s"));
# Build _SchoolType (layered as _Caps) from modifier + school-word
# patterns, e.g. "High School", "State University" (via _SchoolRoot),
# "Art Academy", "Bible Institute", "Theological Seminary". S("len") = 2
# records the two-content-word phrase length. NOTE(review): the final
# rule's modifier list overlaps earlier _SchoolRoot rules (State, Bible,
# Community, ...) -- likely redundant but left as-is.
# CHANGED _SchoolName => _SchoolType # @NODES _LINE @POST S("len") = 2; single(); @RULES _SchoolType [layer=(_Caps)] <- _xWILD [one s match=(High Graduate Postgraduate Music Law Regional)] _xWHITE [s star] School [s t] @@ _SchoolType [layer=(_Caps)] <- _xWILD [one s match=(Christian State Tech Hebrew)] _xWHITE [s star] _SchoolRoot [s] @@ _SchoolType [layer=(_Caps)] <- _xWILD [one s match=(State Bible Community Western Union Southern Baptist Methodist _PostalState Polytechnical Christian Memorial Central Mountain New Technical)] _xWHITE [s star] _SchoolRoot [s] @@ _SchoolType [layer=(_Caps)] <- _xWILD [one s match=(Art)] _xWHITE [s star] Academy [s t] @@ _SchoolType [layer=(_Caps)] <- _xWILD [one s match=(Bible Technical _PostalState Graduate Polytechnic Art Military)] _xWHITE [s star] Institute [s t] @@ _SchoolType [layer=(_Caps)] <- _xWILD [one s match=(Theological Lutheran)] _xWHITE [s star] Seminary [s t] @@ _SchoolType [layer=(_Caps)] <- _xWILD [s one matches=( State Polytechnic Graduate Technical Community Junior Bible Theological )] _xWHITE [s star] _SchoolRoot [s] @@
# Gazetteer: recognize the 100 most common US surnames (capitalized, per
# @PRE cap()) as _surName, layered as _humanNamepart.
@NODES _LINE @PRE <1,1> cap(); @RULES # Ex: Stewart _surName [layer=(_humanNamepart )] <- _xWILD [min=1 max=1 s match=("Stewart" "Johnson" "Williams" "Jones" "Brown" "Davis" "Miller" "Wilson" "Moore" "Taylor" "Anderson" "Thomas" "Jackson" "White" "Harris" "Martin" "Thompson" "Garcia" "Martinez" "Robinson" "Clark" "Rodriguez" "Lewis" "Lee" "Walker" "Hall" "Allen" "Young" "Hernandez" "King" "Wright" "Lopez" "Hill" "Scott" "Green" "Adams" "Baker" "Gonzalez" "Nelson" "Carter" "Mitchell" "Perez" "Roberts" "Turner" "Phillips" "Campbell" "Parker" "Evans" "Edwards" "Collins" "Smith" "Sanchez" "Morris" "Rogers" "Reed" "Cook" "Morgan" "Bell" "Murphy" "Bailey" "Rivera" "Cooper" "Richardson" "Cox" "Howard" "Ward" "Torres" "Peterson" "Gray" "Ramirez" "James" "Watson" "Brooks" "Kelly" "Sanders" "Price" "Bennett" "Wood" "Barnes" "Ross" "Henderson" "Coleman" "Jenkins" "Perry" "Powell" "Long" "Patterson" "Hughes" "Flores" "Washington" "Butler" "Simmons" "Foster" "Gonzales" "Bryant" "Alexander" "Russell" "Griffin" "Diaz" "Hayes")] @@
# Track curly-brace nesting depth in G("curly"): each "{" increments the
# counter and records its depth in N("curly level"); each "}" records the
# current depth BEFORE decrementing, so a "}" carries the same level as its
# matching "{" (used by later passes, e.g. the \textbf/\textit bracket
# checks).
@NODES _ROOT @POST G("curly") = G("curly") + 1; N("curly level") = G("curly"); @RULES _xNIL <- \{ ### (1) @@ @POST N("curly level") = G("curly"); G("curly") = G("curly") - 1; @RULES _xNIL <- \} ### (1) @@
# Ensure the "languages" concept exists under the KB root and cache it in
# G("languages") for later passes.
@CODE G("languages") = getconcept(findroot(),"languages"); @@CODE
# Map backslash escape sequences to literal-character nodes (_cLF, _cCR,
# _cHT, ... _cTILDE), each layered under _LIT. rfaname(2) names the
# reduction after element 2 where applicable; [ren=...] renames the escape
# letter to the actual control character. The final rule normalizes CRLF
# line endings by excising the CR. See the inline note about the
# single-vs-double backslash retokenization ambiguity.
# Since RFB rules are hashed, don't need sentinel. #@POST # noop() #@RULES #_xNIL <- _xWILD [fail=(\\)] @@ @POST rfaname(2) single() @RULES # For efficiency, some rules are triggered on backslash, while # others will be triggered on elt following backslash. The latter # will always match after the former. _cLF [base layer=(_LIT )] <- \\ n [ren=\n] @@ _cCR [base layer=(_LIT )] <- \\ r [ren=\r] @@ _cHT [base layer=(_LIT )] <- \\ t [ren=\t] @@ _cLANGLE [base layer=(_LIT )] <- \\ \< @@ _cPOUND [base layer=(_LIT )] <- \\ \# @@ _cDQUOTE [base layer=(_LIT )] <- \\ \" @@ _cATSIGN [base layer=(_LIT )] <- \\ \@ @@ _cLPAR [base layer=(_LIT )] <- \\ \( @@ _cRPAR [base layer=(_LIT )] <- \\ \) @@ _cCOMMA [base layer=(_LIT )] <- \\ \, @@ _cSEMICOLON [base layer=(_LIT )] <- \\ \; @@ _cEQUAL [base layer=(_LIT )] <- \\ \= @@ _cLBRACKET [base layer=(_LIT )] <- \\ \[ @@ _cRBRACKET [base layer=(_LIT )] <- \\ \] @@ _cUNDERSCORE [base layer=(_LIT )] <- \\ \_ @@ _cDASH [base layer=(_LIT )] <- \\ \- @@ _cSPACE [base layer=(_LIT )] <- \\ \ @@ _cRANGLE [base layer=(_LIT )] <- \\ \> @@ _cBEL [base layer=(_LIT )] <- \\ a [ren=\a] @@ _cBS [base layer=(_LIT )] <- \\ b [ren=\b] @@ _cFF [base layer=(_LIT )] <- \\ f [ren=\f] @@ _cVT [base layer=(_LIT )] <- \\ v [ren=\v] @@ _cSQUOTE [base layer=(_LIT )] <- \\ \' @@ _cQMARK [base layer=(_LIT )] <- \\ \? @@ _cBANG [base layer=(_LIT )] <- \\ \! @@ _cDOLLAR [base layer=(_LIT )] <- \\ \$ @@ _cPERCENT [base layer=(_LIT )] <- \\ \% @@ _cAMPERSAND [base layer=(_LIT )] <- \\ \& @@ _cASTERISK [base layer=(_LIT )] <- \\ \* @@ _cPLUS [base layer=(_LIT )] <- \\ \+ @@ _cPERIOD [base layer=(_LIT )] <- \\ \. 
@@ _cSLASH [base layer=(_LIT )] <- \\ \/ @@ _cCOLON [base layer=(_LIT )] <- \\ \: @@ _cCARET [base layer=(_LIT )] <- \\ \^ @@ _cBACKQUOTE [base layer=(_LIT )] <- \\ \` @@ _cLBRACE [base layer=(_LIT )] <- \\ \{ @@ _cRBRACE [base layer=(_LIT )] <- \\ \} @@ _cVBAR [base layer=(_LIT )] <- \\ \| @@ _cTILDE [base layer=(_LIT )] <- \\ \~ @@ # Problem is that single backslash and double backslash get reduced to the # same thing -- a single backslash. # # But things like "abc\ndef" need to get retokenized later, to capture \n. # But if it's "abc\\ndef", that shouldn't get retokenized! _cBSLASH [base layer=(_LIT )] <- \\ \\ @@ @POST excise(1,1) @RULES _xNIL <- \r \n @@
# Promote remaining address-like nodes (_phoneNumber, _cityStateZip,
# _addressLine, _cityState) to _addressPart; for _cityState the city and
# state fields are lifted onto the new node.
# Convert any unassigned phone numbers to address parts. @NODES _LINE @RULES _addressPart <- _phoneNumber @@ _addressPart <- _cityStateZip @@ _addressPart <- _addressLine @@ @POST S("city") = N("city"); S("state") = N("state"); single(); @RULES _addressPart <- _cityState @@
# Gazetteer: recognize country names (plus some regions and organizations
# such as NATO, EU, USSR) as _countryPhrase, layered as _Caps. Multi-word
# names allow interior whitespace (_xWHITE [star s]); dotted abbreviations
# (U.S., U.K., P.R.C., ...) are matched letter-by-letter. @PRE cap()
# constraints require each content word to be capitalized.
@NODES _LINE @PRE <1,1> cap(); @RULES # Ex: Afghanistan _countryPhrase [layer=(_Caps )] <- Afghanistan [s] @@ # Ex: Albania _countryPhrase [layer=(_Caps )] <- Albania [s] @@ # Ex: Algeria _countryPhrase [layer=(_Caps )] <- Algeria [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: American\_Samoa _countryPhrase [layer=(_Caps )] <- American [s] _xWHITE [star s] Samoa [s] @@ @PRE <1,1> cap(); @RULES # Ex: Andorra _countryPhrase [layer=(_Caps )] <- Andorra [s] @@ # Ex: Angola _countryPhrase [layer=(_Caps )] <- Angola [s] @@ # Ex: Anguilla _countryPhrase [layer=(_Caps )] <- Anguilla [s] @@ # Ex: Antarctica _countryPhrase [layer=(_Caps )] <- Antarctica [s] @@ @PRE <1,1> cap(); <5,5> cap(); @RULES # Ex: Antigua\_and\_Barbuda _countryPhrase [layer=(_Caps )] <- Antigua [s] _xWHITE [star s] and [s] _xWHITE [star s] Barbuda [s] @@ @PRE <1,1> cap(); @RULES # Ex: Argentina _countryPhrase [layer=(_Caps )] <- Argentina [s] @@ # Ex: Armenia _countryPhrase [layer=(_Caps )] <- Armenia [s] @@ # Ex: Aruba _countryPhrase [layer=(_Caps )] <- Aruba [s] @@ @PRE <1,1> cap(); <5,5> cap(); <7,7> cap(); @RULES # Ex: Ashmore\_and\_Cartier\_Islands _countryPhrase [layer=(_Caps )] <- Ashmore [s] _xWHITE [star s] and [s] _xWHITE [star s] Cartier [s] _xWHITE [star s] Islands [s] @@ @PRE <1,1> cap(); @RULES # Ex: Australia _countryPhrase [layer=(_Caps )] <- Australia [s] @@ # Ex: Austria _countryPhrase [layer=(_Caps )] <- Austria [s] @@ # Ex: Azerbaijan _countryPhrase [layer=(_Caps )] <- Azerbaijan [s] @@ # Ex: Bahamas _countryPhrase [layer=(_Caps )] <- Bahamas [s] @@ # Ex: Bahrain _countryPhrase [layer=(_Caps )] <- Bahrain [s] @@ # Ex: Bangladesh _countryPhrase [layer=(_Caps )] <- Bangladesh [s] @@ # Ex: Barbados _countryPhrase [layer=(_Caps )] <- Barbados [s] @@ # Ex: Belarus _countryPhrase [layer=(_Caps )] <- Belarus [s] @@ # Ex: Belgium _countryPhrase [layer=(_Caps )] <- Belgium [s] @@ # Ex: Belize _countryPhrase [layer=(_Caps )] <- Belize [s] @@ # Ex: Benin _countryPhrase [layer=(_Caps )] <- 
Benin [s] @@ # Ex: Bermuda _countryPhrase [layer=(_Caps )] <- Bermuda [s] @@ # Ex: Bhutan _countryPhrase [layer=(_Caps )] <- Bhutan [s] @@ # Ex: Bolivia _countryPhrase [layer=(_Caps )] <- Bolivia [s] @@ # Ex: Bosnia _countryPhrase [layer=(_Caps )] <- Bosnia [s] @@ # Ex: Botswana _countryPhrase [layer=(_Caps )] <- Botswana [s] @@ # Ex: Brazil _countryPhrase [layer=(_Caps )] <- Brazil [s] @@ # Ex: Brunei _countryPhrase [layer=(_Caps )] <- Brunei [s] @@ # Ex: Bulgaria _countryPhrase [layer=(_Caps )] <- Bulgaria [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Burkina\_Faso _countryPhrase [layer=(_Caps )] <- Burkina [s] _xWHITE [star s] Faso [s] @@ @PRE <1,1> cap(); @RULES # Ex: Burundi _countryPhrase [layer=(_Caps )] <- Burundi [s] @@ # Ex: Cambodia _countryPhrase [layer=(_Caps )] <- Cambodia [s] @@ # Ex: Cameroon _countryPhrase [layer=(_Caps )] <- Cameroon [s] @@ # Ex: Canada _countryPhrase [layer=(_Caps )] <- Canada [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Cape\_Verde _countryPhrase [layer=(_Caps )] <- Cape [s] _xWHITE [star s] Verde [s] @@ # Ex: Cayman\_Islands _countryPhrase [layer=(_Caps )] <- Cayman [s] _xWHITE [star s] Islands [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: Central\_African\_Republic _countryPhrase [layer=(_Caps )] <- Central [s] _xWHITE [star s] African [s] _xWHITE [star s] Republic [s] @@ @PRE <1,1> cap(); @RULES # Ex: Chad _countryPhrase [layer=(_Caps )] <- Chad [s] @@ # Ex: Chile _countryPhrase [layer=(_Caps )] <- Chile [s] @@ # Ex: China _countryPhrase [layer=(_Caps )] <- China [s] @@ # Ex: Colombia _countryPhrase [layer=(_Caps )] <- Colombia [s] @@ # Ex: Comoros _countryPhrase [layer=(_Caps )] <- Comoros [s] @@ # Ex: Congo _countryPhrase [layer=(_Caps )] <- Congo [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Costa\_Rica _countryPhrase [layer=(_Caps )] <- Costa [s] _xWHITE [star s] Rica [s] @@ @PRE <1,1> cap(); <5,5> cap(); @RULES # Ex: Cote\_d'Ivoire _countryPhrase [layer=(_Caps )] <- Cote [s] _xWHITE 
[star s] d [s] \' [s] Ivoire [s] @@ @PRE <1,1> cap(); @RULES # Ex: Croatia _countryPhrase [layer=(_Caps )] <- Croatia [s] @@ # Ex: Cuba _countryPhrase [layer=(_Caps )] <- Cuba [s] @@ # Ex: Cyprus _countryPhrase [layer=(_Caps )] <- Cyprus [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Czech\_Republic _countryPhrase [layer=(_Caps )] <- Czech [s] _xWHITE [star s] Republic [s] @@ @PRE <1,1> cap(); @RULES # Ex: Czechoslovakia _countryPhrase [layer=(_Caps )] <- Czechoslovakia [s] @@ # Ex: Denmark _countryPhrase [layer=(_Caps )] <- Denmark [s] @@ # Ex: Deutschland _countryPhrase [layer=(_Caps )] <- Deutschland [s] @@ # Ex: Dominica _countryPhrase [layer=(_Caps )] <- Dominica [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Dominican\_Republic _countryPhrase [layer=(_Caps )] <- Dominican [s] _xWHITE [star s] Republic [s] @@ # Ex: East\_Timor _countryPhrase [layer=(_Caps )] <- East [s] _xWHITE [star s] Timor [s] @@ @PRE <1,1> cap(); @RULES # Ex: Ecuador _countryPhrase [layer=(_Caps )] <- Ecuador [s] @@ # Ex: Egypt _countryPhrase [layer=(_Caps )] <- Egypt [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: El\_Salvador _countryPhrase [layer=(_Caps )] <- El [s] _xWHITE [star s] Salvador [s] @@ @PRE <1,1> cap(); @RULES # Ex: England _countryPhrase [layer=(_Caps )] <- England [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Equatorial\_Guinea _countryPhrase [layer=(_Caps )] <- Equatorial [s] _xWHITE [star s] Guinea [s] @@ @PRE <1,1> cap(); @RULES # Ex: Eritrea _countryPhrase [layer=(_Caps )] <- Eritrea [s] @@ # Ex: Estonia _countryPhrase [layer=(_Caps )] <- Estonia [s] @@ # Ex: Ethiopia _countryPhrase [layer=(_Caps )] <- Ethiopia [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: European\_Union _countryPhrase [layer=(_Caps )] <- European [s] _xWHITE [star s] Union [s] @@ # Ex: Faroe\_Islands _countryPhrase [layer=(_Caps )] <- Faroe [s] _xWHITE [star s] Islands [s] @@ @PRE <1,1> cap(); @RULES # Ex: Fiji _countryPhrase [layer=(_Caps )] <- Fiji [s] @@ # Ex: Finland 
_countryPhrase [layer=(_Caps )] <- Finland [s] @@ # Ex: France _countryPhrase [layer=(_Caps )] <- France [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: French\_Polynesia _countryPhrase [layer=(_Caps )] <- French [s] _xWHITE [star s] Polynesia [s] @@ @PRE <1,1> cap(); @RULES # Ex: Gabon _countryPhrase [layer=(_Caps )] <- Gabon [s] @@ # Ex: Gambia _countryPhrase [layer=(_Caps )] <- Gambia [s] @@ # Ex: Georgia _countryPhrase [layer=(_Caps )] <- Georgia [s] @@ # Ex: Germany _countryPhrase [layer=(_Caps )] <- Germany [s] @@ # Ex: Ghana _countryPhrase [layer=(_Caps )] <- Ghana [s] @@ # Ex: Greece _countryPhrase [layer=(_Caps )] <- Greece [s] @@ # Ex: Greenland _countryPhrase [layer=(_Caps )] <- Greenland [s] @@ # Ex: Grenadines _countryPhrase [layer=(_Caps )] <- Grenadines [s] @@ # Ex: Guinea _countryPhrase [layer=(_Caps )] <- Guinea [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Guinea-Bissau _countryPhrase [layer=(_Caps )] <- Guinea [s] \- [s] Bissau [s] @@ @PRE <1,1> cap(); @RULES # Ex: Guyana _countryPhrase [layer=(_Caps )] <- Guyana [s] @@ # Ex: Haiti _countryPhrase [layer=(_Caps )] <- Haiti [s] @@ # Ex: Herzegovina _countryPhrase [layer=(_Caps )] <- Herzegovina [s] @@ # Ex: Honduras _countryPhrase [layer=(_Caps )] <- Honduras [s] @@ # Ex: Hungary _countryPhrase [layer=(_Caps )] <- Hungary [s] @@ # Ex: Iceland _countryPhrase [layer=(_Caps )] <- Iceland [s] @@ # Ex: India _countryPhrase [layer=(_Caps )] <- India [s] @@ # Ex: Indonesia _countryPhrase [layer=(_Caps )] <- Indonesia [s] @@ # Ex: Iran _countryPhrase [layer=(_Caps )] <- Iran [s] @@ # Ex: Iraq _countryPhrase [layer=(_Caps )] <- Iraq [s] @@ # Ex: Ireland _countryPhrase [layer=(_Caps )] <- Ireland [s] @@ # Ex: Israel _countryPhrase [layer=(_Caps )] <- Israel [s] @@ # Ex: Italy _countryPhrase [layer=(_Caps )] <- Italy [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Ivory\_Coast _countryPhrase [layer=(_Caps )] <- Ivory [s] _xWHITE [star s] Coast [s] @@ @PRE <1,1> cap(); @RULES # Ex: Jamaica 
_countryPhrase [layer=(_Caps )] <- Jamaica [s] @@ # Ex: Japan _countryPhrase [layer=(_Caps )] <- Japan [s] @@ # Ex: Jordan _countryPhrase [layer=(_Caps )] <- Jordan [s] @@ # Ex: Kazakhstan _countryPhrase [layer=(_Caps )] <- Kazakhstan [s] @@ # Ex: Kenya _countryPhrase [layer=(_Caps )] <- Kenya [s] @@ # Ex: Kiribati _countryPhrase [layer=(_Caps )] <- Kiribati [s] @@ # Ex: Korea _countryPhrase [layer=(_Caps )] <- Korea [s] @@ # Ex: Kuwait _countryPhrase [layer=(_Caps )] <- Kuwait [s] @@ # Ex: Kyrgyzstan _countryPhrase [layer=(_Caps )] <- Kyrgyzstan [s] @@ # Ex: Laos _countryPhrase [layer=(_Caps )] <- Laos [s] @@ # Ex: Latvia _countryPhrase [layer=(_Caps )] <- Latvia [s] @@ # Ex: Lesotho _countryPhrase [layer=(_Caps )] <- Lesotho [s] @@ # Ex: Liberia _countryPhrase [layer=(_Caps )] <- Liberia [s] @@ # Ex: Libya _countryPhrase [layer=(_Caps )] <- Libya [s] @@ # Ex: Liechtenstein _countryPhrase [layer=(_Caps )] <- Liechtenstein [s] @@ # Ex: Lithuania _countryPhrase [layer=(_Caps )] <- Lithuania [s] @@ # Ex: Madagascar _countryPhrase [layer=(_Caps )] <- Madagascar [s] @@ # Ex: Malawi _countryPhrase [layer=(_Caps )] <- Malawi [s] @@ # Ex: Malaysia _countryPhrase [layer=(_Caps )] <- Malaysia [s] @@ # Ex: Maldives _countryPhrase [layer=(_Caps )] <- Maldives [s] @@ # Ex: Mali _countryPhrase [layer=(_Caps )] <- Mali [s] @@ # Ex: Malta _countryPhrase [layer=(_Caps )] <- Malta [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Marshall\_Islands _countryPhrase [layer=(_Caps )] <- Marshall [s] _xWHITE [star s] Islands [s] @@ @PRE <1,1> cap(); @RULES # Ex: Martinique _countryPhrase [layer=(_Caps )] <- Martinique [s] @@ # Ex: Mauritania _countryPhrase [layer=(_Caps )] <- Mauritania [s] @@ # Ex: Mauritius _countryPhrase [layer=(_Caps )] <- Mauritius [s] @@ # Ex: Micronesia _countryPhrase [layer=(_Caps )] <- Micronesia [s] @@ # Ex: Moldova _countryPhrase [layer=(_Caps )] <- Moldova [s] @@ # Ex: Mongolia _countryPhrase [layer=(_Caps )] <- Mongolia [s] @@ # Ex: Montenegro 
_countryPhrase [layer=(_Caps )] <- Montenegro [s] @@ # Ex: Montserrat _countryPhrase [layer=(_Caps )] <- Montserrat [s] @@ # Ex: Morocco _countryPhrase [layer=(_Caps )] <- Morocco [s] @@ # Ex: Mozambique _countryPhrase [layer=(_Caps )] <- Mozambique [s] @@ # Ex: Myanmar _countryPhrase [layer=(_Caps )] <- Myanmar [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Nagorno-Karabakh _countryPhrase [layer=(_Caps )] <- Nagorno [s] \- [s] Karabakh [s] @@ @PRE <1,1> cap(); @RULES # Ex: Namibia _countryPhrase [layer=(_Caps )] <- Namibia [s] @@ # Ex: NATO _countryPhrase [layer=(_Caps )] <- NATO [s] @@ # Ex: Nauru _countryPhrase [layer=(_Caps )] <- Nauru [s] @@ # Ex: Nepal _countryPhrase [layer=(_Caps )] <- Nepal [s] @@ # Ex: Netherlands _countryPhrase [layer=(_Caps )] <- Netherlands [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Netherlands\_Antilles _countryPhrase [layer=(_Caps )] <- Netherlands [s] _xWHITE [star s] Antilles [s] @@ # Ex: New\_Caledonia _countryPhrase [layer=(_Caps )] <- New [s] _xWHITE [star s] Caledonia [s] @@ # Ex: New\_Zealand _countryPhrase [layer=(_Caps )] <- New [s] _xWHITE [star s] Zealand [s] @@ @PRE <1,1> cap(); @RULES # Ex: Nicaragua _countryPhrase [layer=(_Caps )] <- Nicaragua [s] @@ # Ex: Niger _countryPhrase [layer=(_Caps )] <- Niger [s] @@ # Ex: Nigeria _countryPhrase [layer=(_Caps )] <- Nigeria [s] @@ # Ex: Niue _countryPhrase [layer=(_Caps )] <- Niue [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: North\_Korea _countryPhrase [layer=(_Caps )] <- North [s] _xWHITE [star s] Korea [s] @@ # Ex: Northern\_Ireland _countryPhrase [layer=(_Caps )] <- Northern [s] _xWHITE [star s] Ireland [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: Northern\_Mariana\_Islands _countryPhrase [layer=(_Caps )] <- Northern [s] _xWHITE [star s] Mariana [s] _xWHITE [star s] Islands [s] @@ @PRE <1,1> cap(); @RULES # Ex: Norway _countryPhrase [layer=(_Caps )] <- Norway [s] @@ # Ex: Oman _countryPhrase [layer=(_Caps )] <- Oman [s] @@ # Ex: Pakistan 
_countryPhrase [layer=(_Caps )] <- Pakistan [s] @@ # Ex: Palau _countryPhrase [layer=(_Caps )] <- Palau [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: Papua\_New\_Guinea _countryPhrase [layer=(_Caps )] <- Papua [s] _xWHITE [star s] New [s] _xWHITE [star s] Guinea [s] @@ @PRE <1,1> cap(); @RULES # Ex: Paraguay _countryPhrase [layer=(_Caps )] <- Paraguay [s] @@ # Ex: Philippines _countryPhrase [layer=(_Caps )] <- Philippines [s] @@ # Ex: Poland _countryPhrase [layer=(_Caps )] <- Poland [s] @@ # Ex: Portugal _countryPhrase [layer=(_Caps )] <- Portugal [s] @@ # Ex: PRC _countryPhrase [layer=(_Caps )] <- PRC [s] @@ @PRE <1,1> cap(); <5,5> cap(); <9,9> cap(); @RULES # Ex: People's\_Republic\_of\_China _countryPhrase [layer=(_Caps )] <- People [s] \' [s] s [s] _xWHITE [star s] Republic [s] _xWHITE [star s] of [s] _xWHITE [star s] China [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: P.R.C. _countryPhrase [layer=(_Caps )] <- P [s] \. [s] R [s] \. [s] C [s] \. [s] @@ @PRE <1,1> cap(); @RULES # Ex: Principe _countryPhrase [layer=(_Caps )] <- Principe [s] @@ # Ex: Qatar _countryPhrase [layer=(_Caps )] <- Qatar [s] @@ # Ex: ROC _countryPhrase [layer=(_Caps )] <- ROC [s] @@ # Ex: Romania _countryPhrase [layer=(_Caps )] <- Romania [s] @@ # Ex: Rumania _countryPhrase [layer=(_Caps )] <- Rumania [s] @@ # Ex: Russia _countryPhrase [layer=(_Caps )] <- Russia [s] @@ # Ex: Rwanda _countryPhrase [layer=(_Caps )] <- Rwanda [s] @@ # Ex: Sahara _countryPhrase [layer=(_Caps )] <- Sahara [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Saint\_Kitts _countryPhrase [layer=(_Caps )] <- Saint [s] _xWHITE [star s] Kitts [s] @@ # Ex: Saint\_Lucia _countryPhrase [layer=(_Caps )] <- Saint [s] _xWHITE [star s] Lucia [s] @@ # Ex: Saint\_Vincent _countryPhrase [layer=(_Caps )] <- Saint [s] _xWHITE [star s] Vincent [s] @@ @PRE <1,1> cap(); @RULES # Ex: Samoa _countryPhrase [layer=(_Caps )] <- Samoa [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Sao\_Tome 
_countryPhrase [layer=(_Caps )] <- Sao [s] _xWHITE [star s] Tome [s] @@ # Ex: Saudi\_Arabia _countryPhrase [layer=(_Caps )] <- Saudi [s] _xWHITE [star s] Arabia [s] @@ @PRE <1,1> cap(); @RULES # Ex: Scotland _countryPhrase [layer=(_Caps )] <- Scotland [s] @@ # Ex: Senegal _countryPhrase [layer=(_Caps )] <- Senegal [s] @@ # Ex: Serbia _countryPhrase [layer=(_Caps )] <- Serbia [s] @@ # Ex: Seychelles _countryPhrase [layer=(_Caps )] <- Seychelles [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Sierra\_Leone _countryPhrase [layer=(_Caps )] <- Sierra [s] _xWHITE [star s] Leone [s] @@ @PRE <1,1> cap(); @RULES # Ex: Slovakia _countryPhrase [layer=(_Caps )] <- Slovakia [s] @@ # Ex: Slovenia _countryPhrase [layer=(_Caps )] <- Slovenia [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Solomon\_Islands _countryPhrase [layer=(_Caps )] <- Solomon [s] _xWHITE [star s] Islands [s] @@ @PRE <1,1> cap(); @RULES # Ex: Somalia _countryPhrase [layer=(_Caps )] <- Somalia [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: South\_Africa _countryPhrase [layer=(_Caps )] <- South [s] _xWHITE [star s] Africa [s] @@ # Ex: South\_Korea _countryPhrase [layer=(_Caps )] <- South [s] _xWHITE [star s] Korea [s] @@ @PRE <1,1> cap(); @RULES # Ex: Spain _countryPhrase [layer=(_Caps )] <- Spain [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Sri\_Lanka _countryPhrase [layer=(_Caps )] <- Sri [s] _xWHITE [star s] Lanka [s] @@ @PRE <1,1> cap(); @RULES # Ex: Sudan _countryPhrase [layer=(_Caps )] <- Sudan [s] @@ # Ex: Suriname _countryPhrase [layer=(_Caps )] <- Suriname [s] @@ # Ex: Swaziland _countryPhrase [layer=(_Caps )] <- Swaziland [s] @@ # Ex: Sweden _countryPhrase [layer=(_Caps )] <- Sweden [s] @@ # Ex: Switzerland _countryPhrase [layer=(_Caps )] <- Switzerland [s] @@ # Ex: Syria _countryPhrase [layer=(_Caps )] <- Syria [s] @@ # Ex: Taiwan _countryPhrase [layer=(_Caps )] <- Taiwan [s] @@ # Ex: Tajikistan _countryPhrase [layer=(_Caps )] <- Tajikistan [s] @@ # Ex: Tanzania _countryPhrase 
[layer=(_Caps )] <- Tanzania [s] @@ # Ex: Thailand _countryPhrase [layer=(_Caps )] <- Thailand [s] @@ # Ex: Tibet _countryPhrase [layer=(_Caps )] <- Tibet [s] @@ # Ex: Tobago _countryPhrase [layer=(_Caps )] <- Tobago [s] @@ # Ex: Togo _countryPhrase [layer=(_Caps )] <- Togo [s] @@ # Ex: Tonga _countryPhrase [layer=(_Caps )] <- Tonga [s] @@ # Ex: Trinidad _countryPhrase [layer=(_Caps )] <- Trinidad [s] @@ # Ex: Tunisia _countryPhrase [layer=(_Caps )] <- Tunisia [s] @@ # Ex: Turkey _countryPhrase [layer=(_Caps )] <- Turkey [s] @@ # Ex: Turkmenistan _countryPhrase [layer=(_Caps )] <- Turkmenistan [s] @@ @PRE <1,1> cap(); <5,5> cap(); <7,7> cap(); @RULES # Ex: Turks\_and\_Caicos\_Islands _countryPhrase [layer=(_Caps )] <- Turks [s] _xWHITE [star s] and [s] _xWHITE [star s] Caicos [s] _xWHITE [star s] Islands [s] @@ @PRE <1,1> cap(); @RULES # Ex: Tuvalu _countryPhrase [layer=(_Caps )] <- Tuvalu [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); <7,7> cap(); @RULES # Ex: U.S.S.R. _countryPhrase [layer=(_Caps )] <- U [s] \. [s] S [s] \. [s] S [s] \. [s] R [s] \. [s] @@ @PRE <1,1> cap(); @RULES # Ex: UAR _countryPhrase [layer=(_Caps )] <- UAR [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: U.A.R. _countryPhrase [layer=(_Caps )] <- U [s] \. [s] A [s] \. [s] R [s] \. [s] @@ @PRE <1,1> cap(); @RULES # Ex: Uganda _countryPhrase [layer=(_Caps )] <- Uganda [s] @@ # Ex: UK _countryPhrase [layer=(_Caps )] <- UK [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: U.K. _countryPhrase [layer=(_Caps )] <- U [s] \. [s] K [s] \. 
[s] @@ @PRE <1,1> cap(); @RULES # Ex: Ukraine _countryPhrase [layer=(_Caps )] <- Ukraine [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: United\_Arab\_Emirates _countryPhrase [layer=(_Caps )] <- United [s] _xWHITE [star s] Arab [s] _xWHITE [star s] Emirates [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: United\_Kingdom _countryPhrase [layer=(_Caps )] <- United [s] _xWHITE [star s] Kingdom [s] @@ # Ex: United\_States _countryPhrase [layer=(_Caps )] <- United [s] _xWHITE [star s] States [s] @@ @PRE <1,1> cap(); @RULES # Ex: Uruguay _countryPhrase [layer=(_Caps )] <- Uruguay [s] @@ # Ex: USA _countryPhrase [layer=(_Caps )] <- USA [s] @@ # Ex: USSR _countryPhrase [layer=(_Caps )] <- USSR [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: U.S.A. _countryPhrase [layer=(_Caps )] <- U [s] \. [s] S [s] \. [s] A [s] \. [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: U.S. _countryPhrase [layer=(_Caps )] <- U [s] \. [s] S [s] \. [s] @@ @PRE <1,1> cap(); @RULES # Ex: US _countryPhrase [layer=(_Caps )] <- US [s] @@ # Ex: Uzbekistan _countryPhrase [layer=(_Caps )] <- Uzbekistan [s] @@ # Ex: Vanuatu _countryPhrase [layer=(_Caps )] <- Vanuatu [s] @@ # Ex: Venezuela _countryPhrase [layer=(_Caps )] <- Venezuela [s] @@ # Ex: Vietnam _countryPhrase [layer=(_Caps )] <- Vietnam [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Western\_Sahara _countryPhrase [layer=(_Caps )] <- Western [s] _xWHITE [star s] Sahara [s] @@ # Ex: Western\_Samoa _countryPhrase [layer=(_Caps )] <- Western [s] _xWHITE [star s] Samoa [s] @@ @PRE <1,1> cap(); @RULES # Ex: Yemen _countryPhrase [layer=(_Caps )] <- Yemen [s] @@ # Ex: Yugoslavia _countryPhrase [layer=(_Caps )] <- Yugoslavia [s] @@ # Ex: Zaire _countryPhrase [layer=(_Caps )] <- Zaire [s] @@ # Ex: Zambia _countryPhrase [layer=(_Caps )] <- Zambia [s] @@ # Ex: Zimbabwe _countryPhrase [layer=(_Caps )] <- Zimbabwe [s] @@
# PASS: coordinated clauses and "that"-clause disambiguation (runs on _sent).
# 1. clause conj clause: when exactly one side still has pattern "alpha",
#    try to repair that side by promoting its verb into a one-verb clause.
# 2. clause that clause: decide whether "that" is a subordinator (IN) or a
#    relative pronoun (WDT), with active and passive voice handled by
#    separate rules below.
@CODE
L("hello") = 0;
@@CODE

@NODES _sent

# clause conj clause
# Proceed only when exactly one of the two clauses has pattern "alpha".
@CHECK
	if (N("pattern",1) == "alpha" && N("pattern",3) == "alpha")
		fail();
	if (N("pattern",1) != "alpha" && N("pattern",3) != "alpha")
		fail();
@POST
	if (N("pattern",3) == "alpha")
		{
		# Assume no evidence for local glom around conj.
		if (N("verb",3))
			{
			# Rebuild the right-hand clause as verb -> vg -> clause,
			# carrying node variables across each regrouping.
			L("tmp") = N(3);
			pnrename(N(3),"_verb");
			group(3,3,"_vg");
			mhbv(N(3),L("neg"),0,0,0,0,L("tmp"));
			pncopyvars(L("tmp"),N(3));
			# N("voice",3) = ?
			pnreplaceval(N(3),"pattern",0);
			group(3,3,"_clause");
			setunsealed(3,"true");	# 07/10/12 AM.
			pncopyvars(L("tmp"),N(3));
			N("pattern",3) = "v";
			}
		}
	else	# pattern 1 == alpha
		{
		}
@RULES
_xNIL <-
	_clause
	_conj
	_clause
	@@

# that clause
# Assigning role to "that" based on following clause.
#@CHECK
#	if (N("mypos",1))	# Fix.
#		fail();
#	if (!N("pattern",2))
#		fail();
#@POST
#	L("ch") = strpiece(N("pattern",2),0,0);
#	if (L("ch") == "n")
#		chpos(N(1),"IN");
#	else if (L("ch") == "v")
#		{
#		chpos(N(1),"WDT");	# that/WDT
#		N("bracket",1) = 1;
#		}
#@RULES
#_xNIL <-
#	that [s]
#	_clause [lookahead]
#	@@

# that
# clause that clause
# NOTE: New and better handling of that-clause.
# Looking for "incomplete" second clause, eg, nv
# with a transitive verb.  (Active voice only; passive is handled
# by the next rule.)
@CHECK
	if (N("mypos",2))
		fail();
	if (N("voice",3) == "passive")
		fail();	# Need a separate handler for passive.
	L("p1") = N("pattern",1);
	L("p") = N("pattern",3);
	if (L("p1") == "nv" || L("p1") == "v")
		{
		if (N("last name",1) != "_advl")	# 05/27/07 AM.
			{
			# Should check for pp etc.
			S("pos") = "IN";
			succeed();
			}
		}
	# If first clause ends in np.
	# If second starts with vg or is incomplete-transitive.
	# Then WDT.  Else IN.
	# "cl.txt" << phrasetext() << "\n";
	# "cl.txt" << clauselastnp(N(1)) << "\n";
	# "cl.txt" << clausecomplete(N(3)) << "\n";
	if (!clauselastnp(N(1)))
		fail();
	if (clauseincomplete(N(3))	# Fits incomplete patterns.
		&& !clausecomplete(N(3)))	# Can't fit complete patterns.
		{
		S("pos") = "WDT";
		succeed();
		}
	# Todo: Need better checking on prepositional/phrasal verbs.
	fail();
@POST
	# "cl.txt" << "active" << "\n";
	if (S("pos") == "IN")
		{
		chpos(N(2),"IN");	# that/IN
		}
	else
		{
		chpos(N(2),"WDT");	# that/WDT
		N("bracket",2) = 1;
		}
@RULES
_xNIL <-
	_clause
	that [s lookahead]
	_clause
	@@

# that
# clause that clause
# To simplify logic, separated passive voice handler.
@CHECK
	if (N("mypos",2))
		fail();
	if (N("voice",3) != "passive")
		fail();
	L("p1") = N("pattern",1);
	if (L("p1") == "nv" || L("p1") == "v")
		{
		S("pos") = "IN";
		succeed();
		}
	S("pos") = "WDT";
	L("p") = N("pattern",3);
	if (!L("p"))
		fail();
	if (strpiece(L("p"),0,0) == "v")
		succeed();
	if (strpiece(L("p"),0,0) == "n")
		{
		# Require lone "by" or other prep at end of clause.
		if (L("p") == "nvp")
			succeed();
		}
	fail();
@POST
	if (S("pos") == "IN")
		{
		chpos(N(2),"IN");	# that/IN
		}
	else
		{
		# "cl.txt" << "passive" << "\n";
		chpos(N(2),"WDT");	# that/WDT
		N("bracket",2) = 1;
		}
@RULES
_xNIL <-
	_clause
	that [s lookahead]
	_clause
	@@
# PASS: capitalized-word handling inside text zones.
# Marks sentence starts, glues runs of capitalized tokens into _caps/_noun
# name groups, handles possessives ('s), city-state pairs, cap+number
# combos, and flags likely named entities (person / organization).
@CODE
L("hello") = 0;
@@CODE

@NODES _TEXTZONE

# ^ cap
# Function word after optional punctuation at zone start: new sentence.
@CHECK
	if (N("quoted",3))
		{
		# Probably a caps region...
		if (X("quoted caps") > X("quoted prose"))
			fail();
		}
@PRE
	<3,3> cap();
@POST
	N("newsent",3) = 1;
	noop();
@RULES
_xNIL <-
	_xSTART
	_xWILD [star match=(_xPUNCT _qEOS _dbldash)]
	_xWILD [one match=(_det _pro _prep _conj _fnword)]
	@@

# Defer decisions on start of sentence.
@POST
	N("newsent",2) = 1;
	if (N("unknown",2))
		fixnphead(2);
	else
		noop();
@RULES
_xNIL <- _xSTART _xALPHA _prep @@
_xNIL <- _qEOS _xALPHA _prep @@
# Interrogative forms...
_xNIL <- _xSTART _verb I [s] @@

# ^ alpha
@CHECK
	if (N("verb",2) && !vconjq(N(2),"inf"))
		succeed();	# Inflected verb.
	fail();
@POST
	N("newsent") = 1;
	noop();	# Hide from cap glomming.
@RULES
_xNIL <- _xSTART _xCAP @@

# Some chaff.
# Four Americans ...
@POST
	noop();
@RULES
_xNIL <- _xWILD [plus match=(_num _quan)] _adj @@

# Don't know what to do with "I" yet.
@PRE
	<1,1> vareq("stem","i");
@POST
	noop();
@RULES
_xNIL <- _pro @@

# ^ cap date
@CHECK
	if (N("sem",3) != "date"
		&& !N("acronym",3))	# 04/23/07 AM.
		fail();
@POST
	N("newsent",2) = 1;
	noop();	# Don't throw proper noun on first guy.
@RULES
_xNIL <- _xSTART _xCAP _noun @@

# cap prep cap
@POST
	cappos(N(1),0);
	cappos(N(3),0);
@RULES
_xNIL <-
	_xCAP [s]
	_prep [lookahead]
	_xCAP [s]
	@@

# Trying out the cool feature-based matching.
#
@PRE
	<1,1> lowercase();
	<2,2> var("cap");
@POST
#	chpos(N(3),"POS");	# Don't really know yet.
	L("fst") = N(2);
	L("lst") = lasteltnode(2);
	setposrange(L("fst"),L("lst"),"NP");
	group(2,2,"_caps");
	setunsealed(2,"true");	# 07/10/12 AM.
	group(2,2,"_noun");
	setunsealed(2,"true");	# 07/10/12 AM.
	N("ne",2) = 1;
#	group(2,3,"_adj");
#	clearpos(N(2),1,0);
#	N("possessive",2) = 1;
@RULES
_caps [layer=_noun] <-
	_xWILD [s one match=(_xALPHA _aposS \, _dbldash)]
	_xWILD [plus fail=(_xPUNCT _xNUM _qEOS)]
	_aposS
	@@

# Trying a more abstract method.
#
@PRE
	<1,1> var("cap");
@POST
	L("fst") = N(1);
	L("lst") = lasteltnode(1);
	setposcaps(L("fst"),L("lst"));
	group(1,1,"_caps");
	setunsealed(1,"true");	# 07/10/12 AM.
	group(1,1,"_noun");
	setunsealed(1,"true");	# 07/10/12 AM.
	clearpos(N(1),1,0);
	N("cap",1) = 1;
	N("number",1) = number(L("lst"));
@RULES
_xNIL <- _xWILD [min=2 fail=(_xPUNCT _qEOS _xNUM)] @@

# cap cap apos
@POST
	N("cap",3) = 1;
	N("noun",3) = 1;
	L("tmp3") = N(3);
	group(3,3,"_noun");
	setunsealed(3,"true");	# 07/10/12 AM.
	pncopyvars(L("tmp3"),N(3));
@RULES
_xNIL <-
	_xSTART
	_xCAP [s]
	_xCAP
	_aposS [lookahead]
	_xWILD [s one fail=(_xCAP)]
	@@

# city , state
@POST
	L("lst") = lasteltnode(2);
	setposrange(N(2),L("lst"),"NP");	# 05/03/05 AM.
	L("tmp4") = N(4);
	group(2,2,"_city");
	group(2,5,"_citystate");
	group(2,2,"_noun");
	pncopyvars(L("tmp4"),N(2));
	chpos(N(2),0);	# 05/03/05 AM.
	clearpos(N(2),1,0);
#	chpos(N(2),"NP");
#	N("cap",2) = 1;
	N("glom comma",2) = 1;	# Can glom comma to the right.
	N("sem",2) = "geoloc";	# 04/21/07 AM.
@RULES
_xNIL <-
	_xWILD [s opt fail=(_xCAP)]
	_xCAP [s plus except=(_det)]
	\,
	_usstate [s]
	\, [opt]
	@@

# Unknown word at start of sentence etc.
@CHECK
	if (!N("unknown",2))
		fail();
@POST
	L("tmp2") = N(2);
	group(2,2,"_noun");
	pncopyvars(L("tmp2"),N(2));
	N("cap",2) = 1;
@RULES
_xNIL <-
	_xWILD [one match=(_qEOS _conj)]
	_xCAP
	_xWILD [one lookahead match=( \, _verb _vg )]
	@@
_xNIL <-
	_xSTART
	_xCAP
	_xWILD [one lookahead match=( \, )]
	@@

@POST
	L("tmp2") = N(2);
	group(2,2,"_noun");
	pncopyvars(L("tmp2"),N(2));
	N("cap",2) = 1;
	N("ne",2) = 1;
	cappos(N(2),0);
	# Some domain semantics.
	L("lc") = strtolower(N("$text",2));
	if (finddictattr(L("lc"),"persname"))
		{
		N("ne type",2) = "person";
		N("sem",2) = "name";
		N("ne type conf",2) = 50;
		}
@RULES
_xNIL <-
	_xWILD [one match=( \, _conj )]
	_xCAP
	_xWILD [one lookahead match=( \, _verb _vg )]
	@@

# alpha num
# cap num
@CHECK
	if (!N("unknown",1))
		fail();
@POST
	chpos(N(1),"NP");
	group(1,2,"_noun");
	clearpos(N(1),1,0);
	N("cap",1) = 1;
	N("ne",1) = 1;
@RULES
_xNIL <-	#10
	_xCAP
	_xNUM
	@@

# alpha cap alpha
@PRE
	<1,1> lowercase();
	<3,3> lowercase();
@CHECK
	if (N("adj",2) && N("noun",2))
		fail();	# 01/04/05 AM.
@POST
	L("tmp2") = N(2);
	if (N("noun",2) || !N("adj",2))
		{
		group(2,2,"_noun");
		pncopyvars(L("tmp2"),N(2));
		cappos(N(2),0);
		}
	else
		{
		group(2,2,"_adj");
		pncopyvars(L("tmp2"),N(2));
		}
	N("cap",2) = 1;
	N("ne",2) = 1;
	N("stem",2) = N("$text",2);
	# Some domain semantics.
	L("lc") = strtolower(N("$text",2));
	if (finddictattr(L("lc"),"persname"))
		{
		N("ne type",2) = "person";
		N("ne type conf",2) = 50;
		N("sem",2) = "name";
		N("ne",2) = 1;
		}
	else if (N("unknown",2))
		{
		# A bit of morph.
		if (strendswith(L("lc"),"tv"))
			{
			N("ne type",2) = "organization";
			N("ne type conf",2) = 50;
			N("sem",2) = "organization";
			N("ne",2) = 1;
			}
		}
@RULES
_xNIL <-
	_xALPHA [s]
	_xCAP
	_xALPHA [s lookahead]
	@@

# cap & cap
@POST
	L("tmp1") = lasteltnode(1);
	setposrange(N(1),L("tmp1"),"NP");
	L("tmp3") = lasteltnode(3);
	setposrange(N(3),L("tmp3"),"NP");
	pncopyvars(L("tmp3"));
	sclearpos(1,0);
	single();
@RULES
_caps [layer=_noun unsealed] <-
	_xCAP [plus]
	\& [s]
	_xCAP [plus]
	@@

@PRE
	<1,1> lowercase();
@POST
	chpos(N(4),"POS");	# 5/24/06 AM.
	if (N(3))
		L("last") = lasteltnode(3);
	else
		L("last") = N(2);
	fixnouns(N(2),L("last"));
	group(2,3,"_caps");
	setunsealed(2,"true");	# 07/10/12 AM.
	group(2,2,"_noun");
	setunsealed(2,"true");	# 07/10/12 AM.
#	N("posarr",2) = "NP";	#
#	N("posarr len",2) = 1;	#
#	chpos(N(2),"NP");
#	N("ne",2) = 1;
	group(2,3,"_adj");
	clearpos(N(2),1,0);
	N("possessive",2) = 1;
@RULES
_caps [layer=_noun] <-
	_xWILD [s one match=(_xALPHA _xSTART _aposS \, _dbldash)]
	_xCAP
	_xWILD [s star match=(_xCAP _letabbr)]
	_aposS
	@@

@PRE
	<1,1> lowercase();
@POST
	L("n") = N(2);
	L("posarr") = sameposrange(N(2),L("n"));
	if (!L("posarr"))
		"err.txt" << "[More than one POS in caps list.]\n";
	pncopyvars(L("n"));
	L("last") = lasteltnode(3);
	forceposrange(N(2),L("last"),"NP");	# 04/21/07 AM.
	group(2,3,"_caps");
	setunsealed(2,"true");	# 07/10/12 AM.
	group(2,2,"_noun");
	setunsealed(2,"true");	# 07/10/12 AM.
	clearpos(N(2),1,0);
	N("cap",2) = 1;
	N("sem",2) = "name";
	N("ne",2) = 1;
@RULES
#_caps [layer=_noun] <-
_xNIL <-
	_xWILD [s one match=(_xALPHA _aposS)]
	_xCAP [s]	# 06/21/06 AM.
	_xWILD [s plus match=(_xCAP _letabbr)]
	@@
_xNIL <-
	_xSTART
	_xCAP
	_xWILD [s plus match=(_xCAP _letabbr)]
	@@

@POST
	L("first") = N(1);
	L("last") = lasteltnode(2);
	L("sem") = pnvar(L("last"),"sem");
	if (!L("sem"))
		L("sem") = "name";
	setposrange(L("first"),L("last"),"NP");
	L("ne text") = phrasetext();
	group(1,2,"_caps");
	setunsealed(1,"true");	# 07/10/12 AM.
	group(1,1,"_noun");
	setunsealed(1,"true");	# 07/10/12 AM.
	clearpos(N(1),1,0);
	if (L("sem"))
		N("sem",1) = L("sem");
	N("stem",1) = phrasetext();
	N("ne text",1) = L("ne text");
	N("ne type",1) = "person";
	N("ne",1) = 1;
	N("ne type conf",1) = 50;
	N("cap",1) = 1;
@RULES
_xNIL <-
	_xWILD [one match=(_title _letabbr _letlet)]
	_xWILD [s plus match=(_xCAP _letabbr)]
	@@

# Two caps in a row, even if reduced.
@CHECK
	if (N("ne type",1))	# Don't glom known entities.
		fail();
@POST
	L("first") = N(1);
	L("last") = lasteltnode(2);
	L("sem") = pnvar(L("last"),"sem");
	if (!L("sem"))
		L("sem") = "name";
	setposrange(L("first"),L("last"),"NP");
	L("ne text") = phrasetext();
	L("caps arr")[0] = N("$text",1);
	L("caps arr")[1] = N("$text",2);
	group(1,2,"_caps");
	setunsealed(1,"true");	# 07/10/12 AM.
	group(1,1,"_noun");
	setunsealed(1,"true");	# 07/10/12 AM.
	clearpos(N(1),1,0);
	if (L("sem"))
		N("sem",1) = L("sem");
	N("ne text",1) = N("stem",1) = L("ne text");
	N("ne",1) = 1;
	N("cap",1) = 1;
	if (L("caps arr"))
		N("caps arr",1) = L("caps arr");
@RULES
_xNIL <-
	_xWILD [s one match=(_xCAP _title _letabbr _letlet _name _month _abbr _usstate)
		except=(_det _fnword _prep _conj _pro _verb _vg _modal)]
	_xWILD [s plus match=(_xCAP _letabbr)
		except=(_det _fnword _prep _conj _pro)]
	@@

@POST
	pncopyvars(1);
	S("mypos") = "NP";
	single();
@RULES
_caps [layer=_noun unsealed] <-
	_xCAP
	_xWILD [s plus match=(_xCAP _letabbr)]
	@@

@POST
	chpos(N(3),"POS");
	L("tmp2") = N(2);
	group(2,2,"_caps");
	setunsealed(2,"true");	# 07/10/12 AM.
	group(2,2,"_noun");
	setunsealed(2,"true");	# 07/10/12 AM.
	pncopyvars(L("tmp2"),N(2));
	chpos(N(2),"NP");
@RULES
_xNIL <-
	_xWILD [s one fail=(_xCAP)]
	_xCAP
	_aposS
	@@

@PRE
	<1,1> lowercase();
@POST
	chpos(N(2),"NP");
	chpos(N(3),"POS");
	pncopyvars(2);
	sclearpos(1,0);
	singler(2,3);
@RULES
_caps [layer=_noun unsealed] <-
	_xWILD [s one match=(_xALPHA _xSTART)]
	_xCAP
	_aposS
	@@

# cap apos eos
@POST
	chpos(N(2),"NP");
	chpos(N(3),"POS");
	pncopyvars(2);
	sclearpos(1,0);
	singler(2,3);
@RULES
_caps [layer=_noun unsealed] <-
	_xWILD [one match=(_prep _fnword _verb _vg)]
	_xCAP
	_aposS
	_xWILD [one lookahead match=(_qEOS _xEND)]
	@@

# Flag all caps.
@PRE
	<1,1> varz("mypos");
#	<1,1> varz("adj");	#
@POST
	N("caps50 lonecap") = 1;
	# Todo: Fun to assess start of new sentence here.
	L("newsent") = isnewsentence(N(1));
	L("tmp1") = N(1);	# 06/27/06 AM.
	if (!L("newsent"))
		{
		# Try restricting this.
		# if (literal(N(1)) && N("unknown",1))
		{
		group(1,1,"_noun");	# 06/27/06 AM.
		setunsealed(1,"true");	# 07/10/12 AM.
		pncopyvars(L("tmp1"),N(1));	# 06/27/06 AM.
		}
		N("cap") = 1;
		N("ne") = 1;
#		if (pnname(N(1)) == "_noun" || pnname(N(1)) == "_adj")
#		if (N("noun",1) || N("adj",1))
#			cappos(N(1),0);	# 05/10/07 AM.
#		fixnoun(N(1));
		}
	else	# First word of text zone is cap.
		{
		N("newsent") = 1;
		if (literal(N(1)) && N("unknown"))	# 06/27/06 AM.
			{
			group(1,1,"_noun");	# 06/27/06 AM.
			setunsealed(1,"true");	# 07/10/12 AM.
			pncopyvars(L("tmp1"),N(1));	# 06/27/06 AM.
			N("cap") = 1;	# 06/27/06 AM.
			fixnoun(N(1));	# 06/27/06 AM.
			}
		else if (pnname(N(1)) == "_noun" && !N("mypos"))
			{
			# Only unknown words will be NP, for now...
			if (N("unknown"))
				{
				N("cap") = 1;
				N("mypos",1) = "NP";	# 05/26/07 AM.
				fixnoun(N(1));
				}
			# Else could collect tentative noncount/mass nouns here.
			if (G("verbose"))
				"mass-noun.txt" << N("$text") << "\n";
			}
#		else if (literal(N(1)) && N("unknown") && !N("mypos"))
#			{
#			N("cap") = 1;
#			}
		}
@RULES
_xNIL <-	#22. 83%
	_xWILD [s one match=(_xCAP)
		# Want all caps within a sentence handled here.
		# except=(_pro _det _fnword _prep)	# 06/02/07 AM.
		]
	@@

# Common words that are capitalized.
@POST
	N("cap") = 1;	# Flag as caps.	# 06/06/07 AM.
@RULES
_xNIL <- _xCAP [s] @@

# simple name pattern
@POST
	pncopyvars(2);
	S("mypos") = "NP";
	singler(1,2);
@RULES
_caps [layer=_noun unsealed] <-
	_letlet [s]
	_xCAP
	_xWILD [one lookahead fail=(_xCAP)]
	@@

# Lone letter abbreviation.
# Could be bad answer key.
@POST
	L("tmp1") = N(1);
	group(1,1,"_caps");
	setunsealed(1,"true");	# 07/10/12 AM.
	group(1,1,"_noun");
	setunsealed(1,"true");	# 07/10/12 AM.
	pncopyvars(L("tmp1"),N(1));
	chpos(N(1),"NP");
@RULES
_xNIL <- _letabbr @@

# alpha num noun
@POST
	L("tmp2") = N(2);
	if (literal(N(2)))
		{
		group(2,2,"_caps");
		setunsealed(2,"true");	# 07/10/12 AM.
		group(2,2,"_noun");
		setunsealed(2,"true");	# 07/10/12 AM.
		pncopyvars(L("tmp2"),N(2));
		}
	chpos(N(2),"NP");
	group(2,4,"_noun");
	setunsealed(2,"true");	# 07/10/12 AM.
@RULES
_xNIL <-
	_xWILD [one fail=(_xCAP)]
	_xCAP [s]
	_xNUM
	_listitem [s opt]
	_xWILD [one lookahead match=(_noun _verb _vg)]
	@@

# by cap prep cap ,
@PRE
	<1,1> vareq("stem","by");
@POST
	group(2,4,"_np");
	setunsealed(2,"true");	# 07/10/12 AM.
	N("cap",2) = 1;
	N("ne",2) = 1;
@RULES
_xNIL <-
	_prep
	_xCAP [s]
	_prep
	_xCAP [s]
	_xWILD [one lookahead match=( \, )]
	@@
# Bind a database table's column to an NLP++ variable.
# Opens a database connection, runs a SELECT over the employee table, and
# streams each employee name to output.txt (printing NULL for rows whose
# column indicator is unset).
# NOTE(review): credentials are hard-coded sample values ("root" /
# "mypassword") -- replace before using outside a demo.
@CODE
dbopen("test","root","mypassword");
dballocstmt();
dbexecstmt("SELECT * FROM employee;");
# Column 1 (varchar, max length 50) is bound to G("employee name");
# G("result1") receives the per-fetch column indicator.
dbbindcol(1,"varchar",50,&G("employee name"),&G("result1"));
while (dbfetch())
{
   "output.txt" << "employee name: ";
   if (G("result1"))
      "output.txt" << G("employee name") << "\n";
   else
      "output.txt" << "NULL" << "\n";
}
dbfreestmt();	# Release the statement handle...
dbclose();	# ...then close the connection.
@@CODE
# PASS: collect candidate names on each line.
# Multi-token capitalized runs become _name nodes, recorded under both the
# "caps" and "names" KB hierarchies (type "full"); a lone all-uppercase
# word is recorded under "caps" only, as type "abbrev".
@NODES _LINE

@POST
	L("con") = getconcept(G("caps"),N("$text"));
	S("caps") = L("con");
	AddUniqueStr(L("con"),"type","full");	# Full (multi-token) form.
	L("con") = getconcept(G("names"),N("$text"));
	S("names") = L("con");
	single();
@RULES
# NOTE(review): the literal 310 in the match list looks like a stray
# token -- confirm against the token stream this pass expects.
_name <- _xWILD [min=2 match=(_xCAP \& 310) fail=(_det _function)] @@

# All-uppercase single word: record as an abbreviated name form.
@PRE
	<1,1> uppercase();
@POST
	L("con") = getconcept(G("caps"),N("$text"));
	S("caps") = L("con");
	AddUniqueStr(L("con"),"type","abbrev");
	single();
@RULES
_name <- _xALPHA @@
# Tutorial example: explicit vs. implicit rule reduction.
# Since @POST is non-empty, you must explicitly specify the reduction.
@POST
 ++G("nouns");	# Count this match.
 single();	# NEEDED, or "computer" will not reduce to _noun.
@RULES
_noun <- computer @@

# In this case, @POST is empty, so single() occurs by default.
@POST
@RULES
_noun <- sheep @@
# Dump the "labels" KB subhierarchy for inspection.
# NOTE(review): the meaning of the numeric flag (1) is assumed to control
# display depth/detail -- confirm against the DisplayKB documentation.
@CODE
DisplayKB(G("labels"), 1);
@@CODE
# Build the output dictionary filename from the input file's head
# (e.g. an input named "foo.txt" yields "foo.dict").
@CODE
G("file") = G("$inputhead") + ".dict";
@@CODE
@NODES _ROOT

###############################################
# Rule 9                                      #
# Abbreviations (capital + period alternating #
###############################################

# Extend an existing abbreviation with one more letter and an optional
# trailing period.  Only pieces of 1-2 characters qualify.
# FIX: scratch values were previously held in G() globals ("word","L1"),
# which persist for the whole analysis and can collide with other rules
# and passes; they are now L() locals scoped to this @CHECK.  (The old
# code also used "L1" here but "LL" below -- the locals remove that
# inconsistency.)
@CHECK
	L("word") = N("$text",2);
	if (strlength(L("word")) > 2) fail();
@@CHECK

@RULES
_abbrev <-
	_abbrev [one]	### (1)
	_xALPHA [one]	### (2)
	\. [opt]	### (3)
	@@
@@RULES

# Seed an abbreviation from two short letter+period pieces (e.g. "U. S.").
# Reject when the two letters joined form a dictionary word of length 2
# and whitespace follows (element 3 matched).
# FIX: same G() -> L() localization as above; also dropped the redundant
# num() wrapper -- strlength() already yields a number.
@CHECK
	L("word") = N("$text",1);
	L("len") = strlength(L("word"));
	if (L("len") > 2) fail();
	L("firstword") = N("$text",1) + N("$text",2);
	L("fwordConcept") = dictfindword(L("firstword"));
	if (L("fwordConcept") && L("len") == 2 && N("$text",3)) fail();
	L("word") = N("$text",4);
	L("len") = strlength(L("word"));
	if (L("len") > 2) fail();
@@CHECK

@RULES
_abbrev <-
	_xALPHA [one]	### (1)
	\. [one]	### (2)
	_xWILD [opt match=(_xWHITE "_whiteSpace")]	### (3)
	_xALPHA [one]	### (4)
	\. [one]	### (5)
	@@
@@RULES
# Remove all phrases from the given subhierarchy.
# NOTE(review): L("hier") must already be bound by the enclosing code
# region; this snippet assumes it names a KB concept -- confirm at the
# call site.
prunephrases(L("hier"));
# Within a line of a pronunciations header zone, a run of alphabetic
# tokens is taken to be the phonetic spelling.
@PATH _ROOT _pronunciations _headerZone _LINE

@RULES
_phonetic <-
	_xWILD [plus match=(_xALPHA)]	### (1)
	@@
# Dump the EUI-to-codes mapping (display argument 2) for inspection.
@CODE
DisplayKB(G("eui_to_codes_root"), 2);
# Scratch code kept for reference: negative-number printing check.
# L("a") = -1;
# L("b") = 0;
# if (L("b") != 1) {
#	"negatives.txt" << L("a") << "\n";
#	"negatives.txt" << L("b") << "\n";
# }
@@CODE
@PATH _ROOT _doctypedecl

# DTD-internal markup declarations: comments, external IDs, ATTLIST and
# ENTITY declarations, and the opener of a #PCDATA content group.

@RULES
# <!-- ... --> comment inside the doctype declaration.
# FIX: the wildcard's fail list named "_CommentEnd" twice; the duplicate
# entry is removed (matching behavior unchanged).
_Comment <-
	_CommentStart [one]	### (1)
	_xWILD [min=0 max=0 fail=("_CommentEnd" "_DoubleHyphen")]	### (2)
	_CommentEnd [one]	### (3)
	@@

# PUBLIC external ID with two pubid literals.
# (Normalized [min=1 max=1] to [one] for consistency with siblings.)
_ExternalID <-
	_xWILD [one matches=("PUBLIC")]	### (1)
	_whiteSpace [one]	### (2)
	_PubidLiteral [one]	### (3)
	_whiteSpace [one]	### (4)
	_PubidLiteral [one]	### (5)
	@@

# PUBLIC external ID: pubid literal followed by system literal.
_ExternalID <-
	_xWILD [one matches=("PUBLIC")]	### (1)
	_whiteSpace [one]	### (2)
	_PubidLiteral [one]	### (3)
	_whiteSpace [one]	### (4)
	_SystemLiteral [one]	### (5)
	@@

# SYSTEM external ID (pubid-literal variant).
_ExternalID <-
	_xWILD [one matches=("SYSTEM")]	### (1)
	_whiteSpace [opt]	### (2)
	_PubidLiteral [one]	### (3)
	@@

# SYSTEM external ID (system-literal variant).
_ExternalID <-
	_xWILD [one matches=("SYSTEM")]	### (1)
	_whiteSpace [opt]	### (2)
	_SystemLiteral [one]	### (3)
	@@
@@RULES

# Carry the element name up from the declaration-start node onto the
# reduced node for every rule in the region below.
@POST
	S("ElementName") = N("ElementName",1) ;
	single() ;
@@POST

@RULES
# <!ATTLIST ...> declaration, terminated by the end tag.
_AttlistDecl [unsealed] <-
	_AttlistDeclStart [one]	### (1)
	_whiteSpace [opt]	### (2)
	_xWILD [plus fail=("_EndTag")]	### (3)
	_whiteSpace [opt]	### (4)
	_EndTag [one]	### (5)
	@@

# "( #PCDATA" opener of a mixed-content model.
_PCDataStart <-
	\( [one]	### (1)
	_whiteSpace [opt]	### (2)
	\# [one]	### (3)
	_xALPHA [s one matches=("#PCDATA")]	### (4)
	@@

# <!ENTITY ...> declaration, with four alternative terminators.
# FIX: the fail lists named "EndDocType" without the leading underscore,
# so the wildcard compared against a literal token instead of the
# _EndDocType nonterminal used as a terminator below; corrected to
# "_EndDocType".
_EntityDecl <-
	_EntityDeclStart [one]	### (1)
	_whiteSpace [opt]	### (2)
	_xWILD [plus fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "_EndDocType")]	### (3)
	_whiteSpace [opt]	### (4)
	_EndTag [one]	### (5)
	@@
_EntityDecl <-
	_EntityDeclStart [one]	### (1)
	_whiteSpace [opt]	### (2)
	_xWILD [plus fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "_EndDocType")]	### (3)
	_whiteSpace [opt]	### (4)
	_CommentEnd [one]	### (5)
	@@
_EntityDecl <-
	_EntityDeclStart [one]	### (1)
	_whiteSpace [opt]	### (2)
	_xWILD [plus fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "_EndDocType")]	### (3)
	_whiteSpace [opt]	### (4)
	_EndEmptyTag [one]	### (5)
	@@
_EntityDecl <-
	_EntityDeclStart [one]	### (1)
	_whiteSpace [opt]	### (2)
	_xWILD [plus fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "_EndDocType")]	### (3)
	_whiteSpace [opt]	### (4)
	_EndDocType [one]	### (5)
	@@
@@RULES
# Capture a double-quoted string on a line; the quoted text (without the
# quotes) is saved on the new _string node's "word" variable.
# NOTE(review): the inner wildcard has no explicit min, so an empty
# string "" is presumably not matched -- confirm against _xWILD defaults.
@NODES _LINE

@POST
	S("word") = N("$text",2);
	single();
@RULES
_string <-
	\"	### (1)
	_xWILD [fail=(\" _xEND)]	### (2)
	\"	### (3)
	@@
# Date range of the form "<Month> <year> to present".
# Element 1 must already carry date == "month" (a month word) and
# element 2 must be flagged with a "year" variable.
@NODES _LINE

@PRE
	<1,1> vareq("date","month");
	<2,2> var("year");
@RULES
_dateRange <-
	_xALPHA [s]	### (1)
	_xNUM	### (2)
	to	### (3)
	present	### (4)
	@@