text
stringlengths 22
301k
|
---|
@NODES _ROOT
@POST
# For every _LINE node under root, append the line's word and its
# part-of-speech tag to the funcwords.dict output file.
"funcwords.dict" << N("word") << " pos=" << N("pos") << "\n";
@RULES
_xNIL <-
_LINE
@@
|
@PATH _ROOT _doctypedecl _AttlistDecl
@RULES
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("REQUIRED" "IMPLIED")] ### (2)
@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s min=1 max=1 matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_PEReference [one] ### (4)
@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_PubidLiteral [one] ### (4)
@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_SystemLiteral [one] ### (4)
@@
_DefaultDecl <-
_PubidLiteral [one] ### (1)
@@
_DefaultDecl <-
_SystemLiteral [one] ### (1)
@@
_AttType <-
_xWILD [s one matches=("CDATA" "ID" "IDREF" "IDREFS" "ENTITY" "ENTITIES" "NMTOKEN" "NMTOKENS")] ### (1)
@@
_EnumNameElement <-
_whiteSpace [opt] ### (1)
\| [one] ### (2)
_whiteSpace [opt] ### (3)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (4)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (5)
@@
_EnumElement <-
_whiteSpace [opt] ### (1)
\| [one] ### (2)
_whiteSpace [opt] ### (3)
_xWILD [s plus matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (4)
@@
|
@PATH _ROOT _zoneHeader
@POST
# Delete each whitespace token found inside a zone header from the
# parse tree; noop() suppresses the default rule reduction.
excise(1,1);
noop();
@RULES
_xNIL <-
_xWHITE [s] ### (1)
@@
|
# Fetch attribute's list of values.
L("return_val") = attrvals(L("attr")); |
@NODES _ROOT
@CHECK
# NOTE(review): this @CHECK has an empty if() condition — the region is
# incomplete as written and cannot meaningfully succeed or fail.
# Confirm the intended test (possibly on the attrvals result fetched
# in the preceding snippet) before relying on this pass.
if ()
@RULES
_xNIL <-
_xWILD [min=3 matches=(_)] ### (1)
@@
|
@CODE
# rmchildren(findroot());
# Locate icd_codes/code2words/procedures in the KB, dump that subtree,
# then walk straight down from G("level_1_con"), logging each concept's
# name, its first child and next sibling, and its attribute count to
# testing_nav.txt. The walk descends via down() only, so it follows a
# single child chain and stops at the first leaf.
L("codes") = findconcept(findroot(), "icd_codes");
L("cod") = findconcept(L("codes"), "code2words");
L("proc") = findconcept(L("cod"), "procedures");
DisplayKB(L("proc"), 1);
L("iter") = G("level_1_con");
while(L("iter")) {
"testing_nav.txt" << conceptname(L("iter")) << "\n";
if (down(L("iter"))) "testing_nav.txt" << "\tdown " << " = " << conceptname(down(L("iter"))) << "\n";
if (next(L("iter"))) "testing_nav.txt" << "\nnext " << " = " << conceptname(next(L("iter"))) << "\n";
L("attrs") = findattrs(L("iter"));
if (L("attrs")) {
"testing_nav.txt" << "\tattrs = " << arraylength(L("attrs")) << "\n";
}
else {
"testing_nav.txt" << "\tNo attributes\n";
}
# Descend to the first child for the next iteration.
L("iter") = down(L("iter"));
}
@@CODE |
tokenize nil # Gen: Convert input to token list.
pat kb # comment
pat tags # comment
pat items # comment
pat count # comment
pat names # comment
pat output # comment
|
@CODE
# Open en.dict under the analyzer's KB path. The first input file of a
# batch truncates the file; later files open in append ("app") mode so
# the dictionary accumulates across the batch.
L("filename") = G("$kbpath") + "\\en.dict";
if (G("$isfirstfile")) {
G("dict") = openfile(L("filename"));
} else {
G("dict") = openfile(L("filename"),"app");
}
@@CODE |
@NODES _LINE
@POST
# Number each _item within the line and capture its value plus the
# header name for its column position.
if (X("num")) {
# NOTE(review): assigns X("items") while also post-incrementing it —
# confirm the intended ordering of the ++ side effect in NLP++.
N("num",1) = X("items")++;
if (N("$text",1) != ",") {
# Only record a value for non-string items.
if (!N("str")) {
N("value") = N("$text",1);
}
N("header") = NthHeader(N("num"));
if (N("header") == "ClassKind")
X("ClassKind") = N("$text",1);
}
}
@RULES
_xNIL <-
_item ### (1)
@@
|
# To demonstrate nextattr, we first need to build a KB:
@CODE
# if you find apples in the concept hierarchy
if (findconcept(findroot(),"apple"))
# kill them (to start fresh)
rmconcept(findconcept(findroot(),"apple"));
# Create the apple concept
G("apple") = makeconcept(findroot(),"apple");
# Apples have color
addstrval(G("apple"),"have","color");
# Apple's color is red
addstrval(G("apple"),"color","red");
# Apple's weigh 3 something or others
addnumval(G("apple"),"weight",3);
# Apple's color is also green and yellow
addstrval(G("apple"),"color","green and yellow");
The code creates a KB like this:
|
# Compute base 10 logarithm of 3.1 and print to "output.txt"
@CODE
"output.txt" << logten(3.1) << "\n";
@@CODE
Outputs something like:
.49136 |
@NODES _ROOT
@RULES
_interjection <-
hello ### (1)
@@
|
# In CHECK Region and CODE Region, succeed without executing further code
@CHECK
succeed();
@@CHECK |
# See if the given analyzer is loaded into memory
@CODE
L("ana present")
= findana("TAIParse"); |
@NODES _ROOT
@PRE
<2,2> varz("NL");
@RULES
_LINE <-
_xSTART ### (1)
_xWILD [plus] ### (2)
@@
@PRE
<1,1> var("NL");
<2,2> varz("NL");
@RULES
_LINE <-
_xWILD [one] ### (1)
_xWILD [star] ### (2)
@@
|
@NODES _LINE
@RULES
_skill <- _xWILD [s one matches=(
assembly
basic
cgi
dec
dos
eurolang
excel
fortran
galaxy
graphic
graphics
html
interface
interfaces
internet
java
javascript
lisp
mac
macintosh
motif
netscape
next
pascal
pc
perl
prolog
smalltalk
sql
sun
sunview
trados
unix
vax
webmaster
windowing
word
workstation
writing
)] @@
|
@NODES _textZone
@RULES
_BLANKLINE <-
_xWILD [min=0 max=0 matches=(\ \t \r)] ### (1)
\n ### (2)
@@
_LINE <-
_xWILD [min=0 max=0 fails=(\r \n)] ### (1)
_xWILD [one match=(\n _xEND)] ### (2)
@@
|
@CODE
G("labels") = findconcept(findroot(),"labels");
if (!G("labels")) G("labels") = makeconcept(findroot(),"labels");
rmchildren(G("labels"));
@@CODE |
@CODE
prlit("output.xml","</History>\n");
@@CODE
@RULES
_xNIL <- _xNIL @@ |
@NODES _ROOT
@RULES
_equation <-
_beginEq [s] ### (1)
_xWILD [s] ### (2)
_endEq [s] ### (3)
@@
@POST
S("equation") = N("$text",2);
single();
@RULES
_equationInline <-
\$ [s] ### (1)
_xWILD [s] ### (2)
\$ [s] ### (3)
@@ |
@CODE
L("hello") = 0;
@@CODE
@NODES _TEXTZONE
# noncount or mass nouns. Need to build a full listing.
@POST
N("mass") = 1;
@RULES
_xNIL <- _xWILD [s one match=(
advice
AIDS
air
aluminum
anger
applause
attire
bacon
beauty
beef
beer
behavior
biology
blood
boating
bread
broccoli
butter
cabbage
cake
camping
cancer
candy
cash
cauliflower
caution
celery
cement
cereal
chalk
chaos
cheese
chemistry
chicken
Chinese
chocolate
cloth
clothing
coal
coffee
#common
conduct
confidence
copper
corn
cotton
courage
cream
crime
dancing
despair
detergent
difficulty # have difficulty...
dirt
dust
economics
education
electricity
email
employment
engineering
English
entertainment
equipment
experience
fish
flour
fog
food
fruit
fun
furniture
gas
gasoline
glass
gold
grief
hail
hair
happiness
harm
hate
health
heat
help
hepatitis
history
hockey
homework
honesty
honey
hospitality
humidity
hydrogen
ice
information
insurance
intelligence
iron
jewelry
jogging
juice
junk
justice
knowledge
leather
leisure
lemonade
lettuce
leukemia
lightning
linguistics
lotion
love
luggage
machinery
mail
marble
margarine
mathematics
meat
melancholy
merchandise
metal
microbiology
milk
money
mononucleosis
motion
music
nausea
net # biz domain.
news
nonsense
nylon
oil
optimism
output
oxygen
paper
pasta
patience
pay
peace
pepper
permission
pessimism
petroleum
photography
plastic
poetry
polio
pollution
polyester
porcelain
poverty
praise
produce
progress
propaganda
property
publicity
rain
reading
remorse
research
rice
rope
running
safety
salt
sand
satisfaction
scenery
science
sense
shampoo
shopping
sickness
silk
silver
skating
skiing
sleet
smog
smoke
smoking
snow
soap
soccer
softness
soil
soup
Spanish
speed
spinach
steam
steel
stock
string
stuff
sugar
sunshine
swimming
thread
thunder
traffic
transportation
trash
truth
unemployment
vinegar
violence
walking
warmth
water
wealth
weather
welfare
wheat
wind
wine
wood
wool
work
yogurt
)] @@
|
@CODE
G("dict") = findconcept(findroot(),"dict");
if (!G("dict")) G("dict") = makeconcept(findroot(),"dict");
rmchildren(G("dict"));
@@CODE
|
@CODE
L("hello") = 0;
if (!G("hilite")) # 10/25/10 AM.
exitpass(); # 10/25/10 AM.
@@CODE
@PATH _ROOT _TEXTZONE
@POST
noop();
@RULES
_xNIL <-
_xALPHA [plus]
@@
|
# Reduce "fly" to a verb and then a vg (or "verb
@POST
noop();
@RULES
_xNIL <- to
fly [layer=(_verb _vg)] @@ |
@NODES _LINE
@PRE
<1,1> cap();
@RULES
# Ex: Motif
_ProgrammingLanguage <- _xWILD [min=1 max=1 s match=("Motif" "Pascal")] @@
|
@DECL
# Join KB path concept names to form Radlex phrase
# Input: a conceptpath() string — space-separated, quote-wrapped concept
# names. Starts at array index 2 (skipping the root elements), strips the
# surrounding quotes from each name via strpiece(1, len-2), and joins the
# remaining names with single spaces.
GetKBPathString(L("path")) {
L("pathArray") = split(L("path"), " ");
L("len") = arraylength(L("pathArray"));
L("i") = 3;
L("string") = L("pathArray")[2];
L("string") = strpiece(L("string"), 1, strlength(L("string"))-2);
while (L("i") < L("len")) {
L("string") = L("string") + " " +
strpiece(L("pathArray")[L("i")], 1, strlength(L("pathArray")[L("i")])-2);
L("i") = L("i") + 1;
}
return L("string");
}
# Scan the parse-node list for the longest word sequences present in the
# G("phrases") KB subtree; returns an array of the matched phrase strings.
# A phrase is "terminal" in the KB when it has a child named "00000".
GetRadLexTerms(L("node")) {
# Stores phrases encountered
L("completePhrases");
while (L("node")) {
# Get text from pn node
L("text") = pnvar(L("node"), "$text");
# If text is a word, convert to lowercase
if (strisalpha(L("text"))) {
L("text") = strtolower(L("text"));
}
L("term");
L("found") = 0;
# Look up successive words in subtree until we reach leaf or dne
L("KBIter") = findconcept(G("phrases"), L("text"));
L("word") = L("node");
while (L("KBIter") && L("word")) {
# Store term, if it's terminal
if (findconcept(L("KBIter"), "00000")) {
L("path") = conceptpath(L("KBIter"));
L("term") = GetKBPathString(L("path"));
L("found") = 1;
# Remember the last node of the longest match so the outer loop
# resumes after the matched phrase.
L("node") = L("word");
}
# Advance to next word
L("word") = pnnext(L("word"));
# NOTE(review): pnvar/findconcept are called before checking that
# pnnext returned a node — assumes they are safe no-ops on nil; confirm.
L("wordText") = pnvar(L("word"), "$text");
L("KBIter") = findconcept(L("KBIter"), L("wordText"));
}
# Add longest term to list of terms found
if (L("found")) {
# Empty-array idiom: an unset variable still reports arraylength 1,
# so the first term replaces the variable rather than indexing it.
if (arraylength(L("completePhrases")) == 1 && !L("completePhrases")) {
L("completePhrases") = L("term");
}
else {
L("completePhrases")[arraylength(L("completePhrases"))] = L("term");
}
}
L("node") = pnnext(L("node"));
}
return L("completePhrases");
}
@@DECL
|
@NODES _ROOT
@POST
"indexList.txt" << N("words", 1) << "\n";
noop();
@RULES
_xNIL <-
_labelEntry
@@ |
@CODE
G("vocab") << "\n";
closefile(G("debug"));
closefile(G("vocab"));
@@CODE |
@PATH _ROOT _LINE _Caps
# But how to subtype headers!?
# Probably best is: decide you have a header first, then
# count up the subtype confidences...
@POST
++X("headerroots");
if (N("$end"))
++X("end headerroot");
@RULES
_xNIL <- _headerWord [s] @@
_xNIL <- _header [s] @@ # Just in case this is allowed.
@POST
++X("headermods");
if (N("$end"))
++X("end headermod");
@RULES
_xNIL <- _headerMod [s] @@
|
# Extract a normalized directory path from a URL
@CODE
"output.txt"
<< urlbase("http://www.x.com/pqr/a.txt") << "\n";
"output.txt"
<< urlbase("http://www.abc.com/") << "\n";
"output.txt"
<< urlbase("http://www.x.com/pqr/") << "\n";
prints out:
http://www.x.com/pqr
http://www.abc.com
http://www.x.com/pqr |
@NODES _LINE
@RULES
_comma <-
\, ### (1)
@@
|
@PATH _ROOT _doctypedecl _AttlistDecl
@RULES
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("REQUIRED" "IMPLIED")] ### (2)
@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s min=1 max=1 matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_PEReference [one] ### (4)
@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_PubidLiteral [one] ### (4)
@@
_DefaultDecl <-
\# [one] ### (1)
_xWILD [s one matches=("FIXED")] ### (2)
_whiteSpace [opt] ### (3)
_SystemLiteral [one] ### (4)
@@
_DefaultDecl <-
_PubidLiteral [one] ### (1)
@@
_DefaultDecl <-
_SystemLiteral [one] ### (1)
@@
_AttType <-
_xWILD [s one matches=("CDATA" "ID" "IDREF" "IDREFS" "ENTITY" "ENTITIES" "NMTOKEN" "NMTOKENS")] ### (1)
@@
_EnumNameElement <-
_whiteSpace [opt] ### (1)
\| [one] ### (2)
_whiteSpace [opt] ### (3)
_xWILD [s one matches=("_xALPHA" "_" ":")] ### (4)
_xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (5)
@@
_EnumElement <-
_whiteSpace [opt] ### (1)
\| [one] ### (2)
_whiteSpace [opt] ### (3)
_xWILD [s plus matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (4)
@@
|
@NODES _ROOT
@RULES
_zap <-
_xWILD [plus fail=(_word _textZone)] ### (1)
@@
|
@NODES _ROOT
@POST
G("verb") = N("$text",5);
"debug.txt" << "verb: " << G("verb") << "\n";
singler(5,5);
@RULES
_verb <-
\< ### (1)
title ### (2)
\> ### (3)
Conjugation ### (4)
_xALPHA ### (5)
@@
|
@CODE
G("output") = G("$inputhead") + ".kbb";
G("kb") = makeconcept(findroot(),"kb");
@@CODE |
@PATH _ROOT _LINE _Caps
@POST
++X("humannames");
if (N("$end"))
++X("end humanname");
if (N("$start"))
++X("start humanname");
@RULES
_xNIL <- _humanNamepart [s] @@
|
@NODES _LINE
@POST
# Tally per-line word statistics (total words, capitalized words,
# all-uppercase words, prose-like words) and record each lowercased
# word in the global words hierarchy with an incremented count.
X("words") = X("words") + 1;
if (strlength(N("$text")) > 1) {
# Fixed: the enclosing condition already guarantees strlength > 1,
# so the redundant re-tests inside each branch were removed.
if (striscaps(N("$text"))) {
X("caps") = X("caps") + 1;
}
else if (strisupper(N("$text"))) {
X("upper") = X("upper") + 1;
}
}
# A word counts as prose if it is common, or lowercase and spellable.
if (N("common") || (N("lower") && spellword(N("$text")))) {
X("prose")++;
}
L("con") = getconcept(G("words"),strtolower(N("$text")));
IncrementCount(L("con"),"count");
@RULES
_xNIL <-
_xALPHA ### (1)
@@
|
@NODES _ROOT
@RULES
# Match a complete XML comment: start marker, any tokens that are neither
# a comment end nor a double hyphen, then the end marker.
# Fixed: "_CommentEnd" was listed twice in the fail set; the duplicate
# was removed (no behavioral effect, but it was clearly unintended).
_Comment <-
_CommentStart [one] ### (1)
_xWILD [star fail=("_CommentEnd" "_DoubleHyphen")] ### (2)
_CommentEnd [one] ### (3)
@@
_ExternalID <-
_xWILD [one matches=("PUBLIC")] ### (1)
_whiteSpace [opt] ### (2)
_PubidLiteral [one] ### (3)
_whiteSpace [opt] ### (4)
_PubidLiteral [one] ### (5)
@@
_ExternalID <-
_xWILD [one matches=("PUBLIC")] ### (1)
_whiteSpace [opt] ### (2)
_PubidLiteral [one] ### (3)
_whiteSpace [opt] ### (4)
_SystemLiteral [one] ### (5)
@@
_ExternalID <-
_xWILD [one matches=("SYSTEM")] ### (1)
_whiteSpace [opt] ### (2)
_PubidLiteral [one] ### (3)
@@
_ExternalID <-
_xWILD [one matches=("SYSTEM")] ### (1)
_whiteSpace [opt] ### (2)
_SystemLiteral [one] ### (3)
@@
@@RULES
@POST
S("ElementName") = N("ElementName",1) ;
single() ;
@@POST
@RULES
_AttlistDecl [unsealed] <-
_AttlistDeclStart [one] ### (1)
_whiteSpace [opt] ### (2)
_xWILD [plus fail=("_EndTag")] ### (3)
_whiteSpace [opt] ### (4)
_EndTag [one] ### (5)
@@
_PCDataStart <-
\( [one] ### (1)
_whiteSpace [opt] ### (2)
\# [one] ### (3)
_xALPHA [s one matches=("#PCDATA")] ### (4)
@@
_EntityDecl <-
_EntityDeclStart [one] ### (1)
_whiteSpace [opt] ### (2)
_xWILD [plus fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "EndDocType")] ### (3)
_whiteSpace [opt] ### (4)
_EndTag [one] ### (5)
@@
_EntityDecl <-
_EntityDeclStart [one] ### (1)
_whiteSpace [opt] ### (2)
_xWILD [min=1 max=0 fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "EndDocType")] ### (3)
_whiteSpace [opt] ### (4)
_CommentEnd [one] ### (5)
@@
_EntityDecl <-
_EntityDeclStart [one] ### (1)
_whiteSpace [opt] ### (2)
_xWILD [min=1 max=0 fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "EndDocType")] ### (3)
_whiteSpace [opt] ### (4)
_EndEmptyTag [one] ### (5)
@@
_EntityDecl <-
_EntityDeclStart [one] ### (1)
_whiteSpace [opt] ### (2)
_xWILD [min=1 max=0 fail=("_EndTag" "_CommentEnd" "_EndEmptyTag" "EndDocType")] ### (3)
_whiteSpace [opt] ### (4)
_EndDocType [one] ### (5)
@@
@@RULES
|
@NODES _ROOT
@POST
# Record every alphabetic token as a child concept of the kb concept.
# Fixed: the makeconcept statement was missing its terminating
# semicolon, unlike every other @POST statement in this file.
makeconcept(G("kb"),N("$text",1));
@RULES
_xNIL <-
_xALPHA ### (1)
@@
|
# Fetch concept given its string path
if (findconcept(findroot(),"parent"))
rmconcept(findconcept(findroot(),"parent"));
G("childConcept") = makeconcept(makeconcept(findroot(),"parent"),"child");
G("the path") = conceptpath(G("childConcept"));
"output.txt" << "The path is: " << G("the path") << "\n";
G("the concept") = pathconcept(G("the path"));
"output.txt" << "The concept is: " << conceptname(G("the concept")) << "\n";
G("garbage") = pathconcept("foo bar");
"output.txt" << "garbage is: " << G("garbage"); |
@NODES _LINE
@POST
X("rank") = num(N("$text"));
@RULES
_xNIL <-
_rank ### (1)
@@
@POST
X("country") = N("$text");
@RULES
_xNIL <-
_country ### (1)
@@
@POST
X("pop") = num(strsubst(N("$text"),",",0));
@RULES
_xNIL <-
_pop ### (1)
@@
|
@NODES _ROOT
@POST
# Classify a line as a header: level 1 when every word is capitalized,
# level 2 when every word is uppercase and the line has at most 3 words.
# single() reduces the matched _LINE to _header only in those cases.
if (num(N("words")) && num(N("words")) == num(N("caps"))) {
N("level") = 1;
single();
}
else if (num(N("words")) && num(N("words")) == num(N("upper")) && N("words") <= 3) {
N("level") = 2;
single();
}
@RULES
_header <-
_LINE ### (1)
@@
|
@CODE
G("numbers") = getconcept(findroot(),"numbers");
rmchildren(G("numbers"));
@@CODE |
@NODES _LINE
@PRE
<1,1> cap();
@RULES
# Ex: International
_companyModroot <- _xWILD [min=1 max=1 s match=("International" "Associates" "Bookstore" "Brands" "Casinos" "Centers" "Chemical" "Communications" "Companies"
"Consulting" "Devices" "Dynamics" "Electric" "Electronic" "Electronics" "Energy" "Engineering" "Enterprises" "Entertainment"
"Equipment" "Financial" "Foods" "Gaming" "Gas" "General" "Global" "Graphics" "Group" "Grupo"
"Health" "Healthcare" "Holding" "Holdings" "Homes" "Hotels" "Imaging" "Industrial" "Industries" "Instruments"
"Insurance" "Interactive" "Airlines" "Laboratories" "Manufacturing" "Market" "Marketing" "Markets" "Materials" "Medical"
"Micro" "Microsystems" "Mines" "Mining" "Networks" "Online" "Partners" "Petroleum" "Pharmaceutical" "Pharmaceuticals"
"Pharmacy" "Products" "Research" "Resources" "Restaurant" "Restaurants" "Semiconductor" "Service" "Services" "Software"
"Solutions" "Store" "Stores" "Supermarket" "Supermarkets" "Systems" "Tech" "Technologies" "Technology" "Telecom"
"Telecommunications" "Therapeutics" "United" "Universal")] @@
|
@NODES _ROOT
@POST
# Look up each alphanumeric token (lowercased) in the G("words")
# hierarchy; for every code concept filed beneath the word, record the
# match under G("matches") as code -> word. Progress goes to debug.txt.
L("word") = strtolower(N("$text",1));
L("word con") = findconcept(G("words"),L("word"));
"debug.txt" << "word: " << L("word") << "\n";
if (L("word con")) {
"debug.txt" << "Found word: " << L("word") << "\n";
L("code") = down(L("word con"));
while (L("code")) {
L("con") = AddUniqueCon(G("matches"),conceptname(L("code")));
AddUniqueCon(L("con"),L("word"));
L("code") = next(L("code"));
}
}
@RULES
_xNIL <-
_xWILD [one match=(_xALPHA _xNUM)] ### (1)
@@
|
@NODES _bodyZone
@RULES
_trZone <-
_tr ### (1)
_xWILD [fail=(_trClose)] ### (2)
_trClose ### (3)
@@ |
@PATH _ROOT _contactZone _LINE _Caps # 12/25/99 AM.
# Hand building generalized human name. #
@PRE
<1,1> cap();
<1,1> length(1);
<4,4> cap();
<4,4> length(1);
<7,7> cap();
@CHECK
if (!X("humanNameCandidate",3)) fail(); # Line is likely candidate.
@POST
# Rename the cap phrase node.
xrename("_humanName");
# noop()
@RULES
_xNIL <-
_xALPHA [s layer=(_firstName)]
\. [s opt]
_xBLANK [s opt]
_xALPHA [s layer=(_middleName)] # middle initial
\. [s opt]
_xBLANK [s opt]
_xALPHA [s layer=(_lastName)]
@@
@PRE
<1,1> cap();
<3,3> cap();
<3,3> length(1);
<6,6> cap();
@CHECK
if (!X("humanNameCandidate",3)) fail(); # Line is likely candidate.
@POST
xrename("_humanName");
# noop()
@RULES
_xNIL <-
_xALPHA [s layer=(_firstName)]
_xBLANK [s opt]
_xALPHA [s layer=(_middleName)] # middle initial
\. [s opt]
_xBLANK [s opt]
_xALPHA [s layer=(_lastName)]
@@
@PRE
<1,1> cap();
<5,5> cap();
@CHECK
if (!X("humanNameCandidate",3)) fail(); # Line is likely candidate.
@POST
group(3,5, "_lastName");
xrename("_humanName");
# noop()
@RULES
_xNIL <-
_xALPHA [s layer=(_firstName)]
_xBLANK [s opt]
_xWILD [s one match=(de du la da)]
_xBLANK [s opt]
_xALPHA [s]
@@
@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();
@CHECK
if (!X("humanNameCandidate",3)) fail();
@POST
xrename("_humanName");
# noop()
@RULES
_xNIL <-
_xALPHA [s layer=(_firstName)]
_xWHITE [s star]
_xALPHA [s layer=(_middleName)]
_xWHITE [s star]
_xALPHA [s layer=(_lastName)]
@@
@PRE
<1,1> cap();
<5,5> cap();
@CHECK
if (!X("humanNameCandidate",3)) fail();
@POST
xrename("_humanName");
# noop()
@RULES
_xNIL <-
_xALPHA [s layer=(_firstName)]
_xWHITE [s star]
_xALPHA [s layer=(_lastName)]
@@
|
@MULTI _ROOT
@POST
S("num") = N("num", 1);
excise(1,1);
single();
@RULES
_item <-
_listItem
_xWILD [fails=(_listItem _xEND)]
@@
|
# Match a _verb node optionally preceded by _modal, _have, or _be nodes, reducing to _vgroup
@RULES
_vgroup <- _modal [optional]
_have [optional] _be [optional]
_verb @@ |
# Traverse caps in experience instance, assigning conf. as
# job title, company name.
# Compute a score for each capitalized phrase based on its
# absolute confidence plus factor based on distance from anchor.
# Retain the maximum with the experience instance.
@PATH _ROOT _experienceZone _experienceInstance _LINE
@POST
# Convert schools in the education zone to companies, ie,
# places of employment.
if (N("hi class") == "school")
{
N("hi class") = "company";
N("company conf") = N("school conf");
}
# If cap is on same line as anchor, add a bonus.
# NEED TO GET ANCHOR LINE NO. FROM INSTANCE.
N("diff") = 0; # Initialize temp variable!
if (X("anchor lineno",3) == X("lineno"))
{
N("prox penalty") = 0;
# PENALTY BECOMES A BONUS FOR WORTHY GUYS.
if (N("job conf") >= G("threshold"))
N("job conf") = N("job conf") %% 85;
if (N("company conf") >= G("threshold"))
N("company conf") = N("company conf") %% 85;
}
# Reduce confidence with greater distance.
# Need absolute value here.
# Need += here!
# Need to iterate here.
else if (X("anchor lineno",3) >= X("lineno"))
N("diff") = X("anchor lineno",3) - X("lineno");
else # if <=
N("diff") = X("lineno") - X("anchor lineno",3);
N("print1") = N("diff");
N("diff") = N("diff") * 25;
N("printy") = N("diff");
if (N("diff") > 100)
N("diff") = 99;
N("prox penalty") = N("prox penalty") %% N("diff");
# Additional penalty for ambiguity.
if (N("ambigs"))
N("prox penalty") = N("prox penalty") %% ((N("ambigs") - 1) * 20);
# Compute total confidences.
N("job conf tot") = N("job conf",1) %% -N("prox penalty");
N("company conf tot") = N("company conf") %% -N("prox penalty");
# Now find the maximum for each experience instance.
# (Note: no good way to initialize context vars yet, till
# NLP++ modified to have code regions with @PATH.)
if (N("job conf tot",1) > N("company conf tot",1)
&& N("job conf tot",1) > X("job conf hi",3))
X("job conf hi",3) = N("job conf tot",1);
else if (N("company conf tot",1) > X("company conf hi",3))
X("company conf hi",3) = N("company conf tot",1);
#noop()
@RULES
_xNIL <- _Caps @@
|
@PATH _ROOT _LINE _countryText
@POST
L("text") = strtolower(N("$text",2));
if (L("text") != "the") {
X("full",2) = L("text");
}
single();
@RULES
_parens <-
\( ### (1)
_xWILD [fail=(\))] ### (2)
\) ### (3)
@@
|
@CODE
L("con") = getconcept(findroot(), "kb");
# L("con_3") = makeconcept(L("con"), "con_3");
# L("def_con_3") = makeconcept(L("con_3"), "def_con_3");
# addstrval(L("def_con_3"), "keyword1", "a system test");
# addstrval(L("def_con_3"), "keyword2", "system");
# addstrval(L("def_con_3"), "keyword3", "system test");
DisplayKB(L("con"), 1);
# SaveKB("mykb.kbb",G("kb"),2);
@@CODE |
@DECL
# Create a concept under L("sent") named after L("node")'s node name with
# its first character dropped (strpiece(1, len-1) skips index 0 — i.e. the
# leading underscore of names like "_verb"); returns the new concept.
AddOrder(L("node"),L("sent")) {
L("name") = pnname(L("node"));
# NOTE(review): L("text") is fetched but never used — confirm whether
# it was meant to be stored on the new concept.
L("text") = pnvar(L("node"),"$text");
L("conname") = strpiece(L("name"),1,strlength(L("name"))-1);
L("con") = makeconcept(L("sent"),L("conname"));
return L("con");
}
# Build an abbreviation from L("word") by concatenating the first
# character of the string plus each character that follows a space
# (i.e. the initial letter of every space-separated word), logging
# each step to abbrev.txt. Returns the abbreviation string.
GetAbbreviation(L("word")) {
"abbrev.txt" << L("word");
L("len") = strlength(L("word"));
L("i") = 0;
while (L("i") < L("len")) {
L("c") = strpiece(L("word"),L("i"),L("i"));
"abbrev.txt" << " " << L("c");
# Take this character if it starts the string or follows a space.
if (L("last c") == " " || L("i") == 0) {
L("abbrev") = L("abbrev") + L("c");
}
L("last c") = L("c");
L("i")++;
}
"abbrev.txt" << "\n ==> " << L("abbrev") << "\n";
return L("abbrev");
}
@@DECL |
@PATH _ROOT _section
@POST
L("icdDiagnoses") = GetICDTerms(N(2), "diagnoses");
L("icdProcedures") = GetICDTerms(N(2), "procedures");
if(L("icdDiagnoses")) {
L("count") = 0;
while (L("count") < arraylength(L("icdDiagnoses"))) {
"icdDiagnoses.txt" << "\t" << L("icdDiagnoses")[L("count")] << "\n";
L("count") = L("count") + 1;
}
}
if(L("icdProcedures")) {
L("count") = 0;
while (L("count") < arraylength(L("icdProcedures"))) {
"icdProcedures.txt" << "\t" << L("icdProcedures")[L("count")] << "\n";
L("count") = L("count") + 1;
}
}
@RULES
_xNIL <-
_xWILD [one matches=(_xSTART _patientID _time)] ### (1)
_xWILD [one match=(_xALPHA _xNUM)] ### (2)
@@ |
@CODE
# Change the passed argument to the concept you are wanting to Dump
DisplayKB(G("radlex"),1);
@@CODE
|
@PATH _ROOT _paragraph _sentence
@PRE
<3,3> varz("name");
@POST
S("title") = N("$text",1);
if (N(3))
S("agency") = N("$text",3);
group(2,3,"_agency");
single();
@RULES
_title <-
_title ### (1)
for ### (2)
_xCAP [plus] ### (3)
@@
|
#################################################
# FILE: HTML Text Functions.pat #
# SUBJ: Distinguish between formatting elements #
# in HTML tags and other tags #
# AUTH: Paul Deane #
# CREATED: 07/Jun/01
# DATE OF THIS VERSION: 31/Aug/01 #
# Copyright
#################################################
@NODES _ROOT
##########################
# HTML horizontal rules #
##########################
@CHECK
if ( N("horizBreak",2) == 1)
succeed() ;
else fail();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_horizRule <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
@@RULES
@RULES
\& <-
\& [one] ### (1)
amp [one] ### (2)
\; [one] ### (3)
@@
@@RULES
#######################################################
# HTML paragraph break indicated by double linebreak #
#######################################################
@CHECK
if ( N("lineBreak",2)==1 &&
N("lineBreak",4)==1 )
succeed() ;
else fail();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_paraBreak <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
_HTMLOpenTag ### (4)
_xWHITE [star] ### (5)
@@
@@RULES
###########################
# HTML paragraph breaks #
###########################
@CHECK
if ( N("paraBreak",2)==1 &&
N("paraBreak",4)==1 )
succeed() ;
else fail();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_paraBreak <-
_xWHITE [star] ### (1)
_HTMLEndTag [trig] ### (2)
_xWHITE [star] ### (3)
_HTMLOpenTag ### (4)
_xWHITE [star] ### (5)
@@
@@RULES
###########################
# HTML paragraph breaks #
###########################
@CHECK
if ( N("paraBreak",2)==1)
succeed() ;
else fail();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_paraBreak <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
_paraBreak <-
_xWHITE [star] ### (1)
_HTMLEndTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
@@RULES
#####################################################
# Section breaks. #
# #
# DIV tags are one major indication of section #
# information in an HTML document, as are the start #
# of a heading. #
#####################################################
@CHECK
if (N("sectionBreak",2)==1)
succeed() ;
else fail();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_sectionBreak <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
@@RULES
################################################
# Eliminate repeated whitespace, which isn't #
# significant in an HTML file #
################################################
@POST
single();
@@POST
@RULES
_whiteSpace <-
_xWHITE [star] ### (1)
\& [one] ### (2)
nbsp [one] ### (3)
\; [one] ### (4)
_xWHITE [star] ### (5)
@@
_whiteSpace <-
_xWHITE [one] ### (1)
_xWHITE [star] ### (2)
@@
@@RULES
#####################################################
# Mapping of <BR> and similar tags to line breaks #
# for compatibility with non-html handling of text #
#####################################################
@CHECK
if (N("lineBreak",2) != 1)
fail();
else succeed();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_lineBreak <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
@@RULES
#####################################################
# Mapping of <IMG> and similar tags to line breaks #
# for compatibility with non-html handling of text #
#####################################################
@CHECK
if (strequal(N("tagName",2),"IMG") != 1 &&
strequal(N("tagName",2),"AREA") != 1
)
fail();
else succeed();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_imageLink <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
@@RULES
#####################################################
# Mapping of <META> and similar tags to line breaks #
# for compatibility with non-html handling of text #
#####################################################
@CHECK
if (strequal(N("tagName",2),"META") != 1 &&
strequal(N("tagName",2),"BASE") != 1
) fail();
else succeed();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_metaInfo <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
@@RULES
#####################################################
# Mapping of <META> and similar tags to line breaks #
# for compatibility with non-html handling of text #
#####################################################
@CHECK
if (strequal(N("tagName",2),"INPUT") != 1
) fail();
else succeed();
@@CHECK
@POST
pncopyvars(2);
single();
@@POST
@RULES
_formInfo <-
_xWHITE [star] ### (1)
_HTMLOpenTag [trig] ### (2)
_xWHITE [star] ### (3)
@@
@@RULES
@CHECK
G("tagName") = N("tagName",1);
if (!G("tagName"))
fail();
if ( !strequalnocase(G("tagName"),"title") &&
!strequalnocase(G("tagName"),"h1") &&
!strequalnocase(G("tagName"),"h2") &&
!strequalnocase(G("tagName"),"h3") &&
!strequalnocase(G("tagName"),"h4") &&
!strequalnocase(G("tagName"),"h5") &&
!strequalnocase(G("tagName"),"h6")
) fail();
@@CHECK
@POST
pncopyvars(1);
single();
@@POST
@RULES
_startElement <-
_HTMLOpenTag [one] ### (1)
@@
@CHECK
G("tagName") = N("tagName",1);
if (!G("tagName"))
fail();
if ( !strequalnocase(G("tagName"),"title") &&
!strequalnocase(G("tagName"),"h1") &&
!strequalnocase(G("tagName"),"h2") &&
!strequalnocase(G("tagName"),"h3") &&
!strequalnocase(G("tagName"),"h4") &&
!strequalnocase(G("tagName"),"h5") &&
!strequalnocase(G("tagName"),"h6")
) fail();
@@CHECK
@POST
pncopyvars(1);
single();
@@POST
@RULES
_endElement <-
_HTMLEndTag [one] ### (1)
@@
@CHECK
G("tagName") = N("tagName",1);
if (!G("tagName"))
fail();
if ( !strequalnocase(G("tagName"),"b") &&
!strequalnocase(G("tagName"),"i") &&
!strequalnocase(G("tagName"),"strong") &&
!strequalnocase(G("tagName"),"font") &&
!strequalnocase(G("tagName"),"center") &&
!strequalnocase(G("tagName"),"em")
) fail();
@@CHECK
@POST
pncopyvars(1);
single();
@@POST
@RULES
_startFormatElement <-
_HTMLOpenTag [one] ### (1)
@@
_endFormatElement <-
_HTMLEndTag [one] ### (1)
@@
|
# Find dictionary concept wordString in the dictionary hierarchy of KB, return handle to concept
@CODE
# Demo of the dictionary API: addword, wordindex, findwordpath, wordpath,
# dictfindword, rmword. Output goes to output.txt.
"output.txt" << "1 " << conceptname(addword("hello")) << "\n";
"output.txt" << "2 " << conceptname(wordindex("hello")) << "\n";
"output.txt" << "3 " << findwordpath("hello") << "\n";
"output.txt" << "4 " << findwordpath("olleh") << "\n";
"output.txt" << "5 " << wordpath("foobaz") << "\n";
# Fixed: the trailing newline literal was written as \n"; without its
# opening quote, leaving the string literal unbalanced.
"output.txt" << "6 " << conceptname(dictfindword("hello")) << "\n";
rmword("foobaz");
Prints out:
1 hello
2 he
3 "concept" "sys" "dict" "a" "h" "he" "hello"
4
5 "concept" "sys" "dict" "a" "f" "fo" "foobaz"
6 hello |
@DECL
###############################################
# General functions
###############################################
# Return the child of "parent" named "name", creating it first if absent.
AddUniqueCon(L("parent"),L("name")) {
L("existing") = findconcept(L("parent"),L("name"));
if (L("existing"))
return L("existing");
return makeconcept(L("parent"),L("name"));
}
# Add string "value" to attribute "attr" of "concept" unless that exact
# string is already one of the attribute's values.  Returns 1 if added.
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("str") = getstrval(L("val"));
if (L("str") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addstrval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Numeric twin of AddUniqueStr.  NOTE: a zero "value" is treated as
# absent and never added.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Concept-valued twin: uniqueness is decided by comparing full KB paths.
# Also traces each comparison to unique.txt.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
# Build (or reuse) a concept chain under "parent" from a space-separated,
# quoted path string (as produced by conceptpath), skipping the "concept"
# root token.  Returns the deepest concept.
PathToConcept(L("parent"),L("hier")) {
L("cons") = split(L("hier")," ");
L("i") = 0;
L("con") = L("parent");
while (L("cons")[L("i")]) {
L("c") = L("cons")[L("i")];
L("name") = strsubst(L("c"),"\"",0);
if (L("name") != "concept")
L("con") = AddUniqueCon(L("con"),L("name"));
L("i")++;
}
return L("con");
}
# Copy string attribute "attr" from one concept to another, but only
# when the source has a value and the target does not.
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
# As CopyAttr, but the source and target attribute names may differ.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
# Concept-valued variant of CopyAttr.
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
# Return the value list of attribute "attr" on "con", or 0 if the
# attribute does not exist.
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
# Count the values of attribute handle "attr".
# Fixed: the original fetched the value list into L("vals") but looped on
# L("con"), which was never assigned, so the loop never ran and the
# uninitialized count was returned.  Also initialize the count to 0.
ValCount(L("attr")) {
L("count") = 0;
L("val") = attrvals(L("attr"));
while (L("val")) {
L("count")++;
L("val") = nextval(L("val"));
}
return L("count");
}
# Return the last immediate child of "parent" (0 if childless, since
# L("last") is never assigned in that case).
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
# Create a child of "con" with a unique numbered name derived from
# "count name" (e.g. "item" -> "item1", "item2", ...).
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# Bump numeric attribute "countname" on "con" by one (creating it at 1)
# and return the new count.
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
# Produce the next numbered name for root "root" under "con".
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip a trailing run of digits from "name"; returns 0 when the whole
# name is digits.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functins
###############################################
# Dump the subtree rooted at "con" to <app>/kb/<file>.kb, logging
# success or failure to kb.txt.
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# Load <app>/kb/<filename>.kb into the KB, logging the outcome.
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << "  Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << "  Taken FAILED: " << L("path") << "\n";
}
}
# Return the number of immediate children of "con".
ChildCount(L("con")) {
L("n") = 0;
L("kid") = down(L("con"));
while (L("kid")) {
L("n") = L("n") + 1;
L("kid") = next(L("kid"));
}
return L("n");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
###############################################
# display type:
#   0 compact with ellipses on long attr values
#   1 full, more spread out
#   2 compact without ellipses on long attr values
###############################################
# Pretty-print the subtree under "top con" to the per-pass .kbb file.
DisplayKB(L("top con"),L("display type")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("display type"));
L("file") << "\n";
return L("top con");
}
# Write a banner comment to the per-pass .kbb file.
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# Build the output name "anaNNN.kbb" zero-padded from the current pass number.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
# Depth-first print of "parent" and its descendants, one concept per
# line, indented by level; attributes are appended by DisplayAttributes.
DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) {
if (L("level") == 0) {
L("file") << conceptname(L("parent")) << "\n";
}
L("con") = down(L("parent"));
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("display type"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type"));
}
L("con") = next(L("con"));
}
}
# Append "attr=value" pairs for "con" to the display file.  Concept
# values print as KB paths; long strings are elided (with "...") in
# display type 0; multi-value attributes are bracketed.
# NOTE(review): the closing "]" after the value loop is emitted only when
# the last value was a concept (L("con") non-zero), so a multi-valued
# string/number attribute opens "[" but never closes it — verify intent.
DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("display type") == 1 && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
L("count") = ValCount(L("attrs"));
if (L("display type") != 1 && !L("first attr")) {
L("file") << ", ";
}
if (L("display type") == 1) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=";
L("first") = 1;
while (L("vals")) {
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (!L("first"))
L("file") << ",";
else if (L("count") > 1 || L("con"))
L("file") << "[";
if (L("con")) {
if (L("first"))
L("file") << "[";
L("file") << conceptpath(L("con"));
} else if (L("display type") == 0 && strlength(L("val")) > 20) {
# Elide long string values in compact mode.
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
if (DisplayValNeedsQuote(L("val")))
L("file") << "\"";
L("file") << L("val");
if (DisplayValNeedsQuote(L("val")))
L("file") << "\"";
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
if (L("con"))
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Quote values containing spaces or square brackets so the .kbb file
# stays parseable.
DisplayValNeedsQuote(L("str")) {
if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str")))
return 1;
return 0;
}
# Build a string of "num" spaces for indentation.  NLP++ has no empty
# string, so callers must pass num >= 1; the result always contains at
# least one space.
SpacesStr(L("num")) {
L("i") = 1;
L("out") = " ";
while (L("i") < L("num")) {
L("out") = L("out") + " ";
L("i") = L("i") + 1;
}
return L("out");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
# Open the user attribute KB file for appending dictionary entries.
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
# Emit one "ind attr" record tying "word" to an attribute of the given
# type ("str", "num", or "con") in the attrs.kb command format.
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
addword(L("word"));
addword(L("attrName"));
G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
G("attrs") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
G("attrs") << "pst\n" << "\"" << L("value") << "\"";
else if (L("attrType") == "num")
G("attrs") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
G("attrs") << "pcon\n" << conceptpath(L("value"));
G("attrs") << "\nend ind\n\n";
}
# Terminate the attrs.kb command stream and close the file.
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
@@DECL
|
# Debug pass: dump every _Caps and _humanName node found inside contact
# zone lines to ctc.txt for inspection.
@CODE
prlit("ctc.txt", "\n Contact Zone Cap Phrases\n");
prlit("ctc.txt", "----------------------------------\n");
@@CODE
@PATH _ROOT _contactZone _LINE
@POST
ndump("ctc.txt",1);
prlit("ctc.txt", "-------\n");
@RULES
_xNIL <- _Caps @@
_xNIL <- _humanName @@
|
# End-of-analysis pass: sort and display the G("codes") hierarchy.
# The commented-out sections dump the pos dictionary and other KB trees.
@CODE
# L("word") = down(G("pos"));
# while (L("word")) {
#   L("pos") = down(L("word"));
#   L("w") = conceptname(L("word"));
#   while (L("pos")) {
#     "pos.dict" << L("w") << " pos=" << conceptname(L("pos")) << "\n";
#     L("pos") = next(L("pos"));
#   }
#   L("word") = next(L("word"));
# }
# sortchilds(G("lookups"));
# DisplayKB(G("lookups"),0);
sortchilds(G("codes"));
DisplayKB(G("codes"),0);
# sortchilds(G("words"));
# DisplayKB(G("words"),0);
# DisplayKB(G("pos"),0);
@@CODE
# Match one node of inc, ltd, company, or firm, and reduce to _company
# (example rule snippet; [s one] matches a single node, case-insensitive list).
_company <- _xWILD [s one matches=(inc ltd company firm)]
# Dump the entire KB (from the root) to everything.kbb in compact form.
@CODE
SaveKB("everything.kbb",findroot(),2);
@@CODE
# Emit the ISO language KB as two dictionary files keyed by the 2- and
# 3-letter codes, then dump the hierarchy to a .kbb file.
@CODE
sortchilds(G("langs"));
L("file2") = "iso-languages2.dict";
L("file3") = "iso-languages3.dict";
L("lang") = down(G("langs"));
while (L("lang")) {
L("attrs") = DictDisplayAttr(L("lang"),"language");
L("file2") << conceptname(L("lang")) << " iso=2 " << L("attrs") << "\n";
L("file3") << strval(L("lang"),"iso2") << " iso=3 " << L("attrs") << "\n";
# Some languages carry distinct iso2/iso3 codes; emit the extra form.
if (strval(L("lang"),"iso2") != strval(L("lang"),"iso3"))
L("file3") << strval(L("lang"),"iso3") << " iso=4 " << L("attrs") << "\n";
L("lang") = next(L("lang"));
}
SaveKB("iso-languages.kbb",G("langs"),2);
@@CODE
# Placeholder pass: no rules defined yet.
@NODES _ROOT
@RULES
# NLP++ self-parsing grammar: reduce expressions to statements inside
# an _NLPPP region, handle return and empty statements, and flag
# unmatched punctuation as a syntax error.
@NODES _NLPPP
@POST
movesem(2)
single()
@RULES
_IFPART <- _IF _xWILD [s one match=( _EXPR _NUM _FLOAT _STR )] @@
# Simple statements.
@POST
# movesem(1) #
makestmt(1) # 05/11/00 AM.
single()
@RULES
# NOTE: num and str aren't handled as statements, for now.
_STMT <- _xWILD [s one match=( _EXPR _NUM _FLOAT _STR )] \; [s] @@
# FUNCTION RETURN STATEMENT. #
@POST
returnstmt(1,2) # 03/12/02 AM.
single()
@RULES
_STMT <- # 03/07/02 AM.
_RETURN
_xWILD [s opt match=(_EXPR _NUM _FLOAT _STR)]
\; [s]
@@
# EMPTY STATEMENT. #
@RULES
_STMT <- \; [s] @@
# SOME SYNTAX ERROR DETECTION. #
@POST
rfbunmatchedpunct(1) # 05/19/01 AM.
@RULES
_xNIL <- _xWILD [one match=(
\( \) \[ \] \< \>
)] @@
|
@DECL
# Takes PN node and add subsequent tokens to
# KB to form a hierarchy under G("phrases"); the terminal token is
# marked by a child concept named "00000".
AddPhrase(L("node")) {
L("con") = G("phrases");
while (L("node")) {
L("text") = pnvar(L("node"), "$text");
L("con") = AddUniqueCon(L("con"), L("text"));
L("node") = pnnext(L("node"));
# Mark the end of the phrase path.
if (!L("node")) {
makeconcept(L("con"), "00000");
}
}
}
# Takes lexicon term and EID and adds the term's tokens to the
# hierarchical phrase KB under G("terms"); the terminal token is marked
# by a child concept named with the EID.
addConceptPhrase(L("node"), L("eid")) {
# Fixed: the log line referenced L("phrase"), which was never assigned
# and always printed empty; log the node's text instead.
"log.txt" << "Phrase: " << pnvar(L("node"), "$text") << "\tEID: " << L("eid") << "\n";
L("con") = G("terms");
L("node") = pndown(L("node"));
while (L("node")) {
L("text") = pnvar(L("node"), "$text");
L("con") = AddUniqueCon(L("con"), L("text"));
L("node") = pnnext(L("node"));
# Terminal token: record the EID as a leaf concept.
if (!L("node")) {
makeconcept(L("con"), L("eid"));
}
}
}
@@DECL
# Fetch name of given concept.
L("return_str") = conceptname(L("con"));
# Replace the hyphenated "moti-vate" with "motivate"
# (singlezap rebuilds the matched text as the single suggested token).
@POST
singlezap();
@RULES
motivate <- moti \- vate @@
# Tag the 3-letter word "and" as a conjunction / function word.
@NODES _LINE
@PRE
<1,1> length(3);
@RULES
# Ex: and
_posCONJ [layer=(_funWORD )] <- _xWILD [min=1 max=1 s match=("and")] @@
|
# New tokenization handlers: dissolve _tok nodes flagged with "dissolve"
# by splicing their children into the parent.
@CODE
L("hello") = 0;
@@CODE
@NODES _TEXTZONE
# New tokenization handlers.
@CHECK
if (!N("dissolve"))
fail();
@POST
splice(1,1);
@RULES
_xNIL <-
_tok
@@
# Raise the human-name confidence on a _Caps phrase that looks like
# "First M. Last", "First Middle Last", or "First Last", but only when
# no high-confidence name was already found and the base confidence is
# at least 50.  (%% combines confidences.)
@PATH _ROOT _contactZone _LINE _Caps
@CHECK
if (
!X("name found",2) && !G("humannames") # No hi conf names found yet.
&& X("humanname conf") >= 50 # Caps is reasonable to try.
)
succeed();
fail();
@POST
# If fit this pattern, raise confidence a bunch.
X("humanname conf") = X("humanname conf") %% 80;
# noop()
@RULES
_xNIL <-
_xSTART
_xALPHA [s layer=(_firstName)]
\. [s opt]
_xWHITE [s star]
_xALPHA [s layer=(_middleName)]
\. [s]
_xWHITE [s star]
_xALPHA [s layer=(_lastName)]
_xEND
@@
# Same guard; full first/middle/last pattern without periods.
@CHECK
if (
!X("name found",2) && !G("humannames") # No hi conf names found yet.
&& X("humanname conf") >= 50 # Caps is reasonable to try.
)
succeed();
fail();
@POST
# If fit this pattern, raise confidence.
X("humanname conf") = X("humanname conf") %% 65;
# noop()
@RULES
_xNIL <-
_xSTART
_xALPHA [s layer=(_firstName)]
_xWHITE [s star]
_xALPHA [s layer=(_middleName)]
_xWHITE [s star]
_xALPHA [s layer=(_lastName)]
_xEND
@@
# Same guard; plain first/last pattern.
@CHECK
if (
!X("name found",2) && !G("humannames") # No hi conf names found yet.
&& X("humanname conf") >= 50 # Caps is reasonable to try.
)
succeed();
fail();
@POST
# If fit this pattern, raise confidence.
X("humanname conf") = X("humanname conf") %% 65;
# noop()
@RULES
_xNIL <-
_xSTART
_xALPHA [s layer=(_firstName)]
_xWHITE [s star]
_xALPHA [s layer=(_lastName)]
_xEND
@@
# Sort concept's subhierarchy in alphabetic order.
L("con") = getconcept(findroot(),"top");
getconcept(L("con"),"32");
getconcept(L("con"),"3");
# Fixed: keep the handle to "33" — the original discarded it, so the
# later getconcept(L("33"),...) calls referenced an unset variable.
L("33") = getconcept(L("con"),"33");
# Another layer.
getconcept(L("33"),"c");
getconcept(L("33"),"b");
getconcept(L("33"),"a");
sortchilds(L("con"));
sorthier(L("con"));
Before sorting, the children of "top" are ordered 32, 3, 33, and the
children of 33 are ordered c, b, a.
After sorting, the order is changed to 3, 32, 33 under "top", and to
a, b, c under 33.
# Sort concept's phrase nodes alphabetically.
sortphrase(L("con"));
# Recognize "University of/for X" school names, terminated either by a
# comma or by end of line.  "University" is the trigger token; up to two
# optional alpha words may precede and follow it.
@NODES _LINE
@RULES
# Ex: "(The) (XXX) U niversity of XXX (XXX),"
_SchoolName [] <-
_xWILD [opt s match=(The)]
_xWHITE [star s]
# _xALPHA [opt s fails=( University _xWHITE The)]
# _xWHITE [star s]
_xALPHA [opt s fails=( University _xWHITE The)]
_xWHITE [star s]
_xWILD [min=1 max=1 s match=(University) trigger]
_xWHITE [star s]
_xWILD [min=1 max=1 s match=(for of) ]
_xWHITE [star s]
_xALPHA [opt s]
_xWHITE [star s]
_xALPHA [opt s]
\,
@@
#Ex: "(The)(XXX) University of XXX (XXX) ENDOFLINE"
_SchoolName [] <-
_xWILD [opt s match=(The)]
_xWHITE [star s]
# _xALPHA [opt s fails=( University _xWHITE The)]
# _xWHITE [star s]
_xALPHA [opt s fails=( University _xWHITE The)]
_xWHITE [star s]
_xWILD [min=1 max=1 s match=(University) trigger]
_xWHITE [star s]
_xWILD [min=1 max=1 s match=(for of)]
_xWHITE [star s]
_xALPHA [opt s]
_xWHITE [star s]
_xALPHA [opt s]
\n
@@
# Dissolve _LINE nodes that carry an "up" variable by splicing their
# children into the root.
@NODES _ROOT
@PRE
<1,1> var("up");
@POST
splice(1,1);
@RULES
_xNIL <-
_LINE ### (1)
@@
# Record the text of any job-title node found inside an experience
# instance onto that instance (context level 3) as "job title".
@PATH _ROOT _experienceZone _experienceInstance _LINE _experiencePart _Caps
@POST
X("job title",3) = N("$text");
@RULES
_xNIL <- _jobTitle [s] @@
_xNIL <- _jobPhrase [s] @@
_xNIL <- _jobTitleRoot [s] @@
# XML DTD entity declarations.  Each rule assembles the entity name from
# its first-character and name-character matches (elements 4/5, or 2/3
# for general entities), finds or creates the concept under G("Entities"),
# and records parameter/internal flags plus the text value or URI.
@PATH _ROOT _doctypedecl _EntityDecl
# Parameter entity with an external ID: parameter=1, internal=0, URI set.
@POST
if (N("$text",4))
S("buffer1") = str(N("$text",4)) ;
if (N("$text",5))
S("buffer2") = str(N("$text",5)) ;
if (S("buffer1") && S("buffer2")) {
S("EntityName") = S("buffer1") + S("buffer2") ;
}
else if (S("buffer1"))
S("EntityName") = S("buffer1") ;
else if (S("buffer2"))
S("EntityName") = S("buffer2") ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
if (G("CurrentEntity") == 0 ) {
makeconcept(G("Entities"),S("EntityName")) ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
}
addattr(G("CurrentEntity"),"parameter") ;
addsval(G("CurrentEntity"),"parameter",1) ;
addattr(G("CurrentEntity"),"internal") ;
addsval(G("CurrentEntity"),"internal",0) ;
addattr(G("CurrentEntity"),"textValue") ;
addattr(G("CurrentEntity"),"URI") ;
addstrval(G("CurrentEntity"),"URI",str(N("URI",7))) ;
single() ;
@@POST
@RULES
_PEDecl <-
_whiteSpace [opt]
_xWILD [s trig min=1 max=1 matches=("\%")]
_whiteSpace
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_ExternalID
@@
@@RULES
# Parameter entity with an internal value: parameter=1, internal=1.
@POST
if (N("$text",4))
S("buffer1") = str(N("$text",4)) ;
if (N("$text",5))
S("buffer2") = str(N("$text",5)) ;
if (S("buffer1") && S("buffer2")) {
S("EntityName") = S("buffer1") + S("buffer2") ;
}
else if (S("buffer1"))
S("EntityName") = S("buffer1") ;
else if (S("buffer2"))
S("EntityName") = S("buffer2") ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
if (G("CurrentEntity") == 0 ) {
makeconcept(G("Entities"),S("EntityName")) ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
}
addattr(G("CurrentEntity"),"parameter") ;
addsval(G("CurrentEntity"),"parameter",1) ;
addattr(G("CurrentEntity"),"internal") ;
addsval(G("CurrentEntity"),"internal",1) ;
addattr(G("CurrentEntity"),"textValue") ;
addstrval(G("CurrentEntity"),"textValue",str(N("textValue",7))) ;
single() ;
@@POST
@RULES
_PEDecl <-
_whiteSpace [opt]
_xWILD [s trig min=1 max=1 matches=("\%")]
_whiteSpace
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_EntityRef
@@
_PEDecl <-
_whiteSpace [opt]
_xWILD [s trig min=1 max=1 matches=("\%")]
_whiteSpace
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_PEReference
@@
@@RULES
# Parameter entity whose value is a pubid/system literal.
@POST
if (N("$text",4))
S("buffer1") = str(N("$text",4)) ;
if (N("$text",5))
S("buffer2") = str(N("$text",5)) ;
if (S("buffer1") && S("buffer2")) {
S("EntityName") = S("buffer1") + S("buffer2") ;
}
else if (S("buffer1"))
S("EntityName") = S("buffer1") ;
else if (S("buffer2"))
S("EntityName") = S("buffer2") ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
if (G("CurrentEntity") == 0 ) {
makeconcept(G("Entities"),S("EntityName")) ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
}
addattr(G("CurrentEntity"),"parameter") ;
addsval(G("CurrentEntity"),"parameter",1) ;
addattr(G("CurrentEntity"),"internal") ;
addsval(G("CurrentEntity"),"internal",1) ;
addattr(G("CurrentEntity"),"textValue") ;
addstrval(G("CurrentEntity"),"textValue",str(N("textValue",7))) ;
single() ;
@@POST
@RULES
_PEDecl <-
_whiteSpace [opt]
_xWILD [s trig min=1 max=1 matches=("\%")]
_whiteSpace
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_PubidLiteral
@@
_PEDecl <-
_whiteSpace [opt]
_xWILD [s trig min=1 max=1 matches=("\%")]
_whiteSpace
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_SystemLiteral
@@
@@RULES
# General entity (no "%"): name comes from elements 2/3, value from 5;
# parameter=0, internal=1.
@POST
if (N("$text",2))
S("buffer1") = str(N("$text",2)) ;
if (N("$text",3))
S("buffer2") = str(N("$text",3)) ;
if (N("$text",2) && N("$text",3)) {
S("EntityName") = S("buffer1") + S("buffer2") ;
}
else if (N("$text",2))
S("EntityName") = S("buffer1") ;
else if (N("$text",3))
S("EntityName") = S("buffer2") ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
if (G("CurrentEntity") == 0 ) {
makeconcept(G("Entities"),S("EntityName")) ;
G("CurrentEntity") = findconcept(G("Entities"),S("EntityName")) ;
}
addattr(G("CurrentEntity"),"parameter") ;
addsval(G("CurrentEntity"),"parameter",0) ;
addattr(G("CurrentEntity"),"internal") ;
addsval(G("CurrentEntity"),"internal",1) ;
addattr(G("CurrentEntity"),"textValue") ;
addstrval(G("CurrentEntity"),"textValue",str(N("textValue",5))) ;
single() ;
@@POST
@RULES
_GEDecl <-
_whiteSpace [opt]
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_PubidLiteral
@@
_GEDecl <-
_whiteSpace [opt]
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_SystemLiteral
@@
_GEDecl <-
_whiteSpace [opt]
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_EntityRef
@@
_GEDecl <-
_whiteSpace [opt]
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_PEReference
@@
_GEDecl <-
_whiteSpace [opt]
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_PubidLiteral
@@
_GEDecl <-
_whiteSpace [opt]
_xWILD [s min=1 max=1 matches=("_xALPHA" "_" ":")]
_xWILD [s min=0 max=0 matches=("_xALPHA" "_xNUM" "." "-" "_" ":")]
_whiteSpace
_SystemLiteral
@@
@@RULES
# Dissolve _NLPPP nodes under the root, splicing their children up.
@NODES _ROOT
@POST
oldsplice(1,1) # 07/19/02 AM.
@RULES
_xNIL <- _NLPPP @@
# Copy a node's variables to the suggested node of a rule match
@POST
pncopyvars(N(2)); # Copy head _noun variables to _np node.
single();
@RULES
_np <- _det [s] _noun [s] @@
# Shared KB utility library (a second copy of the function set that
# appears earlier in this collection; kept byte-identical so the two
# passes stay independent).  This copy's DisplayKB takes a boolean
# "full" flag instead of a three-way display type, and always brackets
# attribute values.
@DECL
###############################################
# General functions
###############################################
# Find-or-create a child concept by name.
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
# Add a string value to an attribute only if not already present.
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("str") = getstrval(L("val"));
if (L("str") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addstrval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Add a numeric value to an attribute only if not already present.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("num") = getnumval(L("val"));
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Add a concept value to an attribute, unique by KB path; traces to unique.txt.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
# Build a concept chain from a space-separated quoted path string.
PathToConcept(L("parent"),L("hier")) {
L("cons") = split(L("hier")," ");
L("i") = 0;
L("con") = L("parent");
while (L("cons")[L("i")]) {
L("c") = L("cons")[L("i")];
L("name") = strsubst(L("c"),"\"",0);
if (L("name") != "concept")
L("con") = AddUniqueCon(L("con"),L("name"));
L("i")++;
}
return L("con");
}
# Copy a string attribute if the target doesn't already have one.
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
# As CopyAttr with distinct source/target attribute names.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
# Concept-valued variant of CopyAttr.
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
# Value list of an attribute, or 0 if absent.
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
# Last immediate child of "parent" (0 if childless).
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
# Create a uniquely numbered child concept.
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# Bump a numeric counter attribute, creating it at 1.
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
# Next numbered name for a given root string.
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip a trailing digit run; 0 if the name is all digits.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functins
###############################################
# Dump a subtree to <app>/kb/<file>.kb, logging to kb.txt.
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# Load a .kb file, logging the outcome.
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << "  Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << "  Taken FAILED: " << L("path") << "\n";
}
}
# Number of immediate children.
ChildCount(L("con")) {
L("count") = 0;
L("child") = down(L("con"));
while (L("child")) {
L("count")++;
L("child") = next(L("child"));
}
return L("count");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
# Pretty-print a subtree to the per-pass .kbb file ("full" spreads
# attributes one per line).
DisplayKB(L("top con"),L("full")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("full"));
L("file") << "\n";
return L("top con");
}
# Banner comment in the .kbb file.
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# "anaNNN.kbb" zero-padded from the pass number.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
# Depth-first print; note this copy takes the start concept itself (and
# walks its siblings unless at level 0), unlike the other copy which
# takes the parent.
DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) {
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("full"),L("level"));
L("file") << "\n";
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full"));
}
if (L("level") == 0)
return 0;
L("con") = next(L("con"));
}
}
# Append attr=[values] pairs; long strings elided unless "full".
DisplayAttributes(L("file"),L("con"),L("full"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("full") && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
if (!L("full") && !L("first attr")) {
L("file") << ", ";
}
if (L("full")) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("file") << attrname(L("attrs")) << "=[";
L("first") = 1;
while (L("vals")) {
if (!L("first"))
L("file") << ",";
L("val") = getstrval(L("vals"));
L("num") = getnumval(L("vals"));
L("con") = getconval(L("vals"));
if (L("con")) {
L("file") << conceptpath(L("con"));
} else if (!L("full") && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("file") << L("shorty");
L("file") << "...";
if (strendswith(L("val"),"\""))
L("file") << "\"";
} else if (L("num") > -1) {
L("file") << str(L("num"));
} else {
L("file") << L("val");
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = " ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + " ";
L("n")++;
}
return L("spaces");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
# Open the user attrs.kb command file.
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
# Emit one word-attribute record in attrs.kb command format.
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
addword(L("word"));
addword(L("attrName"));
G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
G("attrs") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
G("attrs") << "pst\n" << "\"" << L("value") << "\"";
else if (L("attrType") == "num")
G("attrs") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
G("attrs") << "pcon\n" << conceptpath(L("value"));
G("attrs") << "\nend ind\n\n";
}
# Terminate and close the attrs.kb stream.
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
@@DECL
|
# Close the BizTalk candidate-profile XML envelope in output.xml.
@CODE
prlit("output.xml", "</CandidateProfile>\n");
prlit("output.xml", "</Body>\n");
prlit("output.xml", "</BizTalk>\n");
@@CODE
@RULES
_xNIL <- _xNIL @@
@CODE
L("hello") = 0;
@@CODE
#@PATH _ROOT _TEXTZONE _sent _clause
@NODES _clause
# Looking for suitable by-actor adverbial.
# For passive, will support transformation to active.
@POST
# Hoist the first by-actor found onto the clause; track the last np
# inside the adverbial's subs.
if (N("by-actor") && !X("by-actor"))
X("by-actor") = N("by-actor");
if (N("subs"))
X("last np") = N("subs")[0];
noop();
@RULES
_xNIL <-
_advl
@@
# Record each np as the clause's "last np" and, if the sentence (the
# clause's parent) has no subject yet, make this np the subject.
@POST
L("x3") = pnparent(X()); # 07/13/12 AM.
X("last np") = N(1);
if (!pnvar(L("x3"),"subject")) # Find subject for sentence.
# X("subject",3) = N(1);
pnreplaceval(L("x3"),"subject",N(1)); # 07/13/12 AM.
@RULES
_xNIL <-
_np
@@
|
@CODE
L("hello") = 0;
@@CODE
@NODES _TEXTZONE
@POST
++S("don't splice");
single();
@RULES
_date [layer=(_noun _np)] <-
_xNUM
_xWILD [star match=(_xWHITE _nbsp)]
_xWILD [one match=(GMT PST MST CST EST)]
@@
@POST
++S("don't splice");
S("temporal") = 1;
S("dy") = num(N("$text",3));
S("mo") = num(N("$text",1));
S("yr") = num(N("$text",5));
S("sem") = "date";
++X("date ref");
# Normalize year.
if (S("yr") < 20)
S("yr") = S("yr") + 2000;
else if (S("yr") < 100)
S("yr") = S("yr") + 1900;
single();
@RULES
_date [layer=(_noun _np)] <-
_xNUM [s] ### (1)
\/ [s] ### (2)
_xNUM [s] ### (3)
\/ [s] ### (4)
_xNUM [s] ### (5)
@@
@CHECK
S("dy") = num(N("$text",4));
S("yr") = num(N("$text",7));
if (S("dy") > 31)
fail();
if (S("yr") < 100)
fail();
@POST
N("mypos",1) = "NP";
S("mo") = monthtonum(strtolower(N("$text",1)));
S("sem") = "date";
++X("date ref");
if (N(7))
S("don't splice") = 1; # Keep this np.
# (Don't want to lose useful work.)
S("bracket") = 1;
single();
@RULES
_date [layer=(_noun _np)] <-
_xWILD [s one match=(
_month
January February March April May June July August
September October November December
Jan Feb Mar Apr May Jun Jul Aug Sep Sept Oct Nov Dec
)]
\. [s opt]
_xWHITE [s star]
_xNUM [s]
\, [s opt]
_xWHITE [s star]
_xNUM [s]
@@
# num month num
# day month year
# year month day
@CHECK
L("xx") = num(N("$text",1));
if (!N(7))
{
if (L("xx") > 31 || L("xx") == 0)
fail();
L("dy") = L("xx");
succeed();
}
L("zz") = num(N("$text",7));
if (L("xx") <= 31)
{
# Check dd mo yy
S("dy") = L("xx");
S("yr") = L("zz");
}
else
{
# Check yy mo dd
S("yr") = L("xx");
S("dy") = L("zz");
}
if (S("dy") > 31)
fail();
if (S("yr") < 90
|| (S("yr") > 99 && S("yr") < 1900)
|| S("yr") > 2050)
fail();
@POST
if (N(7))
L("flag") = 1;
S("mo") = monthtonum(strtolower(N("$text",3)));
# (Don't want to lose useful work.)
group(1,7,"_date");
group(1,1,"_noun");
nountonp(1,1);
N("sem",1) = "date";
N("mo",1) = S("mo");
N("yr",1) = S("yr");
N("dy",1) = S("dy");
++X("date ref");
if (L("flag"))
{
N("don't splice",1) = 1; # Keep this np.
X("date node") = N(1);
}
getdate(N(1));
@RULES
_xNIL <-
_xNUM [s]
_xWILD [s star match=(_xWHITE \- )]
_xWILD [s one match=(
_month
January February March April May June July August
September October November December
Jan Feb Mar Apr May Jun Jul Aug Sep Sept Oct Nov Dec
)]
\. [s opt]
\, [s opt]
_xWILD [s star match=(_xWHITE \- )]
_xNUM [s opt]
@@
# month num: the trailing number is either a day (<= 31) or a year.
@CHECK
S("dy") = 0;
L("mooryr") = num(N("$text",5));
L("ch") = strpiece(N("$text",5),0,0);
# Leading "0" with value 0-9 => two-digit 200x year (e.g. "Jan 05").
if (L("ch") == "0" && L("mooryr") >= 0
&& L("mooryr") <= 9)
S("yr") = 2000 + L("mooryr");
else if (L("mooryr") >= 1900
&& L("mooryr") <= 2100)
S("yr") = L("mooryr");
else if (L("mooryr") >= 90 && L("mooryr") <= 99)
S("yr") = 1900 + L("mooryr");
else if (L("mooryr") <= 31)
S("dy") = L("mooryr");
else
fail();
@POST
chpos(N(1),"NP"); # Tag the month token as proper noun.
S("mo") = monthtonum(strtolower(N("$text",1)));
S("sem") = "date";
S("year") = N("$text",5); # 02/13/14 AM.
# NOTE(review): "year" is set from the raw text even when the number
# parsed as a day above — verify that downstream consumers expect this.
++X("date ref");
single();
@RULES
_date [layer=(_noun)] <-
_xWILD [s one match=(
_month
January February March April May June July August
September October November December
Jan Feb Mar Apr May Jun Jul Aug Sep Sept Oct Nov Dec
)]
\. [s opt]
\, [s opt]
_xWHITE [s star]
_xNUM [s]
@@
# pretagged corpus stuff.
# A _month node followed by a bare number reduces to a _date noun.
@POST
S("sem") = "date";
++X("date ref");
single();
@RULES
_date [layer=(_noun)] <-
_month [s]
_xNUM
@@
# Weekday names reduce to _day (layered _date/_noun) and are tagged
# as a named entity of type "date" with high confidence.
@POST
S("day") = strtolower(N("$text"));
S("sem") = "date";
pncopyvars(1);
S("mypos") = "NP";
S("ne") = 1;
S("ne type") = "date";
++X("date ref");
S("ne type conf") = 95; # NE-type confidence score.
single();
@RULES
_day [layer=(_date _noun)] <- _xWILD [s one match=(
Sunday Monday Tuesday Wednesday Thursday Friday Saturday
)] @@
# Clock times "hh:mm" with an optional AM/PM marker.
@POST
S("hr") = num(N("$text",1));
S("min") = num(N("$text",3));
if (N(5))
S("am/pm") = N("$text",5);
single();
@RULES
_daytime [layer=(_noun _np)] <-
_xNUM [s]
\: [s]
_xNUM [s]
_xWHITE [s star]
_xWILD [s opt match=(AM PM)]
@@
# Put some semantics on. #
# Deictic day words (today/yesterday/tomorrow): mark as dates and as
# possible standalone adverbials; under treebank conformance, also
# force the NN tag and bracket flag.
@POST
if (G("conform treebank"))
{
chpos(N(1),"NN");
N("bracket") = 1;
}
N("sem") = "date";
N("advl") = 1; # Possible standalone adverbial.
@RULES
_xNIL <- _xWILD [s one match=(
today yesterday tomorrow
)] @@
# Generic time-unit nouns: mark semantics as "date" and bump the
# context's date-reference counter; no reduction is performed.
@POST
N("sem") = "date";
++X("date ref");
@RULES
_xNIL <- _xWILD [s one match=(
millennium millennia millenniums
century centuries
decade decades
year years
month months
week weeks
day days
evening evenings
night nights
morning mornings
noon noons
afternoon afternoons
hour hours
minute minutes
second seconds
time date season # 01/12/05 AM.
holiday holidays
quarter quarters # ambig, of course.
)]
@@
|
@CODE
# Placeholder pass: the dummy assignment keeps the @CODE region non-empty.
L("hello") = 0;
@@CODE
@NODES _sent
# vg np conj vg
# Conjoined verb groups: copy the first vg's POS and voice onto the
# verb group after the conjunction.
@POST
# Align verb assignments.
L("pos") = vgpos(N(1));
L("voice") = N("voice",1);
fixvg(N(4),L("voice"),L("pos"));
@RULES
_xNIL <-
_vg
_np [opt]
_conj
_vg
@@
# vg np prep np vg
# threw people in a mine filled ....
# The second verb group after "np prep np" is read as a reduced
# passive (past participle).
@POST
fixvg(N(5),"passive","VBN");
@RULES
_xNIL <-
_vg
_np [lookahead]
_prep
_np
_vg
@@
# np vg np vg np
# If the second verb group has no voice yet (@PRE) and the glom checks
# pass, read it as active present (VBP).
@PRE
<4,4> varz("voice");
@CHECK
if (N("glom",2) != "left" && N("glom",3) != "right")
fail();
@POST
fixvg(N(4),"active","VBP");
@RULES
_xNIL <-
_np
_vg [lookahead]
_np
_vg
_np
@@
# vg np vg
# Once per node (the "phr50" flag gates re-firing): if the first verb
# carries feature V8 and the second vg is an -en conjugation, read the
# second vg as passive.
@PRE
<3,3> varz("phr50 vg-np-vg");
<3,3> varz("voice");
@POST
N("phr50 vg-np-vg",3) = 1; # Mark so this rule fires only once here.
if (verbfeat(N(1),"V8") && vconjq(N(3),"-en"))
fixvg(N(3),"passive","VBN");
@RULES
_xNIL <-
_vg
_np
_vg
@@
# In a larger context.
# prep dqan alpha
# An ambiguous alpha after a det/quan/num/adj/noun run: it must have a
# noun reading; if it also has a verb reading it must be an infinitive
# form.  Retype it as a noun and carry its variables over.
@CHECK
if (!N("noun",7))
fail();
if (N("verb",7))
{
if (!vconjq(N(7),"inf"))
fail();
}
@POST
L("tmp7") = N(7);
group(7,7,"_noun");
pncopyvars(L("tmp7"),N(7)); # Carry variables onto the new noun node.
fixnoun(N(7));
@RULES
_xNIL <-
_xSTART
_np
\,
_xWILD [one match=(_prep)]
_proPoss [s opt]
_xWILD [plus match=(_det _quan _num _xNUM _adj _noun)]
_xALPHA
@@
# np conj np
# Conjoined nps at clause end (before EOS, double dash, or text end):
# merge "np conj np" into a single np.
@POST
group(3,5,"_np");
@RULES
_xNIL <-
_xWILD [one match=(_vg)]
_xWILD [opt match=(_prep)]
_np
_conj
_np
_xWILD [lookahead one match=(_qEOS _dbldash _xEND)]
@@
# np vg adj
# If the vg's verb node exists and has no POS assigned yet, read the
# verb group as active present (VBP).
@CHECK
S("v") = N("verb node",2);
if (!S("v"))
fail();
if (pnvar(S("v"),"mypos"))
fail(); # Already tagged; leave it alone.
@POST
fixvg(N(2),"active","VBP");
@RULES
_xNIL <-
_np
_vg
_adj [lookahead]
@@
# np apos
# Tag the apostrophe node following an np as possessive (POS).
@POST
chpos(N(2),"POS");
@RULES
_xNIL <-
_np
_aposX
@@
# ^ np vg prep np advl np vg
# Sentence-initial clause followed by a second np+vg: flag the second
# np as beginning an ellipted-that complement and group the first
# stretch (elements 2-10) as a clause.
@POST
N("ellipted-that",11) = 1;
group(2,10,"_clause");
setunsealed(2,"true"); # 07/10/12 AM.
@RULES
_xNIL <-
_xSTART
_xWILD [star match=(_adv _advl)]
_np
_xWILD [star match=(_adv _advl)]
_vg
_xWILD [star match=(_adv _advl)]
_prep
_xWILD [star match=(_adv _advl)]
_np
_xWILD [plus match=(_adv _advl)]
_np
_xWILD [star match=(_adv _advl)]
_vg
@@
|
@NODES _LINE
# Resume section-header words (capitalized): Projects / Employment /
# Experience reduce to _ExperienceHeaderWord layered under _headerWord.
@PRE
<1,1> cap();
@RULES
# Ex: Projects
_ExperienceHeaderWord [layer=(_headerWord )] <- _xWILD [min=1 max=1 s match=("Projects" "Employment" "Experience")] @@
|
@NODES _ROOT
# Remove blank tokens at root level; noop() suppresses any further
# action on the excised match.
@POST
excise(1,1);
noop();
@RULES
_xNIL <-
_xBLANK [s] ### (1)
@@
|
@NODES _LINE
# Degree names and abbreviations reduce to _degreePhrase (layered _Caps).
@RULES
# Ex: A.\_A.
_degreePhrase [layer=(_Caps )] <- _degree [s] @@
@PRE
<1,1> cap(); # FIX: missing ";" terminator (every other @PRE in the file has it).
@RULES
# Ex: AS
_degreePhrase [layer=(_Caps )] <- AS [s] @@
# Ex: BA
_degreePhrase [layer=(_Caps )] <- BA [s] @@
# Ex: BS
_degreePhrase [layer=(_Caps )] <- BS [s] @@
# Ex: BSc
_degreePhrase [layer=(_Caps )] <- BSc [s] @@
# Ex: MA
_degreePhrase [layer=(_Caps )] <- MA [s] @@
# Ex: MS
_degreePhrase [layer=(_Caps )] <- MS [s] @@
# Ex: MSc
_degreePhrase [layer=(_Caps )] <- MSc [s] @@
# Ex: PHD
_degreePhrase [layer=(_Caps )] <- PHD [s] @@
# Ex: HSC
_degreePhrase [layer=(_Caps )] <- HSC [s] @@
|
###############################################
# FILE: cleanNotes
# SUBJ: Unseal patient IDs, times, and initials found in notes.
# AUTH: Ashton
# MODIFIED:
###############################################
@NODES _ROOT
# Mark matched nodes as unsealed so later passes can rewrite inside
# them; noop() suppresses any further action on the match.
@POST
setunsealed(1, "true");
noop();
@RULES
_xNIL <-
_xWILD [one matches=(_patientID _time _init)]
@@
|
@CODE
# Placeholder pass: the dummy assignment keeps the @CODE region non-empty.
L("hello") = 0;
@@CODE
|
@CODE
# Set the global output field delimiter to a tab character.
G("Delimiter") = "\t";
@@CODE
|
@NODES _LINE
# List handling: a comma at line start, or the second of two adjacent
# commas, is wrapped in an _item node (an empty list slot).
@POST
group(2,2,"_item");
@RULES
_xNIL <-
_xSTART ### (1)
_comma ### (2)
@@
@POST
group(2,2,"_item");
@RULES
_xNIL <-
_comma ### (1)
_comma ### (2)
@@
|
# Sort concept's immediate children in alphabetic order.
# Example: create children "32", "3", "33" under "top", then sort them.
L("con") = getconcept(findroot(),"top");
getconcept(L("con"),"32");
getconcept(L("con"),"3");
getconcept(L("con"),"33");
sortchilds(L("con"));
# Before sorting, the children of "top" are ordered 32, 3, 33.
# After sorting, the order is changed to 3, 32, 33.
|
@NODES _LINE
# University/school names, acronyms, and dotted abbreviations reduce to
# _schoolPhrase (layered _Caps).  Capitalization is enforced via @PRE
# cap() checks on the alphabetic elements; dotted forms spell out each
# letter with escaped periods.
@PRE
<1,1> cap();
@RULES
# Ex: MIT
_schoolPhrase [layer=(_Caps )] <- MIT [s] @@
# Ex: SJSU
_schoolPhrase [layer=(_Caps )] <- SJSU [s] @@
# Ex: UCLA
_schoolPhrase [layer=(_Caps )] <- UCLA [s] @@
# Ex: UCSB
_schoolPhrase [layer=(_Caps )] <- UCSB [s] @@
# Ex: UCSD
_schoolPhrase [layer=(_Caps )] <- UCSD [s] @@
# Ex: URI
_schoolPhrase [layer=(_Caps )] <- URI [s] @@
# Ex: UNH
_schoolPhrase [layer=(_Caps )] <- UNH [s] @@
# Ex: UMASS
_schoolPhrase [layer=(_Caps )] <- UMASS [s] @@
# Ex: UPENN
_schoolPhrase [layer=(_Caps )] <- UPENN [s] @@
# Ex: UNC
_schoolPhrase [layer=(_Caps )] <- UNC [s] @@
# Ex: UMISS
_schoolPhrase [layer=(_Caps )] <- UMISS [s] @@
# Ex: UH
_schoolPhrase [layer=(_Caps )] <- UH [s] @@
# Ex: UC
_schoolPhrase [layer=(_Caps )] <- UC [s] @@
# Ex: SUNY
_schoolPhrase [layer=(_Caps )] <- SUNY [s] @@
# Ex: CALTECH
_schoolPhrase [layer=(_Caps )] <- CALTECH [s] @@
# Ex: Citadel
_schoolPhrase [layer=(_Caps )] <- Citadel [s] @@
# Ex: USNA
_schoolPhrase [layer=(_Caps )] <- USNA [s] @@
# Ex: USMC
_schoolPhrase [layer=(_Caps )] <- USMC [s] @@
# Ex: USAFA
_schoolPhrase [layer=(_Caps )] <- USAFA [s] @@
@PRE
<1,1> cap();
<3,3> cap();
@RULES
# Ex: West\_Point
_schoolPhrase [layer=(_Caps )] <- West [s] _xWHITE [star s] Point [s] @@
@PRE
<1,1> cap();
@RULES
# Ex: SMU
_schoolPhrase [layer=(_Caps )] <- SMU [s] @@
# Ex: LACC
_schoolPhrase [layer=(_Caps )] <- LACC [s] @@
# Ex: CSU
_schoolPhrase [layer=(_Caps )] <- CSU [s] @@
# Ex: UNSW
_schoolPhrase [layer=(_Caps )] <- UNSW [s] @@
@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();
@RULES
# Ex: M.I.T.
_schoolPhrase [layer=(_Caps )] <- M [s] \. [s] I [s] \. [s] T [s] \. [s] @@
@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();
<7,7> cap();
@RULES
# Ex: S.J.S.U.
_schoolPhrase [layer=(_Caps )] <- S [s] \. [s] J [s] \. [s] S [s] \. [s] U [s] \. [s] @@
@PRE
<1,1> cap();
<3,3> cap();
@RULES
# Ex: U.C.L.A.  (trailing "L.A." already reduced to _cityPhrase)
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] C [s] \. [s] _cityPhrase [s] @@
@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();
<7,7> cap();
@RULES
# Ex: U.C.S.B.
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] C [s] \. [s] S [s] \. [s] B [s] \. [s] @@
@PRE
<1,1> cap();
<3,3> cap();
@RULES
# Ex: U.C.S.D.  (trailing "S.D." already reduced to _statePhrase)
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] C [s] \. [s] _statePhrase [s] @@
@PRE
<1,1> cap();
@RULES
# Ex: U.R.I.  (trailing "R.I." already reduced to _statePhrase)
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] _statePhrase [s] @@
@PRE
<1,1> cap();
<3,3> cap();
@RULES
# Ex: U.MASS
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] MASS [s] @@
# Ex: U.PENN
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] PENN [s] @@
# Ex: U.MISS
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] MISS [s] @@
# Ex: U.H.
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] H [s] \. [s] @@
# Ex: U.C.
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] C [s] \. [s] @@
@PRE
<2,2> cap();
<4,4> cap();
@RULES
# Ex: U.S.N.A.  (leading "U.S." already reduced to _countryPhrase)
_schoolPhrase [layer=(_Caps )] <- _countryPhrase [s] N [trig s] \. [s] A [s] \. [s] @@
# Ex: U.S.M.C.
_schoolPhrase [layer=(_Caps )] <- _countryPhrase [s] M [trig s] \. [s] C [s] \. [s] @@
# Ex: U.S.A.F.A.
_schoolPhrase [layer=(_Caps )] <- _countryPhrase [s] F [trig s] \. [s] A [s] \. [s] @@
@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();
@RULES
# Ex: S.M.U.
_schoolPhrase [layer=(_Caps )] <- S [s] \. [s] M [s] \. [s] U [s] \. [s] @@
@PRE
<2,2> cap();
<4,4> cap();
@RULES
# Ex: L.A.C.C.  (leading "L.A." already reduced to _cityPhrase)
_schoolPhrase [layer=(_Caps )] <- _cityPhrase [s] C [trig s] \. [s] C [s] \. [s] @@
@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();
@RULES
# Ex: C.S.U.
_schoolPhrase [layer=(_Caps )] <- C [s] \. [s] S [s] \. [s] U [s] \. [s] @@
@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();
<7,7> cap();
@RULES
# Ex: U.N.S.W.
_schoolPhrase [layer=(_Caps )] <- U [s] \. [s] N [s] \. [s] S [s] \. [s] W [s] \. [s] @@
@PRE
<1,1> cap();
@RULES
# Ex: Penn
_schoolPhrase [layer=(_Caps )] <- Penn [s] @@
# Ex: Dartmouth
_schoolPhrase [layer=(_Caps )] <- Dartmouth [s] @@
# Ex: Columbia
_schoolPhrase [layer=(_Caps )] <- Columbia [s] @@
# Ex: Princeton
_schoolPhrase [layer=(_Caps )] <- Princeton [s] @@
# Ex: Brown
_schoolPhrase [layer=(_Caps )] <- Brown [s] @@
# Ex: Harvard
_schoolPhrase [layer=(_Caps )] <- Harvard [s] @@
# Ex: Yale
_schoolPhrase [layer=(_Caps )] <- Yale [s] @@
# Ex: Berkeley
_schoolPhrase [layer=(_Caps )] <- Berkeley [s] @@
# Ex: Stanford
_schoolPhrase [layer=(_Caps )] <- Stanford [s] @@
# Ex: Oxford
_schoolPhrase [layer=(_Caps )] <- Oxford [s] @@
# Ex: Cambridge\r
# NOTE(review): unlike the other single-word rules, this one also
# swallows trailing whitespace — verify that is intended.
_schoolPhrase [layer=(_Caps )] <- Cambridge [s] _xWHITE [star s] @@
|
@CODE
# Walk the dictionary hierarchy (dict -> a -> letter -> subletter ->
# word).  For each word whose first attribute is "pos", convert every
# string value of "pos" into a numeric-valued attribute of that name,
# then remove the old "pos" attribute.  Counters are reported at the
# end of the pass.
G("dict") = findhierconcept("dict",findroot());
G("alphas") = findhierconcept("a",G("dict"));
G("letter") = down(G("alphas"));
G("nouns") = 0;
G("adjectives") = 0;
G("adverbs") = 0;
G("verbs") = 0; # FIX: was never initialized, but is printed and summed at pass end.
G("unmarked") = 0;
G("knowns") = 0;
while (G("letter"))
{
G("subletter") = down(G("letter"));
while (G("subletter"))
{
G("word") = down(G("subletter"));
while (G("word"))
{
G("word name") = conceptname(G("word"));
G("attrs") = findattrs(G("word"));
G("classified") = 0;
if (G("attrs"))
{
if (attrname(G("attrs")) == "pos")
{
G("vals") = attrvals(G("attrs"));
while (G("vals"))
{
addnumval(G("word"),getstrval(G("vals")),1);
G("vals") = nextval(G("vals"));
}
rmattr(G("word"),"pos");
}
# FIX: the closing brace below was missing.  Counting braces across the
# pass, the live code had one more "{" than "}" (lines between here and
# the loop tail are all comments); this "}" closes the
# if (attrname(...) == "pos") block so the later braces close the
# if/while nest correctly.
}
}
# while (G("attrs"))
# {
# if (attrname(G("attrs")) != "pos")
# addstrval(G("word"),"pos",attrname(G("attrs")));
# G("attrs") = nextattr(G("attrs"));
# }
#
# G("attrs") = findattrs(G("word"));
# while (G("attrs"))
# {
# if (attrname(G("attrs")) != "pos")
# rmattr(G("word"),attrname(G("attrs")));
# G("attrs") = nextattr(G("attrs"));
# }
#
# if (strendswith(G("word name"),"i") ||
# strendswith(G("word name"),"encies") ||
# strendswith(G("word name"),"ibles") ||
# strendswith(G("word name"),"ller"))
# {
# addnumval(G("word"),"nouns",1);
# "nouns.txt" << G("word name") << "\n";
# G("nouns")++;
# G("classified") = 1;
# }
# else if (strendswith(G("word name"),"fuller") ||
# strendswith(G("word name"),"fullest"))
# {
# addnumval(G("word"),"adjective",1);
# "adjectives.txt" << G("word name") << "\n";
# G("adjectives")++;
# G("classified") = 1;
# }
# else if (strpiece(G("word name"),0,4) == "super" ||
# strpiece(G("word name"),0,4) == "inter" ||
# strpiece(G("word name"),0,4) == "under" ||
# strpiece(G("word name"),0,4) == "hyper" ||
# strpiece(G("word name"),0,4) == "multi" ||
# strpiece(G("word name"),0,4) == "ultra")
# {
# G("root concept") = dictfindword(strpiece(G("word name"),5,strlength(G("word name"))-1));
# if (G("root concept"))
# {
# G("attrs") = findattrs(G("root concept"));
# while (G("attrs"))
# {
# "verb.txt" << G("word name") << "\n";
# G("verbs")++;
# G("classified") = 1;
# addnumval(G("word"),attrname(G("attrs")),1);
# G("attrs") = nextattr(G("attrs"));
# }
# }
# }
# else if (strpiece(G("word name"),0,3) == "over" ||
# strpiece(G("word name"),0,3) == "post")
# {
# G("root concept") = dictfindword(strpiece(G("word name"),4,strlength(G("word name"))-1));
# if (G("root concept"))
# {
# G("attrs") = findattrs(G("root concept"));
# while (G("attrs"))
# {
# "verb.txt" << G("word name") << "\n";
# G("verbs")++;
# G("classified") = 1;
# addnumval(G("word"),attrname(G("attrs")),1);
# G("attrs") = nextattr(G("attrs"));
# }
# }
# }
# else if (strpiece(G("word name"),0,2) == "sub" ||
# strpiece(G("word name"),0,2) == "non" ||
# strpiece(G("word name"),0,2) == "mis" ||
# strpiece(G("word name"),0,2) == "pre" ||
# strpiece(G("word name"),0,2) == "out" ||
# strpiece(G("word name"),0,2) == "dis")
# {
# G("root concept") = dictfindword(strpiece(G("word name"),3,strlength(G("word name"))-1));
# if (G("root concept"))
# {
# G("attrs") = findattrs(G("root concept"));
# while (G("attrs"))
# {
# "verb.txt" << G("word name") << "\n";
# G("verbs")++;
# G("classified") = 1;
# addnumval(G("word"),attrname(G("attrs")),1);
# G("attrs") = nextattr(G("attrs"));
# }
# }
# }
# else if (strpiece(G("word name"),0,1) == "re" ||
# strpiece(G("word name"),0,1) == "up" ||
# strpiece(G("word name"),0,1) == "un")
# {
# G("root concept") = dictfindword(strpiece(G("word name"),2,strlength(G("word name"))-1));
# if (G("root concept"))
# {
# G("attrs") = findattrs(G("root concept"));
# while (G("attrs"))
# {
# "verb.txt" << G("word name") << "\n";
# G("verbs")++;
# G("classified") = 1;
# addnumval(G("word"),attrname(G("attrs")),1);
# G("attrs") = nextattr(G("attrs"));
# }
# }
# }
# addnumval(G("word"),"verb",1);
# "verb.txt" << G("word name") << "\n";
# G("verbs")++;
# G("classified") = 1;
# }
# if (strendswith(G("word name"),"mmed") ||
# strendswith(G("word name"),"pped"))
# {
# addnumval(G("word"),"verb",1);
# "verb.txt" << G("word name") << "\n";
# G("verbs")++;
# G("classified") = 1;
# }
# else if (strendswith(G("word name"),"ings") ||
# strendswith(G("word name"),"ers"))
# {
# addnumval(G("word"),"noun",1);
# "noun.txt" << G("word name") << "\n";
# G("nouns")++;
# G("classified") = 1;
# }
#
# if (findattrs(G("word")))
# {
# while (G("attrs"))
# {
# if (attrname(G("attrs")) != "pos")
# {
# rmattr(G("word"),attrname(G("attrs")));
# "nonpos.txt" << G("word name") << "\n";
# }
# G("attrs") = nextattr(G("attrs"));
# }
# }
# else if (G("attrs"))
# if (G("attrs"))
# {
# G("classified") = 0;
#
# if (attrname(G("attrs")) == "pos")
# {
# G("vals") = attrvals(G("attrs"));
# while (G("vals"))
# {
# addnumval(G("word"),getstrval(G("vals")),1);
# G("vals") = nextval(G("vals"));
# }
# rmattr(G("word"),"pos");
# }
#
# if (strpiece(G("word name"),0,3) == "anti")
# {
# if (!attrwithval(G("word"),"pos","noun"))
# addstrval(G("word"),"pos","noun");
# "anti.txt" << G("word name") << "\n";
# G("nouns")++;
# G("classified") = 1;
# }
#
# else if (strendswith(G("word name"),"s"))
# {
# G("word root") = strpiece(G("word name"),0,strlength(G("word name"))-2);
# G("word singular") = dictfindword(G("word root"));
# if (G("word singular"))
# {
# if (!findattrs(G("word singular")))
# {
# if (!attrwithval(G("word"),"pos","noun"))
# addstrval(G("word"),"pos","noun");
# if (!attrwithval(G("word singular"),"pos","noun"))
# addstrval(G("word singular"),"pos","noun");
# "noun2.txt" << conceptname(G("word singular")) << " - " << G("word name") << "\n";
# G("nouns") = G("nouns") + 2;
# G("classified") = 1;
# }
# }
# }
#
#
# if (strendswith(G("word name"),"ly"))
# {
# if (!attrwithval(G("word"),"pos","adverb"))
# addstrval(G("word"),"pos","adverb");
# "adv.txt" << G("word name") << "\n";
# G("adverbs")++;
# G("classified") = 1;
# }
# else if (strendswith(G("word name"),"tional") ||
# strendswith(G("word name"),"like") ||
# strendswith(G("word name"),"less") ||
# strendswith(G("word name"),"ier") ||
# strendswith(G("word name"),"some") ||
# strendswith(G("word name"),"iest"))
# {
# if (!attrwithval(G("word"),"pos","adjective"))
# addstrval(G("word"),"pos","adjective");
# G("adjectives")++;
# "adj.txt" << G("word name") << "\n";
# G("classified") = 1;
# }
# else if (strendswith(G("word name"),"ities") ||
# strendswith(G("word name"),"men") ||
# strendswith(G("word name"),"man"))
# {
# if (!attrwithval(G("word"),"pos","noun"))
# addstrval(G("word"),"pos","noun");
# G("nouns")++;
# "nouns.txt" << G("word name") << "\n";
# G("classified") = 1;
# }
#
# if (strendswith(G("word name"),"s"))
# {
# G("word root") = strpiece(G("word name"),0,strlength(G("word name"))-2);
# if (strendswith(G("word name"),"ies"))
# {
# G("word stripped") = strpiece(G("word name"),0,strlength(G("word name"))-4);
# G("word root") = G("word stripped") + "y";
# }
# else if (strendswith(G("word name"),"es"))
# {
# G("word stripped") = strpiece(G("word name"),0,strlength(G("word name"))-3);
# if (strendswith(G("word stripped"),"s"))
# G("word root") = G("word stripped");
# }
# else
# G("word stripped") = strpiece(G("word name"),0,strlength(G("word name"))-2);
# G("word ing") = G("word stripped") + "ing";
# G("word ed") = G("word stripped") + "ed";
# G("word concept") = dictfindword(G("word root"));
# G("word ing concept") = dictfindword(G("word ing"));
# G("word ed concept") = dictfindword(G("word ed"));
#
# if (G("word concept") && G("word ing concept") && G("word ed concept"))
# {
# if (!findattrs(G("word concept")) && !findattrs(G("word ing concept")) &&
# !findattrs(G("word ed concept")))
# {
# if (!attrwithval(G("word"),"pos","verb"))
# addstrval(G("word"),"pos","verb");
# if (!attrwithval(G("word concept"),"pos","verb"))
# addstrval(G("word concept"),"pos","verb");
# if (!attrwithval(G("word ing concept"),"pos","verb"))
# addstrval(G("word ing concept"),"pos","verb");
# if (!attrwithval(G("word ed concept"),"pos","verb"))
# addstrval(G("word ed concept"),"pos","verb");
# "verbs.txt" << G("word root") << " - " << G("word name") << " - " << G("word ing") << " - " << G("word ed") << "\n";
# G("verbs") = G("verbs") + 4;
# G("classified") = 1;
# }
# }
# else if (G("word concept"))
# {
# if (!findattrs(G("word concept")))
# {
# if (!attrwithval(G("word"),"pos","noun"))
# addstrval(G("word"),"pos","noun");
# if (!attrwithval(G("word concept"),"pos","noun"))
# addstrval(G("word concept"),"pos","noun");
# "nouns.txt" << G("word root") << " - " << G("word name") << "\n";
# G("nouns") = G("nouns") + 2;
# G("classified") = 1;
# }
# }
# }
# else if (strendswith(G("word name"),"ed"))
# {
# G("word root") = strpiece(G("word name"),0,strlength(G("word name"))-2);
# G("word stripped") = strpiece(G("word name"),0,strlength(G("word name"))-3);
# G("word ing") = G("word stripped") + "ing";
# G("word concept") = dictfindword(G("word root"));
# G("word ing concept") = dictfindword(G("word ing"));
#
# if (G("word concept") && G("word ing concept"))
# {
# if (!findattrs(G("word concept")) && !findattrs(G("word ing concept")))
# {
# if (!attrwithval(G("word"),"pos","verb"))
# addstrval(G("word"),"pos","verb");
# if (!attrwithval(G("word concept"),"pos","verb"))
# addstrval(G("word concept"),"pos","verb");
# if (!attrwithval(G("word ing concept"),"pos","verb"))
# addstrval(G("word ing concept"),"pos","verb");
# G("word es") = G("word root") + "s";
# G("word es concept") = dictfindword(G("word es"));
# if (!G("word es concept"))
# {
# G("word es concept") = addword(G("word es"));
# addstrval(G("word es concept"),"pos","verb");
# }
#
# "verbs.txt" << G("word root") << " - " << G("word name") << " - " << G("word ing") << "\n";
# G("verbs") = G("verbs") + 3;
# }
# }
# }
# Tail of the per-word loop: log words that were not classified, then
# advance the word/subletter/letter iterators and print summary counts.
# NOTE(review): only the "rest.txt" write is governed by the if; the
# unmarked counter and "unmarked.txt" write below run for EVERY word —
# braces around all three lines may have been intended.  Verify.
if (G("classified") == 0)
"rest.txt" << G("word name") << "\n";
G("unmarked")++;
"unmarked.txt" << G("word name") << "\n";
}
G("word") = next(G("word"));
}
G("subletter") = next(G("subletter"));
}
G("letter") = next(G("letter"));
}
# Summary report.  NOTE(review): G("knowns") is never incremented in
# this pass, and G("verbs") is only touched in commented-out code —
# confirm both are initialized/maintained as intended.
"output.txt" << " Knowns: " << G("knowns") << "\n";
"output.txt" << " Unmarked: " << G("unmarked") << "\n";
"output.txt" << " nouns: " << G("nouns") << "\n";
"output.txt" << "adjectives: " << G("adjectives") << "\n";
"output.txt" << " verbs: " << G("verbs") << "\n";
"output.txt" << " adverbs: " << G("adverbs") << "\n";
"output.txt" << "-------------------\n";
"output.txt" << " diff: " << str(G("unmarked") - G("nouns") - G("adverbs") - G("verbs") - G("adjectives")) << "\n";
@@CODE
|
# Fetch an attribute's name as a string.
L("return_str") = attrname(L("attr"));
@PATH _ROOT _LINE _Caps
# Count company-name evidence found inside a _Caps group on the
# containing context node; also note when the evidence is the last
# element of the group (N("$end")).
@POST
++X("companyroots");
if (N("$end"))
++X("end companyroot");
@RULES
_xNIL <- _companyRoot [s] @@
@POST
++X("companymodroots");
if (N("$end"))
++X("end companymodroot");
@RULES
_xNIL <- _companyModroot [s] @@
@POST
++X("companymods");
@RULES
_xNIL <- _companyMod [s] @@
@POST
++X("companyphrase");
@RULES
_xNIL <- _companyPhrase [s] @@
|
@PATH _ROOT _headerZone
# A _paraGroup plus everything up to (but not including) the next
# _paraGroup forms one _conjugationGroup; the group's title is carried
# up from the leading _paraGroup.
@POST
S("title") = N("title",1);
single();
@RULES
_conjugationGroup <-
_paraGroup ### (1)
_xWILD [plus fail=(_paraGroup)] ### (2)
@@
|
# "rough out" a set of nodes ending in a period,
# NOTE(review): the comment above looks stale for this pass, which
# simply counts _noun nodes under each _np context node.
@NODES _np
@POST
++X("nouns");
@RULES
_xNIL <- _noun @@
# Gather optional PRES/CHECKS/POSTS regions plus a RULES region into a
# single _REGION node.
@POST
rfaregion(1, 2, 3, 4); # FIX: missing ";" terminator (all other @POST actions use it).
single(); # FIX: missing ";" terminator.
@RULES
# Opt: try triggering the last elt.
_REGION [base] <- _PRES [opt] _CHECKS [opt] _POSTS [opt] _RULES @@
|
Subsets and Splits